/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)          \
({                                                              \
        type (*__routine)(struct device *__d);                  \
        type __ret = (type)0;                                   \
                                                                \
        __routine = genpd->dev_ops.callback;                    \
        if (__routine) {                                        \
                __ret = __routine(dev);                         \
        }                                                       \
        __ret;                                                  \
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)       \
({                                                                              \
        ktime_t __start = ktime_get();                                          \
        type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);         \
        s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));           \
        struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;                  \
        if (!__retval && __elapsed > __td->field) {                             \
                __td->field = __elapsed;                                        \
                dev_dbg(dev, name " latency exceeded, new value %lld ns\n",     \
                        __elapsed);                                             \
                genpd->max_off_time_changed = true;                             \
                __td->constraint_changed = true;                                \
        }                                                                       \
        __retval;                                                               \
})
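
/*
 * Illustrative sketch (not part of the original file): a caller such as
 * genpd_stop_dev() below expands GENPD_DEV_TIMED_CALLBACK() roughly into
 * the following, timing the ->stop() callback and recording a new worst
 * case in the device's gpd_timing_data:
 *
 *	ktime_t start = ktime_get();
 *	int ret = genpd->dev_ops.stop ? genpd->dev_ops.stop(dev) : 0;
 *	s64 elapsed = ktime_to_ns(ktime_sub(ktime_get(), start));
 *
 *	if (!ret && elapsed > dev_gpd_data(dev)->td.stop_latency_ns)
 *		dev_gpd_data(dev)->td.stop_latency_ns = elapsed;
 */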

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
        struct generic_pm_domain *genpd = NULL, *gpd;

        if (IS_ERR_OR_NULL(domain_name))
                return NULL;

        mutex_lock(&gpd_list_lock);
        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
                if (!strcmp(gpd->name, domain_name)) {
                        genpd = gpd;
                        break;
                }
        }
        mutex_unlock(&gpd_list_lock);
        return genpd;
}

#ifdef CONFIG_PM

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
        if (IS_ERR_OR_NULL(dev->pm_domain))
                return ERR_PTR(-EINVAL);

        return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
                                        stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
                                        start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
        bool ret = false;

        if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
                ret = !!atomic_dec_and_test(&genpd->sd_count);

        return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
        atomic_inc(&genpd->sd_count);
        smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
        DEFINE_WAIT(wait);

        mutex_lock(&genpd->lock);
        /*
         * Wait for the domain to transition into either the active,
         * or the power off state.
         */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status == GPD_STATE_ACTIVE
                    || genpd->status == GPD_STATE_POWER_OFF)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
        mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
        if (genpd->resume_count == 0)
                genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
        s64 usecs64;

        if (!genpd->cpu_data)
                return;

        usecs64 = genpd->power_on_latency_ns;
        do_div(usecs64, NSEC_PER_USEC);
        usecs64 += genpd->cpu_data->saved_exit_latency;
        genpd->cpu_data->idle_state->exit_latency = usecs64;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct gpd_link *link;
        DEFINE_WAIT(wait);
        int ret = 0;

        /* If the domain's master is being waited for, we have to wait too. */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status != GPD_STATE_WAIT_MASTER)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);

        if (genpd->status == GPD_STATE_ACTIVE
            || (genpd->prepared_count > 0 && genpd->suspend_power_off))
                return 0;

        if (genpd->status != GPD_STATE_POWER_OFF) {
                genpd_set_active(genpd);
                return 0;
        }

        if (genpd->cpu_data) {
                cpuidle_pause_and_lock();
                genpd->cpu_data->idle_state->disabled = true;
                cpuidle_resume_and_unlock();
                goto out;
        }

        /*
         * The list is guaranteed not to change while the loop below is being
         * executed, unless one of the masters' .power_on() callbacks fiddles
         * with it.
         */
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_inc(link->master);
                genpd->status = GPD_STATE_WAIT_MASTER;

                mutex_unlock(&genpd->lock);

                ret = pm_genpd_poweron(link->master);

                mutex_lock(&genpd->lock);

                /*
                 * The "wait for parent" status is guaranteed not to change
                 * while the master is powering on.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                wake_up_all(&genpd->status_wait_queue);
                if (ret) {
                        genpd_sd_counter_dec(link->master);
                        goto err;
                }
        }

        if (genpd->power_on) {
                ktime_t time_start = ktime_get();
                s64 elapsed_ns;

                ret = genpd->power_on(genpd);
                if (ret)
                        goto err;

                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > genpd->power_on_latency_ns) {
                        genpd->power_on_latency_ns = elapsed_ns;
                        genpd->max_off_time_changed = true;
                        genpd_recalc_cpu_exit_latency(genpd);
                        if (genpd->name)
                                pr_warning("%s: Power-on latency exceeded, "
                                        "new value %lld ns\n", genpd->name,
                                        elapsed_ns);
                }
        }

 out:
        genpd_set_active(genpd);

        return 0;

 err:
        list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
                genpd_sd_counter_dec(link->master);

        return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
        int ret;

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        mutex_unlock(&genpd->lock);
        return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
        struct generic_pm_domain *genpd;

        genpd = pm_genpd_lookup_name(domain_name);
        return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
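
/*
 * Illustrative sketch (not part of the original file): platform code would
 * typically power a domain up before accessing devices in it, either by
 * pointer or by name; "my_pd" and "MYPD" are hypothetical:
 *
 *	ret = pm_genpd_poweron(&my_pd);
 *	if (ret)
 *		pr_err("cannot power on my_pd: %d\n", ret);
 *
 *	ret = pm_genpd_name_poweron("MYPD");
 */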

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
                                     struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
                                        save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
                                        restore_state_latency_ns,
                                        "state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                     unsigned long val, void *ptr)
{
        struct generic_pm_domain_data *gpd_data;
        struct device *dev;

        gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

        mutex_lock(&gpd_data->lock);
        dev = gpd_data->base.dev;
        if (!dev) {
                mutex_unlock(&gpd_data->lock);
                return NOTIFY_DONE;
        }
        mutex_unlock(&gpd_data->lock);

        for (;;) {
                struct generic_pm_domain *genpd;
                struct pm_domain_data *pdd;

                spin_lock_irq(&dev->power.lock);

                pdd = dev->power.subsys_data ?
                                dev->power.subsys_data->domain_data : NULL;
                if (pdd && pdd->dev) {
                        to_gpd_data(pdd)->td.constraint_changed = true;
                        genpd = dev_to_genpd(dev);
                } else {
                        genpd = ERR_PTR(-ENODATA);
                }

                spin_unlock_irq(&dev->power.lock);

                if (!IS_ERR(genpd)) {
                        mutex_lock(&genpd->lock);
                        genpd->max_off_time_changed = true;
                        mutex_unlock(&genpd->lock);
                }

                dev = dev->parent;
                if (!dev || dev->power.ignore_children)
                        break;
        }

        return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
                                  struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        int ret = 0;

        if (gpd_data->need_restore)
                return 0;

        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);
        ret = genpd_save_dev(genpd, dev);
        genpd_stop_dev(genpd, dev);

        mutex_lock(&genpd->lock);

        if (!ret)
                gpd_data->need_restore = true;

        return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
                                      struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        bool need_restore = gpd_data->need_restore;

        gpd_data->need_restore = false;
        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);
        if (need_restore)
                genpd_restore_dev(genpd, dev);

        mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is nonzero, which means that one of
 * its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
        return genpd->status == GPD_STATE_WAIT_MASTER
                || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
        queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct pm_domain_data *pdd;
        struct gpd_link *link;
        unsigned int not_suspended;
        int ret = 0;

 start:
        /*
         * Do not try to power off the domain in the following situations:
         * (1) The domain is already in the "power off" state.
         * (2) The domain is waiting for its master to power up.
         * (3) One of the domain's devices is being resumed right now.
         * (4) System suspend is in progress.
         */
        if (genpd->status == GPD_STATE_POWER_OFF
            || genpd->status == GPD_STATE_WAIT_MASTER
            || genpd->resume_count > 0 || genpd->prepared_count > 0)
                return 0;

        if (atomic_read(&genpd->sd_count) > 0)
                return -EBUSY;

        not_suspended = 0;
        list_for_each_entry(pdd, &genpd->dev_list, list_node) {
                enum pm_qos_flags_status stat;

                stat = dev_pm_qos_flags(pdd->dev,
                                        PM_QOS_FLAG_NO_POWER_OFF
                                                | PM_QOS_FLAG_REMOTE_WAKEUP);
                if (stat > PM_QOS_FLAGS_NONE)
                        return -EBUSY;

                if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
                    || pdd->dev->power.irq_safe))
                        not_suspended++;
        }

        if (not_suspended > genpd->in_progress)
                return -EBUSY;

        if (genpd->poweroff_task) {
                /*
                 * Another instance of pm_genpd_poweroff() is executing
                 * callbacks, so tell it to start over and return.
                 */
                genpd->status = GPD_STATE_REPEAT;
                return 0;
        }

        if (genpd->gov && genpd->gov->power_down_ok) {
                if (!genpd->gov->power_down_ok(&genpd->domain))
                        return -EAGAIN;
        }

        genpd->status = GPD_STATE_BUSY;
        genpd->poweroff_task = current;

        list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
                ret = atomic_read(&genpd->sd_count) == 0 ?
                        __pm_genpd_save_device(pdd, genpd) : -EBUSY;

                if (genpd_abort_poweroff(genpd))
                        goto out;

                if (ret) {
                        genpd_set_active(genpd);
                        goto out;
                }

                if (genpd->status == GPD_STATE_REPEAT) {
                        genpd->poweroff_task = NULL;
                        goto start;
                }
        }

        if (genpd->cpu_data) {
                /*
                 * If cpu_data is set, cpuidle should turn the domain off when
                 * the CPU in it is idle.  In that case we don't decrement the
                 * subdomain counts of the master domains, so that power is not
                 * removed from the current domain prematurely as a result of
                 * cutting off the masters' power.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                cpuidle_pause_and_lock();
                genpd->cpu_data->idle_state->disabled = false;
                cpuidle_resume_and_unlock();
                goto out;
        }

        if (genpd->power_off) {
                ktime_t time_start;
                s64 elapsed_ns;

                if (atomic_read(&genpd->sd_count) > 0) {
                        ret = -EBUSY;
                        goto out;
                }

                time_start = ktime_get();

                /*
                 * If sd_count > 0 at this point, one of the subdomains hasn't
                 * managed to call pm_genpd_poweron() for the master yet after
                 * incrementing it.  In that case pm_genpd_poweron() will wait
                 * for us to drop the lock, so we can call .power_off() and let
                 * the pm_genpd_poweron() restore power for us (this shouldn't
                 * happen very often).
                 */
                ret = genpd->power_off(genpd);
                if (ret == -EBUSY) {
                        genpd_set_active(genpd);
                        goto out;
                }

                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > genpd->power_off_latency_ns) {
                        genpd->power_off_latency_ns = elapsed_ns;
                        genpd->max_off_time_changed = true;
                        if (genpd->name)
                                pr_warning("%s: Power-off latency exceeded, "
                                        "new value %lld ns\n", genpd->name,
                                        elapsed_ns);
                }
        }

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                genpd_queue_power_off_work(link->master);
        }

 out:
        genpd->poweroff_task = NULL;
        wake_up_all(&genpd->status_wait_queue);
        return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
        struct generic_pm_domain *genpd;

        genpd = container_of(work, struct generic_pm_domain, power_off_work);

        genpd_acquire_lock(genpd);
        pm_genpd_poweroff(genpd);
        genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool (*stop_ok)(struct device *__dev);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        might_sleep_if(!genpd->dev_irq_safe);

        stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
        if (stop_ok && !stop_ok(dev))
                return -EBUSY;

        ret = genpd_stop_dev(genpd, dev);
        if (ret)
                return ret;

        /*
         * If power.irq_safe is set, this routine will be run with interrupts
         * off, so it can't use mutexes.
         */
        if (dev->power.irq_safe)
                return 0;

        mutex_lock(&genpd->lock);
        genpd->in_progress++;
        pm_genpd_poweroff(genpd);
        genpd->in_progress--;
        mutex_unlock(&genpd->lock);

        return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;
        DEFINE_WAIT(wait);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        might_sleep_if(!genpd->dev_irq_safe);

        /* If power.irq_safe, the PM domain is never powered off. */
        if (dev->power.irq_safe)
                return genpd_start_dev_no_timing(genpd, dev);

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        if (ret) {
                mutex_unlock(&genpd->lock);
                return ret;
        }
        genpd->status = GPD_STATE_BUSY;
        genpd->resume_count++;
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                /*
                 * If current is the powering off task, we have been called
                 * reentrantly from one of the device callbacks, so we should
                 * not wait.
                 */
                if (!genpd->poweroff_task || genpd->poweroff_task == current)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
        __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
        genpd->resume_count--;
        genpd_set_active(genpd);
        wake_up_all(&genpd->status_wait_queue);
        mutex_unlock(&genpd->lock);

        return 0;
}
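
/*
 * Illustrative sketch (not part of the original file): drivers never call
 * the two routines above directly; they are reached through the runtime PM
 * core once the device has been added to a domain. A hypothetical driver
 * would simply do:
 *
 *	pm_runtime_enable(dev);
 *	ret = pm_runtime_get_sync(dev);	// leads to pm_genpd_runtime_resume()
 *	...
 *	pm_runtime_put(dev);		// may lead to pm_genpd_runtime_suspend()
 */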

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
        pd_ignore_unused = true;
        return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
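
/*
 * Note (added for illustration): the __setup() above means that booting with
 * "pd_ignore_unused" on the kernel command line keeps unused power domains
 * enabled, which can help when debugging a hang caused by
 * pm_genpd_poweroff_unused() cutting power to a domain a driver still
 * silently depends on.
 */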

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
        struct generic_pm_domain *genpd;

        if (pd_ignore_unused) {
                pr_warn("genpd: Not disabling unused power domains\n");
                return;
        }

        mutex_lock(&gpd_list_lock);

        list_for_each_entry(genpd, &gpd_list, gpd_list_node)
                genpd_queue_power_off_work(genpd);

        mutex_unlock(&gpd_list_lock);
}
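
/*
 * Illustrative sketch (not part of the original file): platform code is
 * expected to invoke pm_genpd_poweroff_unused() once all drivers have had
 * a chance to probe, e.g. from a late initcall; the function name is
 * hypothetical:
 *
 *	static int __init my_soc_pd_late_init(void)
 *	{
 *		pm_genpd_poweroff_unused();
 *		return 0;
 *	}
 *	late_initcall(my_soc_pd_late_init);
 */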

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                            unsigned long val, void *ptr)
{
        return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend        NULL
#define pm_genpd_runtime_resume         NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
        struct generic_pm_domain *gpd;

        if (IS_ERR_OR_NULL(genpd))
                return false;

        list_for_each_entry(gpd, &gpd_list, gpd_list_node)
                if (gpd == genpd)
                        return true;

        return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
                                    struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status == GPD_STATE_POWER_OFF)
                return;

        if (genpd->suspended_count != genpd->device_count
            || atomic_read(&genpd->sd_count) > 0)
                return;

        if (genpd->power_off)
                genpd->power_off(genpd);

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                pm_genpd_sync_poweroff(link->master);
        }
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status != GPD_STATE_POWER_OFF)
                return;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                pm_genpd_sync_poweron(link->master);
                genpd_sd_counter_inc(link->master);
        }

        if (genpd->power_on)
                genpd->power_on(genpd);

        genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
        bool active_wakeup;

        if (!device_can_wakeup(dev))
                return false;

        active_wakeup = genpd_dev_active_wakeup(genpd, dev);
        return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * If a wakeup request is pending for the device, it should be woken up
         * at this point and a system wakeup event should be reported if it's
         * set up to wake up the system from sleep states.
         */
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put(dev);
                return -EBUSY;
        }

        if (resume_needed(dev, genpd))
                pm_runtime_resume(dev);

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count++ == 0) {
                genpd->suspended_count = 0;
                genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
        }

        genpd_release_lock(genpd);

        if (genpd->suspend_power_off) {
                pm_runtime_put_noidle(dev);
                return 0;
        }

        /*
         * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
         * so pm_genpd_poweron() will return immediately, but if the device
         * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
         * to make it operational.
         */
        pm_runtime_resume(dev);
        __pm_runtime_disable(dev, false);

        ret = pm_generic_prepare(dev);
        if (ret) {
                mutex_lock(&genpd->lock);

                if (--genpd->prepared_count == 0)
                        genpd->suspend_power_off = false;

                mutex_unlock(&genpd->lock);
                pm_runtime_enable(dev);
        }

        pm_runtime_put(dev);
        return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        genpd_stop_dev(genpd, dev);

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
        pm_genpd_sync_poweroff(genpd);

        return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        pm_genpd_sync_poweron(genpd);
        genpd->suspended_count--;

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         *
         * At this point suspended_count == 0 means we are being run for the
         * first time for the given domain in the present cycle.
         */
        if (genpd->suspended_count++ == 0) {
                /*
                 * The boot kernel might put the domain into arbitrary state,
                 * so make it appear as powered off to pm_genpd_sync_poweron(),
                 * so that it tries to power it on in case it was really off.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                if (genpd->suspend_power_off) {
                        /*
                         * If the domain was off before the hibernation, make
                         * sure it will be off going forward.
                         */
                        if (genpd->power_off)
                                genpd->power_off(genpd);

                        return 0;
                }
        }

        if (genpd->suspend_power_off)
                return 0;

        pm_genpd_sync_poweron(genpd);

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool run_complete;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return;

        mutex_lock(&genpd->lock);

        run_complete = !genpd->suspend_power_off;
        if (--genpd->prepared_count == 0)
                genpd->suspend_power_off = false;

        mutex_unlock(&genpd->lock);

        if (run_complete) {
                pm_generic_complete(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
                pm_request_idle(dev);
        }
}

/**
 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the device's PM domain off (true) or on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
void pm_genpd_syscore_switch(struct device *dev, bool suspend)
{
        struct generic_pm_domain *genpd;

        genpd = dev_to_genpd(dev);
        if (!pm_genpd_present(genpd))
                return;

        if (suspend) {
                genpd->suspended_count++;
                pm_genpd_sync_poweroff(genpd);
        } else {
                pm_genpd_sync_poweron(genpd);
                genpd->suspended_count--;
        }
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
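
/*
 * Illustrative sketch (not part of the original file): a clocksource or
 * timer driver whose device must stay functional across syscore suspend
 * could wrap the helper above; "my_timer_dev" is hypothetical:
 *
 *	static int my_timer_syscore_suspend(void)
 *	{
 *		pm_genpd_syscore_switch(my_timer_dev, true);
 *		return 0;
 *	}
 *
 *	static void my_timer_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_switch(my_timer_dev, false);
 *	}
 */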

#else

#define pm_genpd_prepare                NULL
#define pm_genpd_suspend                NULL
#define pm_genpd_suspend_late           NULL
#define pm_genpd_suspend_noirq          NULL
#define pm_genpd_resume_early           NULL
#define pm_genpd_resume_noirq           NULL
#define pm_genpd_resume                 NULL
#define pm_genpd_freeze                 NULL
#define pm_genpd_freeze_late            NULL
#define pm_genpd_freeze_noirq           NULL
#define pm_genpd_thaw_early             NULL
#define pm_genpd_thaw_noirq             NULL
#define pm_genpd_thaw                   NULL
#define pm_genpd_restore_noirq          NULL
#define pm_genpd_complete               NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;

        gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
        if (!gpd_data)
                return NULL;

        mutex_init(&gpd_data->lock);
        gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
        dev_pm_qos_add_notifier(dev, &gpd_data->nb);
        return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
                                     struct generic_pm_domain_data *gpd_data)
{
        dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
        kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
                          struct gpd_timing_data *td)
{
        struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
        struct pm_domain_data *pdd;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        gpd_data_new = __pm_genpd_alloc_dev_data(dev);
        if (!gpd_data_new)
                return -ENOMEM;

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        list_for_each_entry(pdd, &genpd->dev_list, list_node)
                if (pdd->dev == dev) {
                        ret = -EINVAL;
                        goto out;
                }

        ret = dev_pm_get_subsys_data(dev);
        if (ret)
                goto out;

        genpd->device_count++;
        genpd->max_off_time_changed = true;

        spin_lock_irq(&dev->power.lock);

        dev->pm_domain = &genpd->domain;
        if (dev->power.subsys_data->domain_data) {
                gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
        } else {
                gpd_data = gpd_data_new;
                dev->power.subsys_data->domain_data = &gpd_data->base;
        }
        gpd_data->refcount++;
        if (td)
                gpd_data->td = *td;

        spin_unlock_irq(&dev->power.lock);

        mutex_lock(&gpd_data->lock);
        gpd_data->base.dev = dev;
        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
        gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
        gpd_data->td.constraint_changed = true;
        gpd_data->td.effective_constraint_ns = -1;
        mutex_unlock(&gpd_data->lock);

 out:
        genpd_release_lock(genpd);

        if (gpd_data != gpd_data_new)
                __pm_genpd_free_dev_data(dev, gpd_data_new);

        return ret;
}
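
/*
 * Illustrative sketch (not part of the original file): registering a device
 * with a domain, with and without explicit timing data; "my_pd" and the
 * latency values are hypothetical:
 *
 *	struct gpd_timing_data td = {
 *		.stop_latency_ns  = 10000,
 *		.start_latency_ns = 10000,
 *	};
 *
 *	ret = __pm_genpd_add_device(&my_pd, dev, &td);
 *	// or, with default (zeroed) timing data:
 *	ret = __pm_genpd_add_device(&my_pd, dev, NULL);
 */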

/**
 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
 * @genpd_node: Device tree node pointer representing the PM domain to which
 *   the device is added.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
                             struct gpd_timing_data *td)
{
        struct generic_pm_domain *genpd = NULL, *gpd;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        mutex_lock(&gpd_list_lock);
        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
                if (gpd->of_node == genpd_node) {
                        genpd = gpd;
                        break;
                }
        }
        mutex_unlock(&gpd_list_lock);

        if (!genpd)
                return -EINVAL;

        return __pm_genpd_add_device(genpd, dev, td);
}

1515 /**
1516  * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
1517  * @domain_name: Name of the PM domain to add the device to.
1518  * @dev: Device to be added.
1519  * @td: Set of PM QoS timing parameters to attach to the device.
1520  */
1521 int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
1522                                struct gpd_timing_data *td)
1523 {
1524         return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
1525 }
1526
1527 /**
1528  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1529  * @genpd: PM domain to remove the device from.
1530  * @dev: Device to be removed.
1531  */
1532 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1533                            struct device *dev)
1534 {
1535         struct generic_pm_domain_data *gpd_data;
1536         struct pm_domain_data *pdd;
1537         bool remove = false;
1538         int ret = 0;
1539
1540         dev_dbg(dev, "%s()\n", __func__);
1541
1542         if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
1543             ||  IS_ERR_OR_NULL(dev->pm_domain)
1544             ||  pd_to_genpd(dev->pm_domain) != genpd)
1545                 return -EINVAL;
1546
1547         genpd_acquire_lock(genpd);
1548
1549         if (genpd->prepared_count > 0) {
1550                 ret = -EAGAIN;
1551                 goto out;
1552         }
1553
1554         genpd->device_count--;
1555         genpd->max_off_time_changed = true;
1556
1557         spin_lock_irq(&dev->power.lock);
1558
1559         dev->pm_domain = NULL;
1560         pdd = dev->power.subsys_data->domain_data;
1561         list_del_init(&pdd->list_node);
1562         gpd_data = to_gpd_data(pdd);
1563         if (--gpd_data->refcount == 0) {
1564                 dev->power.subsys_data->domain_data = NULL;
1565                 remove = true;
1566         }
1567
1568         spin_unlock_irq(&dev->power.lock);
1569
1570         mutex_lock(&gpd_data->lock);
1571         pdd->dev = NULL;
1572         mutex_unlock(&gpd_data->lock);
1573
1574         genpd_release_lock(genpd);
1575
1576         dev_pm_put_subsys_data(dev);
1577         if (remove)
1578                 __pm_genpd_free_dev_data(dev, gpd_data);
1579
1580         return 0;
1581
1582  out:
1583         genpd_release_lock(genpd);
1584
1585         return ret;
1586 }
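
/*
 * Example (sketch): undoing the binding, e.g. on device removal.  An
 * -EAGAIN return means a system suspend transition is in progress
 * (prepared_count > 0) and the call may be retried later:
 *
 *      struct generic_pm_domain *pd = dev_to_genpd(&foo_pdev->dev);
 *
 *      if (!IS_ERR(pd))
 *              ret = pm_genpd_remove_device(pd, &foo_pdev->dev);
 */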
1587
1588 /**
1589  * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
1590  * @dev: Device to set/unset the flag for.
1591  * @val: The new value of the device's "need restore" flag.
1592  */
1593 void pm_genpd_dev_need_restore(struct device *dev, bool val)
1594 {
1595         struct pm_subsys_data *psd;
1596         unsigned long flags;
1597
1598         spin_lock_irqsave(&dev->power.lock, flags);
1599
1600         psd = dev_to_psd(dev);
1601         if (psd && psd->domain_data)
1602                 to_gpd_data(psd->domain_data)->need_restore = val;
1603
1604         spin_unlock_irqrestore(&dev->power.lock, flags);
1605 }
1606 EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
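
/*
 * Example (sketch): a driver that has just restored its own register
 * context, say in a resume path, can clear the flag so the domain skips
 * the restore_state callback on the next runtime resume
 * ("foo_restore_registers" is hypothetical):
 *
 *      foo_restore_registers(foo);
 *      pm_genpd_dev_need_restore(&foo_pdev->dev, false);
 */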
1607
1608 /**
1609  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1610  * @genpd: Master PM domain to add the subdomain to.
1611  * @subdomain: Subdomain to be added.
1612  */
1613 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1614                            struct generic_pm_domain *subdomain)
1615 {
1616         struct gpd_link *link;
1617         int ret = 0;
1618
1619         if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1620             || genpd == subdomain)
1621                 return -EINVAL;
1622
1623  start:
1624         genpd_acquire_lock(genpd);
1625         mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1626
1627         if (subdomain->status != GPD_STATE_POWER_OFF
1628             && subdomain->status != GPD_STATE_ACTIVE) {
1629                 mutex_unlock(&subdomain->lock);
1630                 genpd_release_lock(genpd);
1631                 goto start;
1632         }
1633
1634         if (genpd->status == GPD_STATE_POWER_OFF
1635             && subdomain->status != GPD_STATE_POWER_OFF) {
1636                 ret = -EINVAL;
1637                 goto out;
1638         }
1639
1640         list_for_each_entry(link, &genpd->master_links, master_node) {
1641                 if (link->slave == subdomain && link->master == genpd) {
1642                         ret = -EINVAL;
1643                         goto out;
1644                 }
1645         }
1646
1647         link = kzalloc(sizeof(*link), GFP_KERNEL);
1648         if (!link) {
1649                 ret = -ENOMEM;
1650                 goto out;
1651         }
1652         link->master = genpd;
1653         list_add_tail(&link->master_node, &genpd->master_links);
1654         link->slave = subdomain;
1655         list_add_tail(&link->slave_node, &subdomain->slave_links);
1656         if (subdomain->status != GPD_STATE_POWER_OFF)
1657                 genpd_sd_counter_inc(genpd);
1658
1659  out:
1660         mutex_unlock(&subdomain->lock);
1661         genpd_release_lock(genpd);
1662
1663         return ret;
1664 }
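
/*
 * Example (sketch, hypothetical domains): nesting a GPU domain inside a
 * top-level SoC domain, so the master cannot power off while the
 * subdomain is still on (tracked through sd_count):
 *
 *      static struct generic_pm_domain foo_top_pd, foo_gpu_pd;
 *
 *      ret = pm_genpd_add_subdomain(&foo_top_pd, &foo_gpu_pd);
 */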
1665
1666 /**
1667  * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
1668  * @master_name: Name of the master PM domain to add the subdomain to.
1669  * @subdomain_name: Name of the subdomain to be added.
1670  */
1671 int pm_genpd_add_subdomain_names(const char *master_name,
1672                                  const char *subdomain_name)
1673 {
1674         struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1675
1676         if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1677                 return -EINVAL;
1678
1679         mutex_lock(&gpd_list_lock);
1680         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1681                 if (!master && !strcmp(gpd->name, master_name))
1682                         master = gpd;
1683
1684                 if (!subdomain && !strcmp(gpd->name, subdomain_name))
1685                         subdomain = gpd;
1686
1687                 if (master && subdomain)
1688                         break;
1689         }
1690         mutex_unlock(&gpd_list_lock);
1691
1692         return pm_genpd_add_subdomain(master, subdomain);
1693 }
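
/*
 * Example (sketch): the same hierarchy built from names alone, for board
 * code that only knows the registered domain names:
 *
 *      ret = pm_genpd_add_subdomain_names("FOO-TOP", "FOO-GPU");
 */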
1694
1695 /**
1696  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1697  * @genpd: Master PM domain to remove the subdomain from.
1698  * @subdomain: Subdomain to be removed.
1699  */
1700 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1701                               struct generic_pm_domain *subdomain)
1702 {
1703         struct gpd_link *link;
1704         int ret = -EINVAL;
1705
1706         if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1707                 return -EINVAL;
1708
1709  start:
1710         genpd_acquire_lock(genpd);
1711
1712         list_for_each_entry(link, &genpd->master_links, master_node) {
1713                 if (link->slave != subdomain)
1714                         continue;
1715
1716                 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1717
1718                 if (subdomain->status != GPD_STATE_POWER_OFF
1719                     && subdomain->status != GPD_STATE_ACTIVE) {
1720                         mutex_unlock(&subdomain->lock);
1721                         genpd_release_lock(genpd);
1722                         goto start;
1723                 }
1724
1725                 list_del(&link->master_node);
1726                 list_del(&link->slave_node);
1727                 kfree(link);
1728                 if (subdomain->status != GPD_STATE_POWER_OFF)
1729                         genpd_sd_counter_dec(genpd);
1730
1731                 mutex_unlock(&subdomain->lock);
1732
1733                 ret = 0;
1734                 break;
1735         }
1736
1737         genpd_release_lock(genpd);
1738
1739         return ret;
1740 }
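
/*
 * Example (sketch): dissolving the link again, e.g. before unregistering
 * the subdomain.  -EINVAL is returned if no such master/slave link exists:
 *
 *      ret = pm_genpd_remove_subdomain(&foo_top_pd, &foo_gpu_pd);
 */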
1741
1742 /**
1743  * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1744  * @genpd: PM domain to be connected with cpuidle.
1745  * @state: cpuidle state this domain can disable/enable.
1746  *
1747  * Make a PM domain behave as though it contained a CPU core; that is, instead
1748  * of calling its power-down routine, it will enable the given cpuidle state so
1749  * that the cpuidle subsystem can power it down (if possible and desirable).
1750  */
1751 int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1752 {
1753         struct cpuidle_driver *cpuidle_drv;
1754         struct gpd_cpu_data *cpu_data;
1755         struct cpuidle_state *idle_state;
1756         int ret = 0;
1757
1758         if (IS_ERR_OR_NULL(genpd) || state < 0)
1759                 return -EINVAL;
1760
1761         genpd_acquire_lock(genpd);
1762
1763         if (genpd->cpu_data) {
1764                 ret = -EEXIST;
1765                 goto out;
1766         }
1767         cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
1768         if (!cpu_data) {
1769                 ret = -ENOMEM;
1770                 goto out;
1771         }
1772         cpuidle_drv = cpuidle_driver_ref();
1773         if (!cpuidle_drv) {
1774                 ret = -ENODEV;
1775                 goto err_drv;
1776         }
1777         if (cpuidle_drv->state_count <= state) {
1778                 ret = -EINVAL;
1779                 goto err;
1780         }
1781         idle_state = &cpuidle_drv->states[state];
1782         if (!idle_state->disabled) {
1783                 ret = -EAGAIN;
1784                 goto err;
1785         }
1786         cpu_data->idle_state = idle_state;
1787         cpu_data->saved_exit_latency = idle_state->exit_latency;
1788         genpd->cpu_data = cpu_data;
1789         genpd_recalc_cpu_exit_latency(genpd);
1790
1791  out:
1792         genpd_release_lock(genpd);
1793         return ret;
1794
1795  err:
1796         cpuidle_driver_unref();
1797
1798  err_drv:
1799         kfree(cpu_data);
1800         goto out;
1801 }
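
/*
 * Example (sketch, hypothetical CPU domain): tying a domain to state 1 of
 * the current cpuidle driver.  The state must be disabled beforehand,
 * otherwise the call fails with -EAGAIN:
 *
 *      ret = pm_genpd_attach_cpuidle(&foo_cpu_pd, 1);
 */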
1802
1803 /**
1804  * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
1805  * @name: Name of the domain to connect to cpuidle.
1806  * @state: cpuidle state this domain can manipulate.
1807  */
1808 int pm_genpd_name_attach_cpuidle(const char *name, int state)
1809 {
1810         return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
1811 }
1812
1813 /**
1814  * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
1815  * @genpd: PM domain to remove the cpuidle connection from.
1816  *
1817  * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
1818  * given PM domain.
1819  */
1820 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1821 {
1822         struct gpd_cpu_data *cpu_data;
1823         struct cpuidle_state *idle_state;
1824         int ret = 0;
1825
1826         if (IS_ERR_OR_NULL(genpd))
1827                 return -EINVAL;
1828
1829         genpd_acquire_lock(genpd);
1830
1831         cpu_data = genpd->cpu_data;
1832         if (!cpu_data) {
1833                 ret = -ENODEV;
1834                 goto out;
1835         }
1836         idle_state = cpu_data->idle_state;
1837         if (!idle_state->disabled) {
1838                 ret = -EAGAIN;
1839                 goto out;
1840         }
1841         idle_state->exit_latency = cpu_data->saved_exit_latency;
1842         cpuidle_driver_unref();
1843         genpd->cpu_data = NULL;
1844         kfree(cpu_data);
1845
1846  out:
1847         genpd_release_lock(genpd);
1848         return ret;
1849 }
1850
1851 /**
1852  * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
1853  * @name: Name of the domain to disconnect cpuidle from.
1854  */
1855 int pm_genpd_name_detach_cpuidle(const char *name)
1856 {
1857         return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
1858 }
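
/*
 * Example (sketch, hypothetical name): the name-based wrappers pair up
 * naturally when the connection is made at boot and undone at shutdown:
 *
 *      pm_genpd_name_attach_cpuidle("FOO-CPU", 1);
 *      ...
 *      pm_genpd_name_detach_cpuidle("FOO-CPU");
 */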
1859
1860 /* Default device callbacks for generic PM domains. */
1861
1862 /**
1863  * pm_genpd_default_save_state - Default "save device state" for PM domains.
1864  * @dev: Device to handle.
1865  */
1866 static int pm_genpd_default_save_state(struct device *dev)
1867 {
1868         int (*cb)(struct device *__dev);
1869
1870         if (dev->type && dev->type->pm)
1871                 cb = dev->type->pm->runtime_suspend;
1872         else if (dev->class && dev->class->pm)
1873                 cb = dev->class->pm->runtime_suspend;
1874         else if (dev->bus && dev->bus->pm)
1875                 cb = dev->bus->pm->runtime_suspend;
1876         else
1877                 cb = NULL;
1878
1879         if (!cb && dev->driver && dev->driver->pm)
1880                 cb = dev->driver->pm->runtime_suspend;
1881
1882         return cb ? cb(dev) : 0;
1883 }
1884
1885 /**
1886  * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
1887  * @dev: Device to handle.
1888  */
1889 static int pm_genpd_default_restore_state(struct device *dev)
1890 {
1891         int (*cb)(struct device *__dev);
1892
1893         if (dev->type && dev->type->pm)
1894                 cb = dev->type->pm->runtime_resume;
1895         else if (dev->class && dev->class->pm)
1896                 cb = dev->class->pm->runtime_resume;
1897         else if (dev->bus && dev->bus->pm)
1898                 cb = dev->bus->pm->runtime_resume;
1899         else
1900                 cb = NULL;
1901
1902         if (!cb && dev->driver && dev->driver->pm)
1903                 cb = dev->driver->pm->runtime_resume;
1904
1905         return cb ? cb(dev) : 0;
1906 }
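
/*
 * Both defaults above resolve the callback in the order type -> class ->
 * bus, and only fall back to the driver's own dev_pm_ops when none of
 * those provide PM operations.  Sketch of the fallback case (hypothetical
 * driver callbacks):
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              .runtime_suspend = foo_runtime_suspend,
 *              .runtime_resume  = foo_runtime_resume,
 *      };
 *
 * With no type/class/bus PM ops present, these are the routines that the
 * save/restore defaults end up invoking.
 */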
1907
1908 /**
1909  * pm_genpd_init - Initialize a generic I/O PM domain object.
1910  * @genpd: PM domain object to initialize.
1911  * @gov: PM domain governor to associate with the domain (may be NULL).
1912  * @is_off: Initial value of the domain's status field (true means powered off).
1913  */
1914 void pm_genpd_init(struct generic_pm_domain *genpd,
1915                    struct dev_power_governor *gov, bool is_off)
1916 {
1917         if (IS_ERR_OR_NULL(genpd))
1918                 return;
1919
1920         INIT_LIST_HEAD(&genpd->master_links);
1921         INIT_LIST_HEAD(&genpd->slave_links);
1922         INIT_LIST_HEAD(&genpd->dev_list);
1923         mutex_init(&genpd->lock);
1924         genpd->gov = gov;
1925         INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1926         genpd->in_progress = 0;
1927         atomic_set(&genpd->sd_count, 0);
1928         genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1929         init_waitqueue_head(&genpd->status_wait_queue);
1930         genpd->poweroff_task = NULL;
1931         genpd->resume_count = 0;
1932         genpd->device_count = 0;
1933         genpd->max_off_time_ns = -1;
1934         genpd->max_off_time_changed = true;
1935         genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1936         genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1937         genpd->domain.ops.prepare = pm_genpd_prepare;
1938         genpd->domain.ops.suspend = pm_genpd_suspend;
1939         genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
1940         genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1941         genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1942         genpd->domain.ops.resume_early = pm_genpd_resume_early;
1943         genpd->domain.ops.resume = pm_genpd_resume;
1944         genpd->domain.ops.freeze = pm_genpd_freeze;
1945         genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
1946         genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1947         genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1948         genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
1949         genpd->domain.ops.thaw = pm_genpd_thaw;
1950         genpd->domain.ops.poweroff = pm_genpd_suspend;
1951         genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
1952         genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1953         genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1954         genpd->domain.ops.restore_early = pm_genpd_resume_early;
1955         genpd->domain.ops.restore = pm_genpd_resume;
1956         genpd->domain.ops.complete = pm_genpd_complete;
1957         genpd->dev_ops.save_state = pm_genpd_default_save_state;
1958         genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1959         genpd->dev_ops.suspend = pm_generic_suspend;
1960         genpd->dev_ops.suspend_late = pm_generic_suspend_late;
1961         genpd->dev_ops.resume_early = pm_generic_resume_early;
1962         genpd->dev_ops.resume = pm_generic_resume;
1963         genpd->dev_ops.freeze = pm_generic_freeze;
1964         genpd->dev_ops.freeze_late = pm_generic_freeze_late;
1965         genpd->dev_ops.thaw_early = pm_generic_thaw_early;
1966         genpd->dev_ops.thaw = pm_generic_thaw;
1967         mutex_lock(&gpd_list_lock);
1968         list_add(&genpd->gpd_list_node, &gpd_list);
1969         mutex_unlock(&gpd_list_lock);
1970 }
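
/*
 * Example (sketch, hypothetical platform): a minimal domain definition.
 * The callbacks poke an imaginary power controller ("foo_pmc_write" and
 * FOO_PD_BIT are made up) and the domain starts out powered off; passing
 * NULL instead of &simple_qos_governor is also valid:
 *
 *      static int foo_pd_power_on(struct generic_pm_domain *domain)
 *      {
 *              return foo_pmc_write(FOO_PD_BIT, true);
 *      }
 *
 *      static int foo_pd_power_off(struct generic_pm_domain *domain)
 *      {
 *              return foo_pmc_write(FOO_PD_BIT, false);
 *      }
 *
 *      static struct generic_pm_domain foo_pd = {
 *              .name      = "FOO-PD",
 *              .power_on  = foo_pd_power_on,
 *              .power_off = foo_pd_power_off,
 *      };
 *
 *      pm_genpd_init(&foo_pd, &simple_qos_governor, true);
 */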