2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
9 * Standard functionality for the common clock API. See Documentation/clk.txt
12 #include <linux/clk-private.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/spinlock.h>
16 #include <linux/err.h>
17 #include <linux/list.h>
18 #include <linux/slab.h>
20 #include <linux/device.h>
22 static DEFINE_SPINLOCK(enable_lock);
23 static DEFINE_MUTEX(prepare_lock);
25 static HLIST_HEAD(clk_root_list);
26 static HLIST_HEAD(clk_orphan_list);
27 static LIST_HEAD(clk_notifier_list);
29 /*** debugfs support ***/
31 #ifdef CONFIG_COMMON_CLK_DEBUG
32 #include <linux/debugfs.h>
34 static struct dentry *rootdir;
35 static struct dentry *orphandir;
36 static int inited = 0;
38 /* caller must hold prepare_lock */
39 static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
44 if (!clk || !pdentry) {
49 d = debugfs_create_dir(clk->name, pdentry);
55 d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
60 d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
65 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
66 (u32 *)&clk->prepare_count);
70 d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
71 (u32 *)&clk->enable_count);
75 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
76 (u32 *)&clk->notifier_count);
84 debugfs_remove(clk->dentry);
89 /* caller must hold prepare_lock */
90 static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
93 struct hlist_node *tmp;
99 ret = clk_debug_create_one(clk, pdentry);
104 hlist_for_each_entry(child, tmp, &clk->children, child_node)
105 clk_debug_create_subtree(child, clk->dentry);
113 * clk_debug_register - add a clk node to the debugfs clk tree
114 * @clk: the clk being added to the debugfs clk tree
116 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
117 * initialized. Otherwise it bails out early since the debugfs clk tree
118 * will be created lazily by clk_debug_init as part of a late_initcall.
120 * Caller must hold prepare_lock. Only clk_init calls this function (so
121 * far) so this is taken care of.
123 static int clk_debug_register(struct clk *clk)
126 struct dentry *pdentry;
132 parent = clk->parent;
135 * Check to see if a clk is a root clk. Also check that it is
136 * safe to add this clk to debugfs
139 if (clk->flags & CLK_IS_ROOT)
145 pdentry = parent->dentry;
149 ret = clk_debug_create_subtree(clk, pdentry);
156 * clk_debug_init - lazily create the debugfs clk tree visualization
158 * clks are often initialized very early during boot before memory can
159 * be dynamically allocated and well before debugfs is setup.
160 * clk_debug_init walks the clk tree hierarchy while holding
161 * prepare_lock and creates the topology as part of a late_initcall,
162 * thus ensuring that clks initialized very early will still be
163 * represented in the debugfs clk tree. This function should only be
164 * called once at boot-time, and all other clks added dynamically will
165 * be done so with clk_debug_register.
167 static int __init clk_debug_init(void)
170 struct hlist_node *tmp;
172 rootdir = debugfs_create_dir("clk", NULL);
177 orphandir = debugfs_create_dir("orphans", rootdir);
182 mutex_lock(&prepare_lock);
184 hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
185 clk_debug_create_subtree(clk, rootdir);
187 hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
188 clk_debug_create_subtree(clk, orphandir);
192 mutex_unlock(&prepare_lock);
196 late_initcall(clk_debug_init);
198 static inline int clk_debug_register(struct clk *clk) { return 0; }
201 /* caller must hold prepare_lock */
202 static void clk_disable_unused_subtree(struct clk *clk)
205 struct hlist_node *tmp;
211 hlist_for_each_entry(child, tmp, &clk->children, child_node)
212 clk_disable_unused_subtree(child);
214 spin_lock_irqsave(&enable_lock, flags);
216 if (clk->enable_count)
219 if (clk->flags & CLK_IGNORE_UNUSED)
222 if (__clk_is_enabled(clk) && clk->ops->disable)
223 clk->ops->disable(clk->hw);
226 spin_unlock_irqrestore(&enable_lock, flags);
232 static int clk_disable_unused(void)
235 struct hlist_node *tmp;
237 mutex_lock(&prepare_lock);
239 hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
240 clk_disable_unused_subtree(clk);
242 hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
243 clk_disable_unused_subtree(clk);
245 mutex_unlock(&prepare_lock);
249 late_initcall(clk_disable_unused);
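/*
 * Illustrative sketch (not part of this file): a provider that must keep an
 * otherwise-unused clock running across the clk_disable_unused() pass can set
 * CLK_IGNORE_UNUSED when registering it.  The "dbg" names and ops below are
 * hypothetical.
 *
 *	static struct clk_init_data dbg_init = {
 *		.name		= "dbg_clk",
 *		.ops		= &dbg_gate_ops,
 *		.parent_names	= (const char *[]){ "pll1" },
 *		.num_parents	= 1,
 *		.flags		= CLK_IGNORE_UNUSED,
 *	};
 */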
251 /*** helper functions ***/
253 inline const char *__clk_get_name(struct clk *clk)
255 return !clk ? NULL : clk->name;
258 inline struct clk_hw *__clk_get_hw(struct clk *clk)
260 return !clk ? NULL : clk->hw;
263 inline u8 __clk_get_num_parents(struct clk *clk)
265 return !clk ? 0 : clk->num_parents;
268 inline struct clk *__clk_get_parent(struct clk *clk)
270 return !clk ? NULL : clk->parent;
273 inline unsigned int __clk_get_enable_count(struct clk *clk)
275 return !clk ? 0 : clk->enable_count;
278 inline unsigned int __clk_get_prepare_count(struct clk *clk)
280 return !clk ? 0 : clk->prepare_count;
283 unsigned long __clk_get_rate(struct clk *clk)
294 if (clk->flags & CLK_IS_ROOT)
304 inline unsigned long __clk_get_flags(struct clk *clk)
306 return !clk ? 0 : clk->flags;
309 bool __clk_is_enabled(struct clk *clk)
317 * .is_enabled is only mandatory for clocks that gate; fall back to the
318 * software usage counter if .is_enabled is missing
320 if (!clk->ops->is_enabled) {
321 ret = clk->enable_count ? 1 : 0;
325 ret = clk->ops->is_enabled(clk->hw);
330 static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
334 struct hlist_node *tmp;
336 if (!strcmp(clk->name, name))
339 hlist_for_each_entry(child, tmp, &clk->children, child_node) {
340 ret = __clk_lookup_subtree(name, child);
348 struct clk *__clk_lookup(const char *name)
350 struct clk *root_clk;
352 struct hlist_node *tmp;
357 /* search the 'proper' clk tree first */
358 hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
359 ret = __clk_lookup_subtree(name, root_clk);
364 /* if not found, then search the orphan tree */
365 hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
366 ret = __clk_lookup_subtree(name, root_clk);
376 void __clk_unprepare(struct clk *clk)
381 if (WARN_ON(clk->prepare_count == 0))
384 if (--clk->prepare_count > 0)
387 WARN_ON(clk->enable_count > 0);
389 if (clk->ops->unprepare)
390 clk->ops->unprepare(clk->hw);
392 __clk_unprepare(clk->parent);
396 * clk_unprepare - undo preparation of a clock source
397 * @clk: the clk being unprepared
399 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
400 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
401 * if the operation may sleep. One example is a clk which is accessed over
402 * I2C. In the complex case a clk gate operation may require a fast and a slow
403 * part. It is for this reason that clk_unprepare and clk_disable are not mutually
404 * exclusive. In fact clk_disable must be called before clk_unprepare.
406 void clk_unprepare(struct clk *clk)
408 mutex_lock(&prepare_lock);
409 __clk_unprepare(clk);
410 mutex_unlock(&prepare_lock);
412 EXPORT_SYMBOL_GPL(clk_unprepare);
414 int __clk_prepare(struct clk *clk)
421 if (clk->prepare_count == 0) {
422 ret = __clk_prepare(clk->parent);
426 if (clk->ops->prepare) {
427 ret = clk->ops->prepare(clk->hw);
429 __clk_unprepare(clk->parent);
435 clk->prepare_count++;
441 * clk_prepare - prepare a clock source
442 * @clk: the clk being prepared
444 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
445 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
446 * operation may sleep. One example is a clk which is accessed over I2C. In
447 * the complex case a clk ungate operation may require a fast and a slow part.
448 * It is for this reason that clk_prepare and clk_enable are not mutually
449 * exclusive. In fact clk_prepare must be called before clk_enable.
450 * Returns 0 on success, a negative error code otherwise.
452 int clk_prepare(struct clk *clk)
456 mutex_lock(&prepare_lock);
457 ret = __clk_prepare(clk);
458 mutex_unlock(&prepare_lock);
462 EXPORT_SYMBOL_GPL(clk_prepare);
464 static void __clk_disable(struct clk *clk)
469 if (WARN_ON(IS_ERR(clk)))
472 if (WARN_ON(clk->enable_count == 0))
475 if (--clk->enable_count > 0)
478 if (clk->ops->disable)
479 clk->ops->disable(clk->hw);
481 __clk_disable(clk->parent);
485 * clk_disable - gate a clock
486 * @clk: the clk being gated
488 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
489 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
490 * clk if the operation is fast and will never sleep. One example is a
491 * SoC-internal clk which is controlled via simple register writes. In the
492 * complex case a clk gate operation may require a fast and a slow part. It is
493 * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
494 * In fact clk_disable must be called before clk_unprepare.
496 void clk_disable(struct clk *clk)
500 spin_lock_irqsave(&enable_lock, flags);
502 spin_unlock_irqrestore(&enable_lock, flags);
504 EXPORT_SYMBOL_GPL(clk_disable);
506 static int __clk_enable(struct clk *clk)
513 if (WARN_ON(clk->prepare_count == 0))
516 if (clk->enable_count == 0) {
517 ret = __clk_enable(clk->parent);
522 if (clk->ops->enable) {
523 ret = clk->ops->enable(clk->hw);
525 __clk_disable(clk->parent);
536 * clk_enable - ungate a clock
537 * @clk: the clk being ungated
539 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
540 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
541 * if the operation will never sleep. One example is a SoC-internal clk which
542 * is controlled via simple register writes. In the complex case a clk ungate
543 * operation may require a fast and a slow part. It is for this reason that
544 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
545 * must be called before clk_enable. Returns 0 on success, an error code otherwise.
548 int clk_enable(struct clk *clk)
553 spin_lock_irqsave(&enable_lock, flags);
554 ret = __clk_enable(clk);
555 spin_unlock_irqrestore(&enable_lock, flags);
559 EXPORT_SYMBOL_GPL(clk_enable);
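/*
 * Illustrative consumer sketch (hypothetical "foo" driver, not part of this
 * file): the sleepable half of gating is handled once from process context and
 * the fast half is toggled from atomic context.
 *
 * In probe (may sleep):
 *	foo->clk = clk_get(dev, "bus");
 *	ret = clk_prepare(foo->clk);
 *
 * In an interrupt handler or under a spinlock (must not sleep):
 *	clk_enable(foo->clk);
 *	...
 *	clk_disable(foo->clk);
 *
 * In remove (may sleep), after the final clk_disable:
 *	clk_unprepare(foo->clk);
 *	clk_put(foo->clk);
 */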
562 * __clk_round_rate - round the given rate for a clk
563 * @clk: round the rate of this clock
565 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
567 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
569 unsigned long parent_rate = 0;
574 if (!clk->ops->round_rate) {
575 if (clk->flags & CLK_SET_RATE_PARENT)
576 return __clk_round_rate(clk->parent, rate);
582 parent_rate = clk->parent->rate;
584 return clk->ops->round_rate(clk->hw, rate, &parent_rate);
588 * clk_round_rate - round the given rate for a clk
589 * @clk: the clk for which we are rounding a rate
590 * @rate: the rate which is to be rounded
592 * Takes in a rate as input and rounds it to a rate that the clk can actually
593 * use which is then returned. If clk doesn't support the round_rate operation
594 * then the parent rate is returned.
596 long clk_round_rate(struct clk *clk, unsigned long rate)
600 mutex_lock(&prepare_lock);
601 ret = __clk_round_rate(clk, rate);
602 mutex_unlock(&prepare_lock);
606 EXPORT_SYMBOL_GPL(clk_round_rate);
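/*
 * Illustrative consumer sketch (hypothetical names): clk_round_rate lets a
 * driver discover what clk_set_rate would actually deliver without touching
 * any hardware state.
 *
 *	long rounded = clk_round_rate(uart_clk, 48000000);
 *
 *	if (rounded > 0 && rounded >= min_acceptable_rate)
 *		ret = clk_set_rate(uart_clk, rounded);
 */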
609 * __clk_notify - call clk notifier chain
610 * @clk: struct clk * that is changing rate
611 * @msg: clk notifier type (see include/linux/clk.h)
612 * @old_rate: old clk rate
613 * @new_rate: new clk rate
615 * Triggers a notifier call chain on the clk rate-change notification
616 * for 'clk'. Passes a pointer to the struct clk and the previous
617 * and current rates to the notifier callback. Intended to be called by
618 * internal clock code only. Returns NOTIFY_DONE from the last driver
619 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
620 * a driver returns that.
622 static int __clk_notify(struct clk *clk, unsigned long msg,
623 unsigned long old_rate, unsigned long new_rate)
625 struct clk_notifier *cn;
626 struct clk_notifier_data cnd;
627 int ret = NOTIFY_DONE;
630 cnd.old_rate = old_rate;
631 cnd.new_rate = new_rate;
633 list_for_each_entry(cn, &clk_notifier_list, node) {
634 if (cn->clk == clk) {
635 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
646 * @clk: first clk in the subtree
647 * @msg: notification type (see include/linux/clk.h)
649 * Walks the subtree of clks starting with clk and recalculates rates as it
650 * goes. Note that if a clk does not implement the .recalc_rate callback then
651 * it is assumed that the clock will take on the rate of its parent.
653 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
656 * Caller must hold prepare_lock.
658 static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
660 unsigned long old_rate;
661 unsigned long parent_rate = 0;
662 struct hlist_node *tmp;
665 old_rate = clk->rate;
668 parent_rate = clk->parent->rate;
670 if (clk->ops->recalc_rate)
671 clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
673 clk->rate = parent_rate;
676 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
677 * & ABORT_RATE_CHANGE notifiers
679 if (clk->notifier_count && msg)
680 __clk_notify(clk, msg, old_rate, clk->rate);
682 hlist_for_each_entry(child, tmp, &clk->children, child_node)
683 __clk_recalc_rates(child, msg);
687 * clk_get_rate - return the rate of clk
688 * @clk: the clk whose rate is being returned
690 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE flag
691 * is set, in which case a recalc_rate will be issued first.
692 * If clk is NULL then returns 0.
694 unsigned long clk_get_rate(struct clk *clk)
698 mutex_lock(&prepare_lock);
700 if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
701 __clk_recalc_rates(clk, 0);
703 rate = __clk_get_rate(clk);
704 mutex_unlock(&prepare_lock);
708 EXPORT_SYMBOL_GPL(clk_get_rate);
711 * __clk_speculate_rates
712 * @clk: first clk in the subtree
713 * @parent_rate: the "future" rate of clk's parent
715 * Walks the subtree of clks starting with clk, speculating rates as it
716 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
718 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
719 * pre-rate change notifications and returns early if no clks in the
720 * subtree have subscribed to the notifications. Note that if a clk does not
721 * implement the .recalc_rate callback then it is assumed that the clock will
722 * take on the rate of its parent.
724 * Caller must hold prepare_lock.
726 static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
728 struct hlist_node *tmp;
730 unsigned long new_rate;
731 int ret = NOTIFY_DONE;
733 if (clk->ops->recalc_rate)
734 new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
736 new_rate = parent_rate;
738 /* abort the rate change if a driver returns NOTIFY_BAD */
739 if (clk->notifier_count)
740 ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
742 if (ret == NOTIFY_BAD)
745 hlist_for_each_entry(child, tmp, &clk->children, child_node) {
746 ret = __clk_speculate_rates(child, new_rate);
747 if (ret == NOTIFY_BAD)
755 static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
758 struct hlist_node *tmp;
760 clk->new_rate = new_rate;
762 hlist_for_each_entry(child, tmp, &clk->children, child_node) {
763 if (child->ops->recalc_rate)
764 child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
766 child->new_rate = new_rate;
767 clk_calc_subtree(child, child->new_rate);
772 * calculate the new rates returning the topmost clock that has to be
775 static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
777 struct clk *top = clk;
778 unsigned long best_parent_rate = 0;
779 unsigned long new_rate;
782 if (IS_ERR_OR_NULL(clk))
785 /* save parent rate, if it exists */
787 best_parent_rate = clk->parent->rate;
789 /* never propagate up to the parent */
790 if (!(clk->flags & CLK_SET_RATE_PARENT)) {
791 if (!clk->ops->round_rate) {
792 clk->new_rate = clk->rate;
795 new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
799 /* need clk->parent from here on out */
801 pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
805 if (!clk->ops->round_rate) {
806 top = clk_calc_new_rates(clk->parent, rate);
807 new_rate = clk->parent->new_rate;
812 new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
814 if (best_parent_rate != clk->parent->rate) {
815 top = clk_calc_new_rates(clk->parent, best_parent_rate);
821 clk_calc_subtree(clk, new_rate);
827 * Notify about rate changes in a subtree. Always walk down the whole tree
828 * so that in case of an error we can walk down the whole tree again and
831 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
833 struct hlist_node *tmp;
834 struct clk *child, *fail_clk = NULL;
835 int ret = NOTIFY_DONE;
837 if (clk->rate == clk->new_rate)
840 if (clk->notifier_count) {
841 ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
842 if (ret == NOTIFY_BAD)
846 hlist_for_each_entry(child, tmp, &clk->children, child_node) {
847 clk = clk_propagate_rate_change(child, event);
856 * walk down a subtree and set the new rates notifying the rate
859 static void clk_change_rate(struct clk *clk)
862 unsigned long old_rate;
863 unsigned long best_parent_rate = 0;
864 struct hlist_node *tmp;
866 old_rate = clk->rate;
869 best_parent_rate = clk->parent->rate;
871 if (clk->ops->set_rate)
872 clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
874 if (clk->ops->recalc_rate)
875 clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
877 clk->rate = best_parent_rate;
879 if (clk->notifier_count && old_rate != clk->rate)
880 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
882 hlist_for_each_entry(child, tmp, &clk->children, child_node)
883 clk_change_rate(child);
887 * clk_set_rate - specify a new rate for clk
888 * @clk: the clk whose rate is being changed
889 * @rate: the new rate for clk
891 * In the simplest case clk_set_rate will only adjust the rate of clk.
893 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
894 * propagate up to clk's parent; whether or not this happens depends on the
895 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
896 * after calling .round_rate then upstream parent propagation is ignored. If
897 * *parent_rate comes back with a new rate for clk's parent then we propagate
898 * up to clk's parent and set its rate. Upward propagation will continue
899 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
900 * .round_rate stops requesting changes to clk's parent_rate.
902 * Rate changes are accomplished via tree traversal that also recalculates the
903 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
905 * Returns 0 on success, a negative error code otherwise.
907 int clk_set_rate(struct clk *clk, unsigned long rate)
909 struct clk *top, *fail_clk;
912 /* prevent racing with updates to the clock topology */
913 mutex_lock(&prepare_lock);
915 /* bail early if nothing to do */
916 if (rate == clk->rate)
919 if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
924 /* calculate new rates and get the topmost changed clock */
925 top = clk_calc_new_rates(clk, rate);
931 /* notify that we are about to change rates */
932 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
934 pr_warn("%s: failed to set %s rate\n", __func__,
936 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
941 /* change the rates */
942 clk_change_rate(top);
944 mutex_unlock(&prepare_lock);
948 mutex_unlock(&prepare_lock);
952 EXPORT_SYMBOL_GPL(clk_set_rate);
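/*
 * Illustrative provider-side sketch (hypothetical divider, not part of this
 * file): with CLK_SET_RATE_PARENT set, a .round_rate implementation requests
 * parent propagation by writing a new value through *parent_rate; leaving
 * *parent_rate untouched stops the request at this clock.  Here a fixed /2
 * divider asks its parent for twice the requested rate.
 *
 *	static long foo_div_round_rate(struct clk_hw *hw, unsigned long rate,
 *				       unsigned long *parent_rate)
 *	{
 *		*parent_rate = rate * 2;
 *		return rate;
 *	}
 */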
955 * clk_get_parent - return the parent of a clk
956 * @clk: the clk whose parent gets returned
958 * Simply returns clk->parent. Returns NULL if clk is NULL.
960 struct clk *clk_get_parent(struct clk *clk)
964 mutex_lock(&prepare_lock);
965 parent = __clk_get_parent(clk);
966 mutex_unlock(&prepare_lock);
970 EXPORT_SYMBOL_GPL(clk_get_parent);
973 * .get_parent is mandatory for clocks with multiple possible parents. It is
974 * optional for single-parent clocks. Always call .get_parent if it is
975 * available and WARN if it is missing for multi-parent clocks.
977 * For single-parent clocks without .get_parent, first check to see if the
978 * .parents array exists, and if so use it to avoid an expensive tree
979 * traversal. If .parents does not exist then walk the tree with __clk_lookup.
981 static struct clk *__clk_init_parent(struct clk *clk)
983 struct clk *ret = NULL;
986 /* handle the trivial cases */
988 if (!clk->num_parents)
991 if (clk->num_parents == 1) {
992 if (IS_ERR_OR_NULL(clk->parent))
993 ret = clk->parent = __clk_lookup(clk->parent_names[0]);
998 if (!clk->ops->get_parent) {
999 WARN(!clk->ops->get_parent,
1000 "%s: multi-parent clocks must implement .get_parent\n",
1006 * Do our best to cache parent clocks in clk->parents. This prevents
1007 * unnecessary and expensive calls to __clk_lookup. We don't set
1008 * clk->parent here; that is done by the calling function
1011 index = clk->ops->get_parent(clk->hw);
1015 kzalloc((sizeof(struct clk*) * clk->num_parents),
1019 ret = __clk_lookup(clk->parent_names[index]);
1020 else if (!clk->parents[index])
1021 ret = clk->parents[index] =
1022 __clk_lookup(clk->parent_names[index]);
1024 ret = clk->parents[index];
1030 void __clk_reparent(struct clk *clk, struct clk *new_parent)
1032 #ifdef CONFIG_COMMON_CLK_DEBUG
1034 struct dentry *new_parent_d;
1037 if (!clk || !new_parent)
1040 hlist_del(&clk->child_node);
1043 hlist_add_head(&clk->child_node, &new_parent->children);
1045 hlist_add_head(&clk->child_node, &clk_orphan_list);
1047 #ifdef CONFIG_COMMON_CLK_DEBUG
1052 new_parent_d = new_parent->dentry;
1054 new_parent_d = orphandir;
1056 d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
1057 new_parent_d, clk->name);
1061 pr_debug("%s: failed to rename debugfs entry for %s\n",
1062 __func__, clk->name);
1066 clk->parent = new_parent;
1068 __clk_recalc_rates(clk, POST_RATE_CHANGE);
1071 static int __clk_set_parent(struct clk *clk, struct clk *parent)
1073 struct clk *old_parent;
1074 unsigned long flags;
1078 old_parent = clk->parent;
1081 clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
1085 * find index of new parent clock using cached parent ptrs,
1086 * or if not yet cached, use string name comparison and cache
1087 * them now to avoid future calls to __clk_lookup.
1089 for (i = 0; i < clk->num_parents; i++) {
1090 if (clk->parents && clk->parents[i] == parent)
1092 else if (!strcmp(clk->parent_names[i], parent->name)) {
1094 clk->parents[i] = __clk_lookup(parent->name);
1099 if (i == clk->num_parents) {
1100 pr_debug("%s: clock %s is not a possible parent of clock %s\n",
1101 __func__, parent->name, clk->name);
1105 /* migrate prepare and enable */
1106 if (clk->prepare_count)
1107 __clk_prepare(parent);
1109 /* FIXME replace with clk_is_enabled(clk) someday */
1110 spin_lock_irqsave(&enable_lock, flags);
1111 if (clk->enable_count)
1112 __clk_enable(parent);
1113 spin_unlock_irqrestore(&enable_lock, flags);
1115 /* change clock input source */
1116 ret = clk->ops->set_parent(clk->hw, i);
1118 /* clean up old prepare and enable */
1119 spin_lock_irqsave(&enable_lock, flags);
1120 if (clk->enable_count)
1121 __clk_disable(old_parent);
1122 spin_unlock_irqrestore(&enable_lock, flags);
1124 if (clk->prepare_count)
1125 __clk_unprepare(old_parent);
1132 * clk_set_parent - switch the parent of a mux clk
1133 * @clk: the mux clk whose input we are switching
1134 * @parent: the new input to clk
1136 * Re-parent clk to use parent as its new input source. If clk has the
1137 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
1138 * operation to succeed. After successfully changing clk's parent
1139 * clk_set_parent will update the clk topology, debugfs topology and
1140 * propagate rate recalculation via __clk_recalc_rates. Returns 0 on
1141 * success, a negative error code otherwise.
1143 int clk_set_parent(struct clk *clk, struct clk *parent)
1147 if (!clk || !clk->ops)
1150 if (!clk->ops->set_parent)
1153 /* prevent racing with updates to the clock topology */
1154 mutex_lock(&prepare_lock);
1156 if (clk->parent == parent)
1159 /* propagate PRE_RATE_CHANGE notifications */
1160 if (clk->notifier_count)
1161 ret = __clk_speculate_rates(clk, parent->rate);
1163 /* abort if a driver objects */
1164 if (ret == NOTIFY_STOP)
1167 /* only re-parent if the clock is not in use */
1168 if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
1171 ret = __clk_set_parent(clk, parent);
1173 /* propagate ABORT_RATE_CHANGE if .set_parent failed */
1175 __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
1179 /* propagate rate recalculation downstream */
1180 __clk_reparent(clk, parent);
1183 mutex_unlock(&prepare_lock);
1187 EXPORT_SYMBOL_GPL(clk_set_parent);
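/*
 * Illustrative consumer sketch (hypothetical names): re-parenting a mux from
 * its crystal reference to a PLL output.  If the mux was registered with
 * CLK_SET_PARENT_GATE it must be unprepared/disabled first.
 *
 *	pll = clk_get(dev, "pll2");
 *	ret = clk_set_parent(mux_clk, pll);
 *	if (!ret)
 *		pr_debug("mux now runs at %lu Hz\n", clk_get_rate(mux_clk));
 */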
1190 * __clk_init - initialize the data structures in a struct clk
1191 * @dev: device initializing this clk, placeholder for now
1192 * @clk: clk being initialized
1194 * Initializes the lists in struct clk, queries the hardware for the
1195 * parent and rate and sets them both.
1197 int __clk_init(struct device *dev, struct clk *clk)
1201 struct hlist_node *tmp, *tmp2;
1206 mutex_lock(&prepare_lock);
1208 /* check to see if a clock with this name is already registered */
1209 if (__clk_lookup(clk->name)) {
1210 pr_debug("%s: clk %s already initialized\n",
1211 __func__, clk->name);
1216 /* check that clk_ops are sane. See Documentation/clk.txt */
1217 if (clk->ops->set_rate &&
1218 !(clk->ops->round_rate && clk->ops->recalc_rate)) {
1219 pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
1220 __func__, clk->name);
1225 if (clk->ops->set_parent && !clk->ops->get_parent) {
1226 pr_warning("%s: %s must implement .get_parent & .set_parent\n",
1227 __func__, clk->name);
1232 /* throw a WARN if any entries in parent_names are NULL */
1233 for (i = 0; i < clk->num_parents; i++)
1234 WARN(!clk->parent_names[i],
1235 "%s: invalid NULL in %s's .parent_names\n",
1236 __func__, clk->name);
1239 * Allocate an array of struct clk *'s to avoid unnecessary string
1240 * look-ups of clk's possible parents. This can fail for clocks passed
1241 * in to clk_init during early boot; thus any access to clk->parents[]
1242 * must always check for a NULL pointer and try to populate it if
1245 * If clk->parents is not NULL we skip this entire block. This allows
1246 * for clock drivers to statically initialize clk->parents.
1248 if (clk->num_parents > 1 && !clk->parents) {
1249 clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
1252 * __clk_lookup returns NULL for parents that have not been
1253 * clk_init'd; thus any access to clk->parents[] must check
1254 * for a NULL pointer. We can always perform lazy lookups for
1255 * missing parents later on.
1258 for (i = 0; i < clk->num_parents; i++)
1260 __clk_lookup(clk->parent_names[i]);
1263 clk->parent = __clk_init_parent(clk);
1266 * Populate clk->parent if parent has already been __clk_init'd. If
1267 * parent has not yet been __clk_init'd then place clk in the orphan
1268 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
1271 * Every time a new clk is clk_init'd then we walk the list of orphan
1272 * clocks and re-parent any that are children of the clock currently
1276 hlist_add_head(&clk->child_node,
1277 &clk->parent->children);
1278 else if (clk->flags & CLK_IS_ROOT)
1279 hlist_add_head(&clk->child_node, &clk_root_list);
1281 hlist_add_head(&clk->child_node, &clk_orphan_list);
1284 * Set clk's rate. The preferred method is to use .recalc_rate. For
1285 * simple clocks and lazy developers the default fallback is to use the
1286 * parent's rate. If a clock doesn't have a parent (or is orphaned)
1287 * then rate is set to zero.
1289 if (clk->ops->recalc_rate)
1290 clk->rate = clk->ops->recalc_rate(clk->hw,
1291 __clk_get_rate(clk->parent));
1292 else if (clk->parent)
1293 clk->rate = clk->parent->rate;
1298 * walk the list of orphan clocks and reparent any that are children of
1301 hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) {
1302 if (orphan->ops->get_parent) {
1303 i = orphan->ops->get_parent(orphan->hw);
1304 if (!strcmp(clk->name, orphan->parent_names[i]))
1305 __clk_reparent(orphan, clk);
1309 for (i = 0; i < orphan->num_parents; i++)
1310 if (!strcmp(clk->name, orphan->parent_names[i])) {
1311 __clk_reparent(orphan, clk);
1317 * optional platform-specific magic
1319 * The .init callback is not used by any of the basic clock types, but
1320 * exists for weird hardware that must perform initialization magic.
1321 * Please consider other ways of solving initialization problems before
1322 * using this callback, as its use is discouraged.
1325 clk->ops->init(clk->hw);
1327 clk_debug_register(clk);
1330 mutex_unlock(&prepare_lock);
1336 * __clk_register - register a clock and return a cookie.
1338 * Same as clk_register, except that the .clk field inside hw shall point to a
1339 * preallocated (generally statically allocated) struct clk. None of the fields
1340 * of the struct clk need to be initialized.
1342 * The data pointed to by the .init and .clk fields shall NOT be marked as init
1345 * __clk_register is only exposed via clk-private.h and is intended for use with
1346 * very large numbers of clocks that need to be statically initialized. It is
1347 * a layering violation to include clk-private.h from any code which implements
1348 * a clock's .ops; as such any statically initialized clock data MUST be in a
1349 * separate C file from the logic that implements its operations. Returns 0
1350 * on success, otherwise an error code.
1352 struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
1358 clk->name = hw->init->name;
1359 clk->ops = hw->init->ops;
1361 clk->flags = hw->init->flags;
1362 clk->parent_names = hw->init->parent_names;
1363 clk->num_parents = hw->init->num_parents;
1365 ret = __clk_init(dev, clk);
1367 return ERR_PTR(ret);
1371 EXPORT_SYMBOL_GPL(__clk_register);
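/*
 * Illustrative sketch of the static-initialization path (hypothetical names):
 * the struct clk is preallocated by the platform and hw->clk points at it, so
 * __clk_register performs no allocation and can run very early.
 *
 *	static struct clk foo_osc;
 *	static struct clk_init_data foo_osc_init = {
 *		.name	= "foo_osc",
 *		.ops	= &foo_osc_ops,
 *		.flags	= CLK_IS_ROOT,
 *	};
 *	static struct clk_hw foo_osc_hw = {
 *		.clk	= &foo_osc,
 *		.init	= &foo_osc_init,
 *	};
 *
 *	clkp = __clk_register(NULL, &foo_osc_hw);
 */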
1373 static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
1377 clk->name = kstrdup(hw->init->name, GFP_KERNEL);
1379 pr_err("%s: could not allocate clk->name\n", __func__);
1383 clk->ops = hw->init->ops;
1385 clk->flags = hw->init->flags;
1386 clk->num_parents = hw->init->num_parents;
1389 /* allocate local copy in case parent_names is __initdata */
1390 clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
1393 if (!clk->parent_names) {
1394 pr_err("%s: could not allocate clk->parent_names\n", __func__);
1396 goto fail_parent_names;
1400 /* copy each string name in case parent_names is __initdata */
1401 for (i = 0; i < clk->num_parents; i++) {
1402 clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
1404 if (!clk->parent_names[i]) {
1405 pr_err("%s: could not copy parent_names\n", __func__);
1407 goto fail_parent_names_copy;
1411 ret = __clk_init(dev, clk);
1415 fail_parent_names_copy:
1417 kfree(clk->parent_names[i]);
1418 kfree(clk->parent_names);
1426 * clk_register - allocate a new clock, register it and return an opaque cookie
1427 * @dev: device that is registering this clock
1428 * @hw: link to hardware-specific clock data
1430 * clk_register is the primary interface for populating the clock tree with new
1431 * clock nodes. It returns a pointer to the newly allocated struct clk which
1432 * cannot be dereferenced by driver code but may be used in conjunction with the
1433 * rest of the clock API. In the event of an error clk_register will return an
1434 * error code; drivers must test for an error code after calling clk_register.
1436 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
1441 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
1443 pr_err("%s: could not allocate clk\n", __func__);
1448 ret = _clk_register(dev, hw, clk);
1454 return ERR_PTR(ret);
1456 EXPORT_SYMBOL_GPL(clk_register);
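/*
 * Illustrative provider sketch (hypothetical names): dynamic registration from
 * a driver, where the framework allocates and owns the struct clk.
 *
 *	static struct clk_hw foo_hw;
 *	static struct clk_init_data foo_init = {
 *		.name		= "foo_periph",
 *		.ops		= &foo_periph_ops,
 *		.parent_names	= (const char *[]){ "foo_bus" },
 *		.num_parents	= 1,
 *	};
 *
 *	foo_hw.init = &foo_init;
 *	clk = clk_register(dev, &foo_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */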
1459 * clk_unregister - unregister a currently registered clock
1460 * @clk: clock to unregister
1462 * Currently unimplemented.
1464 void clk_unregister(struct clk *clk) {}
1465 EXPORT_SYMBOL_GPL(clk_unregister);
1467 static void devm_clk_release(struct device *dev, void *res)
1469 clk_unregister(res);
1473 * devm_clk_register - resource managed clk_register()
1474 * @dev: device that is registering this clock
1475 * @hw: link to hardware-specific clock data
1477 * Managed clk_register(). Clocks returned from this function are
1478 * automatically clk_unregister()ed on driver detach. See clk_register() for
1481 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
1486 clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
1488 return ERR_PTR(-ENOMEM);
1490 ret = _clk_register(dev, hw, clk);
1492 devres_add(dev, clk);
1500 EXPORT_SYMBOL_GPL(devm_clk_register);
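/*
 * Illustrative sketch (hypothetical names): the managed variant ties the
 * clock's lifetime to the registering device, so no explicit unregister is
 * needed in the driver's remove path.
 *
 *	clk = devm_clk_register(&pdev->dev, &foo_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */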
1502 static int devm_clk_match(struct device *dev, void *res, void *data)
1504 struct clk *c = res;
1511 * devm_clk_unregister - resource managed clk_unregister()
1512 * @clk: clock to unregister
1514 * Deallocate a clock allocated with devm_clk_register(). Normally
1515 * this function will not need to be called and the resource management
1516 * code will ensure that the resource is freed.
1518 void devm_clk_unregister(struct device *dev, struct clk *clk)
1520 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
1522 EXPORT_SYMBOL_GPL(devm_clk_unregister);
1524 /*** clk rate change notifiers ***/
1527 * clk_notifier_register - add a clk rate change notifier
1528 * @clk: struct clk * to watch
1529 * @nb: struct notifier_block * with callback info
1531 * Request notification when clk's rate changes. This uses an SRCU
1532 * notifier because we want it to block and notifier unregistrations are
1533 * uncommon. The callbacks associated with the notifier must not
1534 * re-enter the clk framework by calling any top-level clk APIs;
1535 * doing so would attempt to take the prepare_lock mutex recursively and deadlock.
1537 * Pre-change notifier callbacks will be passed the current, pre-change
1538 * rate of the clk via struct clk_notifier_data.old_rate. The new,
1539 * post-change rate of the clk is passed via struct
1540 * clk_notifier_data.new_rate.
1542 * Post-change notifiers will pass the now-current, post-change rate of
1543 * the clk in both struct clk_notifier_data.old_rate and struct
1544 * clk_notifier_data.new_rate.
1546 * Abort-change notifiers are effectively the opposite of pre-change
1547 * notifiers: the original pre-change clk rate is passed in via struct
1548 * clk_notifier_data.new_rate and the failed post-change rate is passed
1549 * in via struct clk_notifier_data.old_rate.
1551 * clk_notifier_register() must be called from non-atomic context.
1552 * Returns -EINVAL if called with null arguments, -ENOMEM upon
1553 * allocation failure; otherwise, passes along the return value of
1554 * srcu_notifier_chain_register().
1556 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
1558 struct clk_notifier *cn;
1564 mutex_lock(&prepare_lock);
1566 /* search the list of notifiers for this clk */
1567 list_for_each_entry(cn, &clk_notifier_list, node)
1571 /* if clk wasn't in the notifier list, allocate new clk_notifier */
1572 if (cn->clk != clk) {
1573 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
1578 srcu_init_notifier_head(&cn->notifier_head);
1580 list_add(&cn->node, &clk_notifier_list);
1583 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
1585 clk->notifier_count++;
1588 mutex_unlock(&prepare_lock);
1592 EXPORT_SYMBOL_GPL(clk_notifier_register);
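/*
 * Illustrative notifier sketch (hypothetical "foo" helpers, not part of this
 * file): the callback receives a struct clk_notifier_data and must not call
 * back into the clk API.
 *
 *	static int foo_clk_notify(struct notifier_block *nb, unsigned long event,
 *				  void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		switch (event) {
 *		case PRE_RATE_CHANGE:
 *			return foo_rate_ok(cnd->new_rate) ? NOTIFY_OK : NOTIFY_BAD;
 *		case POST_RATE_CHANGE:
 *		case ABORT_RATE_CHANGE:
 *			foo_adjust(cnd->new_rate);
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_clk_notify };
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */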
1595 * clk_notifier_unregister - remove a clk rate change notifier
1596 * @clk: struct clk *
1597 * @nb: struct notifier_block * with callback info
1599 * Request no further notification for changes to 'clk' and free the memory
1600 * allocated in clk_notifier_register.
1602 * Returns -EINVAL if called with null arguments; otherwise, passes
1603 * along the return value of srcu_notifier_chain_unregister().
1605 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
1607 struct clk_notifier *cn = NULL;
1613 mutex_lock(&prepare_lock);
1615 list_for_each_entry(cn, &clk_notifier_list, node)
1619 if (cn->clk == clk) {
1620 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
1622 clk->notifier_count--;
1624 /* XXX the notifier code should handle this better */
1625 if (!cn->notifier_head.head) {
1626 srcu_cleanup_notifier_head(&cn->notifier_head);
1634 mutex_unlock(&prepare_lock);
1638 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
1642 * struct of_clk_provider - Clock provider registration structure
1643 * @link: Entry in global list of clock providers
1644 * @node: Pointer to device tree node of clock provider
1645 * @get: Get clock callback. Returns NULL or a struct clk for the
1646 * given clock specifier
1647 * @data: context pointer to be passed into @get callback
1649 struct of_clk_provider {
1650 struct list_head link;
1652 struct device_node *node;
1653 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
1657 static LIST_HEAD(of_clk_providers);
1658 static DEFINE_MUTEX(of_clk_lock);
1660 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
1665 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
1667 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
1669 struct clk_onecell_data *clk_data = data;
1670 unsigned int idx = clkspec->args[0];
1672 if (idx >= clk_data->clk_num) {
1673 pr_err("%s: invalid clock index %d\n", __func__, idx);
1674 return ERR_PTR(-EINVAL);
1677 return clk_data->clks[idx];
1679 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
1682 * of_clk_add_provider() - Register a clock provider for a node
1683 * @np: Device node pointer associated with clock provider
1684 * @clk_src_get: callback for decoding clock
1685 * @data: context pointer for @clk_src_get callback.
1687 int of_clk_add_provider(struct device_node *np,
1688 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
1692 struct of_clk_provider *cp;
1694 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
1698 cp->node = of_node_get(np);
1700 cp->get = clk_src_get;
1702 mutex_lock(&of_clk_lock);
1703 list_add(&cp->link, &of_clk_providers);
1704 mutex_unlock(&of_clk_lock);
1705 pr_debug("Added clock from %s\n", np->full_name);
1709 EXPORT_SYMBOL_GPL(of_clk_add_provider);
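/*
 * Illustrative DT provider sketch (hypothetical names): a provider that hands
 * out several clocks, indexed by the cell value in the consumer's "clocks"
 * phandle, can reuse of_clk_src_onecell_get.
 *
 *	static struct clk *foo_clks[FOO_NR_CLKS];
 *	static struct clk_onecell_data foo_clk_data = {
 *		.clks		= foo_clks,
 *		.clk_num	= FOO_NR_CLKS,
 *	};
 *
 *	(register the individual clocks into foo_clks[], then:)
 *	of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 */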
1712 * of_clk_del_provider() - Remove a previously registered clock provider
1713 * @np: Device node pointer associated with clock provider
1715 void of_clk_del_provider(struct device_node *np)
1717 struct of_clk_provider *cp;
1719 mutex_lock(&of_clk_lock);
1720 list_for_each_entry(cp, &of_clk_providers, link) {
1721 if (cp->node == np) {
1722 list_del(&cp->link);
1723 of_node_put(cp->node);
1728 mutex_unlock(&of_clk_lock);
1730 EXPORT_SYMBOL_GPL(of_clk_del_provider);
1732 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
1734 struct of_clk_provider *provider;
1735 struct clk *clk = ERR_PTR(-ENOENT);
1737 /* Check if we have such a provider in our array */
1738 mutex_lock(&of_clk_lock);
1739 list_for_each_entry(provider, &of_clk_providers, link) {
1740 if (provider->node == clkspec->np)
1741 clk = provider->get(clkspec, provider->data);
1745 mutex_unlock(&of_clk_lock);
1750 const char *of_clk_get_parent_name(struct device_node *np, int index)
1752 struct of_phandle_args clkspec;
1753 const char *clk_name;
1759 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
1764 if (of_property_read_string_index(clkspec.np, "clock-output-names",
1765 clkspec.args_count ? clkspec.args[0] : 0,
1767 clk_name = clkspec.np->name;
1769 of_node_put(clkspec.np);
1772 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
1775 * of_clk_init() - Scan and init clock providers from the DT
1776 * @matches: array of compatible values and init functions for providers.
1778 * This function scans the device tree for matching clock providers and
1779 * calls their initialization functions.
1781 void __init of_clk_init(const struct of_device_id *matches)
1783 struct device_node *np;
1785 for_each_matching_node(np, matches) {
1786 const struct of_device_id *match = of_match_node(matches, np);
1787 of_clk_init_cb_t clk_init_cb = match->data;