drivers/clk/mvebu/clk-cpu.c
/*
 * Marvell MVEBU CPU clock handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/mvebu-pmsu.h>
#include <asm/smp_plat.h>

#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET               0x0
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL          0xff
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT        8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET              0x8
#define   SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET              0xC
#define SYS_CTRL_CLK_DIVIDER_MASK                      0x3F

#define PMU_DFS_RATIO_SHIFT 16
#define PMU_DFS_RATIO_MASK  0x3F

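/*
 * One cpu_clk instance is created per CPU. All instances share the
 * clock-complex register window (reg_base); when the PMU DFS window
 * is mapped, each CPU additionally gets its own pmu_dfs register for
 * dynamic frequency scaling.
 */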
#define MAX_CPU     4
struct cpu_clk {
        struct clk_hw hw;
        int cpu;
        const char *clk_name;
        const char *parent_name;
        void __iomem *reg_base;
        void __iomem *pmu_dfs;
};

static struct clk **clks;

static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)

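/*
 * The divider value register holds one byte per CPU; the low six bits
 * of each byte are the divider applied to the parent clock rate.
 */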
static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
                                         unsigned long parent_rate)
{
        struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
        u32 reg, div;

        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
        div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
        return parent_rate / div;
}

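/*
 * Round by clamping the requested divider to the supported 1:1, 1:2
 * and 1:3 ratios; a rate above the parent rate degenerates to 1:1.
 */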
static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
                               unsigned long *parent_rate)
{
        /* Valid ratios are 1:1, 1:2 and 1:3 */
        u32 div;

        div = *parent_rate / rate;
        if (div == 0)
                div = 1;
        else if (div > 3)
                div = 3;

        return *parent_rate / div;
}

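/*
 * Set the rate of a CPU whose clock is currently disabled: program the
 * new divider, arm the per-CPU "reload smooth" bit, trigger the reload
 * via bit 24 of the control register, then clear both bits once the
 * clock has settled.
 */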
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
                                unsigned long parent_rate)
{
        struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
        u32 reg, div;
        u32 reload_mask;

        div = parent_rate / rate;
        reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
                & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
                | (div << (cpuclk->cpu * 8));
        writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
        /* Set clock divider reload smooth bit mask */
        reload_mask = 1 << (20 + cpuclk->cpu);

        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
            | reload_mask;
        writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

        /* Now trigger the clock update */
        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
            | 1 << 24;
        writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

        /* Wait for clocks to settle down then clear reload request */
        udelay(1000);
        reg &= ~(reload_mask | 1 << 24);
        writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
        udelay(1000);

        return 0;
}

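/*
 * Set the rate of a running CPU through the PMU dynamic frequency
 * scaling (DFS) interface: derive the target divider from the fabric
 * (NBCLK) ratio, program it into the per-CPU PMU DFS register, assert
 * the divider reset bits and hand the request off to the PMSU.
 */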
static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
                               unsigned long parent_rate)
{
        u32 reg;
        unsigned long fabric_div, target_div, cur_rate;
        struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

        /*
         * The PMU DFS registers are not mapped because the Device
         * Tree does not describe them, so we cannot change the
         * frequency dynamically.
         */
        if (!cpuclk->pmu_dfs)
                return -ENODEV;

        cur_rate = clk_hw_get_rate(hwclk);

        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
        fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
                SYS_CTRL_CLK_DIVIDER_MASK;

        /* Frequency is going up */
        if (rate == 2 * cur_rate)
                target_div = fabric_div / 2;
        /* Frequency is going down */
        else
                target_div = fabric_div;

        if (target_div == 0)
                target_div = 1;

        reg = readl(cpuclk->pmu_dfs);
        reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
        reg |= (target_div << PMU_DFS_RATIO_SHIFT);
        writel(reg, cpuclk->pmu_dfs);

        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
        reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
                SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
        writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

        return mvebu_pmsu_dfs_request(cpuclk->cpu);
}

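/*
 * Dispatch between the two set_rate paths: an enabled clock must go
 * through the PMU DFS sequence, while a disabled one can be
 * reprogrammed directly in the clock complex.
 */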
static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
                            unsigned long parent_rate)
{
        if (__clk_is_enabled(hwclk->clk))
                return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
        else
                return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
}

static const struct clk_ops cpu_ops = {
        .recalc_rate = clk_cpu_recalc_rate,
        .round_rate = clk_cpu_round_rate,
        .set_rate = clk_cpu_set_rate,
};

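/*
 * Parse the DT node: map the clock-complex (and optional PMU DFS)
 * registers, register one clock per CPU node found in the Device Tree
 * and expose them through a onecell clock provider.
 */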
static void __init of_cpu_clk_setup(struct device_node *node)
{
        struct cpu_clk *cpuclk;
        void __iomem *clock_complex_base = of_iomap(node, 0);
        void __iomem *pmu_dfs_base = of_iomap(node, 1);
        int ncpus = 0;
        struct device_node *dn;

        if (clock_complex_base == NULL) {
                pr_err("%s: clock-complex base register not set\n",
                       __func__);
                return;
        }

        if (pmu_dfs_base == NULL)
                pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
                        __func__);

        for_each_node_by_type(dn, "cpu")
                ncpus++;

        cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
        if (WARN_ON(!cpuclk))
                goto cpuclk_out;

        clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL);
        if (WARN_ON(!clks))
                goto clks_out;

        for_each_node_by_type(dn, "cpu") {
                struct clk_init_data init;
                struct clk *clk;
                struct clk *parent_clk;
                char *clk_name = kzalloc(5, GFP_KERNEL);
                u32 cpu;
                int err;

                if (WARN_ON(!clk_name))
                        goto bail_out;

                err = of_property_read_u32(dn, "reg", &cpu);
                if (WARN_ON(err))
                        goto bail_out;

                sprintf(clk_name, "cpu%u", cpu);
                parent_clk = of_clk_get(node, 0);

                cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
                cpuclk[cpu].clk_name = clk_name;
                cpuclk[cpu].cpu = cpu;
                cpuclk[cpu].reg_base = clock_complex_base;
                if (pmu_dfs_base)
                        cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
                cpuclk[cpu].hw.init = &init;

                init.name = cpuclk[cpu].clk_name;
                init.ops = &cpu_ops;
                init.flags = 0;
                init.parent_names = &cpuclk[cpu].parent_name;
                init.num_parents = 1;

                clk = clk_register(NULL, &cpuclk[cpu].hw);
                if (WARN_ON(IS_ERR(clk)))
                        goto bail_out;
                clks[cpu] = clk;
        }
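        /* Expose the per-CPU clocks, indexed by CPU id, to DT consumers. */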
        clk_data.clk_num = MAX_CPU;
        clk_data.clks = clks;
        of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);

        return;
bail_out:
        kfree(clks);
        while (ncpus--)
                kfree(cpuclk[ncpus].clk_name);
clks_out:
        kfree(cpuclk);
cpuclk_out:
        if (pmu_dfs_base)
                iounmap(pmu_dfs_base);
        iounmap(clock_complex_base);
}

CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
               of_cpu_clk_setup);