/*
 * Copyright 2012-2015 Freescale Semiconductor, Inc.
 * Copyright 2012 Linaro Ltd.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
13 #include <linux/clk.h>
14 #include <linux/clk-provider.h>
15 #include <linux/delay.h>
16 #include <linux/imx_sema4.h>
18 #include <linux/slab.h>
19 #include <linux/jiffies.h>
20 #include <linux/err.h>
24 #define PLL_NUM_OFFSET 0x10
25 #define PLL_DENOM_OFFSET 0x20
27 #define BM_PLL_POWER (0x1 << 12)
28 #define BM_PLL_LOCK (0x1 << 31)
/**
 * struct clk_pllv3 - IMX PLL clock version 3
 * @hw:		clock source
 * @base:	base address of PLL registers
 * @powerup_set: set POWER bit to power up the PLL
 * @div_mask:	mask of divider bits
 *
 * IMX PLL clock version 3, found on i.MX6 series. Divider for pllv3
 * is actually a multiplier, and always sits at bit 0.
 */
47 #define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw)
49 static int clk_pllv3_wait_lock(struct clk_pllv3 *pll)
51 unsigned long timeout = jiffies + msecs_to_jiffies(10);
52 u32 val = readl_relaxed(pll->base) & BM_PLL_POWER;
54 /* No need to wait for lock when pll is not powered up */
55 if ((pll->powerup_set && !val) || (!pll->powerup_set && val))
58 /* Wait for PLL to lock */
60 if (readl_relaxed(pll->base) & BM_PLL_LOCK)
62 if (time_after(jiffies, timeout))
66 return readl_relaxed(pll->base) & BM_PLL_LOCK ? 0 : -ETIMEDOUT;
69 static int clk_pllv3_do_hardware(struct clk_hw *hw, bool enable)
71 struct clk_pllv3 *pll = to_clk_pllv3(hw);
75 val = readl_relaxed(pll->base);
81 writel_relaxed(val, pll->base);
83 ret = clk_pllv3_wait_lock(pll);
91 writel_relaxed(val, pll->base);
97 static void clk_pllv3_do_shared_clks(struct clk_hw *hw, bool enable)
99 if (imx_src_is_m4_enabled()) {
100 if (!amp_power_mutex || !shared_mem) {
102 clk_pllv3_do_hardware(hw, enable);
106 imx_sema4_mutex_lock(amp_power_mutex);
107 if (shared_mem->ca9_valid != SHARED_MEM_MAGIC_NUMBER ||
108 shared_mem->cm4_valid != SHARED_MEM_MAGIC_NUMBER) {
109 imx_sema4_mutex_unlock(amp_power_mutex);
113 if (!imx_update_shared_mem(hw, enable)) {
114 imx_sema4_mutex_unlock(amp_power_mutex);
117 clk_pllv3_do_hardware(hw, enable);
119 imx_sema4_mutex_unlock(amp_power_mutex);
121 clk_pllv3_do_hardware(hw, enable);
125 static int clk_pllv3_prepare(struct clk_hw *hw)
127 clk_pllv3_do_shared_clks(hw, true);
132 static void clk_pllv3_unprepare(struct clk_hw *hw)
134 clk_pllv3_do_shared_clks(hw, false);
137 static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
138 unsigned long parent_rate)
140 struct clk_pllv3 *pll = to_clk_pllv3(hw);
141 u32 div = readl_relaxed(pll->base) & pll->div_mask;
143 return (div == 1) ? parent_rate * 22 : parent_rate * 20;
/*
 * Only two rates are achievable (x20 and x22 of the parent); round to
 * whichever the request reaches.
 */
static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long *prate)
{
	unsigned long parent_rate = *prate;

	return (rate >= parent_rate * 22) ? parent_rate * 22 :
					    parent_rate * 20;
}
155 static int clk_pllv3_set_rate(struct clk_hw *hw, unsigned long rate,
156 unsigned long parent_rate)
158 struct clk_pllv3 *pll = to_clk_pllv3(hw);
161 if (rate == parent_rate * 22)
163 else if (rate == parent_rate * 20)
168 val = readl_relaxed(pll->base);
169 val &= ~pll->div_mask;
171 writel_relaxed(val, pll->base);
173 return clk_pllv3_wait_lock(pll);
176 static const struct clk_ops clk_pllv3_ops = {
177 .prepare = clk_pllv3_prepare,
178 .unprepare = clk_pllv3_unprepare,
179 .recalc_rate = clk_pllv3_recalc_rate,
180 .round_rate = clk_pllv3_round_rate,
181 .set_rate = clk_pllv3_set_rate,
184 static unsigned long clk_pllv3_sys_recalc_rate(struct clk_hw *hw,
185 unsigned long parent_rate)
187 struct clk_pllv3 *pll = to_clk_pllv3(hw);
188 u32 div = readl_relaxed(pll->base) & pll->div_mask;
190 return parent_rate * div / 2;
193 static long clk_pllv3_sys_round_rate(struct clk_hw *hw, unsigned long rate,
194 unsigned long *prate)
196 unsigned long parent_rate = *prate;
197 unsigned long min_rate = parent_rate * 54 / 2;
198 unsigned long max_rate = parent_rate * 108 / 2;
203 else if (rate < min_rate)
205 div = rate * 2 / parent_rate;
207 return parent_rate * div / 2;
210 static int clk_pllv3_sys_set_rate(struct clk_hw *hw, unsigned long rate,
211 unsigned long parent_rate)
213 struct clk_pllv3 *pll = to_clk_pllv3(hw);
214 unsigned long min_rate = parent_rate * 54 / 2;
215 unsigned long max_rate = parent_rate * 108 / 2;
218 if (rate < min_rate || rate > max_rate)
221 div = rate * 2 / parent_rate;
222 val = readl_relaxed(pll->base);
223 val &= ~pll->div_mask;
225 writel_relaxed(val, pll->base);
227 return clk_pllv3_wait_lock(pll);
230 static const struct clk_ops clk_pllv3_sys_ops = {
231 .prepare = clk_pllv3_prepare,
232 .unprepare = clk_pllv3_unprepare,
233 .recalc_rate = clk_pllv3_sys_recalc_rate,
234 .round_rate = clk_pllv3_sys_round_rate,
235 .set_rate = clk_pllv3_sys_set_rate,
238 static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
239 unsigned long parent_rate)
241 struct clk_pllv3 *pll = to_clk_pllv3(hw);
242 u32 mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
243 u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
244 u32 div = readl_relaxed(pll->base) & pll->div_mask;
246 return (parent_rate * div) + ((parent_rate / mfd) * mfn);
249 static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
250 unsigned long *prate)
252 unsigned long parent_rate = *prate;
253 unsigned long min_rate = parent_rate * 27;
254 unsigned long max_rate = parent_rate * 54;
256 u32 mfn, mfd = 1000000;
261 else if (rate < min_rate)
264 div = rate / parent_rate;
265 temp64 = (u64) (rate - div * parent_rate);
267 do_div(temp64, parent_rate);
270 return parent_rate * div + parent_rate / mfd * mfn;
273 static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
274 unsigned long parent_rate)
276 struct clk_pllv3 *pll = to_clk_pllv3(hw);
277 unsigned long min_rate = parent_rate * 27;
278 unsigned long max_rate = parent_rate * 54;
280 u32 mfn, mfd = 1000000;
283 if (rate < min_rate || rate > max_rate)
286 div = rate / parent_rate;
287 temp64 = (u64) (rate - div * parent_rate);
289 do_div(temp64, parent_rate);
292 val = readl_relaxed(pll->base);
293 val &= ~pll->div_mask;
295 writel_relaxed(val, pll->base);
296 writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
297 writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
299 return clk_pllv3_wait_lock(pll);
302 static const struct clk_ops clk_pllv3_av_ops = {
303 .prepare = clk_pllv3_prepare,
304 .unprepare = clk_pllv3_unprepare,
305 .recalc_rate = clk_pllv3_av_recalc_rate,
306 .round_rate = clk_pllv3_av_round_rate,
307 .set_rate = clk_pllv3_av_set_rate,
/*
 * The ENET PLL always generates a fixed 500 MHz regardless of the
 * parent rate; downstream dividers derive the 25/50/100/125 MHz
 * reference clocks.
 *
 * NOTE(review): the body of this function was lost in extraction; the
 * fixed 500000000 return matches the upstream driver — confirm against
 * the tree this file came from.
 */
static unsigned long clk_pllv3_enet_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	return 500000000;
}
316 static const struct clk_ops clk_pllv3_enet_ops = {
317 .prepare = clk_pllv3_prepare,
318 .unprepare = clk_pllv3_unprepare,
319 .recalc_rate = clk_pllv3_enet_recalc_rate,
322 struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
323 const char *parent_name, void __iomem *base,
326 struct clk_pllv3 *pll;
327 const struct clk_ops *ops;
329 struct clk_init_data init;
331 pll = kzalloc(sizeof(*pll), GFP_KERNEL);
333 return ERR_PTR(-ENOMEM);
337 ops = &clk_pllv3_sys_ops;
340 ops = &clk_pllv3_ops;
341 pll->powerup_set = true;
344 ops = &clk_pllv3_av_ops;
347 ops = &clk_pllv3_enet_ops;
350 ops = &clk_pllv3_ops;
353 pll->div_mask = div_mask;
357 init.flags = CLK_SET_RATE_GATE | CLK_GET_RATE_NOCACHE;
358 init.parent_names = &parent_name;
359 init.num_parents = 1;
361 pll->hw.init = &init;
363 clk = clk_register(NULL, &pll->hw);