/*
 * Power Management Service Unit (PMSU) support for Armada 370/XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The Armada 370 and Armada XP SOCs have a power management service
 * unit which is responsible for powering down and waking up CPUs and
 * other SOC units.
 */
19 #define pr_fmt(fmt) "mvebu-pmsu: " fmt
21 #include <linux/clk.h>
22 #include <linux/cpu_pm.h>
23 #include <linux/delay.h>
24 #include <linux/kernel.h>
25 #include <linux/init.h>
26 #include <linux/of_address.h>
27 #include <linux/of_device.h>
29 #include <linux/platform_device.h>
30 #include <linux/pm_opp.h>
31 #include <linux/smp.h>
32 #include <linux/resource.h>
33 #include <linux/slab.h>
34 #include <asm/cacheflush.h>
36 #include <asm/smp_plat.h>
37 #include <asm/suspend.h>
38 #include <asm/tlbflush.h>
40 #include "armada-370-xp.h"
42 static void __iomem *pmsu_mp_base;
44 #define PMSU_BASE_OFFSET 0x100
45 #define PMSU_REG_SIZE 0x1000
47 /* PMSU MP registers */
48 #define PMSU_CONTROL_AND_CONFIG(cpu) ((cpu * 0x100) + 0x104)
49 #define PMSU_CONTROL_AND_CONFIG_DFS_REQ BIT(18)
50 #define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ BIT(16)
51 #define PMSU_CONTROL_AND_CONFIG_L2_PWDDN BIT(20)
53 #define PMSU_CPU_POWER_DOWN_CONTROL(cpu) ((cpu * 0x100) + 0x108)
55 #define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP BIT(0)
57 #define PMSU_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x10c)
58 #define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT BIT(16)
59 #define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT BIT(17)
60 #define PMSU_STATUS_AND_MASK_IRQ_WAKEUP BIT(20)
61 #define PMSU_STATUS_AND_MASK_FIQ_WAKEUP BIT(21)
62 #define PMSU_STATUS_AND_MASK_DBG_WAKEUP BIT(22)
63 #define PMSU_STATUS_AND_MASK_IRQ_MASK BIT(24)
64 #define PMSU_STATUS_AND_MASK_FIQ_MASK BIT(25)
66 #define PMSU_EVENT_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x120)
67 #define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE BIT(1)
68 #define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK BIT(17)
70 #define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124)
72 /* PMSU fabric registers */
73 #define L2C_NFABRIC_PM_CTL 0x4
74 #define L2C_NFABRIC_PM_CTL_PWR_DOWN BIT(20)
76 extern void ll_disable_coherency(void);
77 extern void ll_enable_coherency(void);
79 static struct platform_device armada_xp_cpuidle_device = {
80 .name = "cpuidle-armada-370-xp",
83 static struct of_device_id of_pmsu_table[] = {
84 { .compatible = "marvell,armada-370-pmsu", },
85 { .compatible = "marvell,armada-370-xp-pmsu", },
86 { .compatible = "marvell,armada-380-pmsu", },
87 { /* end of list */ },
90 void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
92 writel(virt_to_phys(boot_addr), pmsu_mp_base +
93 PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
96 static int __init armada_370_xp_pmsu_init(void)
98 struct device_node *np;
102 np = of_find_matching_node(NULL, of_pmsu_table);
106 pr_info("Initializing Power Management Service Unit\n");
108 if (of_address_to_resource(np, 0, &res)) {
109 pr_err("unable to get resource\n");
114 if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) {
115 pr_warn(FW_WARN "deprecated pmsu binding\n");
116 res.start = res.start - PMSU_BASE_OFFSET;
117 res.end = res.start + PMSU_REG_SIZE - 1;
120 if (!request_mem_region(res.start, resource_size(&res),
122 pr_err("unable to request region\n");
127 pmsu_mp_base = ioremap(res.start, resource_size(&res));
129 pr_err("unable to map registers\n");
130 release_mem_region(res.start, resource_size(&res));
140 static void armada_370_xp_pmsu_enable_l2_powerdown_onidle(void)
144 if (pmsu_mp_base == NULL)
147 /* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */
148 reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL);
149 reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN;
150 writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
153 static void armada_370_xp_cpu_resume(void)
155 asm volatile("bl ll_add_cpu_to_smp_group\n\t"
156 "bl ll_enable_coherency\n\t"
160 /* No locking is needed because we only access per-CPU registers */
161 int armada_370_xp_pmsu_idle_enter(unsigned long deepidle)
163 unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
166 if (pmsu_mp_base == NULL)
170 * Adjust the PMSU configuration to wait for WFI signal, enable
171 * IRQ and FIQ as wakeup events, set wait for snoop queue empty
172 * indication and mask IRQ and FIQ from CPU
174 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
175 reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
176 PMSU_STATUS_AND_MASK_IRQ_WAKEUP |
177 PMSU_STATUS_AND_MASK_FIQ_WAKEUP |
178 PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
179 PMSU_STATUS_AND_MASK_IRQ_MASK |
180 PMSU_STATUS_AND_MASK_FIQ_MASK;
181 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
183 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
184 /* ask HW to power down the L2 Cache if needed */
186 reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
188 /* request power down */
189 reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
190 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
192 /* Disable snoop disable by HW - SW is taking care of it */
193 reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
194 reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP;
195 writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
197 v7_exit_coherency_flush(all);
199 ll_disable_coherency();
205 /* If we are here, wfi failed. As processors run out of
206 * coherency for some time, tlbs might be stale, so flush them
208 local_flush_tlb_all();
210 ll_enable_coherency();
212 /* Test the CR_C bit and set it if it was cleared */
214 "mrc p15, 0, %0, c1, c0, 0 \n\t"
215 "tst %0, #(1 << 2) \n\t"
216 "orreq %0, %0, #(1 << 2) \n\t"
217 "mcreq p15, 0, %0, c1, c0, 0 \n\t"
221 pr_warn("Failed to suspend the system\n");
226 static int armada_370_xp_cpu_suspend(unsigned long deepidle)
228 return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter);
231 /* No locking is needed because we only access per-CPU registers */
232 void armada_370_xp_pmsu_idle_exit(void)
234 unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
237 if (pmsu_mp_base == NULL)
240 /* cancel ask HW to power down the L2 Cache if possible */
241 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
242 reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
243 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
245 /* cancel Enable wakeup events and mask interrupts */
246 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
247 reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP);
248 reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
249 reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
250 reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK);
251 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
254 static int armada_370_xp_cpu_pm_notify(struct notifier_block *self,
255 unsigned long action, void *hcpu)
257 if (action == CPU_PM_ENTER) {
258 unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
259 mvebu_pmsu_set_cpu_boot_addr(hw_cpu, armada_370_xp_cpu_resume);
260 } else if (action == CPU_PM_EXIT) {
261 armada_370_xp_pmsu_idle_exit();
267 static struct notifier_block armada_370_xp_cpu_pm_notifier = {
268 .notifier_call = armada_370_xp_cpu_pm_notify,
271 static int __init armada_370_xp_cpu_pm_init(void)
273 struct device_node *np;
276 * Check that all the requirements are available to enable
277 * cpuidle. So far, it is only supported on Armada XP, cpuidle
278 * needs the coherency fabric and the PMSU enabled
281 if (!of_machine_is_compatible("marvell,armadaxp"))
284 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
289 np = of_find_matching_node(NULL, of_pmsu_table);
294 armada_370_xp_pmsu_enable_l2_powerdown_onidle();
295 armada_xp_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
296 platform_device_register(&armada_xp_cpuidle_device);
297 cpu_pm_register_notifier(&armada_370_xp_cpu_pm_notifier);
302 arch_initcall(armada_370_xp_cpu_pm_init);
303 early_initcall(armada_370_xp_pmsu_init);
305 static void mvebu_pmsu_dfs_request_local(void *data)
308 u32 cpu = smp_processor_id();
311 local_irq_save(flags);
313 /* Prepare to enter idle */
314 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
315 reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
316 PMSU_STATUS_AND_MASK_IRQ_MASK |
317 PMSU_STATUS_AND_MASK_FIQ_MASK;
318 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
320 /* Request the DFS transition */
321 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
322 reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ;
323 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
325 /* The fact of entering idle will trigger the DFS transition */
329 * We're back from idle, the DFS transition has completed,
330 * clear the idle wait indication.
332 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
333 reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
334 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
336 local_irq_restore(flags);
339 int mvebu_pmsu_dfs_request(int cpu)
341 unsigned long timeout;
342 int hwcpu = cpu_logical_map(cpu);
345 /* Clear any previous DFS DONE event */
346 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
347 reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE;
348 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
350 /* Mask the DFS done interrupt, since we are going to poll */
351 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
352 reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
353 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
355 /* Trigger the DFS on the appropriate CPU */
356 smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local,
359 /* Poll until the DFS done event is generated */
360 timeout = jiffies + HZ;
361 while (time_before(jiffies, timeout)) {
362 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
363 if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE)
368 if (time_after(jiffies, timeout))
371 /* Restore the DFS mask to its original state */
372 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
373 reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
374 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
379 static int __init armada_xp_pmsu_cpufreq_init(void)
381 struct device_node *np;
385 if (!of_machine_is_compatible("marvell,armadaxp"))
389 * In order to have proper cpufreq handling, we need to ensure
390 * that the Device Tree description of the CPU clock includes
391 * the definition of the PMU DFS registers. If not, we do not
392 * register the clock notifier and the cpufreq driver. This
393 * piece of code is only for compatibility with old Device
396 np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
400 ret = of_address_to_resource(np, 1, &res);
402 pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
410 * For each CPU, this loop registers the operating points
411 * supported (which are the nominal CPU frequency and half of
412 * it), and registers the clock notifier that will take care
413 * of doing the PMSU part of a frequency transition.
415 for_each_possible_cpu(cpu) {
416 struct device *cpu_dev;
420 cpu_dev = get_cpu_device(cpu);
422 pr_err("Cannot get CPU %d\n", cpu);
426 clk = clk_get(cpu_dev, 0);
428 pr_err("Cannot get clock for CPU %d\n", cpu);
433 * In case of a failure of dev_pm_opp_add(), we don't
434 * bother with cleaning up the registered OPP (there's
435 * no function to do so), and simply cancel the
436 * registration of the cpufreq device.
438 ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
444 ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
451 platform_device_register_simple("cpufreq-generic", -1, NULL, 0);
455 device_initcall(armada_xp_pmsu_cpufreq_init);