/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * EXYNOS4 MCT(Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>

#include <asm/arch_timer.h>
#include <asm/localtimer.h>

#include <plat/cpu.h>

#include <mach/map.h>
#include <mach/irqs.h>
#include <asm/mach/time.h>

#define EXYNOS4_MCTREG(x)               (x)
#define EXYNOS4_MCT_G_CNT_L             EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U             EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT         EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L           EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U           EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR    EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON              EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT         EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB           EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT             EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE             EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x)           (_EXYNOS4_MCT_L_BASE + (0x100 * (x)))
#define EXYNOS4_MCT_L_MASK              (0xffffff00)

#define MCT_L_TCNTB_OFFSET              (0x00)
#define MCT_L_ICNTB_OFFSET              (0x08)
#define MCT_L_TCON_OFFSET               (0x20)
#define MCT_L_INT_CSTAT_OFFSET          (0x30)
#define MCT_L_INT_ENB_OFFSET            (0x34)
#define MCT_L_WSTAT_OFFSET              (0x40)
#define MCT_G_TCON_START                (1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC       (1 << 1)
#define MCT_G_TCON_COMP0_ENABLE         (1 << 0)
#define MCT_L_TCON_INTERVAL_MODE        (1 << 2)
#define MCT_L_TCON_INT_START            (1 << 1)
#define MCT_L_TCON_TIMER_START          (1 << 0)

#define TICK_BASE_CNT   1

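/*
 * Local timer interrupts reach the CPUs either as one SPI per timer
 * (exynos4210) or as a single per-CPU PPI (exynos4412); mct_int_type
 * records which wiring is in use.
 */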
enum {
        MCT_INT_SPI,
        MCT_INT_PPI
};

enum {
        MCT_G0_IRQ,
        MCT_G1_IRQ,
        MCT_G2_IRQ,
        MCT_G3_IRQ,
        MCT_L0_IRQ,
        MCT_L1_IRQ,
        MCT_L2_IRQ,
        MCT_L3_IRQ,
        MCT_NR_IRQS,
};

static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];

struct mct_clock_event_device {
        struct clock_event_device *evt;
        unsigned long base;
        char name[10];
};

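/*
 * Write an MCT register and wait for the hardware to acknowledge it.
 * MCT register writes are not applied immediately; the corresponding
 * bit in the relevant WSTAT register is set once the value has been
 * latched, so poll it (for at most ~1 ms) and clear it before returning.
 */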
static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
        unsigned long stat_addr;
        u32 mask;
        u32 i;

        __raw_writel(value, reg_base + offset);

        if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
                stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
                switch (offset & ~EXYNOS4_MCT_L_MASK) {
                case MCT_L_TCON_OFFSET:
                        mask = 1 << 3;          /* L_TCON write status */
                        break;
                case MCT_L_ICNTB_OFFSET:
                        mask = 1 << 1;          /* L_ICNTB write status */
                        break;
                case MCT_L_TCNTB_OFFSET:
                        mask = 1 << 0;          /* L_TCNTB write status */
                        break;
                default:
                        return;
                }
        } else {
                switch (offset) {
                case EXYNOS4_MCT_G_TCON:
                        stat_addr = EXYNOS4_MCT_G_WSTAT;
                        mask = 1 << 16;         /* G_TCON write status */
                        break;
                case EXYNOS4_MCT_G_COMP0_L:
                        stat_addr = EXYNOS4_MCT_G_WSTAT;
                        mask = 1 << 0;          /* G_COMP0_L write status */
                        break;
                case EXYNOS4_MCT_G_COMP0_U:
                        stat_addr = EXYNOS4_MCT_G_WSTAT;
                        mask = 1 << 1;          /* G_COMP0_U write status */
                        break;
                case EXYNOS4_MCT_G_COMP0_ADD_INCR:
                        stat_addr = EXYNOS4_MCT_G_WSTAT;
                        mask = 1 << 2;          /* G_COMP0_ADD_INCR w status */
                        break;
                case EXYNOS4_MCT_G_CNT_L:
                        stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
                        mask = 1 << 0;          /* G_CNT_L write status */
                        break;
                case EXYNOS4_MCT_G_CNT_U:
                        stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
                        mask = 1 << 1;          /* G_CNT_U write status */
                        break;
                default:
                        return;
                }
        }

        /* Wait maximum 1 ms until written values are applied */
        for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
                if (__raw_readl(reg_base + stat_addr) & mask) {
                        __raw_writel(mask, reg_base + stat_addr);
                        return;
                }

        panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}

/* Clocksource handling */
static void exynos4_mct_frc_start(u32 hi, u32 lo)
{
        u32 reg;

        exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
        exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);

        reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
        reg |= MCT_G_TCON_START;
        exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}

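/*
 * Read the 64-bit free-running counter as two 32-bit halves. The upper
 * word is sampled before and after the lower word and the read retried
 * until both samples agree, so a carry between the halves cannot
 * produce a torn value.
 */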
static cycle_t exynos4_frc_read(struct clocksource *cs)
{
        unsigned int lo, hi;
        u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);

        do {
                hi = hi2;
                lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L);
                hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
        } while (hi != hi2);

        return ((cycle_t)hi << 32) | lo;
}

static void exynos4_frc_resume(struct clocksource *cs)
{
        exynos4_mct_frc_start(0, 0);
}

struct clocksource mct_frc = {
        .name           = "mct-frc",
        .rating         = 400,
        .read           = exynos4_frc_read,
        .mask           = CLOCKSOURCE_MASK(64),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
        .resume         = exynos4_frc_resume,
};

static void __init exynos4_clocksource_init(void)
{
        exynos4_mct_frc_start(0, 0);

        if (clocksource_register_hz(&mct_frc, clk_rate))
                panic("%s: can't register clocksource\n", mct_frc.name);
}

static void exynos4_mct_comp0_stop(void)
{
        unsigned int tcon;

        tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
        tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

        exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
        exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}

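/*
 * Program global comparator 0 to fire 'cycles' ticks from the current
 * FRC value. In periodic mode the comparator's auto-increment register
 * is also loaded so the hardware re-arms itself after each event.
 */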
static void exynos4_mct_comp0_start(enum clock_event_mode mode,
                                    unsigned long cycles)
{
        unsigned int tcon;
        cycle_t comp_cycle;

        tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);

        if (mode == CLOCK_EVT_MODE_PERIODIC) {
                tcon |= MCT_G_TCON_COMP0_AUTO_INC;
                exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
        }

        comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
        exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
        exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

        exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

        tcon |= MCT_G_TCON_COMP0_ENABLE;
        exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}

static int exynos4_comp_set_next_event(unsigned long cycles,
                                       struct clock_event_device *evt)
{
        exynos4_mct_comp0_start(evt->mode, cycles);

        return 0;
}

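/*
 * For periodic mode, convert one jiffy (NSEC_PER_SEC / HZ nanoseconds)
 * into timer cycles using the clockevent's mult/shift factors before
 * re-arming the comparator; all other modes just leave it stopped.
 */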
static void exynos4_comp_set_mode(enum clock_event_mode mode,
                                  struct clock_event_device *evt)
{
        unsigned long cycles_per_jiffy;

        exynos4_mct_comp0_stop();

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                cycles_per_jiffy =
                        (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
                exynos4_mct_comp0_start(mode, cycles_per_jiffy);
                break;

        case CLOCK_EVT_MODE_ONESHOT:
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_RESUME:
                break;
        }
}

static struct clock_event_device mct_comp_device = {
        .name           = "mct-comp",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .rating         = 250,
        .set_next_event = exynos4_comp_set_next_event,
        .set_mode       = exynos4_comp_set_mode,
};

static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;

        exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

        evt->event_handler(evt);

        return IRQ_HANDLED;
}

static struct irqaction mct_comp_event_irq = {
        .name           = "mct_comp_irq",
        .flags          = IRQF_TIMER | IRQF_IRQPOLL,
        .handler        = exynos4_mct_comp_isr,
        .dev_id         = &mct_comp_device,
};

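/*
 * The global comparator backs a single clockevent device pinned to CPU0
 * and driven by the G0 interrupt. The per-CPU tick devices registered
 * under CONFIG_LOCAL_TIMERS carry a higher rating (450 vs. 250) and are
 * preferred where available.
 */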
static void exynos4_clockevent_init(void)
{
        mct_comp_device.cpumask = cpumask_of(0);
        clockevents_config_and_register(&mct_comp_device, clk_rate,
                                        0xf, 0xffffffff);
        setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
}

#ifdef CONFIG_LOCAL_TIMERS

static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
        unsigned long tmp;
        unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
        unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;

        tmp = __raw_readl(reg_base + offset);
        if (tmp & mask) {
                tmp &= ~mask;
                exynos4_mct_write(tmp, offset);
        }
}

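/*
 * (Re)start a per-CPU local timer in interval mode: load the interrupt
 * count buffer (bit 31 requests the ICNTB update), enable the local tick
 * interrupt and then start the timer via its TCON register.
 */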
static void exynos4_mct_tick_start(unsigned long cycles,
                                   struct mct_clock_event_device *mevt)
{
        unsigned long tmp;

        exynos4_mct_tick_stop(mevt);

        tmp = (1 << 31) | cycles;       /* MCT_L_UPDATE_ICNTB */

        /* update interrupt count buffer */
        exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

        /* enable MCT tick interrupt */
        exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

        tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET);
        tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
               MCT_L_TCON_INTERVAL_MODE;
        exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static int exynos4_tick_set_next_event(unsigned long cycles,
                                       struct clock_event_device *evt)
{
        struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);

        exynos4_mct_tick_start(cycles, mevt);

        return 0;
}

static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
                                         struct clock_event_device *evt)
{
        struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
        unsigned long cycles_per_jiffy;

        exynos4_mct_tick_stop(mevt);

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                cycles_per_jiffy =
                        (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
                exynos4_mct_tick_start(cycles_per_jiffy, mevt);
                break;

        case CLOCK_EVT_MODE_ONESHOT:
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_RESUME:
                break;
        }
}

static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
        struct clock_event_device *evt = mevt->evt;

        /*
         * This is for supporting oneshot mode. Without explicitly
         * stopping it here, the MCT would keep generating interrupts
         * periodically.
         */
        if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
                exynos4_mct_tick_stop(mevt);

        /* Clear the MCT tick interrupt */
        if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
                exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
                return 1;
        } else {
                return 0;
        }
}

static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
        struct mct_clock_event_device *mevt = dev_id;
        struct clock_event_device *evt = mevt->evt;

        exynos4_mct_tick_clear(mevt);

        evt->event_handler(evt);

        return IRQ_HANDLED;
}

static struct irqaction mct_tick0_event_irq = {
        .name           = "mct_tick0_irq",
        .flags          = IRQF_TIMER | IRQF_NOBALANCING,
        .handler        = exynos4_mct_tick_isr,
};

static struct irqaction mct_tick1_event_irq = {
        .name           = "mct_tick1_irq",
        .flags          = IRQF_TIMER | IRQF_NOBALANCING,
        .handler        = exynos4_mct_tick_isr,
};

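/*
 * Set up the calling CPU's local timer: each CPU owns the MCT_L block at
 * EXYNOS4_MCT_L_BASE(cpu), ticking at clk_rate / (TICK_BASE_CNT + 1).
 * With SPI wiring each tick gets its own interrupt pinned to its CPU;
 * with PPI wiring the shared per-CPU interrupt is simply enabled here.
 */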
static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
{
        struct mct_clock_event_device *mevt;
        unsigned int cpu = smp_processor_id();

        mevt = this_cpu_ptr(&percpu_mct_tick);
        mevt->evt = evt;

        mevt->base = EXYNOS4_MCT_L_BASE(cpu);
        sprintf(mevt->name, "mct_tick%d", cpu);

        evt->name = mevt->name;
        evt->cpumask = cpumask_of(cpu);
        evt->set_next_event = exynos4_tick_set_next_event;
        evt->set_mode = exynos4_tick_set_mode;
        evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
        evt->rating = 450;
        clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
                                        0xf, 0x7fffffff);

        exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

        if (mct_int_type == MCT_INT_SPI) {
                if (cpu == 0) {
                        mct_tick0_event_irq.dev_id = mevt;
                        evt->irq = mct_irqs[MCT_L0_IRQ];
                        setup_irq(evt->irq, &mct_tick0_event_irq);
                } else {
                        mct_tick1_event_irq.dev_id = mevt;
                        evt->irq = mct_irqs[MCT_L1_IRQ];
                        setup_irq(evt->irq, &mct_tick1_event_irq);
                        irq_set_affinity(evt->irq, cpumask_of(1));
                }
        } else {
                enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
        }

        return 0;
}

static void exynos4_local_timer_stop(struct clock_event_device *evt)
{
        unsigned int cpu = smp_processor_id();

        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
        if (mct_int_type == MCT_INT_SPI) {
                if (cpu == 0)
                        remove_irq(evt->irq, &mct_tick0_event_irq);
                else
                        remove_irq(evt->irq, &mct_tick1_event_irq);
        } else {
                disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
        }
}

static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = {
        .setup  = exynos4_local_timer_setup,
        .stop   = exynos4_local_timer_stop,
};
#endif /* CONFIG_LOCAL_TIMERS */

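/*
 * Common resource setup for both the DT and non-DT paths: the timer rate
 * comes from the "fin_pll" clock, the "mct" gate clock is enabled, and
 * for PPI wiring the shared per-CPU interrupt is requested once here.
 */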
static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
        struct clk *mct_clk, *tick_clk;

        tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
                                clk_get(NULL, "fin_pll");
        if (IS_ERR(tick_clk))
                panic("%s: unable to determine tick clock rate\n", __func__);
        clk_rate = clk_get_rate(tick_clk);

        mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct");
        if (IS_ERR(mct_clk))
                panic("%s: unable to retrieve mct clock instance\n", __func__);
        clk_prepare_enable(mct_clk);

        reg_base = base;
        if (!reg_base)
                panic("%s: unable to ioremap mct address space\n", __func__);

#ifdef CONFIG_LOCAL_TIMERS
        if (mct_int_type == MCT_INT_PPI) {
                int err;

                err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
                                         exynos4_mct_tick_isr, "MCT",
                                         &percpu_mct_tick);
                WARN(err, "MCT: can't request IRQ %d (%d)\n",
                     mct_irqs[MCT_L0_IRQ], err);
        }

        local_timer_register(&exynos4_mct_tick_ops);
#endif /* CONFIG_LOCAL_TIMERS */
}

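/*
 * Legacy (non-DT) entry point: only exynos4210 is handled here, using the
 * statically defined SPI numbers and the SYSTIMER mapping from mach/map.h.
 */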
void __init mct_init(void)
{
        if (soc_is_exynos4210()) {
                mct_irqs[MCT_G0_IRQ] = EXYNOS4_IRQ_MCT_G0;
                mct_irqs[MCT_L0_IRQ] = EXYNOS4_IRQ_MCT_L0;
                mct_irqs[MCT_L1_IRQ] = EXYNOS4_IRQ_MCT_L1;
                mct_int_type = MCT_INT_SPI;
        } else {
                panic("unable to determine mct controller type\n");
        }

        exynos4_timer_resources(NULL, S5P_VA_SYSTIMER);
        exynos4_clocksource_init();
        exynos4_clockevent_init();
}

static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
        u32 nr_irqs, i;

        mct_int_type = int_type;

        /* This driver uses only one global timer interrupt */
        mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

        /*
         * Find out the number of local irqs specified. The local
         * timer irqs are specified after the four global timer
         * irqs are specified.
         */
        nr_irqs = of_irq_count(np);
        for (i = MCT_L0_IRQ; i < nr_irqs; i++)
                mct_irqs[i] = irq_of_parse_and_map(np, i);

        exynos4_timer_resources(np, of_iomap(np, 0));
        exynos4_clocksource_init();
        exynos4_clockevent_init();
}

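/*
 * DT entry points: exynos4210-class SoCs route the local timer interrupts
 * as individual SPIs, while exynos4412-class SoCs deliver them as a single
 * per-CPU PPI.
 */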
static void __init mct_init_spi(struct device_node *np)
{
        return mct_init_dt(np, MCT_INT_SPI);
}

static void __init mct_init_ppi(struct device_node *np)
{
        return mct_init_dt(np, MCT_INT_PPI);
}
CLOCKSOURCE_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
CLOCKSOURCE_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);