/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004, 05, 06 by Ralf Baechle
 * Copyright (C) 2005 by MIPS Technologies, Inc.
 */
9 #include <linux/cpumask.h>
10 #include <linux/oprofile.h>
11 #include <linux/interrupt.h>
12 #include <linux/smp.h>
13 #include <asm/irq_regs.h>
/*
 * Bit layout of the CP0 performance counter control registers (PerfCtl).
 * The low bits select which privilege modes are counted; EVENT selects the
 * event, and INTERRUPT_ENABLE arms the overflow interrupt.  The VPEID/TCID/
 * MT_EN fields are only meaningful on MT (34K-style) cores.
 */
#define M_PERFCTL_EXL			(1UL      <<  0)
#define M_PERFCTL_KERNEL		(1UL      <<  1)
#define M_PERFCTL_SUPERVISOR		(1UL      <<  2)
#define M_PERFCTL_USER			(1UL      <<  3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1UL      <<  4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff)  << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe)    << 16)
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#define	   M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define	   M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define	   M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid)   << 22)
#define M_PERFCTL_WIDE			(1UL      << 30)
#define M_PERFCTL_MORE			(1UL      << 31)

/* Counter registers are 32 bit; bit 31 set means the counter overflowed. */
#define M_COUNTER_OVERFLOW		(1UL      << 31)
#ifdef CONFIG_MIPS_MT_SMP
/* Non-zero when each TC has its own counters (Config7 bit, see mipsxx_init). */
static int cpu_has_mipsmt_pertccounters;

/* Control-word bits restricting counting to the current VPE. */
#define WHAT		(M_TC_EN_VPE | \
			 M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
/* Which per-VPE counter bank to address; always bank 0 with per-TC counters. */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_data[smp_processor_id()].vpe_id)

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE.  There is no reasonable interface atm to obtain the
 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyways so we hardcore a few things here for the moment.  The way it's
 * done here will ensure that oprofile VSMP kernel will run right on a lesser
 * core like a 24K also or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

#else

/* Single-VPE configuration: no MT filtering, only one counter bank. */
#define WHAT		0
#define vpe_id()	0

static inline unsigned int vpe_shift(void)
{
	return 0;
}

#endif
/* Convert a per-core counter count to the number usable by each VPE. */
static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}
/* Convert a per-VPE counter count back to the total per core. */
static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
{
	return counters << vpe_shift();
}
/*
 * Generate r_c0_<reg><n>() / w_c0_<reg><n>() accessors that transparently
 * redirect to the sibling VPE's register pair: logical counter n on VPE 0
 * maps to hardware register n, on VPE 1 to register np.  With per-TC
 * counters (vpe_id() == 0) the plain register is always used.
 */
#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}
113 __define_perf_accessors(perfcntr, 0, 2)
114 __define_perf_accessors(perfcntr, 1, 3)
115 __define_perf_accessors(perfcntr, 2, 0)
116 __define_perf_accessors(perfcntr, 3, 1)
118 __define_perf_accessors(perfctrl, 0, 2)
119 __define_perf_accessors(perfctrl, 1, 3)
120 __define_perf_accessors(perfctrl, 2, 0)
121 __define_perf_accessors(perfctrl, 3, 1)
123 struct op_mips_model op_model_mipsxx_ops;
125 static struct mipsxx_register_config {
126 unsigned int control[4];
127 unsigned int counter[4];
130 /* Compute all of the registers in preparation for enabling profiling. */
132 static void mipsxx_reg_setup(struct op_counter_config *ctr)
134 unsigned int counters = op_model_mipsxx_ops.num_counters;
137 /* Compute the performance counter control word. */
138 for (i = 0; i < counters; i++) {
145 reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
146 M_PERFCTL_INTERRUPT_ENABLE;
148 reg.control[i] |= M_PERFCTL_KERNEL;
150 reg.control[i] |= M_PERFCTL_USER;
152 reg.control[i] |= M_PERFCTL_EXL;
153 reg.counter[i] = 0x80000000 - ctr[i].count;
157 /* Program all of the registers in preparation for enabling profiling. */
159 static void mipsxx_cpu_setup(void *args)
161 unsigned int counters = op_model_mipsxx_ops.num_counters;
166 w_c0_perfcntr3(reg.counter[3]);
169 w_c0_perfcntr2(reg.counter[2]);
172 w_c0_perfcntr1(reg.counter[1]);
175 w_c0_perfcntr0(reg.counter[0]);
179 /* Start all counters on current CPU */
180 static void mipsxx_cpu_start(void *args)
182 unsigned int counters = op_model_mipsxx_ops.num_counters;
186 w_c0_perfctrl3(WHAT | reg.control[3]);
188 w_c0_perfctrl2(WHAT | reg.control[2]);
190 w_c0_perfctrl1(WHAT | reg.control[1]);
192 w_c0_perfctrl0(WHAT | reg.control[0]);
196 /* Stop all counters on current CPU */
197 static void mipsxx_cpu_stop(void *args)
199 unsigned int counters = op_model_mipsxx_ops.num_counters;
213 static int mipsxx_perfcount_handler(void)
215 unsigned int counters = op_model_mipsxx_ops.num_counters;
216 unsigned int control;
217 unsigned int counter;
218 int handled = IRQ_NONE;
220 if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
224 #define HANDLE_COUNTER(n) \
226 control = r_c0_perfctrl ## n(); \
227 counter = r_c0_perfcntr ## n(); \
228 if ((control & M_PERFCTL_INTERRUPT_ENABLE) && \
229 (counter & M_COUNTER_OVERFLOW)) { \
230 oprofile_add_sample(get_irq_regs(), n); \
231 w_c0_perfcntr ## n(reg.counter[n]); \
232 handled = IRQ_HANDLED; \
243 #define M_CONFIG1_PC (1 << 4)
245 static inline int __n_counters(void)
247 if (!(read_c0_config1() & M_CONFIG1_PC))
249 if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
251 if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
253 if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
259 static inline int n_counters(void)
263 switch (current_cpu_type()) {
274 counters = __n_counters();
/*
 * Zero the first 'arg' control/counter register pairs on the current CPU.
 * The counter count is smuggled through the void * cross-call argument;
 * cast via long first so the pointer->int narrowing is explicit and clean
 * on 64-bit kernels.
 */
static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	/* Deliberate fall-through: resetting N counters resets all below. */
	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
		/* fall through */
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
		/* fall through */
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
		/* fall through */
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(0);
	}
}
299 static int __init mipsxx_init(void)
303 counters = n_counters();
305 printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
309 #ifdef CONFIG_MIPS_MT_SMP
310 cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
311 if (!cpu_has_mipsmt_pertccounters)
312 counters = counters_total_to_per_cpu(counters);
314 on_each_cpu(reset_counters, (void *)counters, 0, 1);
316 op_model_mipsxx_ops.num_counters = counters;
317 switch (current_cpu_type()) {
319 op_model_mipsxx_ops.cpu_type = "mips/20K";
323 op_model_mipsxx_ops.cpu_type = "mips/24K";
327 op_model_mipsxx_ops.cpu_type = "mips/25K";
332 /* FIXME: report as 34K for now */
333 op_model_mipsxx_ops.cpu_type = "mips/1004K";
338 op_model_mipsxx_ops.cpu_type = "mips/34K";
342 op_model_mipsxx_ops.cpu_type = "mips/74K";
346 op_model_mipsxx_ops.cpu_type = "mips/5K";
350 if ((current_cpu_data.processor_id & 0xff) == 0x20)
351 op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
353 op_model_mipsxx_ops.cpu_type = "mips/r10000";
358 op_model_mipsxx_ops.cpu_type = "mips/r12000";
363 op_model_mipsxx_ops.cpu_type = "mips/sb1";
367 printk(KERN_ERR "Profiling unsupported for this CPU\n");
372 perf_irq = mipsxx_perfcount_handler;
377 static void mipsxx_exit(void)
379 int counters = op_model_mipsxx_ops.num_counters;
381 counters = counters_per_cpu_to_total(counters);
382 on_each_cpu(reset_counters, (void *)counters, 0, 1);
384 perf_irq = null_perf_irq;
387 struct op_mips_model op_model_mipsxx_ops = {
388 .reg_setup = mipsxx_reg_setup,
389 .cpu_setup = mipsxx_cpu_setup,
392 .cpu_start = mipsxx_cpu_start,
393 .cpu_stop = mipsxx_cpu_stop,