/*
 * Meta performance counter support.
 *  Copyright (C) 2012 Imagination Technologies Ltd
 *
 * This code is based on the sh pmu code:
 *  Copyright (C) 2009 Paul Mundt
 *
 * and on the arm pmu code:
 *  Copyright (C) 2009 picoChip Designs, Ltd., James Iles
 *  Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip/metag.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include <asm/core_reg.h>
#include <asm/hwthread.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "perf_event.h"

static int _hw_perf_event_init(struct perf_event *);
static void _hw_perf_event_destroy(struct perf_event *);

/* Determines which core type we are */
static struct metag_pmu *metag_pmu __read_mostly;

/* Processor specific data */
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

/* PMU admin */
const char *perf_pmu_name(void)
{
	if (metag_pmu)
		return metag_pmu->pmu.name;

	return NULL;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	if (metag_pmu)
		return metag_pmu->max_events;

	return 0;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static inline int metag_pmu_initialised(void)
{
	return !!metag_pmu;
}

static void release_pmu_hardware(void)
{
	int irq;
	unsigned int version = (metag_pmu->version &
			(METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >>
			METAC_ID_REV_S;

	/* Early cores don't have overflow interrupts */
	if (version < 0x0104)
		return;

	irq = internal_irq_map(17);
	if (irq >= 0)
		free_irq(irq, (void *)1);

	irq = internal_irq_map(16);
	if (irq >= 0)
		free_irq(irq, (void *)0);
}

static int reserve_pmu_hardware(void)
{
	int err = 0, irq[2];
	unsigned int version = (metag_pmu->version &
			(METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >>
			METAC_ID_REV_S;

	/* Early cores don't have overflow interrupts */
	if (version < 0x0104)
		goto out;

	/*
	 * Bit 16 on HWSTATMETA is the interrupt for performance counter 0;
	 * similarly, 17 is the interrupt for performance counter 1.
	 * We can't (yet) interrupt on the cycle counter, because it's a
	 * register; however, it holds a 32-bit value as opposed to 24-bit.
	 */
	irq[0] = internal_irq_map(16);
	if (irq[0] < 0) {
		pr_err("unable to map internal IRQ %d\n", 16);
		goto out;
	}
	err = request_irq(irq[0], metag_pmu->handle_irq, IRQF_NOBALANCING,
			"metagpmu0", (void *)0);
	if (err) {
		pr_err("unable to request IRQ%d for metag PMU counters\n",
				irq[0]);
		goto out;
	}

	irq[1] = internal_irq_map(17);
	if (irq[1] < 0) {
		pr_err("unable to map internal IRQ %d\n", 17);
		goto out_irq1;
	}
	err = request_irq(irq[1], metag_pmu->handle_irq, IRQF_NOBALANCING,
			"metagpmu1", (void *)1);
	if (err) {
		pr_err("unable to request IRQ%d for metag PMU counters\n",
				irq[1]);
		goto out_irq1;
	}

	return 0;

out_irq1:
	free_irq(irq[0], (void *)0);
out:
	return err;
}
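
/*
 * The overflow IRQs are reference counted rather than requested per
 * event: the first event to come up takes active_events from 0 to 1
 * under reserve_mutex and calls reserve_pmu_hardware(); the last event
 * torn down drops the count back to 0 in _hw_perf_event_destroy() and
 * releases the IRQs again.
 */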

/* PMU operations */
static void metag_pmu_enable(struct pmu *pmu)
{
}

static void metag_pmu_disable(struct pmu *pmu)
{
}

static int metag_pmu_event_init(struct perf_event *event)
{
	int err = 0;
	atomic_t *active_events = &metag_pmu->active_events;

	if (!metag_pmu_initialised()) {
		err = -ENODEV;
		goto out;
	}

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	event->destroy = _hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&metag_pmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = reserve_pmu_hardware();

		if (!err)
			atomic_inc(active_events);

		mutex_unlock(&metag_pmu->reserve_mutex);
	}

	/* Hardware and cache counters */
	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		err = _hw_perf_event_init(event);
		break;

	default:
		return -ENOENT;
	}

	if (err)
		event->destroy(event);

out:
	return err;
}

void metag_pmu_event_update(struct perf_event *event,
		struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	/*
	 * If this counter is chained, it may be that the previous counter
	 * value has been changed beneath us.
	 *
	 * To get around this, we read and exchange the new raw count, then
	 * add the delta (new - prev) to the generic counter atomically.
	 *
	 * Without interrupts, this is the simplest approach.
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = metag_pmu->read(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			new_raw_count) != prev_raw_count)
		goto again;

	/* Calculate the delta and add it to the counter. */
	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
}
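
/*
 * Reprogram the hardware so that it overflows after "left" more events.
 * The counters count upwards, so what gets written is the negated
 * remaining period, truncated to the counter width. A worked example,
 * assuming MAX_PERIOD is the 24-bit mask 0x00ffffff: for left == 1000,
 * (u64)(-1000) & 0x00ffffff == 0xfffc18, which wraps (and interrupts)
 * after exactly 1000 increments.
 */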

int metag_pmu_event_set_period(struct perf_event *event,
		struct hw_perf_event *hwc, int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)metag_pmu->max_period)
		left = metag_pmu->max_period;

	if (metag_pmu->write)
		metag_pmu->write(idx, (u64)(-left) & MAX_PERIOD);

	perf_event_update_userpage(event);

	return ret;
}

static void metag_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	/*
	 * We always have to reprogram the period, so ignore PERF_EF_RELOAD.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/*
	 * Reset the period.
	 * Some counters can't be stopped (i.e. are core global), so when the
	 * counter was 'stopped' we merely disabled the IRQ. If we don't reset
	 * the period, then we'll either: a) get an overflow too soon;
	 * or b) too late if the overflow happened since disabling.
	 * Obviously, this has little bearing on cores without the overflow
	 * interrupt, as the performance counter resets to zero on write
	 * anyhow.
	 */
	if (metag_pmu->max_period)
		metag_pmu_event_set_period(event, hwc, hwc->idx);
	cpuc->events[idx] = event;
	metag_pmu->enable(hwc, idx);
}

static void metag_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * We should always update the counter on stop; see the comment in
	 * metag_pmu_start() for why.
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		metag_pmu_event_update(event, hwc, hwc->idx);
		metag_pmu->disable(hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int metag_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = 0, ret = 0;

	perf_pmu_disable(event->pmu);

	/* Check whether we're counting instructions */
	if (hwc->config == 0x100) {
		if (__test_and_set_bit(METAG_INST_COUNTER,
				cpuc->used_mask)) {
			ret = -EAGAIN;
			goto out;
		}
		idx = METAG_INST_COUNTER;
	} else {
		/* Check whether we have a spare counter */
		idx = find_first_zero_bit(cpuc->used_mask,
				atomic_read(&metag_pmu->active_events));
		if (idx >= METAG_INST_COUNTER) {
			ret = -EAGAIN;
			goto out;
		}

		__set_bit(idx, cpuc->used_mask);
	}
	hwc->idx = idx;

	/* Make sure the counter is disabled */
	metag_pmu->disable(hwc, idx);

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		metag_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void metag_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);
	metag_pmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	__clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void metag_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	metag_pmu_event_update(event, hwc, hwc->idx);
}

static struct pmu pmu = {
	.pmu_enable	= metag_pmu_enable,
	.pmu_disable	= metag_pmu_disable,

	.event_init	= metag_pmu_event_init,

	.add		= metag_pmu_add,
	.del		= metag_pmu_del,
	.start		= metag_pmu_start,
	.stop		= metag_pmu_stop,
	.read		= metag_pmu_read,
};
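
/*
 * For reference, the calling sequence from the perf core is: event_init()
 * validates and maps each new event; add() claims a counter slot on the
 * current CPU and, with PERF_EF_START, calls start(); start()/stop()
 * enable and disable counting; read() folds the current hardware count
 * into the generic event count; del() releases the slot again.
 */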

/* Core counter specific functions */
static const int metag_general_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = 0x03,
	[PERF_COUNT_HW_INSTRUCTIONS] = 0x100,
	[PERF_COUNT_HW_CACHE_REFERENCES] = -1,
	[PERF_COUNT_HW_CACHE_MISSES] = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES] = -1,
	[PERF_COUNT_HW_BUS_CYCLES] = -1,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = -1,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = -1,
	[PERF_COUNT_HW_REF_CPU_CYCLES] = -1,
};
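
/*
 * Entries of -1 above, and CACHE_OP_UNSUPPORTED below, mark events this
 * hardware cannot count; _hw_perf_event_init() rejects them with
 * -EINVAL. C(x) is presumed to be the usual shorthand for
 * PERF_COUNT_HW_CACHE_##x, per the convention for these tables.
 */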

static const int metag_pmu_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x08,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x09,
			[C(RESULT_MISS)] = 0x0a,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0xd0,
			[C(RESULT_MISS)] = 0xd2,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0xd4,
			[C(RESULT_MISS)] = 0xd5,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0xd1,
			[C(RESULT_MISS)] = 0xd3,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

static void _hw_perf_event_destroy(struct perf_event *event)
{
	atomic_t *active_events = &metag_pmu->active_events;
	struct mutex *pmu_mutex = &metag_pmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_mutex)) {
		release_pmu_hardware();
		mutex_unlock(pmu_mutex);
	}
}

static int _hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	if (!metag_pmu->cache_events)
		return -EINVAL;

	/* Unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;
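
	/*
	 * This mirrors the perf ABI packing of attr.config for
	 * PERF_TYPE_HW_CACHE events: id | (op << 8) | (result << 16).
	 * For example, an L1D read access packs to 0 | (0 << 8) |
	 * (0 << 16) == 0, which decodes to type/op/result 0/0/0 and maps
	 * to hardware event 0x08 in metag_pmu_cache_events.
	 */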

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
			op >= PERF_COUNT_HW_CACHE_OP_MAX ||
			result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*metag_pmu->cache_events)[type][op][result];
	if (ev == CACHE_OP_UNSUPPORTED)
		return -EINVAL;

	*evp = ev;
	return 0;
}

static int _hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int mapping = 0, err;

	switch (attr->type) {
	case PERF_TYPE_HARDWARE:
		if (attr->config >= PERF_COUNT_HW_MAX)
			return -EINVAL;

		mapping = metag_pmu->event_map(attr->config);
		break;

	case PERF_TYPE_HW_CACHE:
		err = _hw_perf_cache_event(attr->config, &mapping);
		if (err)
			return err;
		break;
	}

	/* Return early if the event is unsupported */
	if (mapping == -1)
		return -EINVAL;

	/*
	 * Early cores have "limited" counters - they have no overflow
	 * interrupts - and so are unable to do sampling without extra work
	 * and timer assistance.
	 */
	if (metag_pmu->max_period == 0) {
		if (hwc->sample_period)
			return -EINVAL;
	}

	/*
	 * Don't assign an index until the event is placed into the hardware.
	 * -1 signifies that we're still deciding where to put it. On SMP
	 * systems each core has its own set of counters, so we can't do any
	 * constraint checking yet.
	 */
	hwc->idx = -1;

	/* Store the event encoding */
	hwc->config |= (unsigned long)mapping;

	/*
	 * For non-sampling runs, limit the sample_period to half of the
	 * counter width. This way, the new counter value should be less
	 * likely to overtake the previous one (unless there are IRQ latency
	 * issues...)
	 */
	if (metag_pmu->max_period) {
		if (!hwc->sample_period) {
			hwc->sample_period = metag_pmu->max_period >> 1;
			hwc->last_period = hwc->sample_period;
			local64_set(&hwc->period_left, hwc->sample_period);
		}
	}

	return 0;
}
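
/*
 * A worked default from the above, assuming MAX_PERIOD is the 24-bit
 * mask 0x00ffffff: a non-sampling event gets sample_period =
 * 0x00ffffff >> 1 = 0x007fffff, i.e. roughly 8.4 million events between
 * interrupt-driven counter folds.
 */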

static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
{
	struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
	unsigned int config = event->config;
	unsigned int tmp = config & 0xf0;
	unsigned long flags;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Check if we're enabling the instruction counter (index of
	 * MAX_HWEVENTS - 1)
	 */
	if (METAG_INST_COUNTER == idx) {
		WARN_ONCE((config != 0x100),
			"invalid configuration (%d) for counter (%d)\n",
			config, idx);

		/* Reset the cycle count */
		__core_reg_set(TXTACTCYC, 0);
		goto unlock;
	}

	/* Check for a core internal or performance channel event. */
	if (tmp) {
		void *perf_addr = (void *)PERF_COUNT(idx);

		/*
		 * Anything other than a cycle count will write the low
		 * nibble to the correct counter register.
		 */
		switch (tmp) {
		case 0xd0:
			perf_addr = (void *)PERF_ICORE(idx);
			break;

		case 0xf0:
			perf_addr = (void *)PERF_CHAN(idx);
			break;
		}

		metag_out32((config & 0x0f), perf_addr);

		/*
		 * Now we use the high nibble as the performance event to
		 * count.
		 */
		config = tmp >> 4;
	}

	/*
	 * Enabled counters start from 0. Early cores clear the count on
	 * write but newer cores don't, so we make sure that the count is
	 * set to 0. Preemption is already disabled by the spinlock, so
	 * smp_processor_id() is safe here.
	 */
	tmp = ((config & 0xf) << 28) |
			((1 << 24) << cpu_2_hwthread_id[smp_processor_id()]);
	metag_out32(tmp, PERF_COUNT(idx));

unlock:
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
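
/*
 * Layout of a PERF_COUNT() register, as implied by the masks used above
 * and in the disable/write paths below: bits [31:28] select the event,
 * bits [27:24] are the per-hardware-thread enable mask, and bits [23:0]
 * hold the 24-bit count itself.
 */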

static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx)
{
	struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
	unsigned int tmp = 0;
	unsigned long flags;

	/*
	 * The cycle counter can't be disabled per se, as it's a hardware
	 * thread register which is always counting. We merely return if this
	 * is the counter we're attempting to disable.
	 */
	if (METAG_INST_COUNTER == idx)
		return;

	/*
	 * The counter value _should_ have been read prior to disabling,
	 * as if we're running on an early core then the value gets reset to
	 * 0, and any read after that would be useless. On the newer cores,
	 * however, it's better to read-modify-update this for purposes of
	 * the overflow interrupt.
	 * Here we remove the thread id AND the event nibble (there are at
	 * least two events that count events that are core global and ignore
	 * the thread id mask). This only works because we don't mix thread
	 * performance counts, and event 0x00 requires a thread id mask!
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	tmp = metag_in32(PERF_COUNT(idx));
	tmp &= 0x00ffffff;
	metag_out32(tmp, PERF_COUNT(idx));

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static u64 metag_pmu_read_counter(int idx)
{
	u32 tmp = 0;

	/* The act of reading the cycle counter also clears it */
	if (METAG_INST_COUNTER == idx) {
		__core_reg_swap(TXTACTCYC, tmp);
		goto out;
	}

	tmp = metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
out:
	return tmp;
}

static void metag_pmu_write_counter(int idx, u32 val)
{
	struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
	u32 tmp = 0;
	unsigned long flags;

	/*
	 * This _shouldn't_ happen, but if it does, then we can just
	 * ignore the write, as the register is read-only and clear-on-write.
	 */
	if (METAG_INST_COUNTER == idx)
		return;

	/*
	 * We'll keep the thread mask and event id, and just update the
	 * counter itself. Also, we should bound the value to 24 bits.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	val &= 0x00ffffff;
	tmp = metag_in32(PERF_COUNT(idx)) & 0xff000000;
	val |= tmp;
	metag_out32(val, PERF_COUNT(idx));

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int metag_pmu_event_map(int idx)
{
	return metag_general_events[idx];
}

static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
{
	int idx = (int)dev;
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event = cpuhw->events[idx];
	struct hw_perf_event *hwc = &event->hw;
	struct pt_regs *regs = get_irq_regs();
	struct perf_sample_data sampledata;
	unsigned long flags;
	u32 counter = 0;

	/*
	 * We need to stop the core temporarily from generating another
	 * interrupt while we disable this counter. However, we don't want
	 * to flag the counter as free.
	 */
	__global_lock2(flags);
	counter = metag_in32(PERF_COUNT(idx));
	metag_out32((counter & 0x00ffffff), PERF_COUNT(idx));
	__global_unlock2(flags);

	/* Update the counts and reset the sample period */
	metag_pmu_event_update(event, hwc, idx);
	perf_sample_data_init(&sampledata, 0, hwc->last_period);
	metag_pmu_event_set_period(event, hwc, idx);

	/*
	 * Enable the counter again once core overflow processing has
	 * completed.
	 */
	if (!perf_event_overflow(event, &sampledata, regs))
		metag_out32(counter, PERF_COUNT(idx));

	return IRQ_HANDLED;
}
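
/*
 * Note that perf_event_overflow() returns non-zero when the perf core
 * wants the event stopped (e.g. when throttled), which is why the
 * counter is only re-enabled above when it returns 0.
 */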

static struct metag_pmu _metag_pmu = {
	.handle_irq	= metag_pmu_counter_overflow,
	.enable		= metag_pmu_enable_counter,
	.disable	= metag_pmu_disable_counter,
	.read		= metag_pmu_read_counter,
	.write		= metag_pmu_write_counter,
	.event_map	= metag_pmu_event_map,
	.cache_events	= &metag_pmu_cache_events,
	.max_period	= MAX_PERIOD,
	.max_events	= MAX_HWEVENTS,
};

/* PMU CPU hotplug notifier */
static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)hcpu;
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
		return NOTIFY_DONE;

	memset(cpuc, 0, sizeof(struct cpu_hw_events));
	raw_spin_lock_init(&cpuc->pmu_lock);

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata metag_pmu_notifier = {
	.notifier_call = metag_pmu_cpu_notify,
};

/* PMU Initialisation */
static int __init init_hw_perf_events(void)
{
	int ret = 0, cpu;
	u32 version = *(u32 *)METAC_ID;
	int major = (version & METAC_ID_MAJOR_BITS) >> METAC_ID_MAJOR_S;
	int min_rev = (version & (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS))
			>> METAC_ID_REV_S;

	/* Not a Meta 2 core, so not supported */
	if (0x02 > major) {
		pr_info("no hardware counter support available\n");
		goto out;
	} else if (0x02 == major) {
		metag_pmu = &_metag_pmu;

		if (min_rev < 0x0104) {
			/*
			 * A core without overflow interrupts, and
			 * clear-on-write counters.
			 */
			metag_pmu->handle_irq = NULL;
			metag_pmu->write = NULL;
			metag_pmu->max_period = 0;
		}

		metag_pmu->name = "Meta 2";
		metag_pmu->version = version;
		metag_pmu->pmu = pmu;
	}

	pr_info("enabled with %s PMU driver, %d counters available\n",
			metag_pmu->name, metag_pmu->max_events);

	/* Initialise the active events and reservation mutex */
	atomic_set(&metag_pmu->active_events, 0);
	mutex_init(&metag_pmu->reserve_mutex);

	/* Clear the counters */
	metag_out32(0, PERF_COUNT(0));
	metag_out32(0, PERF_COUNT(1));

	for_each_possible_cpu(cpu) {
		struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

		memset(cpuc, 0, sizeof(struct cpu_hw_events));
		raw_spin_lock_init(&cpuc->pmu_lock);
	}

	register_cpu_notifier(&metag_pmu_notifier);
	ret = perf_pmu_register(&pmu, (char *)metag_pmu->name, PERF_TYPE_RAW);
out:
	return ret;
}
early_initcall(init_hw_perf_events);
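
/*
 * Illustrative userspace sketch (not part of the driver): counting L1D
 * read accesses on this PMU through the standard perf syscall ABI.
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_HW_CACHE,
 *		.size = sizeof(attr),
 *		.config = PERF_COUNT_HW_CACHE_L1D |
 *			  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *			  (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * _hw_perf_cache_event() maps this config to hardware event 0x08.
 */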