/*
 * PMU support
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types.
 */
enum armv8_pmuv3_perf_types {
        /* Required events. */
        ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR                        = 0x00,
        ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL                    = 0x03,
        ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS                    = 0x04,
        ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED                  = 0x10,
        ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES                        = 0x11,
        ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED                      = 0x12,

        /* At least one of the following is required. */
        ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED                      = 0x08,
        ARMV8_PMUV3_PERFCTR_OP_SPEC                             = 0x1B,

        /* Common architectural events. */
        ARMV8_PMUV3_PERFCTR_MEM_READ                            = 0x06,
        ARMV8_PMUV3_PERFCTR_MEM_WRITE                           = 0x07,
        ARMV8_PMUV3_PERFCTR_EXC_TAKEN                           = 0x09,
        ARMV8_PMUV3_PERFCTR_EXC_EXECUTED                        = 0x0A,
        ARMV8_PMUV3_PERFCTR_CID_WRITE                           = 0x0B,
        ARMV8_PMUV3_PERFCTR_PC_WRITE                            = 0x0C,
        ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH                       = 0x0D,
        ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN                      = 0x0E,
        ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS                = 0x0F,
        ARMV8_PMUV3_PERFCTR_TTBR_WRITE                          = 0x1C,

        /* Common microarchitectural events. */
        ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL                    = 0x01,
        ARMV8_PMUV3_PERFCTR_ITLB_REFILL                         = 0x02,
        ARMV8_PMUV3_PERFCTR_DTLB_REFILL                         = 0x05,
        ARMV8_PMUV3_PERFCTR_MEM_ACCESS                          = 0x13,
        ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS                    = 0x14,
        ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB                        = 0x15,
        ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS                     = 0x16,
        ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL                     = 0x17,
        ARMV8_PMUV3_PERFCTR_L2_CACHE_WB                         = 0x18,
        ARMV8_PMUV3_PERFCTR_BUS_ACCESS                          = 0x19,
        ARMV8_PMUV3_PERFCTR_MEM_ERROR                           = 0x1A,
        ARMV8_PMUV3_PERFCTR_BUS_CYCLES                          = 0x1D,
};

/* ARMv8 Cortex-A53 specific event types. */
enum armv8_a53_pmu_perf_types {
        ARMV8_A53_PERFCTR_PREFETCH_LINEFILL                     = 0xC2,
};

/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

/* ARM Cortex-A53 HW events mapping. */
static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV8_PMUV3_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};
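
/*
 * Illustrative sketch (not part of the driver): a generic event such as
 * PERF_COUNT_HW_CPU_CYCLES is translated through the table above by the
 * arm_pmu core via armpmu_map_event(), roughly:
 *
 *      unsigned int hw_event = armv8_pmuv3_perf_map[PERF_COUNT_HW_CPU_CYCLES];
 *
 * yielding ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES (0x11), which is eventually
 * programmed into the event type register of whichever counter is allocated.
 */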

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                              [PERF_COUNT_HW_CACHE_OP_MAX]
                                              [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,

        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
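
/*
 * Events with no generic mapping above remain reachable as raw events.
 * For example (illustrative), the Cortex-A53 prefetch linefill counter
 * can be requested from userspace with perf's raw event syntax:
 *
 *      perf stat -e rc2 -- <workload>
 *
 * where 0xc2 is ARMV8_A53_PERFCTR_PREFETCH_LINEFILL.
 */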

/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0      1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
        (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV8_MAX_COUNTERS      32
#define ARMV8_COUNTER_MASK      (ARMV8_MAX_COUNTERS - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
        (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
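
/*
 * Worked example of the mapping: perf index ARMV8_IDX_COUNTER0 (1) selects
 * hardware event counter 0, while the cycle counter (perf index 0) wraps
 * around to bit 31, matching its position in the enable/overflow registers:
 *
 *      ARMV8_IDX_TO_COUNTER(ARMV8_IDX_COUNTER0)      == 0
 *      ARMV8_IDX_TO_COUNTER(ARMV8_IDX_CYCLE_COUNTER) == 31
 */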

/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E            (1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P            (1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C            (1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D            (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X            (1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP           (1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_N_SHIFT      11       /* Number of counters supported */
#define ARMV8_PMCR_N_MASK       0x1f
#define ARMV8_PMCR_MASK         0x3f     /* Mask for writable bits */

/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK         0xffffffff      /* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK   ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV8_EVTYPE_MASK       0xc80003ff      /* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT      0x3ff           /* Mask for EVENT bits */

/*
 * Event filters for PMUv3
 */
#define ARMV8_EXCLUDE_EL1       (1 << 31)
#define ARMV8_EXCLUDE_EL0       (1 << 30)
#define ARMV8_INCLUDE_EL2       (1 << 27)
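
/*
 * Illustrative sketch: a kernel-only event (attr.exclude_user set) counting
 * retired instructions would end up with an event type value of roughly
 *
 *      ARMV8_EXCLUDE_EL0 | ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED
 *
 * which armv8pmu_write_evtype() below masks with ARMV8_EVTYPE_MASK before
 * writing it to PMXEVTYPER_EL0.
 */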

static inline u32 armv8pmu_pmcr_read(void)
{
        u32 val;
        asm volatile("mrs %0, pmcr_el0" : "=r" (val));
        return val;
}

static inline void armv8pmu_pmcr_write(u32 val)
{
        val &= ARMV8_PMCR_MASK;
        isb();
        asm volatile("msr pmcr_el0, %0" :: "r" (val));
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
        return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
        return idx >= ARMV8_IDX_CYCLE_COUNTER &&
                idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
        return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline int armv8pmu_select_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmselr_el0, %0" :: "r" (counter));
        isb();

        return idx;
}

static inline u32 armv8pmu_read_counter(struct perf_event *event)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u32 value = 0;

        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u reading wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER)
                asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
        else if (armv8pmu_select_counter(idx) == idx)
                asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

        return value;
}

static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u writing wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER)
                asm volatile("msr pmccntr_el0, %0" :: "r" (value));
        else if (armv8pmu_select_counter(idx) == idx)
                asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
        if (armv8pmu_select_counter(idx) == idx) {
                val &= ARMV8_EVTYPE_MASK;
                asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
        }
}

static inline int armv8pmu_enable_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
        return idx;
}

static inline int armv8pmu_disable_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
        return idx;
}

static inline int armv8pmu_enable_intens(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
        return idx;
}

static inline int armv8pmu_disable_intens(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
        isb();
        /* Clear the overflow flag in case an interrupt is pending. */
        asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
        isb();

        return idx;
}

static inline u32 armv8pmu_getreset_flags(void)
{
        u32 value;

        /* Read */
        asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

        /* Write to clear flags */
        value &= ARMV8_OVSR_MASK;
        asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

        return value;
}

static void armv8pmu_enable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv8pmu_disable_counter(idx);

        /*
         * Set event (if destined for PMNx counters).
         */
        armv8pmu_write_evtype(idx, hwc->config_base);

        /*
         * Enable interrupt for this counter
         */
        armv8pmu_enable_intens(idx);

        /*
         * Enable counter
         */
        armv8pmu_enable_counter(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        /*
         * Disable counter and interrupt
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv8pmu_disable_counter(idx);

        /*
         * Disable interrupt for this counter
         */
        armv8pmu_disable_intens(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
        u32 pmovsr;
        struct perf_sample_data data;
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        /*
         * Get and reset the IRQ flags
         */
        pmovsr = armv8pmu_getreset_flags();

        /*
         * Did an overflow occur?
         */
        if (!armv8pmu_has_overflowed(pmovsr))
                return IRQ_NONE;

        /*
         * Handle the counter(s) overflow(s)
         */
        regs = get_irq_regs();

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Enable all counters */
        armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Disable all counters */
        armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                  struct perf_event *event)
{
        int idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;

        /* Always place a cycle counter into the cycle counter. */
        if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
                if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV8_IDX_CYCLE_COUNTER;
        }

        /*
         * For anything other than a cycle counter, try to use
         * the event counters.
         */
        for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
                if (!test_and_set_bit(idx, cpuc->used_mask))
                        return idx;
        }

        /* The counters are all in use. */
        return -EAGAIN;
}
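
/*
 * For example (illustrative): a cycle event plus two cache events on a PMU
 * with six event counters leaves bits 0 (cycle counter), 1 and 2 (event
 * counters 0 and 1) set in used_mask; a second cycle event then fails with
 * -EAGAIN, since the cycle counter is exclusive and no fallback to the
 * event counters is attempted.
 */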

/*
 * Add an event filter to a given event. This will only work for PMUv3 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
                                     struct perf_event_attr *attr)
{
        unsigned long config_base = 0;

        if (attr->exclude_idle)
                return -EPERM;
        if (attr->exclude_user)
                config_base |= ARMV8_EXCLUDE_EL0;
        if (attr->exclude_kernel)
                config_base |= ARMV8_EXCLUDE_EL1;
        if (!attr->exclude_hv)
                config_base |= ARMV8_INCLUDE_EL2;

        /*
         * Install the filter into config_base as this is used to
         * construct the event type.
         */
        event->config_base = config_base;

        return 0;
}
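
/*
 * For example (illustrative): a user-space-only event, such as one created
 * with perf's ":u" modifier, typically arrives with exclude_kernel and
 * exclude_hv set, giving
 *
 *      config_base == ARMV8_EXCLUDE_EL1
 *
 * i.e. count at EL0 only, without ARMV8_INCLUDE_EL2.
 */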

static void armv8pmu_reset(void *info)
{
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
        u32 idx, nb_cnt = cpu_pmu->num_events;

        /* The counter and interrupt enable registers are unknown at reset. */
        for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
                armv8pmu_disable_counter(idx);
                armv8pmu_disable_intens(idx);
        }

        /* Initialize & Reset PMNC: C and P bits. */
        armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);

        /* Disable access from userspace. */
        asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv8_pmuv3_perf_map,
                                &armv8_pmuv3_perf_cache_map,
                                ARMV8_EVTYPE_EVENT);
}

static int armv8_a53_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv8_a53_perf_map,
                                &armv8_a53_perf_cache_map,
                                ARMV8_EVTYPE_EVENT);
}

static void armv8pmu_read_num_pmnc_events(void *info)
{
        int *nb_cnt = info;

        /* Read the number of CNTx counters supported from PMNC. */
        *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

        /* Add the CPU cycles counter. */
        *nb_cnt += 1;
}
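
/*
 * For example, a Cortex-A53 reports PMCR_EL0.N == 6, so this yields
 * num_events == 7: six programmable event counters plus the dedicated
 * cycle counter.
 */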

static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
{
        return smp_call_function_any(&arm_pmu->supported_cpus,
                                    armv8pmu_read_num_pmnc_events,
                                    &arm_pmu->num_events, 1);
}

static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->handle_irq             = armv8pmu_handle_irq;
        cpu_pmu->enable                 = armv8pmu_enable_event;
        cpu_pmu->disable                = armv8pmu_disable_event;
        cpu_pmu->read_counter           = armv8pmu_read_counter;
        cpu_pmu->write_counter          = armv8pmu_write_counter;
        cpu_pmu->get_event_idx          = armv8pmu_get_event_idx;
        cpu_pmu->start                  = armv8pmu_start;
        cpu_pmu->stop                   = armv8pmu_stop;
        cpu_pmu->reset                  = armv8pmu_reset;
        cpu_pmu->max_period             = (1LLU << 32) - 1;
        cpu_pmu->set_event_filter       = armv8pmu_set_event_filter;
}

static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
        armv8_pmu_init(cpu_pmu);
        cpu_pmu->name                   = "armv8_pmuv3";
        cpu_pmu->map_event              = armv8_pmuv3_map_event;
        return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv8_pmu_init(cpu_pmu);
        cpu_pmu->name                   = "armv8_cortex_a53";
        cpu_pmu->map_event              = armv8_a53_map_event;
        return armv8pmu_probe_num_events(cpu_pmu);
}

static const struct of_device_id armv8_pmu_of_device_ids[] = {
        {.compatible = "arm,armv8-pmuv3",       .data = armv8_pmuv3_init},
        {.compatible = "arm,cortex-a53-pmu",    .data = armv8_a53_pmu_init},
        {},
};
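
/*
 * A matching device-tree node looks roughly like this (illustrative; the
 * interrupt configuration is SoC specific):
 *
 *      pmu {
 *              compatible = "arm,cortex-a53-pmu";
 *              interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
 *      };
 */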

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
        .driver         = {
                .name   = "armv8-pmu",
                .of_match_table = armv8_pmu_of_device_ids,
        },
        .probe          = armv8_pmu_device_probe,
};

static int __init register_armv8_pmu_driver(void)
{
        return platform_driver_register(&armv8_pmu_driver);
}
device_initcall(register_armv8_pmu_driver);