/*
 * CCI cache coherent interconnect driver
 *
 * Copyright (C) 2013 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/arm-cci.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#define DRIVER_NAME "CCI-400"
#define DRIVER_NAME_PMU DRIVER_NAME " PMU"

#define CCI_PORT_CTRL 0x0
#define CCI_CTRL_STATUS 0xc

#define CCI_ENABLE_SNOOP_REQ 0x1
#define CCI_ENABLE_DVM_REQ 0x2
#define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
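/*
 * Note: enabling a port sets both request bits, i.e. snoop requests and DVM
 * message requests are turned on (or off) together for that interface.
 */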
struct cci_nb_ports {
	unsigned int nb_ace;
	unsigned int nb_ace_lite;
};

enum cci_ace_port_type {
	ACE_INVALID_PORT = 0x0,
	ACE_PORT,
	ACE_LITE_PORT,
};

struct cci_ace_port {
	void __iomem *base;
	unsigned long phys;
	enum cci_ace_port_type type;
	struct device_node *dn;
};

static struct cci_ace_port *ports;
static unsigned int nb_cci_ports;

static void __iomem *cci_ctrl_base;
static unsigned long cci_ctrl_phys;
#ifdef CONFIG_HW_PERF_EVENTS

#define CCI_PMCR 0x0100
#define CCI_PID2 0x0fe8

#define CCI_PMCR_CEN 0x00000001
#define CCI_PMCR_NCNT_MASK 0x0000f800
#define CCI_PMCR_NCNT_SHIFT 11

#define CCI_PID2_REV_MASK 0xf0
#define CCI_PID2_REV_SHIFT 4

#define CCI_REV_R0 0
#define CCI_REV_R1 1
#define CCI_REV_R1_PX 5

#define CCI_PMU_EVT_SEL 0x000
#define CCI_PMU_CNTR 0x004
#define CCI_PMU_CNTR_CTRL 0x008
#define CCI_PMU_OVRFLW 0x00c

#define CCI_PMU_OVRFLW_FLAG 1

#define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K)
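/*
 * Each counter owns a 4K register window within the PMU mapping; the
 * CCI_PMU_EVT_SEL, CCI_PMU_CNTR, CCI_PMU_CNTR_CTRL and CCI_PMU_OVRFLW
 * offsets above are relative to the window selected by
 * CCI_PMU_CNTR_BASE(idx) (see pmu_read_register()/pmu_write_register()).
 */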
#define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1)

/*
 * Instead of an event id to monitor CCI cycles, a dedicated counter is
 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
 * make use of this event in hardware.
 */
enum cci400_perf_events {
	CCI_PMU_CYCLES = 0xff
};

#define CCI_PMU_EVENT_MASK 0xff
#define CCI_PMU_EVENT_SOURCE(event) (((event) >> 5) & 0x7)
#define CCI_PMU_EVENT_CODE(event) ((event) & 0x1f)
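/*
 * Worked example (illustrative): an event id of 0x63 selects source port
 * 0x3 (0x63 >> 5 == 0x3) and event code 0x03 (0x63 & 0x1f == 0x03).
 */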
#define CCI_PMU_MAX_HW_EVENTS 5   /* CCI PMU has 4 counters + 1 cycle counter */

#define CCI_PMU_CYCLE_CNTR_IDX 0
#define CCI_PMU_CNTR0_IDX 1
#define CCI_PMU_CNTR_LAST(cci_pmu) (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
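/*
 * Counter index 0 (CCI_PMU_CYCLE_CNTR_IDX) is the dedicated cycle counter;
 * the programmable event counters start at CCI_PMU_CNTR0_IDX and run up to
 * CCI_PMU_CNTR_LAST().
 */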
/*
 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
 * ports and bits 4:0 are event codes. There are different event codes
 * associated with each port type.
 *
 * Additionally, the range of events associated with the port types changed
 * between Rev0 and Rev1.
 *
 * The constants below define the range of valid codes for each port type for
 * the different revisions and are used to validate the event to be monitored.
 */

#define CCI_REV_R0_SLAVE_PORT_MIN_EV 0x00
#define CCI_REV_R0_SLAVE_PORT_MAX_EV 0x13
#define CCI_REV_R0_MASTER_PORT_MIN_EV 0x14
#define CCI_REV_R0_MASTER_PORT_MAX_EV 0x1a

#define CCI_REV_R1_SLAVE_PORT_MIN_EV 0x00
#define CCI_REV_R1_SLAVE_PORT_MAX_EV 0x14
#define CCI_REV_R1_MASTER_PORT_MIN_EV 0x00
#define CCI_REV_R1_MASTER_PORT_MAX_EV 0x11

struct pmu_port_event_ranges {
	u8 slave_min;
	u8 slave_max;
	u8 master_min;
	u8 master_max;
};

static struct pmu_port_event_ranges port_event_range[] = {
	[CCI_REV_R0] = {
		.slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV,
		.slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV,
		.master_min = CCI_REV_R0_MASTER_PORT_MIN_EV,
		.master_max = CCI_REV_R0_MASTER_PORT_MAX_EV,
	},
	[CCI_REV_R1] = {
		.slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV,
		.slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV,
		.master_min = CCI_REV_R1_MASTER_PORT_MIN_EV,
		.master_max = CCI_REV_R1_MASTER_PORT_MAX_EV,
	},
};

/*
 * Export different PMU names for the different revisions so userspace knows
 * which event ids apply, since the ids differ between revisions.
 */
static char *const pmu_names[] = {
	[CCI_REV_R0] = "CCI_400",
	[CCI_REV_R1] = "CCI_400_r1",
};
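/*
 * Illustrative userspace usage (assuming the r0 name above and the raw
 * event id from the example earlier); counting is system-wide only:
 *	perf stat -a -e CCI_400/config=0x63/ sleep 1
 */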
struct cci_pmu_hw_events {
	struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
	unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
	raw_spinlock_t pmu_lock;
};

struct cci_pmu {
	void __iomem *base;
	struct pmu pmu;
	int nr_irqs;
	int irqs[CCI_PMU_MAX_HW_EVENTS];
	unsigned long active_irqs;
	struct pmu_port_event_ranges *port_ranges;
	struct cci_pmu_hw_events hw_events;
	struct platform_device *plat_device;
	int num_events;
	atomic_t active_events;
	struct mutex reserve_mutex;
	cpumask_t cpus;
};
static struct cci_pmu *pmu;

#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
	for (i = 0; i < nr_irqs; i++)

static int probe_cci_revision(void)
{
	int rev;
	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
	rev >>= CCI_PID2_REV_SHIFT;

	if (rev < CCI_REV_R1_PX)
		return CCI_REV_R0;
	else
		return CCI_REV_R1;
}

static struct pmu_port_event_ranges *port_range_by_rev(void)
{
	int rev = probe_cci_revision();

	return &port_event_range[rev];
}
static int pmu_is_valid_slave_event(u8 ev_code)
{
	return pmu->port_ranges->slave_min <= ev_code &&
		ev_code <= pmu->port_ranges->slave_max;
}

static int pmu_is_valid_master_event(u8 ev_code)
{
	return pmu->port_ranges->master_min <= ev_code &&
		ev_code <= pmu->port_ranges->master_max;
}

static int pmu_validate_hw_event(u8 hw_event)
	u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
	u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);

		/* Slave Interface */
		if (pmu_is_valid_slave_event(ev_code))

		/* Master Interface */
		if (pmu_is_valid_master_event(ev_code))

static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
{
	return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
		idx <= CCI_PMU_CNTR_LAST(cci_pmu);
}
static u32 pmu_read_register(int idx, unsigned int offset)
{
	return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
}

static void pmu_write_register(u32 value, int idx, unsigned int offset)
{
	writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
}

static void pmu_disable_counter(int idx)
{
	pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_enable_counter(int idx)
{
	pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_set_event(int idx, unsigned long event)
{
	event &= CCI_PMU_EVENT_MASK;
	pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
}

static u32 pmu_get_max_counters(void)
{
	u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
		      CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;

	/* add 1 for cycle counter */
	return n_cnts + 1;
}
static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_event = &event->hw;
	unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;

	if (cci_event == CCI_PMU_CYCLES) {
		if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))

		return CCI_PMU_CYCLE_CNTR_IDX;

	for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
		if (!test_and_set_bit(idx, hw->used_mask))

	/* No counters available */

static int pmu_map_event(struct perf_event *event)
	u8 config = event->attr.config & CCI_PMU_EVENT_MASK;

	if (event->attr.type < PERF_TYPE_MAX)

	if (config == CCI_PMU_CYCLES)

	mapping = pmu_validate_hw_event(config);

static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
	struct platform_device *pmu_device = cci_pmu->plat_device;

	if (unlikely(!pmu_device))

	if (pmu->nr_irqs < 1) {
		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");

	/*
	 * Register all available CCI PMU interrupts. In the interrupt handler
	 * we iterate over the counters checking for interrupt source (the
	 * overflowing counter) and clear it.
	 *
	 * This should allow handling of non-unique interrupts for the counters.
	 */
	for (i = 0; i < pmu->nr_irqs; i++) {
		int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED,
				      "arm-cci-pmu", cci_pmu);
			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",

		set_bit(i, &pmu->active_irqs);
static void pmu_free_irq(struct cci_pmu *cci_pmu)
	for (i = 0; i < pmu->nr_irqs; i++) {
		if (!test_and_clear_bit(i, &pmu->active_irqs))

		free_irq(pmu->irqs[i], cci_pmu);

static u32 pmu_read_counter(struct perf_event *event)
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);

	value = pmu_read_register(idx, CCI_PMU_CNTR);

static void pmu_write_counter(struct perf_event *event, u32 value)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
	else
		pmu_write_register(value, idx, CCI_PMU_CNTR);
}

static u64 pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
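	/*
	 * Read the counter and publish the new prev_count with a cmpxchg
	 * retry loop so that a concurrent update (e.g. from the overflow
	 * interrupt handler) is not lost.
	 */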
	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		 new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;

	local64_add(delta, &event->count);

	return new_raw_count;
}

static void pmu_read(struct perf_event *event)
{
	pmu_event_update(event);
}

static void pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	/*
	 * The CCI PMU counters have a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period of
	 * half that. Hopefully we can handle the interrupt before another 2^31
	 * events occur and the counter overtakes its previous value.
	 */
	u64 val = 1ULL << 31;
	local64_set(&hwc->prev_count, val);
	pmu_write_counter(event, val);
}
static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
	struct cci_pmu *cci_pmu = dev;
	struct cci_pmu_hw_events *events = &pmu->hw_events;
	int idx, handled = IRQ_NONE;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/*
	 * Iterate over counters and update the corresponding perf events.
	 * This should work regardless of whether we have per-counter overflow
	 * interrupt or a combined overflow interrupt.
	 */
	for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
		struct perf_event *event = events->events[idx];
		struct hw_perf_event *hw_counter;

		hw_counter = &event->hw;

		/* Did this counter overflow? */
		if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) &
		      CCI_PMU_OVRFLW_FLAG))

		pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);

		pmu_event_update(event);
		pmu_event_set_period(event);
		handled = IRQ_HANDLED;

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);

	return IRQ_RETVAL(handled);

static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
		pmu_free_irq(cci_pmu);

static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
{
	pmu_free_irq(cci_pmu);
}
static void hw_perf_event_destroy(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
		cci_pmu_put_hw(cci_pmu);
		mutex_unlock(reserve_mutex);
	}
}

static void cci_pmu_enable(struct pmu *pmu)
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_events);

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Enable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);

static void cci_pmu_disable(struct pmu *pmu)
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Disable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
static void cci_pmu_start(struct perf_event *event, int pmu_flags)
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * To handle interrupt latency, we always reprogram the period
	 * regardless of PERF_EF_RELOAD.
	 */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Configure the event to count, unless you are counting cycles */
	if (idx != CCI_PMU_CYCLE_CNTR_IDX)
		pmu_set_event(idx, hwc->config_base);

	pmu_event_set_period(event);
	pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);

static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_STOPPED)

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);

	/*
	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
	 * cci_pmu_start()
	 */
	pmu_disable_counter(idx);
	pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;

static int cci_pmu_add(struct perf_event *event, int flags)
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = pmu_get_event_idx(hw_events, event);

	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		cci_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	perf_pmu_enable(event->pmu);

static void cci_pmu_del(struct perf_event *event, int flags)
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;

	cci_pmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
static int
validate_event(struct pmu *cci_pmu,
	       struct cci_pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	if (is_software_event(event))

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != cci_pmu)

	if (event->state < PERF_EVENT_STATE_OFF)

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)

	return pmu_get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cci_pmu_hw_events fake_pmu = {
		/*
		 * Initialise the fake PMU. We only need to populate the
		 * used_mask for the purposes of validation.
		 */
		.used_mask = CPU_BITS_NONE,
	};

	if (!validate_event(event->pmu, &fake_pmu, leader))

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))

	if (!validate_event(event->pmu, &fake_pmu, event))

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	mapping = pmu_map_event(event);
		pr_debug("event %x:%llx not supported\n", event->attr.type,

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->config_base = 0;

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	/*
	 * Limit the sample_period to half of the counter width. That way, the
	 * new counter value is far less likely to overtake the previous one
	 * unless you have some serious IRQ latency issues.
	 */
	hwc->sample_period = CCI_PMU_CNTR_MASK >> 1;
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
static int cci_pmu_event_init(struct perf_event *event)
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;

	if (event->attr.type != event->pmu->type)

	/* Shared by all CPUs, no meaningful state to sample */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)

	/* We have no filtering of any kind */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest)

	/*
	 * Following the example set by other "uncore" PMUs, we accept any CPU
	 * and rewrite its affinity dynamically rather than having perf core
	 * handle cpu == -1 and pid == -1 for this case.
	 *
	 * The perf core will pin online CPUs for the duration of this call and
	 * the event being installed into its context, so the PMU's CPU can't
	 * change under our feet.
	 */
	cpu = cpumask_first(&cci_pmu->cpus);
	if (event->cpu < 0 || cpu < 0)

	event->destroy = hw_perf_event_destroy;
	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&cci_pmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = cci_pmu_get_hw(cci_pmu);
			atomic_inc(active_events);
		mutex_unlock(&cci_pmu->reserve_mutex);

	err = __hw_perf_event_init(event);
		hw_perf_event_destroy(event);

static ssize_t pmu_attr_cpumask_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
	int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  cpumask_pr_args(&pmu->cpus));

static DEVICE_ATTR(cpumask, S_IRUGO, pmu_attr_cpumask_show, NULL);

static struct attribute *pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group pmu_attr_group = {
	.attrs = pmu_attrs,
};

static const struct attribute_group *pmu_attr_groups[] = {
	&pmu_attr_group,
	NULL
};
static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
{
	char *name = pmu_names[probe_cci_revision()];
	cci_pmu->pmu = (struct pmu) {
		.name = name,
		.task_ctx_nr = perf_invalid_context,
		.pmu_enable = cci_pmu_enable,
		.pmu_disable = cci_pmu_disable,
		.event_init = cci_pmu_event_init,
		.add = cci_pmu_add,
		.del = cci_pmu_del,
		.start = cci_pmu_start,
		.stop = cci_pmu_stop,
		.read = pmu_read,
		.attr_groups = pmu_attr_groups,
	};

	cci_pmu->plat_device = pdev;
	cci_pmu->num_events = pmu_get_max_counters();

	return perf_pmu_register(&cci_pmu->pmu, name, -1);
}

static int cci_pmu_cpu_notifier(struct notifier_block *self,
				unsigned long action, void *hcpu)
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))

		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target < 0) // UP, last CPU

		/*
		 * TODO: migrate context once core races on event->ctx have
		 * been fixed.
		 */
		cpumask_set_cpu(target, &pmu->cpus);

static struct notifier_block cci_pmu_cpu_nb = {
	.notifier_call = cci_pmu_cpu_notifier,
	/*
	 * To migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority = CPU_PRI_PERF + 1,
};

static const struct of_device_id arm_cci_pmu_matches[] = {
	{
		.compatible = "arm,cci-400-pmu",
	},
	{},
};
static int cci_pmu_probe(struct platform_device *pdev)
	struct resource *res;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pmu->base))

	/*
	 * CCI PMU has 5 overflow signals - one per counter; but some may be tied
	 * together to a common interrupt.
	 */
	for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) {
		irq = platform_get_irq(pdev, i);

		if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs))

		pmu->irqs[pmu->nr_irqs++] = irq;

	/*
	 * Ensure that the device tree has as many interrupts as the number
	 * of counters.
	 */
	if (i < CCI_PMU_MAX_HW_EVENTS) {
		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
			 i, CCI_PMU_MAX_HW_EVENTS);

	pmu->port_ranges = port_range_by_rev();
	if (!pmu->port_ranges) {
		dev_warn(&pdev->dev, "CCI PMU version not supported\n");

	raw_spin_lock_init(&pmu->hw_events.pmu_lock);
	mutex_init(&pmu->reserve_mutex);
	atomic_set(&pmu->active_events, 0);
	cpumask_set_cpu(smp_processor_id(), &pmu->cpus);
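	/*
	 * The CPU that probes the PMU becomes the initial event-handling CPU;
	 * the hotplug notifier above moves this designation to another online
	 * CPU if it goes offline.
	 */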
	ret = register_cpu_notifier(&cci_pmu_cpu_nb);

	ret = cci_pmu_init(pmu, pdev);

static int cci_platform_probe(struct platform_device *pdev)
	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);

#endif /* CONFIG_HW_PERF_EVENTS */

struct cpu_port {
	u64 mpidr;
	u32 port;
};

/*
 * Use the port MSB as valid flag, shift can be made dynamic
 * by computing number of bits required for port indexes.
 * Code disabling CCI cpu ports runs with D-cache invalidated
 * and the SCTLR C bit clear so data accesses must be kept to a minimum
 * to improve performance; for now shift is left static to
 * avoid one more data access while disabling the CCI port.
 */
#define PORT_VALID_SHIFT 31
#define PORT_VALID (0x1 << PORT_VALID_SHIFT)
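/*
 * Example: a stashed port value of 0x80000002 has the valid flag (bit 31)
 * set and identifies CCI port index 2.
 */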
static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
{
	port->port = PORT_VALID | index;
	port->mpidr = mpidr;
}

static inline bool cpu_port_is_valid(struct cpu_port *port)
{
	return !!(port->port & PORT_VALID);
}

static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
{
	return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
}

static struct cpu_port cpu_port[NR_CPUS];

/**
 * __cci_ace_get_port - Function to retrieve the port index connected to
 *			a given device
 *
 * @dn: device node of the device to look-up
 * @type: port type to match
 *
 * Return value:
 *	- CCI port index if success
 *	- -ENODEV if failure
 */
static int __cci_ace_get_port(struct device_node *dn, int type)
	struct device_node *cci_portn;

	cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
	for (i = 0; i < nb_cci_ports; i++) {
		ace_match = ports[i].type == type;
		if (ace_match && cci_portn == ports[i].dn)

int cci_ace_get_port(struct device_node *dn)
{
	return __cci_ace_get_port(dn, ACE_LITE_PORT);
}
EXPORT_SYMBOL_GPL(cci_ace_get_port);

static void cci_ace_init_ports(void)
	struct device_node *cpun;

	/*
	 * Port index look-up speeds up the function disabling ports by CPU,
	 * since the logical to port index mapping is done once and does
	 * not change after system boot.
	 * The stashed index array is initialized for all possible CPUs.
	 */
	for_each_possible_cpu(cpu) {
		/* too early to use cpu->of_node */
		cpun = of_get_cpu_node(cpu, NULL);

		if (WARN(!cpun, "Missing cpu device node\n"))

		port = __cci_ace_get_port(cpun, ACE_PORT);

		init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
	}

	for_each_possible_cpu(cpu) {
		WARN(!cpu_port_is_valid(&cpu_port[cpu]),
		     "CPU %u does not have an associated CCI port\n",
		     cpu);
	}
}
/*
 * Functions to enable/disable a CCI interconnect slave port
 *
 * They are called by low-level power management code to disable slave
 * interfaces snoops and DVM broadcast.
 * Since they may execute with cache data allocation disabled and
 * after the caches have been cleaned and invalidated, the functions provide
 * no explicit locking: normal cacheable kernel locks based on ldrex/strex
 * may not work while the D-cache is disabled.
 * Locking has to be provided by BSP implementations to ensure proper
 * operation.
 */

/**
 * cci_port_control() - function to control a CCI port
 *
 * @port: index of the port to setup
 * @enable: if true enables the port, if false disables it
 */
static void notrace cci_port_control(unsigned int port, bool enable)
{
	void __iomem *base = ports[port].base;

	writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
	/*
	 * This function is called from power down procedures
	 * and must not execute any instruction that might
	 * cause the processor to be put in a quiescent state
	 * (e.g. wfi). Hence, cpu_relax() can not be added to this
	 * read loop to optimize power, since it might hide possibly
	 * disruptive operations.
	 */
	while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
		;
}
/**
 * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
 *			       reference
 *
 * @mpidr: mpidr of the CPU whose CCI port should be disabled
 *
 * Disabling a CCI port for a CPU implies disabling the CCI port
 * controlling that CPU cluster. Code disabling CPU CCI ports
 * must make sure that the CPU running the code is the last active CPU
 * in the cluster, i.e. all other CPUs are quiescent in a low power state.
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace cci_disable_port_by_cpu(u64 mpidr)
	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		is_valid = cpu_port_is_valid(&cpu_port[cpu]);
		if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
			cci_port_control(cpu_port[cpu].port, false);

EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
/**
 * cci_enable_port_for_self() - enable a CCI port for calling CPU
 *
 * Enabling a CCI port for the calling CPU implies enabling the CCI
 * port controlling that CPU's cluster. Caller must make sure that the
 * CPU running the code is the first active CPU in the cluster and all
 * other CPUs are quiescent in a low power state or waiting for this CPU
 * to complete the CCI initialization.
 *
 * Because this is called when the MMU is still off and with no stack,
 * the code must be position independent and ideally rely on callee
 * clobbered registers only. To achieve this we must code this function
 * entirely in assembler.
 *
 * On success this returns with the proper CCI port enabled. In case of
 * any failure this never returns as the inability to enable the CCI is
 * fatal and there is no possible recovery at this stage.
 */
asmlinkage void __naked cci_enable_port_for_self(void)
{
	asm volatile ("\n"
" mrc p15, 0, r0, c0, c0, 5 @ get MPIDR value \n"
" and r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
" add r1, r1, r2 @ &cpu_port \n"
" add ip, r1, %[sizeof_cpu_port] \n"

	/* Loop over the cpu_port array looking for a matching MPIDR */
"1: ldr r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
" cmp r2, r0 @ compare MPIDR \n"

	/* Found a match, now test port validity */
" ldr r3, [r1, %[offsetof_cpu_port_port]] \n"
" tst r3, #"__stringify(PORT_VALID)" \n"

	/* no match, loop with the next cpu_port entry */
"2: add r1, r1, %[sizeof_struct_cpu_port] \n"
" cmp r1, ip @ done? \n"

	/* CCI port not found -- cheaply try to stall this CPU */
"cci_port_not_found: \n"
" b cci_port_not_found \n"

	/* Use matched port index to look up the corresponding ports entry */
"3: bic r3, r3, #"__stringify(PORT_VALID)" \n"
" ldmia r0, {r1, r2} \n"
" sub r1, r1, r0 @ virt - phys \n"
" ldr r0, [r0, r2] @ *(&ports) \n"
" mov r2, %[sizeof_struct_ace_port] \n"
" mla r0, r2, r3, r0 @ &ports[index] \n"
" sub r0, r0, r1 @ virt_to_phys() \n"

	/* Enable the CCI port */
" ldr r0, [r0, %[offsetof_port_phys]] \n"
" mov r3, %[cci_enable_req]\n"
" str r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"

	/* poll the status reg for completion */
" ldr r0, [r0, r1] @ cci_ctrl_base \n"
"4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
" tst r1, %[cci_control_status_bits] \n"

"5: .word cpu_port - . \n"
" .word ports - 6b \n"
"7: .word cci_ctrl_phys - . \n"
	[sizeof_cpu_port] "i" (sizeof(cpu_port)),
	[cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
	[cci_control_status_bits] "i" cpu_to_le32(1),
#ifndef __ARMEB__
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
#else
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
#endif
	[offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
	[sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
	[sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
	[offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
}
/**
 * __cci_control_port_by_device() - function to control a CCI port by device
 *				     reference
 *
 * @dn: device node pointer of the device whose CCI port should be
 *	controlled
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
	port = __cci_ace_get_port(dn, ACE_LITE_PORT);
	if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",

	cci_port_control(port, enable);

EXPORT_SYMBOL_GPL(__cci_control_port_by_device);

/**
 * __cci_control_port_by_index() - function to control a CCI port by port index
 *
 * @port: port index previously retrieved with cci_ace_get_port()
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port index out of range
 *	-EPERM if operation carried out on an ACE PORT
 */
int notrace __cci_control_port_by_index(u32 port, bool enable)
	if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)

	/*
	 * CCI control for ports connected to CPUS is extremely fragile
	 * and must be made to go through a specific and controlled
	 * interface (i.e. cci_disable_port_by_cpu()); control by general
	 * purpose indexing is therefore disabled for ACE ports.
	 */
	if (ports[port].type == ACE_PORT)

	cci_port_control(port, enable);

EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
static const struct cci_nb_ports cci400_ports = {

static const struct of_device_id arm_cci_matches[] = {
	{.compatible = "arm,cci-400", .data = &cci400_ports },
	{},
};

static const struct of_device_id arm_cci_ctrl_if_matches[] = {
	{.compatible = "arm,cci-400-ctrl-if", },
	{},
};

static int cci_probe(void)
	struct cci_nb_ports const *cci_config;
	int ret, i, nb_ace = 0, nb_ace_lite = 0;
	struct device_node *np, *cp;
	struct resource res;
	const char *match_str;

	np = of_find_matching_node(NULL, arm_cci_matches);

	if (!of_device_is_available(np))

	cci_config = of_match_node(arm_cci_matches, np)->data;

	nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;

	ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);

	ret = of_address_to_resource(np, 0, &res);

	cci_ctrl_base = ioremap(res.start, resource_size(&res));
	cci_ctrl_phys = res.start;

	if (ret || !cci_ctrl_base) {
		WARN(1, "unable to ioremap CCI ctrl\n");

	for_each_child_of_node(np, cp) {
		if (!of_match_node(arm_cci_ctrl_if_matches, cp))

		i = nb_ace + nb_ace_lite;

		if (i >= nb_cci_ports)

		if (of_property_read_string(cp, "interface-type",
			WARN(1, "node %s missing interface-type property\n",

		is_ace = strcmp(match_str, "ace") == 0;
		if (!is_ace && strcmp(match_str, "ace-lite")) {
			WARN(1, "node %s contains an invalid interface-type property, skipping it\n",

		ret = of_address_to_resource(cp, 0, &res);

		ports[i].base = ioremap(res.start, resource_size(&res));
		ports[i].phys = res.start;

		if (ret || !ports[i].base) {
			WARN(1, "unable to ioremap CCI port %d\n", i);

			if (WARN_ON(nb_ace >= cci_config->nb_ace))
			ports[i].type = ACE_PORT;

			if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
			ports[i].type = ACE_LITE_PORT;

	/* initialize a stashed array of ACE ports to speed-up look-up */
	cci_ace_init_ports();

	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure it reaches main memory.
	 */
	sync_cache_w(&cci_ctrl_base);
	sync_cache_w(&cci_ctrl_phys);
	sync_cache_w(&ports);
	sync_cache_w(&cpu_port);
	__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
	pr_info("ARM CCI driver probed\n");
static int cci_init_status = -EAGAIN;
static DEFINE_MUTEX(cci_probing);

static int cci_init(void)
{
	if (cci_init_status != -EAGAIN)
		return cci_init_status;

	mutex_lock(&cci_probing);
	if (cci_init_status == -EAGAIN)
		cci_init_status = cci_probe();
	mutex_unlock(&cci_probing);
	return cci_init_status;
}
#ifdef CONFIG_HW_PERF_EVENTS
static struct platform_driver cci_pmu_driver = {
	.driver = {
		.name = DRIVER_NAME_PMU,
		.of_match_table = arm_cci_pmu_matches,
	},
	.probe = cci_pmu_probe,
};

static struct platform_driver cci_platform_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = arm_cci_matches,
	},
	.probe = cci_platform_probe,
};

static int __init cci_platform_init(void)
{
	int ret;

	ret = platform_driver_register(&cci_pmu_driver);
	if (ret)
		return ret;

	return platform_driver_register(&cci_platform_driver);
}
#else
static int __init cci_platform_init(void)
{
	return 0;
}
#endif

/*
 * To sort out early init call ordering, a helper function is provided to
 * check whether the CCI driver has been initialized. If it has not, it calls
 * the init function that probes the driver and updates the return value.
 */
bool cci_probed(void)
{
	return cci_init() == 0;
}
EXPORT_SYMBOL_GPL(cci_probed);

early_initcall(cci_init);
core_initcall(cci_platform_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARM CCI support");