#include <asm/cpu_device_id.h>

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
        EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
        EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");
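
/*
 * PCI uncore devices are enumerated per PCI segment and bus number, while
 * perf needs to know which physical socket a device belongs to.  The
 * pci2phy_map list below caches that segment/bus-number -> physical
 * package id mapping: uncore_pcibus_to_physid() looks entries up and
 * __find_pci2phy_map() creates them on demand under pci2phy_map_lock.
 */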

static int uncore_pcibus_to_physid(struct pci_bus *bus)
        struct pci2phy_map *map;

        raw_spin_lock(&pci2phy_map_lock);
        list_for_each_entry(map, &pci2phy_map_head, list) {
                if (map->segment == pci_domain_nr(bus)) {
                        phys_id = map->pbus_to_physid[bus->number];
        raw_spin_unlock(&pci2phy_map_lock);

static void uncore_free_pcibus_map(void)
        struct pci2phy_map *map, *tmp;

        list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {

struct pci2phy_map *__find_pci2phy_map(int segment)
        struct pci2phy_map *map, *alloc = NULL;

        lockdep_assert_held(&pci2phy_map_lock);

        list_for_each_entry(map, &pci2phy_map_head, list) {
                if (map->segment == segment)

        raw_spin_unlock(&pci2phy_map_lock);
        alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
        raw_spin_lock(&pci2phy_map_lock);

        map->segment = segment;
        for (i = 0; i < 256; i++)
                map->pbus_to_physid[i] = -1;
        list_add_tail(&map->list, &pci2phy_map_head);

ssize_t uncore_event_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
        struct uncore_event_desc *event =
                container_of(attr, struct uncore_event_desc, attr);
        return sprintf(buf, "%s", event->config);
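
/*
 * Each uncore PMU keeps one box per physical package; the boxes array is
 * indexed by the logical package id of the CPU that wants to use it.
 */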

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
        return pmu->boxes[topology_logical_package_id(cpu)];

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
        rdmsrl(event->hw.event_base, count);

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;

        /*
         * reg->alloc can be set due to existing state, so for fake box we
         * need to ignore this, otherwise we might fail to allocate proper
         * fake state for this extra reg constraint.
         */
        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))

        er = &box->shared_regs[reg1->idx];
        raw_spin_lock_irqsave(&er->lock, flags);
        if (!atomic_read(&er->ref) ||
            (er->config1 == reg1->config && er->config2 == reg2->config)) {
                atomic_inc(&er->ref);
                er->config1 = reg1->config;
                er->config2 = reg2->config;

        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (!uncore_box_is_fake(box))

        return &uncore_constraint_empty;

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

        /*
         * Only put the constraint if the extra reg was actually allocated.
         * This also takes care of events which do not use an extra shared
         * reg.
         *
         * Also, if this is a fake box we shouldn't touch any event state
         * (reg->alloc) and we don't care about leaving inconsistent box
         * state either since it will be thrown out.
         */
        if (uncore_box_is_fake(box) || !reg1->alloc)

        er = &box->shared_regs[reg1->idx];
        atomic_dec(&er->ref);

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
        struct intel_uncore_extra_reg *er;

        er = &box->shared_regs[idx];

        raw_spin_lock_irqsave(&er->lock, flags);

        raw_spin_unlock_irqrestore(&er->lock, flags);

static void uncore_assign_hw_event(struct intel_uncore_box *box,
                                   struct perf_event *event, int idx)
        struct hw_perf_event *hwc = &event->hw;

        hwc->last_tag = ++box->tags[idx];

        if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
                hwc->event_base = uncore_fixed_ctr(box);
                hwc->config_base = uncore_fixed_ctl(box);

        hwc->config_base = uncore_event_ctl(box, hwc->idx);
        hwc->event_base = uncore_perf_ctr(box, hwc->idx);
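
/*
 * Uncore counters are narrower than 64 bit (uncore_perf_ctr_bits() /
 * uncore_fixed_ctr_bits() wide).  The update below shifts both the
 * previous and the new raw value up by (64 - width) bits before taking
 * the difference, so a rollover of the narrower hardware counter is
 * handled by ordinary 64-bit wraparound.
 */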

void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
        u64 prev_count, new_count, delta;

        if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
                shift = 64 - uncore_fixed_ctr_bits(box);
                shift = 64 - uncore_perf_ctr_bits(box);

        /* the hrtimer might modify the previous event value */
        prev_count = local64_read(&event->hw.prev_count);
        new_count = uncore_read_counter(box, event);
        if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)

        delta = (new_count << shift) - (prev_count << shift);

        local64_add(delta, &event->count);

/*
 * The overflow interrupt is unavailable for SandyBridge-EP and broken
 * for SandyBridge, so we use an hrtimer to periodically poll the counters.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
        struct intel_uncore_box *box;
        struct perf_event *event;

        box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
        if (!box->n_active || box->cpu != smp_processor_id())
                return HRTIMER_NORESTART;

        /*
         * disable local interrupts to prevent uncore_pmu_event_start/stop
         * from interrupting the update process
         */
        local_irq_save(flags);

        /*
         * handle boxes with an active event list as opposed to active
         * counters
         */
        list_for_each_entry(event, &box->active_list, active_entry) {
                uncore_perf_event_update(box, event);

        for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
                uncore_perf_event_update(box, box->events[bit]);

        local_irq_restore(flags);

        hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
        return HRTIMER_RESTART;

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
        hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
                      HRTIMER_MODE_REL_PINNED);

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
        hrtimer_cancel(&box->hrtimer);

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
        hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        box->hrtimer.function = uncore_pmu_hrtimer;

static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
        int i, size, numshared = type->num_shared_regs;
        struct intel_uncore_box *box;

        size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

        box = kzalloc_node(size, GFP_KERNEL, node);

        for (i = 0; i < numshared; i++)
                raw_spin_lock_init(&box->shared_regs[i].lock);

        uncore_pmu_init_hrtimer(box);

        box->pci_phys_id = -1;

        /* set default hrtimer timeout */
        box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

        INIT_LIST_HEAD(&box->active_list);

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_uncore_event(struct perf_event *event)
        return event->pmu->event_init == uncore_pmu_event_init;
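
/*
 * Collect the leader and its uncore siblings into box->event_list, up to
 * the number of generic counters (plus the fixed counter if the box has
 * one).  Events that are not uncore events or that are already off are
 * skipped.
 */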

uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
        struct perf_event *event;

        max_count = box->pmu->type->num_counters;
        if (box->pmu->type->fixed_ctl)

        if (box->n_events >= max_count)

        if (is_uncore_event(leader)) {
                box->event_list[n] = leader;

        list_for_each_entry(event, &leader->sibling_list, group_entry) {
                if (!is_uncore_event(event) ||
                    event->state <= PERF_EVENT_STATE_OFF)

                box->event_list[n] = event;
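
/*
 * Constraint lookup order: the type specific ->get_constraint() callback
 * first, then the fixed counter constraint for UNCORE_FIXED_EVENT, then
 * the type's static constraint table, and finally the type's
 * "unconstrainted" constraint which allows any generic counter.
 */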

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
        struct intel_uncore_type *type = box->pmu->type;
        struct event_constraint *c;

        if (type->ops->get_constraint) {
                c = type->ops->get_constraint(box, event);

        if (event->attr.config == UNCORE_FIXED_EVENT)
                return &uncore_constraint_fixed;

        if (type->constraints) {
                for_each_event_constraint(c, type->constraints) {
                        if ((event->hw.config & c->cmask) == c->code)

        return &type->unconstrainted;

static void uncore_put_event_constraint(struct intel_uncore_box *box,
                                        struct perf_event *event)
        if (box->pmu->type->ops->put_constraint)
                box->pmu->type->ops->put_constraint(box, event);
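
/*
 * Assign counters to the collected events: first try the fastpath that
 * keeps every event on the counter it already occupied (as long as its
 * constraint still allows that counter and the counter is unused),
 * otherwise fall back to perf_assign_events(), which schedules events in
 * order of increasing constraint weight.
 */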

static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
        unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
        struct event_constraint *c;
        int i, wmin, wmax, ret = 0;
        struct hw_perf_event *hwc;

        bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

        for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
                c = uncore_get_event_constraint(box, box->event_list[i]);
                box->event_constraint[i] = c;
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);

        /* fastpath, try to reuse previous register */
        for (i = 0; i < n; i++) {
                hwc = &box->event_list[i]->hw;
                c = box->event_constraint[i];

                /* constraint still honored */
                if (!test_bit(hwc->idx, c->idxmsk))

                /* not already used */
                if (test_bit(hwc->idx, used_mask))

                __set_bit(hwc->idx, used_mask);

                assign[i] = hwc->idx;

        ret = perf_assign_events(box->event_constraint, n,
                                 wmin, wmax, n, assign);

        if (!assign || ret) {
                for (i = 0; i < n; i++)
                        uncore_put_event_constraint(box, box->event_list[i]);

        return ret ? -EINVAL : 0;
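
/*
 * Start/stop keep box->n_active in sync with the number of running
 * events: the first event started enables the box and arms the polling
 * hrtimer, the last one stopped disables the box and cancels the timer.
 */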

static void uncore_pmu_event_start(struct perf_event *event, int flags)
        struct intel_uncore_box *box = uncore_event_to_box(event);
        int idx = event->hw.idx;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))

        if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))

        box->events[idx] = event;

        __set_bit(idx, box->active_mask);

        local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
        uncore_enable_event(box, event);

        if (box->n_active == 1) {
                uncore_enable_box(box);
                uncore_pmu_start_hrtimer(box);

static void uncore_pmu_event_stop(struct perf_event *event, int flags)
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
                uncore_disable_event(box, event);

                box->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;

                if (box->n_active == 0) {
                        uncore_disable_box(box);
                        uncore_pmu_cancel_hrtimer(box);

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                uncore_perf_event_update(box, event);
                hwc->state |= PERF_HES_UPTODATE;
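
/*
 * pmu::add collects the new event into the box, computes a counter
 * assignment for all collected events, stops events whose counter or tag
 * changed, and then reprograms and restarts everything on its assigned
 * counter unless the event was added without PERF_EF_START
 * (PERF_HES_ARCH).
 */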

static int uncore_pmu_event_add(struct perf_event *event, int flags)
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;
        int assign[UNCORE_PMC_IDX_MAX];

        ret = n = uncore_collect_events(box, event, false);

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        ret = uncore_assign_events(box, assign, n);

        /* save events moving to new counters */
        for (i = 0; i < box->n_events; i++) {
                event = box->event_list[i];

                if (hwc->idx == assign[i] &&
                    hwc->last_tag == box->tags[assign[i]])

                /*
                 * Ensure we don't accidentally enable a stopped
                 * counter simply because we rescheduled.
                 */
                if (hwc->state & PERF_HES_STOPPED)
                        hwc->state |= PERF_HES_ARCH;

                uncore_pmu_event_stop(event, PERF_EF_UPDATE);

        /* reprogram moved events into new counters */
        for (i = 0; i < n; i++) {
                event = box->event_list[i];

                if (hwc->idx != assign[i] ||
                    hwc->last_tag != box->tags[assign[i]])
                        uncore_assign_hw_event(box, event, assign[i]);
                else if (i < box->n_events)

                if (hwc->state & PERF_HES_ARCH)

                uncore_pmu_event_start(event, 0);

static void uncore_pmu_event_del(struct perf_event *event, int flags)
        struct intel_uncore_box *box = uncore_event_to_box(event);

        uncore_pmu_event_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < box->n_events; i++) {
                if (event == box->event_list[i]) {
                        uncore_put_event_constraint(box, event);

                        for (++i; i < box->n_events; i++)
                                box->event_list[i - 1] = box->event_list[i];

        event->hw.last_tag = ~0ULL;

void uncore_pmu_event_read(struct perf_event *event)
        struct intel_uncore_box *box = uncore_event_to_box(event);
        uncore_perf_event_update(box, event);

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
                                 struct perf_event *event)
        struct perf_event *leader = event->group_leader;
        struct intel_uncore_box *fake_box;
        int ret = -EINVAL, n;

        fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);

        /*
         * the event is not yet connected with its
         * siblings therefore we must first collect
         * existing siblings, then add the new event
         * before we can simulate the scheduling
         */
        n = uncore_collect_events(fake_box, leader, true);

        fake_box->n_events = n;
        n = uncore_collect_events(fake_box, event, false);

        fake_box->n_events = n;

        ret = uncore_assign_events(fake_box, NULL, n);
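
/*
 * pmu::event_init: uncore counting is always system-wide and per package,
 * so sampling and the exclude_* bits are rejected; the event is bound to
 * the package's box and redirected to the CPU that collects uncore events
 * for that package.
 */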

static int uncore_pmu_event_init(struct perf_event *event)
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        struct hw_perf_event *hwc = &event->hw;

        if (event->attr.type != event->pmu->type)

        pmu = uncore_event_to_pmu(event);
        /* no device found for this pmu */
        if (pmu->func_id < 0)

        /*
         * The uncore PMU measures at all privilege levels all the time,
         * so it doesn't make sense to specify any exclude bits.
         */
        if (event->attr.exclude_user || event->attr.exclude_kernel ||
            event->attr.exclude_hv || event->attr.exclude_idle)

        /* Sampling not supported yet */
        if (hwc->sample_period)

        /*
         * Place all uncore events for a particular physical package
         * onto a single cpu.
         */
        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
        event->cpu = box->cpu;
        event->pmu_private = box;

        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;

        if (event->attr.config == UNCORE_FIXED_EVENT) {
                /* no fixed counter */
                if (!pmu->type->fixed_ctl)

                /*
                 * if there is only one fixed counter, only the first pmu
                 * can access the fixed counter
                 */
                if (pmu->type->single_fixed && pmu->pmu_idx > 0)

                /* fixed counters have event field hardcoded to zero */

        hwc->config = event->attr.config & pmu->type->event_mask;
        if (pmu->type->ops->hw_config) {
                ret = pmu->type->ops->hw_config(box, event);

        if (event->group_leader != event)
                ret = uncore_validate_group(pmu, event);

static ssize_t uncore_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr, char *buf)
        return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
        &dev_attr_cpumask.attr,

static struct attribute_group uncore_pmu_attr_group = {
        .attrs = uncore_pmu_attrs,
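
/*
 * Register one perf PMU per uncore type (and per box index when a type
 * has several boxes).  The PMUs show up under
 * /sys/bus/event_source/devices/ as "uncore", "uncore_<type>" or
 * "uncore_<type>_<idx>", e.g. "perf stat -a -e uncore_imc/data_reads/"
 * on client parts whose IMC uncore exposes a data_reads event.
 */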

static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
        if (!pmu->type->pmu) {
                pmu->pmu = (struct pmu) {
                        .attr_groups    = pmu->type->attr_groups,
                        .task_ctx_nr    = perf_invalid_context,
                        .event_init     = uncore_pmu_event_init,
                        .add            = uncore_pmu_event_add,
                        .del            = uncore_pmu_event_del,
                        .start          = uncore_pmu_event_start,
                        .stop           = uncore_pmu_event_stop,
                        .read           = uncore_pmu_event_read,

                pmu->pmu = *pmu->type->pmu;
                pmu->pmu.attr_groups = pmu->type->attr_groups;

        if (pmu->type->num_boxes == 1) {
                if (strlen(pmu->type->name) > 0)
                        sprintf(pmu->name, "uncore_%s", pmu->type->name);
                        sprintf(pmu->name, "uncore");
                sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,

        ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
        pmu->registered = true;

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
        if (!pmu->registered)

        perf_pmu_unregister(&pmu->pmu);
        pmu->registered = false;

static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
        struct intel_uncore_pmu *pmu = type->pmus;
        struct intel_uncore_box *box;

        pkg = topology_physical_package_id(cpu);
        for (i = 0; i < type->num_boxes; i++, pmu++) {
                box = pmu->boxes[pkg];
                uncore_box_exit(box);

static void uncore_exit_boxes(void *dummy)
        struct intel_uncore_type **types;

        for (types = uncore_msr_uncores; *types; types++)
                __uncore_exit_boxes(*types, smp_processor_id());

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
        for (pkg = 0; pkg < max_packages; pkg++)
                kfree(pmu->boxes[pkg]);

static void uncore_type_exit(struct intel_uncore_type *type)
        struct intel_uncore_pmu *pmu = type->pmus;

        for (i = 0; i < type->num_boxes; i++, pmu++) {
                uncore_pmu_unregister(pmu);
                uncore_free_boxes(pmu);

        kfree(type->events_group);
        type->events_group = NULL;

static void uncore_types_exit(struct intel_uncore_type **types)
        for (; *types; types++)
                uncore_type_exit(*types);
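
/*
 * Per type initialization: allocate one intel_uncore_pmu per box, a
 * per-package box pointer array for each of them, build the default
 * "unconstrainted" constraint from the number of counters and, if the
 * type provides event descriptions, an "events" sysfs attribute group.
 */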

static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
        struct intel_uncore_pmu *pmus;
        struct attribute_group *attr_group;
        struct attribute **attrs;

        pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);

        size = max_packages * sizeof(struct intel_uncore_box *);

        for (i = 0; i < type->num_boxes; i++) {
                pmus[i].func_id = setid ? i : -1;

                pmus[i].boxes = kzalloc(size, GFP_KERNEL);

        type->unconstrainted = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
                                   0, type->num_counters, 0, 0);

        if (type->event_descs) {
                for (i = 0; type->event_descs[i].attr.attr.name; i++);

                attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
                                     sizeof(*attr_group), GFP_KERNEL);

                attrs = (struct attribute **)(attr_group + 1);
                attr_group->name = "events";
                attr_group->attrs = attrs;

                for (j = 0; j < i; j++)
                        attrs[j] = &type->event_descs[j].attr.attr;

                type->events_group = attr_group;

        type->pmu_group = &uncore_pmu_attr_group;

uncore_types_init(struct intel_uncore_type **types, bool setid)

        for (; *types; types++) {
                ret = uncore_type_init(*types, setid);

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        int phys_id, pkg, ret;

        phys_id = uncore_pcibus_to_physid(pdev->bus);

        pkg = topology_phys_to_logical_pkg(phys_id);
        if (WARN_ON_ONCE(pkg < 0))

        if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
                int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

                uncore_extra_pci_dev[pkg].dev[idx] = pdev;
                pci_set_drvdata(pdev, NULL);

        type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

        /*
         * for performance monitoring units with multiple boxes,
         * each box has a different function id.
         */
        pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];

        /*
         * Knights Landing uses a common PCI device ID for multiple instances
         * of an uncore PMU device type. There is only one entry per device
         * type in the knl_uncore_pci_ids table despite multiple devices being
         * present for some device types. Hence the PCI device idx is 0 for
         * all devices, so increment the pmu pointer to point to an unused
         * array element.
         */
        if (boot_cpu_data.x86_model == 87) {
                while (pmu->func_id >= 0)

        if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))

        box = uncore_alloc_box(type, NUMA_NO_NODE);

        if (pmu->func_id < 0)
                pmu->func_id = pdev->devfn;

        WARN_ON_ONCE(pmu->func_id != pdev->devfn);

        atomic_inc(&box->refcnt);
        box->pci_phys_id = phys_id;

        uncore_box_init(box);
        pci_set_drvdata(pdev, box);

        pmu->boxes[pkg] = box;
        if (atomic_inc_return(&pmu->activeboxes) > 1)

        /* First active box registers the pmu */
        ret = uncore_pmu_register(pmu);

        pci_set_drvdata(pdev, NULL);
        pmu->boxes[pkg] = NULL;
        uncore_box_exit(box);

static void uncore_pci_remove(struct pci_dev *pdev)
        struct intel_uncore_box *box = pci_get_drvdata(pdev);
        struct intel_uncore_pmu *pmu;

        phys_id = uncore_pcibus_to_physid(pdev->bus);
        pkg = topology_phys_to_logical_pkg(phys_id);

        box = pci_get_drvdata(pdev);

        for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
                if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
                        uncore_extra_pci_dev[pkg].dev[i] = NULL;

        WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);

        if (WARN_ON_ONCE(phys_id != box->pci_phys_id))

        pci_set_drvdata(pdev, NULL);
        pmu->boxes[pkg] = NULL;
        if (atomic_dec_return(&pmu->activeboxes) == 0)
                uncore_pmu_unregister(pmu);
        uncore_box_exit(box);

static int __init uncore_pci_init(void)

        size = max_packages * sizeof(struct pci_extra_dev);
        uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
        if (!uncore_extra_pci_dev) {

        ret = uncore_types_init(uncore_pci_uncores, false);

        uncore_pci_driver->probe = uncore_pci_probe;
        uncore_pci_driver->remove = uncore_pci_remove;

        ret = pci_register_driver(uncore_pci_driver);

        pcidrv_registered = true;

        uncore_types_exit(uncore_pci_uncores);
        kfree(uncore_extra_pci_dev);
        uncore_extra_pci_dev = NULL;
        uncore_free_pcibus_map();

        uncore_pci_uncores = empty_uncore;

static void uncore_pci_exit(void)
        if (pcidrv_registered) {
                pcidrv_registered = false;
                pci_unregister_driver(uncore_pci_driver);
                uncore_types_exit(uncore_pci_uncores);
                kfree(uncore_extra_pci_dev);
                uncore_free_pcibus_map();
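
/*
 * CPU hotplug handling for the MSR based uncores: boxes are reference
 * counted per package.  uncore_cpu_prepare() allocates the box before the
 * first CPU of a package comes up, the first online CPU of a package
 * initializes it (uncore_cpu_starting), and the last CPU going away exits
 * it again (uncore_cpu_dying).
 */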

static void uncore_cpu_dying(int cpu)
        struct intel_uncore_type *type, **types = uncore_msr_uncores;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;

        pkg = topology_logical_package_id(cpu);
        for (; *types; types++) {

                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        box = pmu->boxes[pkg];
                        if (box && atomic_dec_return(&box->refcnt) == 0)
                                uncore_box_exit(box);

static void uncore_cpu_starting(int cpu, bool init)
        struct intel_uncore_type *type, **types = uncore_msr_uncores;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        int i, pkg, ncpus = 1;

        /*
         * On init we get the number of online cpus in the package
         * and set refcount for all of them.
         */
        ncpus = cpumask_weight(topology_core_cpumask(cpu));

        pkg = topology_logical_package_id(cpu);
        for (; *types; types++) {

                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        box = pmu->boxes[pkg];

                        /* The first cpu on a package activates the box */
                        if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
                                uncore_box_init(box);

static int uncore_cpu_prepare(int cpu)
        struct intel_uncore_type *type, **types = uncore_msr_uncores;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;

        pkg = topology_logical_package_id(cpu);
        for (; *types; types++) {

                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        if (pmu->boxes[pkg])

                        /* First cpu of a package allocates the box */
                        box = uncore_alloc_box(type, cpu_to_node(cpu));

                        pmu->boxes[pkg] = box;
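
/*
 * Move the "collecting" CPU of every box in a package from old_cpu to
 * new_cpu and migrate the perf context along with it, so that exactly one
 * CPU per package reads the uncore counters.
 */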

static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
        struct intel_uncore_pmu *pmu = type->pmus;
        struct intel_uncore_box *box;

        pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
        for (i = 0; i < type->num_boxes; i++, pmu++) {
                box = pmu->boxes[pkg];

                        WARN_ON_ONCE(box->cpu != -1);

                WARN_ON_ONCE(box->cpu != old_cpu);

                uncore_pmu_cancel_hrtimer(box);
                perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);

static void uncore_change_context(struct intel_uncore_type **uncores,
                                  int old_cpu, int new_cpu)
        for (; *uncores; uncores++)
                uncore_change_type_ctx(*uncores, old_cpu, new_cpu);

static void uncore_event_exit_cpu(int cpu)

        /* Check if exiting cpu is used for collecting uncore events */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))

        /* Find a new cpu to collect uncore events */
        target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

        /* Migrate uncore events to the new target */
        if (target < nr_cpu_ids)
                cpumask_set_cpu(target, &uncore_cpu_mask);

        uncore_change_context(uncore_msr_uncores, cpu, target);
        uncore_change_context(uncore_pci_uncores, cpu, target);

static void uncore_event_init_cpu(int cpu)

        /*
         * Check if there is an online cpu in the package
         * which collects uncore events already.
         */
        target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
        if (target < nr_cpu_ids)

        cpumask_set_cpu(cpu, &uncore_cpu_mask);

        uncore_change_context(uncore_msr_uncores, -1, cpu);
        uncore_change_context(uncore_pci_uncores, -1, cpu);

static int uncore_cpu_notifier(struct notifier_block *self,
                               unsigned long action, void *hcpu)
        unsigned int cpu = (long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                return notifier_from_errno(uncore_cpu_prepare(cpu));

                uncore_cpu_starting(cpu, false);
        case CPU_DOWN_FAILED:
                uncore_event_init_cpu(cpu);

        case CPU_UP_CANCELED:

                uncore_cpu_dying(cpu);

        case CPU_DOWN_PREPARE:
                uncore_event_exit_cpu(cpu);

static struct notifier_block uncore_cpu_nb = {
        .notifier_call  = uncore_cpu_notifier,
        /*
         * to migrate uncore events, our notifier should be executed
         * before perf core's notifier.
         */
        .priority       = CPU_PRI_PERF + 1,

static int __init type_pmu_register(struct intel_uncore_type *type)

        for (i = 0; i < type->num_boxes; i++) {
                ret = uncore_pmu_register(&type->pmus[i]);

static int __init uncore_msr_pmus_register(void)
        struct intel_uncore_type **types = uncore_msr_uncores;

        for (; *types; types++) {
                ret = type_pmu_register(*types);

static int __init uncore_cpu_init(void)

        ret = uncore_types_init(uncore_msr_uncores, true);

        ret = uncore_msr_pmus_register();

        uncore_types_exit(uncore_msr_uncores);
        uncore_msr_uncores = empty_uncore;

static void __init uncore_cpu_setup(void *dummy)
        uncore_cpu_starting(smp_processor_id(), true);

/* Lazy to avoid allocation of a few bytes for the normal case */
static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);

static int __init uncore_cpumask_init(bool msr)

        for_each_online_cpu(cpu) {
                unsigned int pkg = topology_logical_package_id(cpu);

                if (test_and_set_bit(pkg, packages))

                /*
                 * The first online cpu of each package allocates and takes
                 * the refcounts for all other online cpus in that package.
                 * If msrs are not enabled no allocation is required.
                 */
                ret = uncore_cpu_prepare(cpu);

                uncore_event_init_cpu(cpu);
                smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);

        __register_cpu_notifier(&uncore_cpu_nb);
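
/*
 * Model table: the driver_data of each x86_cpu_id entry points at an
 * intel_uncore_init_fun carrying the MSR (cpu_init) and/or PCI (pci_init)
 * setup routines for that family 6 model.
 */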

#define X86_UNCORE_MODEL_MATCH(model, init)     \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
        void    (*cpu_init)(void);
        int     (*pci_init)(void);

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
        .cpu_init = nhm_uncore_cpu_init,

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
        .cpu_init = snb_uncore_cpu_init,
        .pci_init = snb_uncore_pci_init,

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
        .cpu_init = snb_uncore_cpu_init,
        .pci_init = ivb_uncore_pci_init,

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
        .cpu_init = snb_uncore_cpu_init,
        .pci_init = hsw_uncore_pci_init,

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
        .cpu_init = snb_uncore_cpu_init,
        .pci_init = bdw_uncore_pci_init,

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
        .cpu_init = snbep_uncore_cpu_init,
        .pci_init = snbep_uncore_pci_init,

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
        .cpu_init = nhmex_uncore_cpu_init,

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
        .cpu_init = ivbep_uncore_cpu_init,
        .pci_init = ivbep_uncore_pci_init,

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
        .cpu_init = hswep_uncore_cpu_init,
        .pci_init = hswep_uncore_pci_init,

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
        .cpu_init = bdx_uncore_cpu_init,
        .pci_init = bdx_uncore_pci_init,

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
        .cpu_init = knl_uncore_cpu_init,
        .pci_init = knl_uncore_pci_init,

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
        .pci_init = skl_uncore_pci_init,

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_UNCORE_MODEL_MATCH(26, nhm_uncore_init),    /* Nehalem */
        X86_UNCORE_MODEL_MATCH(30, nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(37, nhm_uncore_init),    /* Westmere */
        X86_UNCORE_MODEL_MATCH(44, nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(42, snb_uncore_init),    /* Sandy Bridge */
        X86_UNCORE_MODEL_MATCH(58, ivb_uncore_init),    /* Ivy Bridge */
        X86_UNCORE_MODEL_MATCH(60, hsw_uncore_init),    /* Haswell */
        X86_UNCORE_MODEL_MATCH(69, hsw_uncore_init),    /* Haswell Celeron */
        X86_UNCORE_MODEL_MATCH(70, hsw_uncore_init),    /* Haswell */
        X86_UNCORE_MODEL_MATCH(61, bdw_uncore_init),    /* Broadwell */
        X86_UNCORE_MODEL_MATCH(71, bdw_uncore_init),    /* Broadwell */
        X86_UNCORE_MODEL_MATCH(45, snbep_uncore_init),  /* Sandy Bridge-EP */
        X86_UNCORE_MODEL_MATCH(46, nhmex_uncore_init),  /* Nehalem-EX */
        X86_UNCORE_MODEL_MATCH(47, nhmex_uncore_init),  /* Westmere-EX aka. Xeon E7 */
        X86_UNCORE_MODEL_MATCH(62, ivbep_uncore_init),  /* Ivy Bridge-EP */
        X86_UNCORE_MODEL_MATCH(63, hswep_uncore_init),  /* Haswell-EP */
        X86_UNCORE_MODEL_MATCH(79, bdx_uncore_init),    /* BDX-EP */
        X86_UNCORE_MODEL_MATCH(86, bdx_uncore_init),    /* BDX-DE */
        X86_UNCORE_MODEL_MATCH(87, knl_uncore_init),    /* Knights Landing */
        X86_UNCORE_MODEL_MATCH(94, skl_uncore_init),    /* SkyLake */

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
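
/*
 * Module init: match the boot CPU against the model table above, skip
 * initialization when running under a hypervisor (the uncore PMUs are not
 * exposed to guests), then run the model's PCI and MSR init routines and
 * finally set up the per-package collecting CPUs under the CPU hotplug
 * notifier lock.
 */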

static int __init intel_uncore_init(void)
        const struct x86_cpu_id *id;
        struct intel_uncore_init_fun *uncore_init;
        int pret = 0, cret = 0, ret;

        id = x86_match_cpu(intel_uncore_match);

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))

        max_packages = topology_max_packages();

        uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
        if (uncore_init->pci_init) {
                pret = uncore_init->pci_init();
                pret = uncore_pci_init();

        if (uncore_init->cpu_init) {
                uncore_init->cpu_init();
                cret = uncore_cpu_init();

        cpu_notifier_register_begin();
        ret = uncore_cpumask_init(!cret);

        cpu_notifier_register_done();

        /* Undo box->init_box() */
        on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
        uncore_types_exit(uncore_msr_uncores);

        cpu_notifier_register_done();

module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
        cpu_notifier_register_begin();
        __unregister_cpu_notifier(&uncore_cpu_nb);
        uncore_types_exit(uncore_msr_uncores);

        cpu_notifier_register_done();

module_exit(intel_uncore_exit);