arch/x86/events/intel/uncore.c
1 #include <asm/cpu_device_id.h>
2 #include <asm/intel-family.h>
3 #include "uncore.h"
4
5 static struct intel_uncore_type *empty_uncore[] = { NULL, };
6 struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
7 struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
8
9 static bool pcidrv_registered;
10 struct pci_driver *uncore_pci_driver;
11 /* pci bus to socket mapping */
12 DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
13 struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
14 struct pci_extra_dev *uncore_extra_pci_dev;
15 static int max_packages;
16
17 /* mask of cpus that collect uncore events */
18 static cpumask_t uncore_cpu_mask;
19
20 /* constraint for the fixed counter */
21 static struct event_constraint uncore_constraint_fixed =
22         EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
23 struct event_constraint uncore_constraint_empty =
24         EVENT_CONSTRAINT(0, 0, 0);
25
26 MODULE_LICENSE("GPL");
27
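/*
 * Translate a PCI bus to the physical package id recorded in the
 * pci2phy map; returns -1 if no mapping exists for the bus.
 */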
28 static int uncore_pcibus_to_physid(struct pci_bus *bus)
29 {
30         struct pci2phy_map *map;
31         int phys_id = -1;
32
33         raw_spin_lock(&pci2phy_map_lock);
34         list_for_each_entry(map, &pci2phy_map_head, list) {
35                 if (map->segment == pci_domain_nr(bus)) {
36                         phys_id = map->pbus_to_physid[bus->number];
37                         break;
38                 }
39         }
40         raw_spin_unlock(&pci2phy_map_lock);
41
42         return phys_id;
43 }
44
45 static void uncore_free_pcibus_map(void)
46 {
47         struct pci2phy_map *map, *tmp;
48
49         list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
50                 list_del(&map->list);
51                 kfree(map);
52         }
53 }
54
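/*
 * Look up the pci2phy map entry for a PCI segment, allocating a new one
 * if necessary.  Must be called with pci2phy_map_lock held; the lock is
 * dropped around the allocation and the lookup is retried afterwards.
 */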
55 struct pci2phy_map *__find_pci2phy_map(int segment)
56 {
57         struct pci2phy_map *map, *alloc = NULL;
58         int i;
59
60         lockdep_assert_held(&pci2phy_map_lock);
61
62 lookup:
63         list_for_each_entry(map, &pci2phy_map_head, list) {
64                 if (map->segment == segment)
65                         goto end;
66         }
67
68         if (!alloc) {
69                 raw_spin_unlock(&pci2phy_map_lock);
70                 alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
71                 raw_spin_lock(&pci2phy_map_lock);
72
73                 if (!alloc)
74                         return NULL;
75
76                 goto lookup;
77         }
78
79         map = alloc;
80         alloc = NULL;
81         map->segment = segment;
82         for (i = 0; i < 256; i++)
83                 map->pbus_to_physid[i] = -1;
84         list_add_tail(&map->list, &pci2phy_map_head);
85
86 end:
87         kfree(alloc);
88         return map;
89 }
90
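/*
 * Show the config string of an uncore event description.  This backs the
 * per-event sysfs attributes, typically found under
 * /sys/bus/event_source/devices/uncore_*/events/.
 */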
91 ssize_t uncore_event_show(struct kobject *kobj,
92                           struct kobj_attribute *attr, char *buf)
93 {
94         struct uncore_event_desc *event =
95                 container_of(attr, struct uncore_event_desc, attr);
96         return sprintf(buf, "%s", event->config);
97 }
98
99 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
100 {
101         return pmu->boxes[topology_logical_package_id(cpu)];
102 }
103
104 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
105 {
106         u64 count;
107
108         rdmsrl(event->hw.event_base, count);
109
110         return count;
111 }
112
113 /*
114  * generic get constraint function for shared match/mask registers.
115  */
116 struct event_constraint *
117 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
118 {
119         struct intel_uncore_extra_reg *er;
120         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
121         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
122         unsigned long flags;
123         bool ok = false;
124
125         /*
126          * reg->alloc can be set due to existing state, so for a fake box we
127          * need to ignore it, otherwise we might fail to allocate proper
128          * fake state for this extra reg constraint.
129          */
130         if (reg1->idx == EXTRA_REG_NONE ||
131             (!uncore_box_is_fake(box) && reg1->alloc))
132                 return NULL;
133
134         er = &box->shared_regs[reg1->idx];
135         raw_spin_lock_irqsave(&er->lock, flags);
136         if (!atomic_read(&er->ref) ||
137             (er->config1 == reg1->config && er->config2 == reg2->config)) {
138                 atomic_inc(&er->ref);
139                 er->config1 = reg1->config;
140                 er->config2 = reg2->config;
141                 ok = true;
142         }
143         raw_spin_unlock_irqrestore(&er->lock, flags);
144
145         if (ok) {
146                 if (!uncore_box_is_fake(box))
147                         reg1->alloc = 1;
148                 return NULL;
149         }
150
151         return &uncore_constraint_empty;
152 }
153
154 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
155 {
156         struct intel_uncore_extra_reg *er;
157         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
158
159         /*
160          * Only put the constraint if the extra reg was actually allocated.
161          * This also takes care of events which do not use an extra shared reg.
162          *
163          * Also, if this is a fake box we shouldn't touch any event state
164          * (reg->alloc) and we don't care about leaving inconsistent box
165          * state either since it will be thrown out.
166          */
167         if (uncore_box_is_fake(box) || !reg1->alloc)
168                 return;
169
170         er = &box->shared_regs[reg1->idx];
171         atomic_dec(&er->ref);
172         reg1->alloc = 0;
173 }
174
175 u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
176 {
177         struct intel_uncore_extra_reg *er;
178         unsigned long flags;
179         u64 config;
180
181         er = &box->shared_regs[idx];
182
183         raw_spin_lock_irqsave(&er->lock, flags);
184         config = er->config;
185         raw_spin_unlock_irqrestore(&er->lock, flags);
186
187         return config;
188 }
189
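/*
 * Bind an event to a counter index: record the index and tag and set up
 * the control/counter register bases, using the dedicated fixed counter
 * registers for UNCORE_PMC_IDX_FIXED.
 */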
190 static void uncore_assign_hw_event(struct intel_uncore_box *box,
191                                    struct perf_event *event, int idx)
192 {
193         struct hw_perf_event *hwc = &event->hw;
194
195         hwc->idx = idx;
196         hwc->last_tag = ++box->tags[idx];
197
198         if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
199                 hwc->event_base = uncore_fixed_ctr(box);
200                 hwc->config_base = uncore_fixed_ctl(box);
201                 return;
202         }
203
204         hwc->config_base = uncore_event_ctl(box, hwc->idx);
205         hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
206 }
207
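/*
 * Fold the current counter value into event->count.  The shift by
 * (64 - counter width) truncates the delta to the real counter width so
 * that wraparound is handled correctly.
 */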
208 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
209 {
210         u64 prev_count, new_count, delta;
211         int shift;
212
213         if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
214                 shift = 64 - uncore_fixed_ctr_bits(box);
215         else
216                 shift = 64 - uncore_perf_ctr_bits(box);
217
218         /* the hrtimer might modify the previous event value */
219 again:
220         prev_count = local64_read(&event->hw.prev_count);
221         new_count = uncore_read_counter(box, event);
222         if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
223                 goto again;
224
225         delta = (new_count << shift) - (prev_count << shift);
226         delta >>= shift;
227
228         local64_add(delta, &event->count);
229 }
230
231 /*
232  * The overflow interrupt is unavailable on SandyBridge-EP and broken
233  * on SandyBridge, so we use an hrtimer to periodically poll the
234  * counters before they overflow.
235  */
236 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
237 {
238         struct intel_uncore_box *box;
239         struct perf_event *event;
240         unsigned long flags;
241         int bit;
242
243         box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
244         if (!box->n_active || box->cpu != smp_processor_id())
245                 return HRTIMER_NORESTART;
246         /*
247          * Disable local interrupts to prevent uncore_pmu_event_start/stop
248          * from interrupting the update process.
249          */
250         local_irq_save(flags);
251
252         /*
253          * handle boxes with an active event list as opposed to active
254          * counters
255          */
256         list_for_each_entry(event, &box->active_list, active_entry) {
257                 uncore_perf_event_update(box, event);
258         }
259
260         for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
261                 uncore_perf_event_update(box, box->events[bit]);
262
263         local_irq_restore(flags);
264
265         hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
266         return HRTIMER_RESTART;
267 }
268
269 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
270 {
271         hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
272                       HRTIMER_MODE_REL_PINNED);
273 }
274
275 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
276 {
277         hrtimer_cancel(&box->hrtimer);
278 }
279
280 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
281 {
282         hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
283         box->hrtimer.function = uncore_pmu_hrtimer;
284 }
285
286 static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
287                                                  int node)
288 {
289         int i, size, numshared = type->num_shared_regs;
290         struct intel_uncore_box *box;
291
292         size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);
293
294         box = kzalloc_node(size, GFP_KERNEL, node);
295         if (!box)
296                 return NULL;
297
298         for (i = 0; i < numshared; i++)
299                 raw_spin_lock_init(&box->shared_regs[i].lock);
300
301         uncore_pmu_init_hrtimer(box);
302         box->cpu = -1;
303         box->pci_phys_id = -1;
304         box->pkgid = -1;
305
306         /* set default hrtimer timeout */
307         box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
308
309         INIT_LIST_HEAD(&box->active_list);
310
311         return box;
312 }
313
314 /*
315  * Use the uncore_pmu_event_init() pmu event_init callback
316  * as a detection point for uncore events.
317  */
318 static int uncore_pmu_event_init(struct perf_event *event);
319
320 static bool is_uncore_event(struct perf_event *event)
321 {
322         return event->pmu->event_init == uncore_pmu_event_init;
323 }
324
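/*
 * Collect the leader (and, if @dogrp is set, its uncore siblings) into
 * the box's event list.  Returns the new number of events or -EINVAL if
 * the box would exceed its counter capacity.
 */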
325 static int
326 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
327                       bool dogrp)
328 {
329         struct perf_event *event;
330         int n, max_count;
331
332         max_count = box->pmu->type->num_counters;
333         if (box->pmu->type->fixed_ctl)
334                 max_count++;
335
336         if (box->n_events >= max_count)
337                 return -EINVAL;
338
339         n = box->n_events;
340
341         if (is_uncore_event(leader)) {
342                 box->event_list[n] = leader;
343                 n++;
344         }
345
346         if (!dogrp)
347                 return n;
348
349         list_for_each_entry(event, &leader->sibling_list, group_entry) {
350                 if (!is_uncore_event(event) ||
351                     event->state <= PERF_EVENT_STATE_OFF)
352                         continue;
353
354                 if (n >= max_count)
355                         return -EINVAL;
356
357                 box->event_list[n] = event;
358                 n++;
359         }
360         return n;
361 }
362
363 static struct event_constraint *
364 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
365 {
366         struct intel_uncore_type *type = box->pmu->type;
367         struct event_constraint *c;
368
369         if (type->ops->get_constraint) {
370                 c = type->ops->get_constraint(box, event);
371                 if (c)
372                         return c;
373         }
374
375         if (event->attr.config == UNCORE_FIXED_EVENT)
376                 return &uncore_constraint_fixed;
377
378         if (type->constraints) {
379                 for_each_event_constraint(c, type->constraints) {
380                         if ((event->hw.config & c->cmask) == c->code)
381                                 return c;
382                 }
383         }
384
385         return &type->unconstrainted;
386 }
387
388 static void uncore_put_event_constraint(struct intel_uncore_box *box,
389                                         struct perf_event *event)
390 {
391         if (box->pmu->type->ops->put_constraint)
392                 box->pmu->type->ops->put_constraint(box, event);
393 }
394
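/*
 * Assign counters to the first @n events in the box: the fast path reuses
 * previous assignments while constraints still allow it, otherwise fall
 * back to perf_assign_events().  With @assign == NULL this only validates
 * that a schedule exists.
 */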
395 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
396 {
397         unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
398         struct event_constraint *c;
399         int i, wmin, wmax, ret = 0;
400         struct hw_perf_event *hwc;
401
402         bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
403
404         for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
405                 c = uncore_get_event_constraint(box, box->event_list[i]);
406                 box->event_constraint[i] = c;
407                 wmin = min(wmin, c->weight);
408                 wmax = max(wmax, c->weight);
409         }
410
411         /* fastpath, try to reuse previous register */
412         for (i = 0; i < n; i++) {
413                 hwc = &box->event_list[i]->hw;
414                 c = box->event_constraint[i];
415
416                 /* never assigned */
417                 if (hwc->idx == -1)
418                         break;
419
420                 /* constraint still honored */
421                 if (!test_bit(hwc->idx, c->idxmsk))
422                         break;
423
424                 /* not already used */
425                 if (test_bit(hwc->idx, used_mask))
426                         break;
427
428                 __set_bit(hwc->idx, used_mask);
429                 if (assign)
430                         assign[i] = hwc->idx;
431         }
432         /* slow path */
433         if (i != n)
434                 ret = perf_assign_events(box->event_constraint, n,
435                                          wmin, wmax, n, assign);
436
437         if (!assign || ret) {
438                 for (i = 0; i < n; i++)
439                         uncore_put_event_constraint(box, box->event_list[i]);
440         }
441         return ret ? -EINVAL : 0;
442 }
443
444 static void uncore_pmu_event_start(struct perf_event *event, int flags)
445 {
446         struct intel_uncore_box *box = uncore_event_to_box(event);
447         int idx = event->hw.idx;
448
449         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
450                 return;
451
452         if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
453                 return;
454
455         event->hw.state = 0;
456         box->events[idx] = event;
457         box->n_active++;
458         __set_bit(idx, box->active_mask);
459
460         local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
461         uncore_enable_event(box, event);
462
463         if (box->n_active == 1) {
464                 uncore_enable_box(box);
465                 uncore_pmu_start_hrtimer(box);
466         }
467 }
468
469 static void uncore_pmu_event_stop(struct perf_event *event, int flags)
470 {
471         struct intel_uncore_box *box = uncore_event_to_box(event);
472         struct hw_perf_event *hwc = &event->hw;
473
474         if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
475                 uncore_disable_event(box, event);
476                 box->n_active--;
477                 box->events[hwc->idx] = NULL;
478                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
479                 hwc->state |= PERF_HES_STOPPED;
480
481                 if (box->n_active == 0) {
482                         uncore_disable_box(box);
483                         uncore_pmu_cancel_hrtimer(box);
484                 }
485         }
486
487         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
488                 /*
489                  * Drain the remaining delta count out of an event
490                  * that we are disabling:
491                  */
492                 uncore_perf_event_update(box, event);
493                 hwc->state |= PERF_HES_UPTODATE;
494         }
495 }
496
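/*
 * pmu::add callback: collect the new event, reassign counters, then stop
 * and reprogram any events that moved, restarting only those that were
 * actually running before.
 */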
497 static int uncore_pmu_event_add(struct perf_event *event, int flags)
498 {
499         struct intel_uncore_box *box = uncore_event_to_box(event);
500         struct hw_perf_event *hwc = &event->hw;
501         int assign[UNCORE_PMC_IDX_MAX];
502         int i, n, ret;
503
504         if (!box)
505                 return -ENODEV;
506
507         ret = n = uncore_collect_events(box, event, false);
508         if (ret < 0)
509                 return ret;
510
511         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
512         if (!(flags & PERF_EF_START))
513                 hwc->state |= PERF_HES_ARCH;
514
515         ret = uncore_assign_events(box, assign, n);
516         if (ret)
517                 return ret;
518
519         /* save events moving to new counters */
520         for (i = 0; i < box->n_events; i++) {
521                 event = box->event_list[i];
522                 hwc = &event->hw;
523
524                 if (hwc->idx == assign[i] &&
525                         hwc->last_tag == box->tags[assign[i]])
526                         continue;
527                 /*
528                  * Ensure we don't accidentally enable a stopped
529                  * counter simply because we rescheduled.
530                  */
531                 if (hwc->state & PERF_HES_STOPPED)
532                         hwc->state |= PERF_HES_ARCH;
533
534                 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
535         }
536
537         /* reprogram moved events into new counters */
538         for (i = 0; i < n; i++) {
539                 event = box->event_list[i];
540                 hwc = &event->hw;
541
542                 if (hwc->idx != assign[i] ||
543                         hwc->last_tag != box->tags[assign[i]])
544                         uncore_assign_hw_event(box, event, assign[i]);
545                 else if (i < box->n_events)
546                         continue;
547
548                 if (hwc->state & PERF_HES_ARCH)
549                         continue;
550
551                 uncore_pmu_event_start(event, 0);
552         }
553         box->n_events = n;
554
555         return 0;
556 }
557
558 static void uncore_pmu_event_del(struct perf_event *event, int flags)
559 {
560         struct intel_uncore_box *box = uncore_event_to_box(event);
561         int i;
562
563         uncore_pmu_event_stop(event, PERF_EF_UPDATE);
564
565         for (i = 0; i < box->n_events; i++) {
566                 if (event == box->event_list[i]) {
567                         uncore_put_event_constraint(box, event);
568
569                         for (++i; i < box->n_events; i++)
570                                 box->event_list[i - 1] = box->event_list[i];
571
572                         --box->n_events;
573                         break;
574                 }
575         }
576
577         event->hw.idx = -1;
578         event->hw.last_tag = ~0ULL;
579 }
580
581 void uncore_pmu_event_read(struct perf_event *event)
582 {
583         struct intel_uncore_box *box = uncore_event_to_box(event);
584         uncore_perf_event_update(box, event);
585 }
586
587 /*
588  * validation ensures the group can be loaded onto the
589  * PMU if it was the only group available.
590  */
591 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
592                                 struct perf_event *event)
593 {
594         struct perf_event *leader = event->group_leader;
595         struct intel_uncore_box *fake_box;
596         int ret = -EINVAL, n;
597
598         fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
599         if (!fake_box)
600                 return -ENOMEM;
601
602         fake_box->pmu = pmu;
603         /*
604          * The event is not yet connected with its siblings,
605          * therefore we must first collect the existing siblings,
606          * then add the new event before we can simulate the
607          * scheduling.
608          */
609         n = uncore_collect_events(fake_box, leader, true);
610         if (n < 0)
611                 goto out;
612
613         fake_box->n_events = n;
614         n = uncore_collect_events(fake_box, event, false);
615         if (n < 0)
616                 goto out;
617
618         fake_box->n_events = n;
619
620         ret = uncore_assign_events(fake_box, NULL, n);
621 out:
622         kfree(fake_box);
623         return ret;
624 }
625
626 static int uncore_pmu_event_init(struct perf_event *event)
627 {
628         struct intel_uncore_pmu *pmu;
629         struct intel_uncore_box *box;
630         struct hw_perf_event *hwc = &event->hw;
631         int ret;
632
633         if (event->attr.type != event->pmu->type)
634                 return -ENOENT;
635
636         pmu = uncore_event_to_pmu(event);
637         /* no device found for this pmu */
638         if (pmu->func_id < 0)
639                 return -ENOENT;
640
641         /*
642          * The uncore PMU always measures at all privilege levels,
643          * so it doesn't make sense to specify any exclude bits.
644          */
645         if (event->attr.exclude_user || event->attr.exclude_kernel ||
646                         event->attr.exclude_hv || event->attr.exclude_idle)
647                 return -EINVAL;
648
649         /* Sampling not supported yet */
650         if (hwc->sample_period)
651                 return -EINVAL;
652
653         /*
654          * Place all uncore events for a particular physical package
655          * onto a single cpu
656          */
657         if (event->cpu < 0)
658                 return -EINVAL;
659         box = uncore_pmu_to_box(pmu, event->cpu);
660         if (!box || box->cpu < 0)
661                 return -EINVAL;
662         event->cpu = box->cpu;
663         event->pmu_private = box;
664
665         event->hw.idx = -1;
666         event->hw.last_tag = ~0ULL;
667         event->hw.extra_reg.idx = EXTRA_REG_NONE;
668         event->hw.branch_reg.idx = EXTRA_REG_NONE;
669
670         if (event->attr.config == UNCORE_FIXED_EVENT) {
671                 /* no fixed counter */
672                 if (!pmu->type->fixed_ctl)
673                         return -EINVAL;
674                 /*
675                  * if there is only one fixed counter, only the first pmu
676                  * can access the fixed counter
677                  */
678                 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
679                         return -EINVAL;
680
681                 /* fixed counters have event field hardcoded to zero */
682                 hwc->config = 0ULL;
683         } else {
684                 hwc->config = event->attr.config & pmu->type->event_mask;
685                 if (pmu->type->ops->hw_config) {
686                         ret = pmu->type->ops->hw_config(box, event);
687                         if (ret)
688                                 return ret;
689                 }
690         }
691
692         if (event->group_leader != event)
693                 ret = uncore_validate_group(pmu, event);
694         else
695                 ret = 0;
696
697         return ret;
698 }
699
700 static ssize_t uncore_get_attr_cpumask(struct device *dev,
701                                 struct device_attribute *attr, char *buf)
702 {
703         return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
704 }
705
706 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
707
708 static struct attribute *uncore_pmu_attrs[] = {
709         &dev_attr_cpumask.attr,
710         NULL,
711 };
712
713 static struct attribute_group uncore_pmu_attr_group = {
714         .attrs = uncore_pmu_attrs,
715 };
716
717 static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
718 {
719         int ret;
720
721         if (!pmu->type->pmu) {
722                 pmu->pmu = (struct pmu) {
723                         .attr_groups    = pmu->type->attr_groups,
724                         .task_ctx_nr    = perf_invalid_context,
725                         .event_init     = uncore_pmu_event_init,
726                         .add            = uncore_pmu_event_add,
727                         .del            = uncore_pmu_event_del,
728                         .start          = uncore_pmu_event_start,
729                         .stop           = uncore_pmu_event_stop,
730                         .read           = uncore_pmu_event_read,
731                 };
732         } else {
733                 pmu->pmu = *pmu->type->pmu;
734                 pmu->pmu.attr_groups = pmu->type->attr_groups;
735         }
736
737         if (pmu->type->num_boxes == 1) {
738                 if (strlen(pmu->type->name) > 0)
739                         sprintf(pmu->name, "uncore_%s", pmu->type->name);
740                 else
741                         sprintf(pmu->name, "uncore");
742         } else {
743                 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
744                         pmu->pmu_idx);
745         }
746
747         ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
748         if (!ret)
749                 pmu->registered = true;
750         return ret;
751 }
752
753 static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
754 {
755         if (!pmu->registered)
756                 return;
757         perf_pmu_unregister(&pmu->pmu);
758         pmu->registered = false;
759 }
760
761 static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
762 {
763         struct intel_uncore_pmu *pmu = type->pmus;
764         struct intel_uncore_box *box;
765         int i, pkg;
766
767         if (pmu) {
768                 pkg = topology_physical_package_id(cpu);
769                 for (i = 0; i < type->num_boxes; i++, pmu++) {
770                         box = pmu->boxes[pkg];
771                         if (box)
772                                 uncore_box_exit(box);
773                 }
774         }
775 }
776
777 static void uncore_exit_boxes(void *dummy)
778 {
779         struct intel_uncore_type **types;
780
781         for (types = uncore_msr_uncores; *types; types++)
782                 __uncore_exit_boxes(*types++, smp_processor_id());
783 }
784
785 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
786 {
787         int pkg;
788
789         for (pkg = 0; pkg < max_packages; pkg++)
790                 kfree(pmu->boxes[pkg]);
791         kfree(pmu->boxes);
792 }
793
794 static void uncore_type_exit(struct intel_uncore_type *type)
795 {
796         struct intel_uncore_pmu *pmu = type->pmus;
797         int i;
798
799         if (pmu) {
800                 for (i = 0; i < type->num_boxes; i++, pmu++) {
801                         uncore_pmu_unregister(pmu);
802                         uncore_free_boxes(pmu);
803                 }
804                 kfree(type->pmus);
805                 type->pmus = NULL;
806         }
807         kfree(type->events_group);
808         type->events_group = NULL;
809 }
810
811 static void uncore_types_exit(struct intel_uncore_type **types)
812 {
813         for (; *types; types++)
814                 uncore_type_exit(*types);
815 }
816
817 static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
818 {
819         struct intel_uncore_pmu *pmus;
820         struct attribute_group *attr_group;
821         struct attribute **attrs;
822         size_t size;
823         int i, j;
824
825         pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
826         if (!pmus)
827                 return -ENOMEM;
828
829         size = max_packages * sizeof(struct intel_uncore_box *);
830
831         for (i = 0; i < type->num_boxes; i++) {
832                 pmus[i].func_id = setid ? i : -1;
833                 pmus[i].pmu_idx = i;
834                 pmus[i].type    = type;
835                 pmus[i].boxes   = kzalloc(size, GFP_KERNEL);
836                 if (!pmus[i].boxes)
837                         return -ENOMEM;
838         }
839
840         type->pmus = pmus;
841         type->unconstrainted = (struct event_constraint)
842                 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
843                                 0, type->num_counters, 0, 0);
844
845         if (type->event_descs) {
846                 for (i = 0; type->event_descs[i].attr.attr.name; i++);
847
848                 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
849                                         sizeof(*attr_group), GFP_KERNEL);
850                 if (!attr_group)
851                         return -ENOMEM;
852
853                 attrs = (struct attribute **)(attr_group + 1);
854                 attr_group->name = "events";
855                 attr_group->attrs = attrs;
856
857                 for (j = 0; j < i; j++)
858                         attrs[j] = &type->event_descs[j].attr.attr;
859
860                 type->events_group = attr_group;
861         }
862
863         type->pmu_group = &uncore_pmu_attr_group;
864         return 0;
865 }
866
867 static int __init
868 uncore_types_init(struct intel_uncore_type **types, bool setid)
869 {
870         int ret;
871
872         for (; *types; types++) {
873                 ret = uncore_type_init(*types, setid);
874                 if (ret)
875                         return ret;
876         }
877         return 0;
878 }
879
880 /*
881  * add a pci uncore device
882  */
883 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
884 {
885         struct intel_uncore_type *type;
886         struct intel_uncore_pmu *pmu = NULL;
887         struct intel_uncore_box *box;
888         int phys_id, pkg, ret;
889
890         phys_id = uncore_pcibus_to_physid(pdev->bus);
891         if (phys_id < 0)
892                 return -ENODEV;
893
894         pkg = topology_phys_to_logical_pkg(phys_id);
895         if (pkg < 0)
896                 return -EINVAL;
897
898         if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
899                 int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
900
901                 uncore_extra_pci_dev[pkg].dev[idx] = pdev;
902                 pci_set_drvdata(pdev, NULL);
903                 return 0;
904         }
905
906         type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
907
908         /*
909          * Some platforms, e.g. Knights Landing, use a common PCI device ID
910          * for multiple instances of an uncore PMU device type, so we check
911          * the PCI slot and function to identify the uncore box.
912          */
913         if (id->driver_data & ~0xffff) {
914                 struct pci_driver *pci_drv = pdev->driver;
915                 const struct pci_device_id *ids = pci_drv->id_table;
916                 unsigned int devfn;
917
918                 while (ids && ids->vendor) {
919                         if ((ids->vendor == pdev->vendor) &&
920                             (ids->device == pdev->device)) {
921                                 devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
922                                                   UNCORE_PCI_DEV_FUNC(ids->driver_data));
923                                 if (devfn == pdev->devfn) {
924                                         pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
925                                         break;
926                                 }
927                         }
928                         ids++;
929                 }
930                 if (pmu == NULL)
931                         return -ENODEV;
932         } else {
933                 /*
934                  * For a performance monitoring unit with multiple boxes,
935                  * each box has a different function id.
936                  */
937                 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
938         }
939
940         if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
941                 return -EINVAL;
942
943         box = uncore_alloc_box(type, NUMA_NO_NODE);
944         if (!box)
945                 return -ENOMEM;
946
947         if (pmu->func_id < 0)
948                 pmu->func_id = pdev->devfn;
949         else
950                 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
951
952         atomic_inc(&box->refcnt);
953         box->pci_phys_id = phys_id;
954         box->pkgid = pkg;
955         box->pci_dev = pdev;
956         box->pmu = pmu;
957         uncore_box_init(box);
958         pci_set_drvdata(pdev, box);
959
960         pmu->boxes[pkg] = box;
961         if (atomic_inc_return(&pmu->activeboxes) > 1)
962                 return 0;
963
964         /* First active box registers the pmu */
965         ret = uncore_pmu_register(pmu);
966         if (ret) {
967                 pci_set_drvdata(pdev, NULL);
968                 pmu->boxes[pkg] = NULL;
969                 uncore_box_exit(box);
970                 kfree(box);
971         }
972         return ret;
973 }
974
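/*
 * Remove a PCI uncore device: either clear its extra-dev slot or detach
 * the box from its PMU, unregistering the PMU when the last active box
 * goes away.
 */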
975 static void uncore_pci_remove(struct pci_dev *pdev)
976 {
977         struct intel_uncore_box *box;
978         struct intel_uncore_pmu *pmu;
979         int i, phys_id, pkg;
980
981         phys_id = uncore_pcibus_to_physid(pdev->bus);
982         pkg = topology_phys_to_logical_pkg(phys_id);
983
984         box = pci_get_drvdata(pdev);
985         if (!box) {
986                 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
987                         if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
988                                 uncore_extra_pci_dev[pkg].dev[i] = NULL;
989                                 break;
990                         }
991                 }
992                 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
993                 return;
994         }
995
996         pmu = box->pmu;
997         if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
998                 return;
999
1000         pci_set_drvdata(pdev, NULL);
1001         pmu->boxes[pkg] = NULL;
1002         if (atomic_dec_return(&pmu->activeboxes) == 0)
1003                 uncore_pmu_unregister(pmu);
1004         uncore_box_exit(box);
1005         kfree(box);
1006 }
1007
1008 static int __init uncore_pci_init(void)
1009 {
1010         size_t size;
1011         int ret;
1012
1013         size = max_packages * sizeof(struct pci_extra_dev);
1014         uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
1015         if (!uncore_extra_pci_dev) {
1016                 ret = -ENOMEM;
1017                 goto err;
1018         }
1019
1020         ret = uncore_types_init(uncore_pci_uncores, false);
1021         if (ret)
1022                 goto errtype;
1023
1024         uncore_pci_driver->probe = uncore_pci_probe;
1025         uncore_pci_driver->remove = uncore_pci_remove;
1026
1027         ret = pci_register_driver(uncore_pci_driver);
1028         if (ret)
1029                 goto errtype;
1030
1031         pcidrv_registered = true;
1032         return 0;
1033
1034 errtype:
1035         uncore_types_exit(uncore_pci_uncores);
1036         kfree(uncore_extra_pci_dev);
1037         uncore_extra_pci_dev = NULL;
1038         uncore_free_pcibus_map();
1039 err:
1040         uncore_pci_uncores = empty_uncore;
1041         return ret;
1042 }
1043
1044 static void uncore_pci_exit(void)
1045 {
1046         if (pcidrv_registered) {
1047                 pcidrv_registered = false;
1048                 pci_unregister_driver(uncore_pci_driver);
1049                 uncore_types_exit(uncore_pci_uncores);
1050                 kfree(uncore_extra_pci_dev);
1051                 uncore_free_pcibus_map();
1052         }
1053 }
1054
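/*
 * CPU hotplug (CPU_DYING / CPU_UP_CANCELED): drop one reference on each
 * box of the CPU's package and exit a box when its last reference is
 * gone.
 */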
1055 static void uncore_cpu_dying(int cpu)
1056 {
1057         struct intel_uncore_type *type, **types = uncore_msr_uncores;
1058         struct intel_uncore_pmu *pmu;
1059         struct intel_uncore_box *box;
1060         int i, pkg;
1061
1062         pkg = topology_logical_package_id(cpu);
1063         for (; *types; types++) {
1064                 type = *types;
1065                 pmu = type->pmus;
1066                 for (i = 0; i < type->num_boxes; i++, pmu++) {
1067                         box = pmu->boxes[pkg];
1068                         if (box && atomic_dec_return(&box->refcnt) == 0)
1069                                 uncore_box_exit(box);
1070                 }
1071         }
1072 }
1073
1074 static void uncore_cpu_starting(int cpu, bool init)
1075 {
1076         struct intel_uncore_type *type, **types = uncore_msr_uncores;
1077         struct intel_uncore_pmu *pmu;
1078         struct intel_uncore_box *box;
1079         int i, pkg, ncpus = 1;
1080
1081         if (init) {
1082                 /*
1083                  * On init we get the number of online cpus in the package
1084                  * and set refcount for all of them.
1085                  */
1086                 ncpus = cpumask_weight(topology_core_cpumask(cpu));
1087         }
1088
1089         pkg = topology_logical_package_id(cpu);
1090         for (; *types; types++) {
1091                 type = *types;
1092                 pmu = type->pmus;
1093                 for (i = 0; i < type->num_boxes; i++, pmu++) {
1094                         box = pmu->boxes[pkg];
1095                         if (!box)
1096                                 continue;
1097                         /* The first cpu on a package activates the box */
1098                         if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
1099                                 uncore_box_init(box);
1100                 }
1101         }
1102 }
1103
1104 static int uncore_cpu_prepare(int cpu)
1105 {
1106         struct intel_uncore_type *type, **types = uncore_msr_uncores;
1107         struct intel_uncore_pmu *pmu;
1108         struct intel_uncore_box *box;
1109         int i, pkg;
1110
1111         pkg = topology_logical_package_id(cpu);
1112         for (; *types; types++) {
1113                 type = *types;
1114                 pmu = type->pmus;
1115                 for (i = 0; i < type->num_boxes; i++, pmu++) {
1116                         if (pmu->boxes[pkg])
1117                                 continue;
1118                         /* First cpu of a package allocates the box */
1119                         box = uncore_alloc_box(type, cpu_to_node(cpu));
1120                         if (!box)
1121                                 return -ENOMEM;
1122                         box->pmu = pmu;
1123                         box->pkgid = pkg;
1124                         pmu->boxes[pkg] = box;
1125                 }
1126         }
1127         return 0;
1128 }
1129
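/*
 * Move the per-package boxes of a type from @old_cpu to @new_cpu,
 * migrating the perf context along.  With @old_cpu < 0 this only
 * designates the initial event-collecting CPU.
 */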
1130 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
1131                                    int new_cpu)
1132 {
1133         struct intel_uncore_pmu *pmu = type->pmus;
1134         struct intel_uncore_box *box;
1135         int i, pkg;
1136
1137         pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
1138         for (i = 0; i < type->num_boxes; i++, pmu++) {
1139                 box = pmu->boxes[pkg];
1140                 if (!box)
1141                         continue;
1142
1143                 if (old_cpu < 0) {
1144                         WARN_ON_ONCE(box->cpu != -1);
1145                         box->cpu = new_cpu;
1146                         continue;
1147                 }
1148
1149                 WARN_ON_ONCE(box->cpu != old_cpu);
1150                 box->cpu = -1;
1151                 if (new_cpu < 0)
1152                         continue;
1153
1154                 uncore_pmu_cancel_hrtimer(box);
1155                 perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
1156                 box->cpu = new_cpu;
1157         }
1158 }
1159
1160 static void uncore_change_context(struct intel_uncore_type **uncores,
1161                                   int old_cpu, int new_cpu)
1162 {
1163         for (; *uncores; uncores++)
1164                 uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
1165 }
1166
1167 static void uncore_event_exit_cpu(int cpu)
1168 {
1169         int target;
1170
1171         /* Check if exiting cpu is used for collecting uncore events */
1172         if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
1173                 return;
1174
1175         /* Find a new cpu to collect uncore events */
1176         target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
1177
1178         /* Migrate uncore events to the new target */
1179         if (target < nr_cpu_ids)
1180                 cpumask_set_cpu(target, &uncore_cpu_mask);
1181         else
1182                 target = -1;
1183
1184         uncore_change_context(uncore_msr_uncores, cpu, target);
1185         uncore_change_context(uncore_pci_uncores, cpu, target);
1186 }
1187
1188 static void uncore_event_init_cpu(int cpu)
1189 {
1190         int target;
1191
1192         /*
1193          * Check if there is an online cpu in the package
1194          * which collects uncore events already.
1195          */
1196         target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
1197         if (target < nr_cpu_ids)
1198                 return;
1199
1200         cpumask_set_cpu(cpu, &uncore_cpu_mask);
1201
1202         uncore_change_context(uncore_msr_uncores, -1, cpu);
1203         uncore_change_context(uncore_pci_uncores, -1, cpu);
1204 }
1205
1206 static int uncore_cpu_notifier(struct notifier_block *self,
1207                                unsigned long action, void *hcpu)
1208 {
1209         unsigned int cpu = (long)hcpu;
1210
1211         switch (action & ~CPU_TASKS_FROZEN) {
1212         case CPU_UP_PREPARE:
1213                 return notifier_from_errno(uncore_cpu_prepare(cpu));
1214
1215         case CPU_STARTING:
1216                 uncore_cpu_starting(cpu, false);
1217         case CPU_DOWN_FAILED:
1218                 uncore_event_init_cpu(cpu);
1219                 break;
1220
1221         case CPU_UP_CANCELED:
1222         case CPU_DYING:
1223                 uncore_cpu_dying(cpu);
1224                 break;
1225
1226         case CPU_DOWN_PREPARE:
1227                 uncore_event_exit_cpu(cpu);
1228                 break;
1229         }
1230         return NOTIFY_OK;
1231 }
1232
1233 static struct notifier_block uncore_cpu_nb = {
1234         .notifier_call  = uncore_cpu_notifier,
1235         /*
1236          * To migrate uncore events, our notifier must be executed
1237          * before the perf core's notifier.
1238          */
1239         .priority       = CPU_PRI_PERF + 1,
1240 };
1241
1242 static int __init type_pmu_register(struct intel_uncore_type *type)
1243 {
1244         int i, ret;
1245
1246         for (i = 0; i < type->num_boxes; i++) {
1247                 ret = uncore_pmu_register(&type->pmus[i]);
1248                 if (ret)
1249                         return ret;
1250         }
1251         return 0;
1252 }
1253
1254 static int __init uncore_msr_pmus_register(void)
1255 {
1256         struct intel_uncore_type **types = uncore_msr_uncores;
1257         int ret;
1258
1259         for (; *types; types++) {
1260                 ret = type_pmu_register(*types);
1261                 if (ret)
1262                         return ret;
1263         }
1264         return 0;
1265 }
1266
1267 static int __init uncore_cpu_init(void)
1268 {
1269         int ret;
1270
1271         ret = uncore_types_init(uncore_msr_uncores, true);
1272         if (ret)
1273                 goto err;
1274
1275         ret = uncore_msr_pmus_register();
1276         if (ret)
1277                 goto err;
1278         return 0;
1279 err:
1280         uncore_types_exit(uncore_msr_uncores);
1281         uncore_msr_uncores = empty_uncore;
1282         return ret;
1283 }
1284
1285 static void __init uncore_cpu_setup(void *dummy)
1286 {
1287         uncore_cpu_starting(smp_processor_id(), true);
1288 }
1289
1290 /* Kept static (__initdata) to avoid allocating a few bytes for the normal case */
1291 static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);
1292
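/*
 * Pick one online CPU per package to collect uncore events, allocate the
 * MSR boxes on it if needed, initialize them there and finally register
 * the hotplug notifier.
 */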
1293 static int __init uncore_cpumask_init(bool msr)
1294 {
1295         unsigned int cpu;
1296
1297         for_each_online_cpu(cpu) {
1298                 unsigned int pkg = topology_logical_package_id(cpu);
1299                 int ret;
1300
1301                 if (test_and_set_bit(pkg, packages))
1302                         continue;
1303                 /*
1304                  * The first online cpu of each package allocates and takes
1305                  * the refcounts for all other online cpus in that package.
1306                  * If MSRs are not enabled, no allocation is required.
1307                  */
1308                 if (msr) {
1309                         ret = uncore_cpu_prepare(cpu);
1310                         if (ret)
1311                                 return ret;
1312                 }
1313                 uncore_event_init_cpu(cpu);
1314                 smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
1315         }
1316         __register_cpu_notifier(&uncore_cpu_nb);
1317         return 0;
1318 }
1319
1320 #define X86_UNCORE_MODEL_MATCH(model, init)     \
1321         { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
1322
1323 struct intel_uncore_init_fun {
1324         void    (*cpu_init)(void);
1325         int     (*pci_init)(void);
1326 };
1327
1328 static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
1329         .cpu_init = nhm_uncore_cpu_init,
1330 };
1331
1332 static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
1333         .cpu_init = snb_uncore_cpu_init,
1334         .pci_init = snb_uncore_pci_init,
1335 };
1336
1337 static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
1338         .cpu_init = snb_uncore_cpu_init,
1339         .pci_init = ivb_uncore_pci_init,
1340 };
1341
1342 static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
1343         .cpu_init = snb_uncore_cpu_init,
1344         .pci_init = hsw_uncore_pci_init,
1345 };
1346
1347 static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
1348         .cpu_init = snb_uncore_cpu_init,
1349         .pci_init = bdw_uncore_pci_init,
1350 };
1351
1352 static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
1353         .cpu_init = snbep_uncore_cpu_init,
1354         .pci_init = snbep_uncore_pci_init,
1355 };
1356
1357 static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
1358         .cpu_init = nhmex_uncore_cpu_init,
1359 };
1360
1361 static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
1362         .cpu_init = ivbep_uncore_cpu_init,
1363         .pci_init = ivbep_uncore_pci_init,
1364 };
1365
1366 static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
1367         .cpu_init = hswep_uncore_cpu_init,
1368         .pci_init = hswep_uncore_pci_init,
1369 };
1370
1371 static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
1372         .cpu_init = bdx_uncore_cpu_init,
1373         .pci_init = bdx_uncore_pci_init,
1374 };
1375
1376 static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
1377         .cpu_init = knl_uncore_cpu_init,
1378         .pci_init = knl_uncore_pci_init,
1379 };
1380
1381 static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
1382         .pci_init = skl_uncore_pci_init,
1383 };
1384
1385 static const struct x86_cpu_id intel_uncore_match[] __initconst = {
1386         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,     nhm_uncore_init),
1387         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,        nhm_uncore_init),
1388         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE,       nhm_uncore_init),
1389         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP,    nhm_uncore_init),
1390         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,    snb_uncore_init),
1391         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,      ivb_uncore_init),
1392         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,   hsw_uncore_init),
1393         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,    hsw_uncore_init),
1394         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,   hsw_uncore_init),
1395         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
1396         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
1397         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,  snbep_uncore_init),
1398         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX,     nhmex_uncore_init),
1399         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX,    nhmex_uncore_init),
1400         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,    ivbep_uncore_init),
1401         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X,      hswep_uncore_init),
1402         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,    bdx_uncore_init),
1403         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
1404         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,   knl_uncore_init),
1405         X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
1406         {},
1407 };
1408
1409 MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
1410
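/*
 * Module init: match the CPU model, bail out on hypervisors, set up the
 * PCI and/or MSR uncore PMUs and register the hotplug machinery, rolling
 * everything back on failure.
 */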
1411 static int __init intel_uncore_init(void)
1412 {
1413         const struct x86_cpu_id *id;
1414         struct intel_uncore_init_fun *uncore_init;
1415         int pret = 0, cret = 0, ret;
1416
1417         id = x86_match_cpu(intel_uncore_match);
1418         if (!id)
1419                 return -ENODEV;
1420
1421         if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
1422                 return -ENODEV;
1423
1424         max_packages = topology_max_packages();
1425
1426         uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
1427         if (uncore_init->pci_init) {
1428                 pret = uncore_init->pci_init();
1429                 if (!pret)
1430                         pret = uncore_pci_init();
1431         }
1432
1433         if (uncore_init->cpu_init) {
1434                 uncore_init->cpu_init();
1435                 cret = uncore_cpu_init();
1436         }
1437
1438         if (cret && pret)
1439                 return -ENODEV;
1440
1441         cpu_notifier_register_begin();
1442         ret = uncore_cpumask_init(!cret);
1443         if (ret)
1444                 goto err;
1445         cpu_notifier_register_done();
1446         return 0;
1447
1448 err:
1449         /* Undo box->init_box() */
1450         on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
1451         uncore_types_exit(uncore_msr_uncores);
1452         uncore_pci_exit();
1453         cpu_notifier_register_done();
1454         return ret;
1455 }
1456 module_init(intel_uncore_init);
1457
1458 static void __exit intel_uncore_exit(void)
1459 {
1460         cpu_notifier_register_begin();
1461         __unregister_cpu_notifier(&uncore_cpu_nb);
1462         uncore_types_exit(uncore_msr_uncores);
1463         uncore_pci_exit();
1464         cpu_notifier_register_done();
1465 }
1466 module_exit(intel_uncore_exit);