/*
 * BTS PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/coredump.h>

#include <asm-generic/sizes.h>
#include <asm/perf_event.h>

#include "perf_event.h"
struct bts_ctx {
	struct perf_output_handle	handle;
	struct debug_store		ds_back;
	int				started;
};

static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);
#define BTS_RECORD_SIZE		24
#define BTS_SAFETY_MARGIN	4080
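/*
 * A 64-bit BTS record is three 8-byte fields (branch-from, branch-to,
 * flags), hence the 24-byte record size.  The safety margin is the headroom
 * kept between the interrupt threshold and the absolute maximum: 4080 bytes
 * is room for another 170 records, which should comfortably cover whatever
 * the hardware logs while the threshold PMI is being delivered.
 */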
struct bts_phys {
	struct page	*page;
	unsigned long	size;
	unsigned long	offset;
	unsigned long	displacement;
};

struct bts_buffer {
	size_t		real_size;	/* multiple of BTS_RECORD_SIZE */
	unsigned int	nr_pages;
	unsigned int	nr_bufs;
	unsigned int	cur_buf;
	bool		snapshot;
	local_t		data_size;
	local_t		lost;
	local_t		head;
	unsigned long	end;
	void		**data_pages;
	struct bts_phys	buf[0];
};

struct pmu bts_pmu;
static size_t buf_size(struct page *page)
{
	return 1 << (PAGE_SHIFT + page_private(page));
}
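/*
 * The AUX area arrives as an array of pages; the head page of each
 * high-order allocation carries the allocation order in page_private(),
 * so buf_size() yields the size in bytes of one physically contiguous chunk.
 */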
static void *
bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
{
	struct bts_buffer *buf;
	struct page *page;
	int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	unsigned long offset;
	size_t size = nr_pages << PAGE_SHIFT;
	int pg, nbuf, pad;

	/* count all the high order buffers */
	for (pg = 0, nbuf = 0; pg < nr_pages;) {
		page = virt_to_page(pages[pg]);
		if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
			return NULL;
		pg += 1 << page_private(page);
		nbuf++;
	}

	/*
	 * to avoid interrupts in overwrite mode, only allow one physical buffer
	 */
	if (overwrite && nbuf > 1)
		return NULL;

	buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->nr_pages = nr_pages;
	buf->nr_bufs = nbuf;
	buf->snapshot = overwrite;
	buf->data_pages = pages;
	buf->real_size = size - size % BTS_RECORD_SIZE;

	for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
		unsigned int __nr_pages;

		page = virt_to_page(pages[pg]);
		__nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
		buf->buf[nbuf].page = page;
		buf->buf[nbuf].offset = offset;
		buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
		buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement;
		pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
		buf->buf[nbuf].size -= pad;

		pg += __nr_pages;
		offset += __nr_pages << PAGE_SHIFT;
	}

	return buf;
}
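/*
 * Because a power-of-two chunk size is never a multiple of 24, each chunk's
 * usable size is trimmed to a whole number of records, and the leftover
 * "pad" bytes are compensated by starting the next chunk at a displacement
 * of BTS_RECORD_SIZE - pad.  E.g. a 4096-byte page holds 170 records with 16
 * bytes left over, so the next chunk starts 8 bytes in (4096 + 8 = 171 * 24);
 * in terms of AUX offsets, records therefore always begin on a 24-byte
 * boundary.
 */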
static void bts_buffer_free_aux(void *data)
{
	kfree(data);
}
static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
{
	return buf->buf[idx].offset + buf->buf[idx].displacement;
}
static void
bts_config_buffer(struct bts_buffer *buf)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_phys *phys = &buf->buf[buf->cur_buf];
	unsigned long index, thresh = 0, end = phys->size;
	struct page *page = phys->page;

	index = local_read(&buf->head);

	if (!buf->snapshot) {
		if (buf->end < phys->offset + buf_size(page))
			end = buf->end - phys->offset - phys->displacement;

		index -= phys->offset + phys->displacement;

		if (end - index > BTS_SAFETY_MARGIN)
			thresh = end - BTS_SAFETY_MARGIN;
		else if (end - index > BTS_RECORD_SIZE)
			thresh = end - BTS_RECORD_SIZE;
		else
			thresh = end;
	}

	ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
	ds->bts_index = ds->bts_buffer_base + index;
	ds->bts_absolute_maximum = ds->bts_buffer_base + end;
	ds->bts_interrupt_threshold = !buf->snapshot
		? ds->bts_buffer_base + thresh
		: ds->bts_absolute_maximum + BTS_RECORD_SIZE;
}
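/*
 * The DS area is programmed with absolute virtual addresses: new records go
 * in at bts_index and a PMI is raised once the index crosses
 * bts_interrupt_threshold.  In snapshot (overwrite) mode the threshold is
 * parked one record past the absolute maximum so it can never be crossed
 * and no interrupt is ever generated.
 */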
static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head)
{
	unsigned long index = head - phys->offset;

	memset(page_address(phys->page) + index, 0, phys->size - index);
}
static bool bts_buffer_is_full(struct bts_buffer *buf, struct bts_ctx *bts)
{
	if (buf->snapshot)
		return false;

	if (local_read(&buf->data_size) >= bts->handle.size ||
	    bts->handle.size - local_read(&buf->data_size) < BTS_RECORD_SIZE)
		return true;

	return false;
}
static void bts_update(struct bts_ctx *bts)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;

	if (!buf)
		return;

	head = index + bts_buffer_offset(buf, buf->cur_buf);
	old = local_xchg(&buf->head, head);

	if (!buf->snapshot) {
		if (old == head)
			return;

		if (ds->bts_index >= ds->bts_absolute_maximum)
			local_inc(&buf->lost);

		/*
		 * old and head are always in the same physical buffer, so we
		 * can subtract them to get the data size.
		 */
		local_add(head - old, &buf->data_size);
	} else {
		local_set(&buf->data_size, head);
	}
}
static void __bts_event_start(struct perf_event *event)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	u64 config = 0;

	if (!buf || bts_buffer_is_full(buf, bts))
		return;

	event->hw.state = 0;

	if (!buf->snapshot)
		config |= ARCH_PERFMON_EVENTSEL_INT;
	if (!event->attr.exclude_kernel)
		config |= ARCH_PERFMON_EVENTSEL_OS;
	if (!event->attr.exclude_user)
		config |= ARCH_PERFMON_EVENTSEL_USR;

	bts_config_buffer(buf);

	/*
	 * local barrier to make sure that ds configuration made it
	 * before we enable BTS
	 */
	wmb();

	intel_pmu_enable_bts(config);
}
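/*
 * The ARCH_PERFMON_EVENTSEL_* bits used above are only a convention between
 * this file and intel_pmu_enable_bts(), which translates them into the
 * corresponding DEBUGCTLMSR_BTS* bits: interrupt on threshold, and
 * suppression of kernel-mode or user-mode branch records.
 */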
static void bts_event_start(struct perf_event *event, int flags)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	__bts_event_start(event);

	/* PMI handler: this counter is running and likely generating PMIs */
	ACCESS_ONCE(bts->started) = 1;
}
static void __bts_event_stop(struct perf_event *event)
{
	/*
	 * No extra synchronization is mandated by the documentation to have
	 * BTS data stores globally visible.
	 */
	intel_pmu_disable_bts();

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
}
static void bts_event_stop(struct perf_event *event, int flags)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/* PMI handler: don't restart this counter */
	ACCESS_ONCE(bts->started) = 0;

	__bts_event_stop(event);

	if (flags & PERF_EF_UPDATE)
		bts_update(bts);
}
void intel_bts_enable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	if (bts->handle.event && bts->started)
		__bts_event_start(bts->handle.event);
}
void intel_bts_disable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	if (bts->handle.event)
		__bts_event_stop(bts->handle.event);
}
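/*
 * These two helpers are provided for the core x86 PMU code so that branch
 * tracing on this CPU follows the global PMU enable/disable state (for
 * instance, it is paused while the PMI handler runs with the PMU disabled)
 * without tearing down the BTS event itself.
 */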
static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
{
	unsigned long head, space, next_space, pad, gap, skip, wakeup;
	unsigned int next_buf;
	struct bts_phys *phys, *next_phys;
	int ret;

	if (buf->snapshot)
		return 0;

	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
	if (WARN_ON_ONCE(head != local_read(&buf->head)))
		return -EINVAL;

	phys = &buf->buf[buf->cur_buf];
	space = phys->offset + phys->displacement + phys->size - head;
	pad = space;
	if (space > handle->size) {
		space = handle->size;
		space -= space % BTS_RECORD_SIZE;
	}
	if (space <= BTS_SAFETY_MARGIN) {
		/* See if next phys buffer has more space */
		next_buf = buf->cur_buf + 1;
		if (next_buf >= buf->nr_bufs)
			next_buf = 0;
		next_phys = &buf->buf[next_buf];
		gap = buf_size(phys->page) - phys->displacement - phys->size +
		      next_phys->displacement;
		skip = pad + gap;
		if (handle->size >= skip) {
			next_space = next_phys->size;
			if (next_space + skip > handle->size) {
				next_space = handle->size - skip;
				next_space -= next_space % BTS_RECORD_SIZE;
			}
			if (next_space > space || !space) {
				/* pad out the current buffer and skip ahead */
				if (pad)
					bts_buffer_pad_out(phys, head);
				ret = perf_aux_output_skip(handle, skip);
				if (ret)
					return ret;
				/* Advance to next phys buffer */
				phys = next_phys;
				space = next_space;
				head = phys->offset + phys->displacement;
				/*
				 * After this, cur_buf and head won't match ds
				 * anymore, so we must not be racing with
				 * bts_update().
				 */
				buf->cur_buf = next_buf;
				local_set(&buf->head, head);
			}
		}
	}

	/* Don't go far beyond wakeup watermark */
	wakeup = BTS_SAFETY_MARGIN + BTS_RECORD_SIZE + handle->wakeup -
		 handle->head;
	if (space > wakeup) {
		space = wakeup;
		space -= space % BTS_RECORD_SIZE;
	}

	buf->end = head + space;

	/*
	 * If we have no space, the lost notification would have been sent when
	 * we hit absolute_maximum - see bts_update()
	 */
	if (!space)
		return -ENOSPC;

	return 0;
}
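/*
 * In short: bts_buffer_reset() re-arms the hardware's view of the AUX buffer
 * after each perf_aux_output_begin().  It maps the handle's head back into
 * the current physical chunk, hops to the next chunk (zero-padding and
 * perf_aux_output_skip()ing the unusable tail) when the current one is too
 * close to full, and clamps buf->end so the threshold PMI fires near the
 * consumer's wakeup watermark.
 */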
int intel_bts_interrupt(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct perf_event *event = bts->handle.event;
	struct bts_buffer *buf;
	s64 old_head;
	int err;

	if (!event || !bts->started)
		return 0;

	buf = perf_get_aux(&bts->handle);
	/*
	 * Skip snapshot counters: they don't use the interrupt, but
	 * there's no other way of telling, because the pointer will
	 * keep moving
	 */
	if (!buf || buf->snapshot)
		return 0;

	old_head = local_read(&buf->head);
	bts_update(bts);

	/* no new data */
	if (old_head == local_read(&buf->head))
		return 0;

	perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
			    !!local_xchg(&buf->lost, 0));

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		return 1;

	err = bts_buffer_reset(buf, &bts->handle);
	if (err)
		perf_aux_output_end(&bts->handle, 0, false);

	return 1;
}
static void bts_event_del(struct perf_event *event, int mode)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = perf_get_aux(&bts->handle);

	bts_event_stop(event, PERF_EF_UPDATE);

	if (buf) {
		if (buf->snapshot)
			bts->handle.head =
				local_xchg(&buf->data_size,
					   buf->nr_pages << PAGE_SHIFT);
		perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
				    !!local_xchg(&buf->lost, 0));
	}

	cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
	cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
	cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
	cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
}
static int bts_event_add(struct perf_event *event, int mode)
{
	struct bts_buffer *buf;
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int ret = -EBUSY;

	event->hw.state = PERF_HES_STOPPED;

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		return -EBUSY;

	if (bts->handle.event)
		return -EBUSY;

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		return -EINVAL;

	ret = bts_buffer_reset(buf, &bts->handle);
	if (ret) {
		perf_aux_output_end(&bts->handle, 0, false);
		return ret;
	}

	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;

	if (mode & PERF_EF_START) {
		bts_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED) {
			bts_event_del(event, 0);
			return -EBUSY;
		}
	}

	return 0;
}
static void bts_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	x86_del_exclusive(x86_lbr_exclusive_bts);
}
static int bts_event_init(struct perf_event *event)
{
	int ret;

	if (event->attr.type != bts_pmu.type)
		return -ENOENT;

	if (x86_add_exclusive(x86_lbr_exclusive_bts))
		return -EBUSY;

	ret = x86_reserve_hardware();
	if (ret) {
		x86_del_exclusive(x86_lbr_exclusive_bts);
		return ret;
	}

	event->destroy = bts_event_destroy;

	return 0;
}
static void bts_event_read(struct perf_event *event)
{
}
static __init int bts_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
		return -ENODEV;

	bts_pmu.capabilities	= PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE;
	bts_pmu.task_ctx_nr	= perf_sw_context;
	bts_pmu.event_init	= bts_event_init;
	bts_pmu.add		= bts_event_add;
	bts_pmu.del		= bts_event_del;
	bts_pmu.start		= bts_event_start;
	bts_pmu.stop		= bts_event_stop;
	bts_pmu.read		= bts_event_read;
	bts_pmu.setup_aux	= bts_buffer_setup_aux;
	bts_pmu.free_aux	= bts_buffer_free_aux;

	return perf_pmu_register(&bts_pmu, "intel_bts", -1);
}

arch_initcall(bts_init);
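/*
 * Once registered, the PMU appears under /sys/bus/event_source/devices/ as
 * "intel_bts" and the trace is collected through the perf AUX area, e.g.
 * (assuming a BTS-capable CPU and a perf tool with itrace support):
 *
 *	perf record -e intel_bts// -- <workload>
 *	perf script
 */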