/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <cpuid.h>
23 #include "../../perf.h"
24 #include "../../util/session.h"
25 #include "../../util/event.h"
26 #include "../../util/evlist.h"
27 #include "../../util/evsel.h"
28 #include "../../util/cpumap.h"
29 #include "../../util/parse-options.h"
30 #include "../../util/parse-events.h"
31 #include "../../util/pmu.h"
32 #include "../../util/debug.h"
33 #include "../../util/auxtrace.h"
34 #include "../../util/tsc.h"
35 #include "../../util/intel-pt.h"
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)

#define INTEL_PT_DEFAULT_SAMPLE_SIZE	KiB(4)

#define INTEL_PT_MAX_SAMPLE_SIZE	KiB(60)

#define INTEL_PT_PSB_PERIOD_NEAR	256
struct intel_pt_snapshot_ref {
	void *ref_buf;
	size_t ref_offset;
	bool wrapped;
};
struct intel_pt_recording {
	struct auxtrace_record		itr;
	struct perf_pmu			*intel_pt_pmu;
	int				have_sched_switch;
	struct perf_evlist		*evlist;
	bool				snapshot_mode;
	bool				snapshot_init_done;
	size_t				snapshot_size;
	size_t				snapshot_ref_buf_size;
	int				snapshot_ref_cnt;
	struct intel_pt_snapshot_ref	*snapshot_refs;
};
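/*
 * Parse a config terms string (e.g. "tsc,mtc,mtc_period=3") against the PMU
 * format definitions and merge the result into *config, which also supplies
 * the default config value.
 */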
static int intel_pt_parse_terms_with_default(struct list_head *formats,
					     const char *str,
					     u64 *config)
{
	struct list_head *terms;
	struct perf_event_attr attr = { .size = 0, };
	int err;

	terms = malloc(sizeof(struct list_head));
	if (!terms)
		return -ENOMEM;

	INIT_LIST_HEAD(terms);

	err = parse_events_terms(terms, str);
	if (err)
		goto out_free;

	attr.config = *config;
	err = perf_pmu__config_terms(formats, &attr, terms, true, NULL);
	if (err)
		goto out_free;

	*config = attr.config;
out_free:
	parse_events__free_terms(terms);
	return err;
}
static int intel_pt_parse_terms(struct list_head *formats, const char *str,
				u64 *config)
{
	*config = 0;
	return intel_pt_parse_terms_with_default(formats, str, config);
}
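/*
 * Extract the bits of 'bits' selected by 'mask' and pack them into the low
 * bits of the result, e.g. mask 0x18 with bits 0x10 yields 2.
 */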
static u64 intel_pt_masked_bits(u64 mask, u64 bits)
{
	const u64 top_bit = 1ULL << 63;
	u64 res = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & top_bit) {
			res <<= 1;
			if (bits & top_bit)
				res |= 1;
		}
		mask <<= 1;
		bits <<= 1;
	}

	return res;
}
static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
				struct perf_evlist *evlist, u64 *res)
{
	struct perf_evsel *evsel;
	u64 mask;

	*res = 0;

	mask = perf_pmu__format_bits(&intel_pt_pmu->format, str);
	if (!mask)
		return -EINVAL;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type == intel_pt_pmu->type) {
			*res = intel_pt_masked_bits(mask, evsel->attr.config);
			return 0;
		}
	}

	return -EINVAL;
}
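/*
 * A psb_period config value of p gives a PSB period of 2^(p + 11) bytes,
 * e.g. p = 0 is 2 KiB.  Hardware without multiple-entry ToPA support is
 * treated here as producing a PSB about every 256 bytes.
 */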
static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
				  struct perf_evlist *evlist)
{
	u64 val;
	int err, topa_multiple_entries;
	size_t psb_period;

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
				"%d", &topa_multiple_entries) != 1)
		topa_multiple_entries = 0;

	/*
	 * Use caps/topa_multiple_entries to indicate early hardware that had
	 * extra frequent PSBs.
	 */
	if (!topa_multiple_entries) {
		psb_period = 256;
		goto out;
	}

	err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
	if (err)
		val = 0;

	psb_period = 1 << (val + 11);
out:
	pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
	return psb_period;
}
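/*
 * Pick the position of a set bit in 'bits' nearest to 'target': the highest
 * set bit at or below the target, falling back to the lowest set bit above
 * it.
 */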
static int intel_pt_pick_bit(int bits, int target)
{
	int pos, pick = -1;

	for (pos = 0; bits; bits >>= 1, pos++) {
		if (bits & 1) {
			if (pos <= target || pick < 0)
				pick = pos;
			if (pos >= target)
				break;
		}
	}

	return pick;
}
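/*
 * Build the default config string: TSC always, plus MTC and PSB with
 * mid-range periods where the PMU caps allow (e.g. producing
 * "tsc,mtc,mtc_period=3,psb_period=3"), then parse it into a config value.
 */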
static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
{
	char buf[256];
	int mtc, mtc_periods = 0, mtc_period;
	int psb_cyc, psb_periods, psb_period;
	int pos = 0;
	u64 config;

	pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d",
				&mtc) != 1)
		mtc = 1;

	if (mtc) {
		if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x",
					&mtc_periods) != 1)
			mtc_periods = 0;
		if (mtc_periods) {
			mtc_period = intel_pt_pick_bit(mtc_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",mtc,mtc_period=%d", mtc_period);
		}
	}

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d",
				&psb_cyc) != 1)
		psb_cyc = 1;

	if (psb_cyc && mtc_periods) {
		if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x",
					&psb_periods) != 1)
			psb_periods = 0;
		if (psb_periods) {
			psb_period = intel_pt_pick_bit(psb_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",psb_period=%d", psb_period);
		}
	}

	pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);

	intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config);

	return config;
}
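/*
 * Parse the size argument of the -S snapshot option (assuming the usual
 * perf record syntax, e.g. 'perf record -e intel_pt// -S0x10000' for a
 * 64 KiB snapshot); with no argument the size stays 0 and defaults are
 * chosen in intel_pt_recording_options().
 */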
static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
					   struct record_opts *opts,
					   const char *str)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;

	ptr->snapshot_size = snapshot_size;

	return 0;
}
struct perf_event_attr *
intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
{
	struct perf_event_attr *attr;

	attr = zalloc(sizeof(struct perf_event_attr));
	if (!attr)
		return NULL;

	attr->config = intel_pt_default_config(intel_pt_pmu);

	intel_pt_pmu->selectable = true;

	return attr;
}
static size_t intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused)
{
	return INTEL_PT_AUXTRACE_PRIV_SIZE;
}
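/*
 * CPUID leaf 15H reports the TSC to core crystal clock (CTC) ratio as
 * EBX/EAX; the decoder needs it to relate MTC and CYC timing packets to TSC.
 */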
static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	__get_cpuid(0x15, &eax, &ebx, &ecx, &edx);

	*n = ebx;
	*d = eax;
}
static int intel_pt_info_fill(struct auxtrace_record *itr,
			      struct perf_session *session,
			      struct auxtrace_info_event *auxtrace_info,
			      size_t priv_size)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	struct perf_event_mmap_page *pc;
	struct perf_tsc_conversion tc = { .time_mult = 0, };
	bool cap_user_time_zero = false, per_cpu_mmaps;
	u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
	u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
	int err;

	if (priv_size != INTEL_PT_AUXTRACE_PRIV_SIZE)
		return -EINVAL;

	intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
	intel_pt_parse_terms(&intel_pt_pmu->format, "noretcomp",
			     &noretcomp_bit);
	intel_pt_parse_terms(&intel_pt_pmu->format, "mtc", &mtc_bit);
	mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
					      "mtc_period");
	intel_pt_parse_terms(&intel_pt_pmu->format, "cyc", &cyc_bit);

	intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);

	if (!session->evlist->nr_mmaps)
		return -EINVAL;

	pc = session->evlist->mmap[0].base;
	if (pc) {
		err = perf_read_tsc_conversion(pc, &tc);
		if (err) {
			if (err != -EOPNOTSUPP)
				return err;
		} else {
			cap_user_time_zero = tc.time_mult != 0;
		}
		if (!cap_user_time_zero)
			ui__warning("Intel Processor Trace: TSC not available\n");
	}

	per_cpu_mmaps = !cpu_map__empty(session->evlist->cpus);

	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
	auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
	auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
	auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
	auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
	auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
	auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
	auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
	auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
	auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
	auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
	auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
	auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;

	return 0;
}
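/*
 * Select the sched:sched_switch tracepoint as a system-wide, immediately
 * enabled sideband event so the decoder can follow which thread is running
 * on each CPU.
 */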
static int intel_pt_track_switches(struct perf_evlist *evlist)
{
	const char *sched_switch = "sched:sched_switch";
	struct perf_evsel *evsel;
	int err;

	if (!perf_evlist__can_select_event(evlist, sched_switch))
		return -EPERM;

	err = parse_events(evlist, sched_switch, NULL);
	if (err) {
		pr_debug2("%s: failed to parse %s, error %d\n",
			  __func__, sched_switch, err);
		return err;
	}

	evsel = perf_evlist__last(evlist);

	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, TIME);

	evsel->system_wide = true;
	evsel->no_aux_samples = true;
	evsel->immediate = true;

	return 0;
}
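/*
 * Render the 'valid' bitmask of config values as a human-readable list of
 * values and ranges, e.g. 0x10f becomes "0-3,8".
 */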
static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
	unsigned int val, last = 0, state = 1;
	int p = 0;

	str[0] = '\0';

	for (val = 0; val <= 64; val++, valid >>= 1) {
		if (valid & 1) {
			last = val;
			switch (state) {
			case 0:
				p += scnprintf(str + p, len - p, ",");
				/* Fall through */
			case 1:
				p += scnprintf(str + p, len - p, "%u", val);
				state = 2;
				break;
			case 2:
				state = 3;
				break;
			case 3:
				state = 4;
				break;
			default:
				break;
			}
		} else {
			switch (state) {
			case 3:
				p += scnprintf(str + p, len - p, ",%u", last);
				break;
			case 4:
				p += scnprintf(str + p, len - p, "-%u", last);
				break;
			default:
				break;
			}
			if (state != 1)
				state = 0;
		}
	}
}
static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
				    const char *caps, const char *name,
				    const char *supported, u64 config)
{
	char valid_str[256];
	unsigned long long valid;
	unsigned int shift;
	u64 bits;
	int ok;

	if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1)
		valid = 0;

	if (supported &&
	    perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok)
		valid = 0;

	valid |= 1;

	bits = perf_pmu__format_bits(&intel_pt_pmu->format, name);

	config &= bits;

	for (shift = 0; bits && !(bits & 1); shift++)
		bits >>= 1;

	config >>= shift;

	if (config > 63)
		goto out_err;

	if (valid & (1ULL << config))
		return 0;
out_err:
	intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
	pr_err("Invalid %s for %s. Valid values are: %s\n",
	       name, INTEL_PT_PMU_NAME, valid_str);
	return -EINVAL;
}
static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
				    struct perf_evsel *evsel)
{
	int err;

	if (!evsel)
		return 0;

	err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
				       "cyc_thresh", "caps/psb_cyc",
				       evsel->attr.config);
	if (err)
		return err;

	err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
				       "mtc_period", "caps/mtc",
				       evsel->attr.config);
	if (err)
		return err;

	return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
					"psb_period", "caps/psb_cyc",
					evsel->attr.config);
}
static int intel_pt_recording_options(struct auxtrace_record *itr,
				      struct perf_evlist *evlist,
				      struct record_opts *opts)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	bool have_timing_info;
	struct perf_evsel *evsel, *intel_pt_evsel = NULL;
	const struct cpu_map *cpus = evlist->cpus;
	bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
	u64 tsc_bit;
	int err;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type == intel_pt_pmu->type) {
			if (intel_pt_evsel) {
				pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
				return -EINVAL;
			}
			evsel->attr.freq = 0;
			evsel->attr.sample_period = 1;
			intel_pt_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
		pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
		return -EINVAL;
	}

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
		return -EINVAL;
	}

	if (!opts->full_auxtrace)
		return 0;

	err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
	if (err)
		return err;

	/* Set default sizes for snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);

		if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages = KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}
		if (!opts->auxtrace_snapshot_size)
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
		pr_debug2("Intel PT snapshot size: %zu\n",
			  opts->auxtrace_snapshot_size);
		if (psb_period &&
		    opts->auxtrace_snapshot_size <= psb_period +
						    INTEL_PT_PSB_PERIOD_NEAR)
			ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
				    opts->auxtrace_snapshot_size, psb_period);
	}

	/* Set default sizes for full trace mode */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages */
	if (opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
		size_t min_sz;

		if (opts->auxtrace_snapshot_mode)
			min_sz = KiB(4);
		else
			min_sz = KiB(8);

		if (sz < min_sz || !is_power_of_2(sz)) {
			pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
			       min_sz / 1024);
			return -EINVAL;
		}
	}

	intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);

	if (opts->full_auxtrace && (intel_pt_evsel->attr.config & tsc_bit))
		have_timing_info = true;
	else
		have_timing_info = false;

	/*
	 * Per-cpu recording needs sched_switch events to distinguish different
	 * threads.
	 */
	if (have_timing_info && !cpu_map__empty(cpus)) {
		err = intel_pt_track_switches(evlist);
		if (err == -EPERM)
			pr_debug2("Unable to select sched:sched_switch\n");
		else if (err)
			return err;
		else
			ptr->have_sched_switch = 1;
	}

	if (intel_pt_evsel) {
		/*
		 * To obtain the auxtrace buffer file descriptor, the auxtrace
		 * event must come first.
		 */
		perf_evlist__to_front(evlist, intel_pt_evsel);
		/*
		 * In the case of per-cpu mmaps, we need the CPU on the
		 * AUX event.
		 */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct perf_evsel *tracking_evsel;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			return err;

		tracking_evsel = perf_evlist__last(evlist);

		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->attr.freq = 0;
		tracking_evsel->attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(tracking_evsel, TIME);
	}

	/*
	 * Warn the user when we do not have enough information to decode i.e.
	 * per-cpu with no sched_switch (except workload-only).
	 */
	if (!ptr->have_sched_switch && !cpu_map__empty(cpus) &&
	    !target__none(&opts->target))
		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");

	return 0;
}
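/*
 * Snapshot control: disable the Intel PT event while the AUX buffer is read
 * out, then re-enable it via intel_pt_snapshot_finish().
 */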
static int intel_pt_snapshot_start(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->intel_pt_pmu->type)
			return perf_evlist__disable_event(ptr->evlist, evsel);
	}
	return -EINVAL;
}
static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->intel_pt_pmu->type)
			return perf_evlist__enable_event(ptr->evlist, evsel);
	}
	return -EINVAL;
}
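/*
 * The snapshot_refs array is grown geometrically (assumed here to start at
 * 16 entries and double) so that there is an entry for every AUX mmap index.
 */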
static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
{
	const size_t sz = sizeof(struct intel_pt_snapshot_ref);
	int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
	struct intel_pt_snapshot_ref *refs;

	if (!new_cnt)
		new_cnt = 16;

	while (new_cnt <= idx)
		new_cnt *= 2;

	refs = calloc(new_cnt, sz);
	if (!refs)
		return -ENOMEM;

	memcpy(refs, ptr->snapshot_refs, cnt * sz);

	free(ptr->snapshot_refs);

	ptr->snapshot_refs = refs;
	ptr->snapshot_ref_cnt = new_cnt;

	return 0;
}
static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
{
	int i;

	for (i = 0; i < ptr->snapshot_ref_cnt; i++)
		zfree(&ptr->snapshot_refs[i].ref_buf);
	zfree(&ptr->snapshot_refs);
}
static void intel_pt_recording_free(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);

	intel_pt_free_snapshot_refs(ptr);
	free(ptr);
}
static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
				       size_t snapshot_buf_size)
{
	size_t ref_buf_size = ptr->snapshot_ref_buf_size;
	void *ref_buf;

	ref_buf = zalloc(ref_buf_size);
	if (!ref_buf)
		return -ENOMEM;

	ptr->snapshot_refs[idx].ref_buf = ref_buf;
	ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;

	return 0;
}
static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
					     size_t snapshot_buf_size)
{
	const size_t max_size = 256 * 1024;
	size_t buf_size = 0, psb_period;

	if (ptr->snapshot_size <= 64 * 1024)
		return 0;

	psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
	if (psb_period)
		buf_size = psb_period * 2;

	if (!buf_size || buf_size > max_size)
		buf_size = max_size;

	if (buf_size >= snapshot_buf_size)
		return 0;

	if (buf_size >= ptr->snapshot_size / 2)
		return 0;

	return buf_size;
}
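/*
 * The reference buffer covers about two PSB periods (capped at 256 KiB); if
 * that is not usefully smaller than the AUX buffer or the snapshot size, a
 * size of 0 is returned and wrap detection falls back to
 * intel_pt_first_wrap().
 */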
static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
				  size_t snapshot_buf_size)
{
	if (ptr->snapshot_init_done)
		return 0;

	ptr->snapshot_init_done = true;

	ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
							snapshot_buf_size);

	return 0;
}
/**
 * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
 * @buf1: first buffer
 * @compare_size: number of bytes to compare
 * @buf2: second buffer (a circular buffer)
 * @offs2: offset in second buffer
 * @buf2_size: size of second buffer
 *
 * The comparison allows for the possibility that the bytes to compare in the
 * circular buffer are not contiguous.  It is assumed that @compare_size <=
 * @buf2_size.  This function returns %false if the bytes are identical, %true
 * otherwise.
 */
static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
				     void *buf2, size_t offs2, size_t buf2_size)
{
	size_t end2 = offs2 + compare_size, part_size;

	if (end2 <= buf2_size)
		return memcmp(buf1, buf2 + offs2, compare_size);

	part_size = end2 - buf2_size;
	if (memcmp(buf1, buf2 + offs2, part_size))
		return true;

	compare_size -= part_size;

	return memcmp(buf1 + part_size, buf2, compare_size);
}
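/*
 * Return true if the circular buffer has definitely wrapped: either the new
 * head lies within the reference region itself (so it must have been
 * overwritten), or the bytes in that region no longer match the saved
 * reference buffer.
 */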
static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
				 size_t ref_size, size_t buf_size,
				 void *data, size_t head)
{
	size_t ref_end = ref_offset + ref_size;

	if (ref_end > buf_size) {
		if (head > ref_offset || head < ref_end - buf_size)
			return true;
	} else if (head > ref_offset && head < ref_end) {
		return true;
	}

	return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
					buf_size);
}
static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
			      void *data, size_t head)
{
	if (head >= ref_size) {
		memcpy(ref_buf, data + head - ref_size, ref_size);
	} else {
		memcpy(ref_buf, data, head);
		ref_size -= head;
		memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
	}
}
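/*
 * Detect a wrap by comparing the current buffer contents against the
 * reference bytes saved on the previous snapshot, then refresh the reference
 * copy with the bytes that now precede the head.
 */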
static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 head)
{
	struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
	bool wrapped;

	wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
				       ptr->snapshot_ref_buf_size, mm->len,
				       data, head);

	intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
			  data, head);

	return wrapped;
}
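/*
 * Heuristic for when there is no reference buffer: the mmap starts zeroed,
 * so any non-zero data among the last 512 words of the buffer means the
 * trace has reached the end and wrapped around at least once.
 */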
static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
{
	int i, a, b;

	b = buf_size >> 3;
	a = b - 512;
	if (a < 0)
		a = 0;

	for (i = a; i < b; i++) {
		if (data[i])
			return true;
	}

	return false;
}
static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
				  struct auxtrace_mmap *mm, unsigned char *data,
				  u64 *head, u64 *old)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	bool wrapped;
	int err;

	pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head);

	err = intel_pt_snapshot_init(ptr, mm->len);
	if (err)
		goto out_err;

	if (idx >= ptr->snapshot_ref_cnt) {
		err = intel_pt_alloc_snapshot_refs(ptr, idx);
		if (err)
			goto out_err;
	}

	if (ptr->snapshot_ref_buf_size) {
		if (!ptr->snapshot_refs[idx].ref_buf) {
			err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
			if (err)
				goto out_err;
		}
		wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
	} else {
		wrapped = ptr->snapshot_refs[idx].wrapped;
		if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
			ptr->snapshot_refs[idx].wrapped = true;
			wrapped = true;
		}
	}

	/*
	 * In full trace mode 'head' continually increases.  However in snapshot
	 * mode 'head' is an offset within the buffer.  Here 'old' and 'head'
	 * are adjusted to match the full trace case which expects that 'old' is
	 * always less than 'head'.
	 */
	if (wrapped) {
		*old = *head;
		*head += mm->len;
	} else {
		if (mm->mask)
			*old &= mm->mask;
		else
			*old %= mm->len;
		if (*old > *head)
			*head += mm->len;
	}

	pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
		  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

	return 0;

out_err:
	pr_err("%s: failed, error %d\n", __func__, err);
	return err;
}
static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
{
	return rdtsc();
}
static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->intel_pt_pmu->type)
			return perf_evlist__enable_event_idx(ptr->evlist, evsel,
							     idx);
	}
	return -EINVAL;
}
struct auxtrace_record *intel_pt_recording_init(int *err)
{
	struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
	struct intel_pt_recording *ptr;

	if (!intel_pt_pmu)
		return NULL;

	ptr = zalloc(sizeof(struct intel_pt_recording));
	if (!ptr) {
		*err = -ENOMEM;
		return NULL;
	}

	ptr->intel_pt_pmu = intel_pt_pmu;
	ptr->itr.recording_options = intel_pt_recording_options;
	ptr->itr.info_priv_size = intel_pt_info_priv_size;
	ptr->itr.info_fill = intel_pt_info_fill;
	ptr->itr.free = intel_pt_recording_free;
	ptr->itr.snapshot_start = intel_pt_snapshot_start;
	ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
	ptr->itr.find_snapshot = intel_pt_find_snapshot;
	ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
	ptr->itr.reference = intel_pt_reference;
	ptr->itr.read_finish = intel_pt_read_finish;

	return &ptr->itr;
}