static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE,
	.key	= CCKEY_FUNCTION
};

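/*
 * Column width bookkeeping: hists->col_len[] tracks the widest string seen
 * so far for each output column, so the TUI/stdio output can size columns
 * to fit their contents.
 */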
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

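/*
 * Update the column widths from a single hist_entry: the symbol, comm, dso,
 * parent, branch and mem-access columns all grow to fit this entry.
 */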
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

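/*
 * Split a period into the sys/user/guest buckets according to the cpumode
 * bits from the sample's misc field.
 */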
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

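/*
 * Exponentially decay a stat by multiplying it by 7/8. Used for 'perf top'
 * style continuous updates, so entries that stop getting samples fade out
 * instead of staying in the histogram forever.
 */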
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

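/*
 * Walk all entries, decay each one, and erase entries that have decayed to
 * zero (or that the caller asked to zap) unless they are still in use by a
 * browser.
 */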
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it gets fully decayed again.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			--hists->nr_entries;
			if (!n->filtered)
				--hists->nr_non_filtered_entries;

			hist_entry__free(n);
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	/* the callchain_root, when used, lives in a tail behind the entry */
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (part of) a buffer allocated by
			 * sample__resolve_bstack() and will be freed after
			 * the new entries are added, so save a private copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

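/*
 * Insert an entry into the current entries_in tree, merging its period into
 * an existing entry when the sort keys compare equal.
 */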
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/*
			 * If the map of an existing hist_entry has become
			 * out-of-date due to an exec() or similar, update it.
			 * Otherwise we will mis-adjust symbol addresses when
			 * computing the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al);
}

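/*
 * The cmp/collapse/sort functions below all walk the configured sort list
 * and stop at the first format whose comparison yields a non-zero result;
 * they differ only in which per-format callback they invoke.
 */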
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	free_srcline(he->srcline);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

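/*
 * hists->entries_in_array[] holds two input trees; rotating between them
 * lets new samples keep accumulating in one tree while the other is being
 * collapsed, without holding hists->lock for the whole resort.
 */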
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

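/*
 * Final stage of the histogram pipeline: move entries from the (possibly
 * collapsed) input tree into hists->entries, ordered by the output sort
 * keys. A typical consumer does roughly (a sketch; call sites vary by tool):
 *
 *	hists__collapse_resort(hists, NULL);
 *	hists__output_resort(hists);
 *	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd))
 *		... display the entry ...
 */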
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);
	}
}

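/*
 * Filters don't remove entries from the rbtree; they set/clear bits in
 * he->filtered and keep the non-filtered stats in sync, so clearing a
 * filter is cheap.
 */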
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];	/* slot 0 holds the total across all types */
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

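/*
 * Pairing support for 'perf diff' style comparisons: a dummy entry carries
 * the same sort keys as its pair but a zeroed stat block.
 */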
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find any, add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

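/*
 * 'relative' makes entry percentages be computed against the filtered
 * total; 'absolute' keeps them against the full total even when filters
 * hide part of it. See parse_filter_percentage() below.
 */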
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}