/* Forward declarations of the per-entry filter predicates defined below. */
9 static bool hists__filter_entry_by_dso(struct hists *hists,
10 struct hist_entry *he);
11 static bool hists__filter_entry_by_thread(struct hists *hists,
12 struct hist_entry *he);
13 static bool hists__filter_entry_by_symbol(struct hists *hists,
14 struct hist_entry *he);
/* Global callchain rendering parameters; defaults to relative graph mode. */
23 struct callchain_param callchain_param = {
24 .mode = CHAIN_GRAPH_REL,
/* Return the current display width of column @col. */
29 u16 hists__col_len(struct hists *hists, enum hist_column col)
31 return hists->col_len[col];
/* Unconditionally set the display width of column @col to @len. */
34 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
36 hists->col_len[col] = len;
/*
 * Grow column @col to @len if @len is larger than the current width.
 * Returns a bool used by callers to detect growth — presumably true
 * when the width was updated (return statements elided in this view).
 */
39 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
41 if (len > hists__col_len(hists, col)) {
42 hists__set_col_len(hists, col, len);
/* Reset every column width to zero so it can be recomputed from entries. */
48 void hists__reset_col_len(struct hists *hists)
52 for (col = 0; col < HISTC_NR_COLS; ++col)
53 hists__set_col_len(hists, col, 0);
/*
 * Make sure a dso column @dso is wide enough for an unresolved raw
 * address (one hex digit per 4 bits of a long), unless the user pinned
 * widths, set a field separator, or supplied an explicit dso list.
 */
56 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
58 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
60 if (hists__col_len(hists, dso) < unresolved_col_width &&
61 !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
62 !symbol_conf.dso_list)
63 hists__set_col_len(hists, dso, unresolved_col_width);
/*
 * Widen the column widths of @hists so entry @h fits: symbol,
 * comm/thread, dso, parent and (when branch info is present) the
 * branch from/to symbol and dso columns.
 */
66 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
68 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
/* +4 accounts for the '[x] ' privilege-level prefix on symbol names. */
72 hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
74 hists__set_unres_dso_col_len(hists, HISTC_DSO);
76 len = thread__comm_len(h->thread);
/* When COMM grew, also grow THREAD; +6 leaves room for extra decoration. */
77 if (hists__new_col_len(hists, HISTC_COMM, len))
78 hists__set_col_len(hists, HISTC_THREAD, len + 6);
81 len = dso__name_len(h->ms.map->dso);
82 hists__new_col_len(hists, HISTC_DSO, len);
86 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
91 * +4 accounts for '[x] ' priv level info
92 * +2 account of 0x prefix on raw addresses
/* Branch stack: size the from/to symbol and dso columns. */
94 if (h->branch_info->from.sym) {
95 symlen = (int)h->branch_info->from.sym->namelen + 4;
96 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
98 symlen = dso__name_len(h->branch_info->from.map->dso);
99 hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
/* Unresolved 'from' address: width for hex digits + '[x] ' + "0x". */
101 symlen = unresolved_col_width + 4 + 2;
102 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
103 hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
106 if (h->branch_info->to.sym) {
107 symlen = (int)h->branch_info->to.sym->namelen + 4;
108 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
110 symlen = dso__name_len(h->branch_info->to.map->dso);
111 hists__new_col_len(hists, HISTC_DSO_TO, symlen);
/* Unresolved 'to' address, same sizing rule as above. */
113 symlen = unresolved_col_width + 4 + 2;
114 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
115 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
/*
 * Recompute column widths from only the first @max_rows entries of the
 * sorted output tree (hists->entries).
 */
120 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
122 struct rb_node *next = rb_first(&hists->entries);
123 struct hist_entry *n;
126 hists__reset_col_len(hists);
128 while (next && row++ < max_rows) {
129 n = rb_entry(next, struct hist_entry, rb_node);
131 hists__calc_col_len(hists, n);
132 next = rb_next(&n->rb_node);
/*
 * Credit @period to the per-cpumode counter of @he that matches the
 * sample's PERF_RECORD_MISC_* cpumode (switch/break lines elided here).
 */
136 static void hist_entry__add_cpumode_period(struct hist_entry *he,
137 unsigned int cpumode, u64 period)
140 case PERF_RECORD_MISC_KERNEL:
141 he->stat.period_sys += period;
143 case PERF_RECORD_MISC_USER:
144 he->stat.period_us += period;
146 case PERF_RECORD_MISC_GUEST_KERNEL:
147 he->stat.period_guest_sys += period;
149 case PERF_RECORD_MISC_GUEST_USER:
150 he->stat.period_guest_us += period;
/* Account one more sample worth @period into @he_stat. */
157 static void he_stat__add_period(struct he_stat *he_stat, u64 period)
159 he_stat->period += period;
160 he_stat->nr_events += 1;
/* Accumulate all counters of @src into @dest (used when merging entries). */
163 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
165 dest->period += src->period;
166 dest->period_sys += src->period_sys;
167 dest->period_us += src->period_us;
168 dest->period_guest_sys += src->period_guest_sys;
169 dest->period_guest_us += src->period_guest_us;
170 dest->nr_events += src->nr_events;
/* Exponentially decay an entry's counters to 7/8 of their current value. */
173 static void hist_entry__decay(struct hist_entry *he)
175 he->stat.period = (he->stat.period * 7) / 8;
176 he->stat.nr_events = (he->stat.nr_events * 7) / 8;
/*
 * Decay @he and subtract the lost period from the hists total.
 * Returns true when the entry has decayed to zero (removal candidate).
 */
179 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
181 u64 prev_period = he->stat.period;
183 if (prev_period == 0)
186 hist_entry__decay(he);
/* Keep total_period consistent with the amount the entry just lost. */
189 hists->stats.total_period -= prev_period - he->stat.period;
191 return he->stat.period == 0;
/*
 * Walk the output tree decaying every entry; unlink entries that fully
 * decayed or match the zap_user/zap_kernel criteria (level '.' marks
 * user space).
 */
194 static void __hists__decay_entries(struct hists *hists, bool zap_user,
195 bool zap_kernel, bool threaded)
197 struct rb_node *next = rb_first(&hists->entries);
198 struct hist_entry *n;
201 n = rb_entry(next, struct hist_entry, rb_node);
202 next = rb_next(&n->rb_node);
204 * We may be annotating this, for instance, so keep it here in
205 * case some it gets new samples, we'll eventually free it when
206 * the user stops browsing and it agains gets fully decayed.
208 if (((zap_user && n->level == '.') ||
209 (zap_kernel && n->level != '.') ||
210 hists__decay_entry(hists, n)) &&
212 rb_erase(&n->rb_node, &hists->entries);
/* With collapsing (or threading) the entry also lives in entries_collapsed. */
214 if (sort__need_collapse || threaded)
215 rb_erase(&n->rb_node_in, &hists->entries_collapsed);
/* Public wrapper: decay entries without the threaded collapsed-tree path. */
223 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
225 return __hists__decay_entries(hists, zap_user, zap_kernel, false);
/* Public wrapper: decay entries with threaded collapsed-tree handling. */
228 void hists__decay_entries_threaded(struct hists *hists,
229 bool zap_user, bool zap_kernel)
231 return __hists__decay_entries(hists, zap_user, zap_kernel, true);
235 * histogram, sorted on item, collects periods
/*
 * Allocate a new hist_entry as a copy of @template. The callchain root
 * is allocated inline after the struct only when callchains are in use.
 * Maps referenced by the entry are pinned via their ->referenced flag.
 */
238 static struct hist_entry *hist_entry__new(struct hist_entry *template)
240 size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
241 struct hist_entry *he = malloc(sizeof(*he) + callchain_size);
247 he->ms.map->referenced = true;
249 if (he->branch_info) {
250 if (he->branch_info->from.map)
251 he->branch_info->from.map->referenced = true;
252 if (he->branch_info->to.map)
253 he->branch_info->to.map->referenced = true;
256 if (symbol_conf.use_callchain)
257 callchain_init(he->callchain);
259 INIT_LIST_HEAD(&he->pairs.node);
/* Account a new entry: update column widths and the running total period. */
265 void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
268 hists__calc_col_len(hists, h);
270 hists->stats.total_period += h->stat.period;
/*
 * With --exclude-other, entries lacking a resolved parent get the
 * PARENT filter bit; the no-filter (0) return is elided in this view.
 */
274 static u8 symbol__parent_filter(const struct symbol *parent)
276 if (symbol_conf.exclude_other && parent == NULL)
277 return 1 << HIST_FILTER__PARENT;
/*
 * Insert @entry into hists->entries_in, or merge @period into an
 * existing entry that compares equal under hist_entry__cmp(). Runs
 * under hists->lock. Returns the new or existing entry.
 */
281 static struct hist_entry *add_hist_entry(struct hists *hists,
282 struct hist_entry *entry,
283 struct addr_location *al,
287 struct rb_node *parent = NULL;
288 struct hist_entry *he;
291 pthread_mutex_lock(&hists->lock);
293 p = &hists->entries_in->rb_node;
297 he = rb_entry(parent, struct hist_entry, rb_node_in);
300 * Make sure that it receives arguments in a same order as
301 * hist_entry__collapse() so that we can use an appropriate
302 * function when searching an entry regardless which sort
305 cmp = hist_entry__cmp(he, entry);
/* Equal entry found: fold the new sample's period into it. */
308 he_stat__add_period(&he->stat, period);
310 /* If the map of an existing hist_entry has
311 * become out-of-date due to an exec() or
312 * similar, update it. Otherwise we will
313 * mis-adjust symbol addresses when computing
314 * the history counter to increment.
316 if (he->ms.map != entry->ms.map) {
317 he->ms.map = entry->ms.map;
319 he->ms.map->referenced = true;
/* No match: allocate a fresh entry and link it into the input tree. */
330 he = hist_entry__new(entry);
334 rb_link_node(&he->rb_node_in, parent, p);
335 rb_insert_color(&he->rb_node_in, hists->entries_in);
337 hist_entry__add_cpumode_period(he, al->cpumode, period);
339 pthread_mutex_unlock(&hists->lock);
/*
 * Add a branch-stack sample: build an on-stack template entry from
 * @al/@bi and hand it to add_hist_entry().
 */
343 struct hist_entry *__hists__add_branch_entry(struct hists *self,
344 struct addr_location *al,
345 struct symbol *sym_parent,
346 struct branch_info *bi,
349 struct hist_entry entry = {
350 .thread = al->thread,
362 .parent = sym_parent,
363 .filtered = symbol__parent_filter(sym_parent),
368 return add_hist_entry(self, &entry, al, period);
/*
 * Add a regular (non-branch) sample: build an on-stack template entry
 * from @al and hand it to add_hist_entry().
 */
371 struct hist_entry *__hists__add_entry(struct hists *self,
372 struct addr_location *al,
373 struct symbol *sym_parent, u64 period)
375 struct hist_entry entry = {
376 .thread = al->thread,
388 .parent = sym_parent,
389 .filtered = symbol__parent_filter(sym_parent),
393 return add_hist_entry(self, &entry, al, period);
/*
 * Compare two entries using every registered sort key in list order;
 * presumably the first non-zero comparison wins (loop tail elided).
 */
397 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
399 struct sort_entry *se;
402 list_for_each_entry(se, &hist_entry__sort_list, list) {
403 cmp = se->se_cmp(left, right);
/*
 * Like hist_entry__cmp() but prefers each sort key's se_collapse
 * callback when present, falling back to se_cmp.
 */
412 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
414 struct sort_entry *se;
417 list_for_each_entry(se, &hist_entry__sort_list, list) {
418 int64_t (*f)(struct hist_entry *, struct hist_entry *);
420 f = se->se_collapse ?: se->se_cmp;
422 cmp = f(left, right);
/* Release an entry's owned memory (branch_info shown; rest elided here). */
430 void hist_entry__free(struct hist_entry *he)
432 free(he->branch_info);
437 * collapse the histogram
/*
 * Insert @he into the collapsed tree @root. If an equal entry (under
 * hist_entry__collapse()) already exists, merge @he's stats and
 * callchain into it and free @he; otherwise link @he into the tree.
 * Returns a bool — presumably true when @he was actually inserted
 * (the return statements are elided in this view).
 */
440 static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
441 struct rb_root *root,
442 struct hist_entry *he)
444 struct rb_node **p = &root->rb_node;
445 struct rb_node *parent = NULL;
446 struct hist_entry *iter;
451 iter = rb_entry(parent, struct hist_entry, rb_node_in);
453 cmp = hist_entry__collapse(iter, he);
/* Duplicate: fold stats (and callchains) into the existing entry. */
456 he_stat__add_stat(&iter->stat, &he->stat);
458 if (symbol_conf.use_callchain) {
459 callchain_cursor_reset(&callchain_cursor);
460 callchain_merge(&callchain_cursor,
464 hist_entry__free(he);
474 rb_link_node(&he->rb_node_in, parent, p);
475 rb_insert_color(&he->rb_node_in, root);
/*
 * Under hists->lock, hand out the current input tree and rotate
 * entries_in to the other slot of the two-element entries_in_array.
 */
479 static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
481 struct rb_root *root;
483 pthread_mutex_lock(&hists->lock);
485 root = hists->entries_in;
486 if (++hists->entries_in > &hists->entries_in_array[1])
487 hists->entries_in = &hists->entries_in_array[0];
489 pthread_mutex_unlock(&hists->lock);
/* Re-evaluate all three filter predicates (dso/thread/symbol) on @he. */
494 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
496 hists__filter_entry_by_dso(hists, he);
497 hists__filter_entry_by_thread(hists, he);
498 hists__filter_entry_by_symbol(hists, he);
/*
 * Move entries from the (rotated) input tree into entries_collapsed,
 * merging duplicates. Does nothing unless collapsing is required by
 * the sort keys or @threaded is set.
 */
501 static void __hists__collapse_resort(struct hists *hists, bool threaded)
503 struct rb_root *root;
504 struct rb_node *next;
505 struct hist_entry *n;
507 if (!sort__need_collapse && !threaded)
510 root = hists__get_rotate_entries_in(hists);
511 next = rb_first(root);
514 n = rb_entry(next, struct hist_entry, rb_node_in);
515 next = rb_next(&n->rb_node_in);
517 rb_erase(&n->rb_node_in, root);
518 if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
520 * If it wasn't combined with one of the entries already
521 * collapsed, we need to apply the filters that may have
522 * been set by, say, the hist_browser.
524 hists__apply_filters(hists, n);
/* Public wrapper: collapse/resort for the non-threaded case. */
529 void hists__collapse_resort(struct hists *hists)
531 return __hists__collapse_resort(hists, false);
/* Public wrapper: collapse/resort for the threaded case. */
534 void hists__collapse_resort_threaded(struct hists *hists)
536 return __hists__collapse_resort(hists, true);
540 * reverse the map, sort on period.
/*
 * Insert @he into the output tree @entries ordered by descending
 * period; sort its callchain first when callchains are enabled,
 * pruning chains below @min_callchain_hits.
 */
543 static void __hists__insert_output_entry(struct rb_root *entries,
544 struct hist_entry *he,
545 u64 min_callchain_hits)
547 struct rb_node **p = &entries->rb_node;
548 struct rb_node *parent = NULL;
549 struct hist_entry *iter;
551 if (symbol_conf.use_callchain)
552 callchain_param.sort(&he->sorted_chain, he->callchain,
553 min_callchain_hits, &callchain_param);
557 iter = rb_entry(parent, struct hist_entry, rb_node);
/* Bigger periods sort earlier, giving descending output order. */
559 if (he->stat.period > iter->stat.period)
565 rb_link_node(&he->rb_node, parent, p);
566 rb_insert_color(&he->rb_node, entries);
/*
 * Rebuild the sorted output tree hists->entries from either the
 * collapsed tree or the raw input tree, recomputing the entry count,
 * total period and column widths from scratch.
 */
569 static void __hists__output_resort(struct hists *hists, bool threaded)
571 struct rb_root *root;
572 struct rb_node *next;
573 struct hist_entry *n;
574 u64 min_callchain_hits;
/* Callchains below this share of the total period get pruned on sort. */
576 min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
578 if (sort__need_collapse || threaded)
579 root = &hists->entries_collapsed;
581 root = hists->entries_in;
583 next = rb_first(root);
584 hists->entries = RB_ROOT;
586 hists->nr_entries = 0;
587 hists->stats.total_period = 0;
588 hists__reset_col_len(hists);
591 n = rb_entry(next, struct hist_entry, rb_node_in);
592 next = rb_next(&n->rb_node_in);
594 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
595 hists__inc_nr_entries(hists, n);
/* Public wrapper: output resort for the non-threaded case. */
599 void hists__output_resort(struct hists *hists)
601 return __hists__output_resort(hists, false);
/* Public wrapper: output resort for the threaded case. */
604 void hists__output_resort_threaded(struct hists *hists)
606 return __hists__output_resort(hists, true);
/*
 * Clear @filter's bit on @h and, for an entry that is (now) visible,
 * add its rows, period and event count back into the hists totals.
 */
609 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
610 enum hist_filter filter)
612 h->filtered &= ~(1 << filter);
618 hists->nr_entries += h->nr_rows;
620 hists->stats.total_period += h->stat.period;
621 hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;
623 hists__calc_col_len(hists, h);
/*
 * Set the DSO filter bit on @he when a dso filter is active and the
 * entry's map does not belong to it. Returns a bool — presumably true
 * when the entry is filtered out (return statements elided here).
 */
627 static bool hists__filter_entry_by_dso(struct hists *hists,
628 struct hist_entry *he)
630 if (hists->dso_filter != NULL &&
631 (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
632 he->filtered |= (1 << HIST_FILTER__DSO);
/*
 * Re-apply the dso filter to every output entry, rebuilding totals and
 * column widths from the entries that remain visible.
 */
639 void hists__filter_by_dso(struct hists *hists)
643 hists->nr_entries = hists->stats.total_period = 0;
644 hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
645 hists__reset_col_len(hists);
647 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
648 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
/* With --exclude-other, parentless entries stay hidden regardless. */
650 if (symbol_conf.exclude_other && !h->parent)
653 if (hists__filter_entry_by_dso(hists, h))
656 hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
/*
 * Set the THREAD filter bit on @he when a thread filter is active and
 * the entry belongs to a different thread (returns elided here).
 */
660 static bool hists__filter_entry_by_thread(struct hists *hists,
661 struct hist_entry *he)
663 if (hists->thread_filter != NULL &&
664 he->thread != hists->thread_filter) {
665 he->filtered |= (1 << HIST_FILTER__THREAD);
/*
 * Re-apply the thread filter to every output entry, rebuilding totals
 * and column widths from the entries that remain visible.
 */
672 void hists__filter_by_thread(struct hists *hists)
676 hists->nr_entries = hists->stats.total_period = 0;
677 hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
678 hists__reset_col_len(hists);
680 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
681 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
683 if (hists__filter_entry_by_thread(hists, h))
686 hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
/*
 * Set the SYMBOL filter bit on @he when a symbol-name filter string is
 * active and the entry's symbol name does not contain it as a
 * substring (returns elided here).
 */
690 static bool hists__filter_entry_by_symbol(struct hists *hists,
691 struct hist_entry *he)
693 if (hists->symbol_filter_str != NULL &&
694 (!he->ms.sym || strstr(he->ms.sym->name,
695 hists->symbol_filter_str) == NULL)) {
696 he->filtered |= (1 << HIST_FILTER__SYMBOL);
/*
 * Re-apply the symbol filter to every output entry, rebuilding totals
 * and column widths from the entries that remain visible.
 */
703 void hists__filter_by_symbol(struct hists *hists)
707 hists->nr_entries = hists->stats.total_period = 0;
708 hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
709 hists__reset_col_len(hists);
711 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
712 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
714 if (hists__filter_entry_by_symbol(hists, h))
717 hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
/* Thin wrapper: count a hit at @ip for event @evidx on this entry's symbol. */
721 int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
723 return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
/* Thin wrapper around symbol__annotate() for this entry's symbol/map. */
726 int hist_entry__annotate(struct hist_entry *he, size_t privsize)
728 return symbol__annotate(he->ms.sym, he->ms.map, privsize);
/* Bump both the aggregate (slot 0) and the per-type event counters. */
731 void events_stats__inc(struct events_stats *stats, u32 type)
733 ++stats->nr_events[0];
734 ++stats->nr_events[type];
/* Convenience wrapper for this hists' embedded events_stats. */
737 void hists__inc_nr_events(struct hists *hists, u32 type)
739 events_stats__inc(&hists->stats, type);
/*
 * Insert a zero-stat placeholder entry matching @pair into @hists, so
 * that comparison output (see hists__link below in the file) has a row
 * to attach the pair to. Returns the new entry.
 */
742 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
743 struct hist_entry *pair)
745 struct rb_root *root;
747 struct rb_node *parent = NULL;
748 struct hist_entry *he;
751 if (sort__need_collapse)
752 root = &hists->entries_collapsed;
754 root = hists->entries_in;
760 he = rb_entry(parent, struct hist_entry, rb_node_in);
762 cmp = hist_entry__collapse(he, pair);
773 he = hist_entry__new(pair);
/* Dummy entries carry no samples of their own. */
775 memset(&he->stat, 0, sizeof(he->stat));
777 rb_link_node(&he->rb_node_in, parent, p);
778 rb_insert_color(&he->rb_node_in, root);
779 hists__inc_nr_entries(hists, he);
/*
 * Binary-search the collapsed (or input) tree for an entry equal to
 * @he under hist_entry__collapse() ordering (descent/return elided).
 */
785 static struct hist_entry *hists__find_entry(struct hists *hists,
786 struct hist_entry *he)
790 if (sort__need_collapse)
791 n = hists->entries_collapsed.rb_node;
793 n = hists->entries_in->rb_node;
796 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
797 int64_t cmp = hist_entry__collapse(iter, he);
811 * Look for pairs to link to the leader buckets (hist_entries):
/*
 * For each entry in @leader, look up the equal entry in @other and
 * pair them up for comparison output.
 */
813 void hists__match(struct hists *leader, struct hists *other)
815 struct rb_root *root;
817 struct hist_entry *pos, *pair;
819 if (sort__need_collapse)
820 root = &leader->entries_collapsed;
822 root = leader->entries_in;
824 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
825 pos = rb_entry(nd, struct hist_entry, rb_node_in);
826 pair = hists__find_entry(other, pos);
829 hist_entry__add_pair(pair, pos);
834 * Look for entries in the other hists that are not present in the leader, if
835 * we find them, just add a dummy entry on the leader hists, with period=0,
836 * nr_events=0, to serve as the list header.
838 int hists__link(struct hists *leader, struct hists *other)
840 struct rb_root *root;
842 struct hist_entry *pos, *pair;
844 if (sort__need_collapse)
845 root = &other->entries_collapsed;
847 root = other->entries_in;
849 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
850 pos = rb_entry(nd, struct hist_entry, rb_node_in);
852 if (!hist_entry__has_pairs(pos)) {
853 pair = hists__add_dummy_entry(leader, pos);
856 hist_entry__add_pair(pos, pair);