#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

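/*
 * Added commentary: unresolved_col_width is BITS_PER_LONG / 4 because a
 * raw address printed in hex needs one character per nibble, e.g. 16
 * characters for a 64-bit address. The "+ 4 + 2" adjustments seen below
 * then make room for the "[.] " privilege-level marker and the "0x"
 * prefix.
 */
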
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

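/*
 * Worked example (added commentary): for a resolved symbol named "main"
 * (namelen == 4) the symbol column is sized to 4 + 4 = 8 characters;
 * with verbose output on a 64-bit build it grows by 16 + 2 + 3 to 29,
 * making room for the raw address and the symtab origin marker too.
 */
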
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

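/*
 * Worked example (added commentary): the "* 7 / 8" decay shrinks a
 * period of 1000 to 875, then 765, 669, ... on successive passes.
 * Integer division eventually drives small periods to 0, which is what
 * lets hists__decay_entry() below report an entry as ready for removal.
 */
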
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}

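/*
 * Ownership note (added commentary): hist_entry__new() takes its own
 * references on the maps and the thread (map__get()/thread__get()) and
 * duplicates the caller-owned branch_info; hist_entry__delete() further
 * down is the matching release path (map__zput(), thread__zput(),
 * zfree()).
 */
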
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

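/*
 * Usage sketch (added commentary, simplified): a sample handler that has
 * already resolved its addr_location would feed the histogram with
 * something like
 *
 *	he = __hists__add_entry(hists, &al, parent, NULL, NULL,
 *				sample->period, sample->weight,
 *				sample->transaction, true);
 *
 * Entries with equal sort keys are merged by hists__findnew_entry()
 * rather than duplicated.
 */
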
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

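/*
 * Worked example (added commentary): two samples of the same data
 * address with weights 30 and 50 end up with period == weight == 80 and
 * nr_events == 2 after merging, so the period-based sort order reflects
 * total cost rather than sample count.
 */
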
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;

	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, bi->flags.cycles ? bi->flags.cycles : 1,
				0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

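/*
 * Worked example (added commentary): for a recursive chain such as
 * main -> f -> f -> f, the he_cache lookup above matches the second and
 * third "f" against the first, so the callers are accumulated into a
 * single entry and the cumulative overhead cannot exceed 100%.
 */
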
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};

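/*
 * Protocol note (added commentary): hist_entry_iter__add() below drives
 * these callbacks in a fixed order -- prepare_entry, add_single_entry,
 * then add_next_entry for every next_entry that returns non-zero, and
 * finally finish_entry -- so each ops table only has to fill in the
 * stages it cares about.
 */
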
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

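/*
 * Usage sketch (added commentary, simplified): callers fill in a
 * struct hist_entry_iter and pick an ops table, e.g.
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, arg);
 *
 * switching .ops to hist_iter_branch, hist_iter_mem or
 * hist_iter_cumulative as the report mode demands.
 */
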
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

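/*
 * Design note (added commentary): hists keeps two input trees in
 * entries_in_array and flips entries_in between them under hists->lock,
 * so the collapse pass can drain one tree while new entries keep
 * landing in the other.
 */
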
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	bool use_callchain;

	if (evsel && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

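/*
 * Worked example (added commentary): with a total period of 200000 and
 * callchain_param.min_percent == 0.5, min_callchain_hits is 1000, so
 * callchain_param.sort() prunes any chain accounting for fewer than
 * 1000 period units.
 */
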
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */

void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

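/*
 * Usage note (added commentary): this pairing is what lets a tool such
 * as perf diff walk one hists as the "leader" and look up the matching
 * entry in another; hists__match() links entries present in both, while
 * hists__link() backfills zeroed dummy entries for the rest.
 */
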
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

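/*
 * Worked example (added commentary): perf records branch entries
 * newest-first, so with bs->nr == 3 the loop visits bi[2], bi[1], bi[0]
 * -- program order -- letting each iteration pass the previous branch
 * target as "prev" for the IPC computation.
 */
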
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}