/* tools/perf/util/hist.c */
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

struct callchain_param  callchain_param = {
        .mode   = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order  = ORDER_CALLEE,
        .key    = CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

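/*
 * Grow the per-column width bookkeeping so that every field of this
 * entry (symbol, comm, dso, branch/mem info, ...) will fit when the
 * histogram is printed.  Widths only grow here; they are reset via
 * hists__reset_col_len() before a full recalculation.
 */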
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                }
                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

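/*
 * Exponential decay: each round scales the accumulated period down to
 * 7/8 of its previous value, so an otherwise idle entry retains
 * (7/8)^n of its period after n rounds (~26% after 10) and eventually
 * ages out of a live display such as perf top.
 */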
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);

        diff = prev_period - he->stat.period;

        hists->stats.total_period -= diff;
        if (!he->filtered)
                hists->stats.total_non_filtered_period -= diff;

        return he->stat.period == 0;
}

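/*
 * Decay every entry, pruning those whose period reached zero as well
 * as the user/kernel entries the caller asked to zap, but keep any
 * entry still in use (e.g. being annotated in a browser).
 */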
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it here in
                 * case it gets new samples; we'll eventually free it when
                 * the user stops browsing and it again gets fully decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        --hists->nr_entries;
                        if (!n->filtered)
                                --hists->nr_non_filtered_entries;

                        hist_entry__free(n);
                }
        }
}

/*
 * histogram, sorted on item, collects periods
 */

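/*
 * Allocate a new entry as a copy of @template.  The callchain root,
 * when callchains are used, lives in extra space at the tail of the
 * same allocation, which is why sizeof(struct callchain_root) is
 * tacked onto the zalloc() below and no separate free is needed.
 */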
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (he->ms.map)
                        he->ms.map->referenced = true;

                if (he->branch_info) {
                        /*
                         * This branch info is (part of) an array allocated
                         * by sample__resolve_bstack() that will be freed
                         * after the new entries are added, so we need to
                         * save a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        if (he->branch_info->from.map)
                                he->branch_info->from.map->referenced = true;
                        if (he->branch_info->to.map)
                                he->branch_info->to.map->referenced = true;
                }

                if (he->mem_info) {
                        if (he->mem_info->iaddr.map)
                                he->mem_info->iaddr.map->referenced = true;
                        if (he->mem_info->daddr.map)
                                he->mem_info->daddr.map->referenced = true;
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
        }

        return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

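/*
 * Insert @entry into the current input rb-tree, keyed by the
 * configured sort criteria.  If an equal entry already exists, its
 * counts are accumulated instead of adding a duplicate node.
 */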
static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        he_stat__add_period(&he->stat, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /*
                         * If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                return NULL;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent,
                                      struct branch_info *bi,
                                      struct mem_info *mi,
                                      u64 period, u64 weight, u64 transaction)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = period,
                        .weight = weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = transaction,
        };

        return add_hist_entry(hists, &entry, al);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->collapse(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
        zfree(&he->branch_info);
        zfree(&he->mem_info);
        free_srcline(he->srcline);
        free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

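/*
 * Two input trees live in entries_in_array and are flipped under the
 * lock: the tree returned here is drained by the collapse pass while
 * new samples keep landing in the other one.
 */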
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

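/*
 * Drain one input tree into entries_collapsed, merging entries that
 * compare equal under the collapse criteria; only needed when the
 * sort keys make collapsing meaningful (sort__need_collapse).
 */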
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
}

/* Keep the full 64 bits: truncating fmt->sort() to int could flip the sign. */
static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->sort(a, b);
                if (cmp)
                        break;
        }

        return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
        hists->nr_non_filtered_entries = 0;
        hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
        hists->nr_entries = 0;
        hists->stats.total_period = 0;

        hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
        hists->nr_non_filtered_entries++;
        hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered)
                hists__inc_filter_stats(hists, h);

        hists->nr_entries++;
        hists->stats.total_period += h->stat.period;
}

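/*
 * Insert into the output tree in display order.  Note the inverted
 * test below: an entry that sorts higher goes to the left, so
 * rb_first() yields the hottest entry and an in-order walk prints
 * the histogram top-down.
 */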
static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                      min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

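/*
 * Rebuild hists->entries in output order.  min_callchain_hits turns
 * callchain_param.min_percent into an absolute period threshold;
 * callchain branches below it are filtered out by the sort callback
 * in __hists__insert_output_entry().
 */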
void hists__output_resort(struct hists *hists)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists__reset_stats(hists);
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_stats(hists, n);

                if (!n->filtered)
                        hists__calc_col_len(hists, n);
        }
}

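/*
 * Clear one filter bit on @h and, if no other filter still applies,
 * fold the entry back into the non-filtered totals and column widths.
 */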
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        /* force fold unfiltered entry for simplicity */
        h->ms.unfolded = false;
        h->row_offset = 0;

        hists->stats.nr_non_filtered_samples += h->stat.nr_events;

        hists__inc_filter_stats(hists, h);
        hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

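/*
 * nr_events[0] doubles as the total event count: the PERF_RECORD_*
 * types used to index this array start at 1, leaving slot 0 free.
 */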
void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

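/*
 * Add a zeroed placeholder for @pair to @hists so that an entry
 * present only in the other hists still gets a row, used by
 * hists__link() below (e.g. for perf diff style comparisons).
 */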
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_stats(hists, he);
                he->dummy = true;
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find any, add a dummy entry to the leader hists, with period=0 and
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}

u64 hists__total_period(struct hists *hists)
{
        return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
                hists->stats.total_period;
}

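/*
 * "relative" computes percentages against the filtered total (see
 * hists__total_period() above), "absolute" against the full total.
 */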
int parse_filter_percentage(const struct option *opt __maybe_unused,
                            const char *arg, int unset __maybe_unused)
{
        if (!strcmp(arg, "relative"))
                symbol_conf.filter_relative = true;
        else if (!strcmp(arg, "absolute"))
                symbol_conf.filter_relative = false;
        else
                return -1;

        return 0;
}

int perf_hist_config(const char *var, const char *value)
{
        if (!strcmp(var, "hist.percentage"))
                return parse_filter_percentage(NULL, value, 0);

        return 0;
}