8 #include <traceevent/event-parse.h>
/*
 * Global sort state: default sort-key strings for each report mode
 * (normal/branch/memory/top/diff), the user-selected sort and field
 * orders, and flags recording which sort keys are in use.
 */
11 const char default_parent_pattern[] = "^sys_|^do_page_fault";
12 const char *parent_pattern = default_parent_pattern;
13 const char default_sort_order[] = "comm,dso,symbol";
14 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
15 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
16 const char default_top_sort_order[] = "dso,symbol";
17 const char default_diff_sort_order[] = "dso,symbol";
18 const char *sort_order;
19 const char *field_order;
/* compiled regex of callee symbols to ignore; valid only when have_ignore_callees != 0 */
20 regex_t ignore_callees_regex;
21 int have_ignore_callees = 0;
22 int sort__need_collapse = 0;
23 int sort__has_parent = 0;
24 int sort__has_sym = 0;
25 int sort__has_dso = 0;
26 int sort__has_socket = 0;
27 enum sort_mode sort__mode = SORT_MODE__NORMAL;
/*
 * vsnprintf wrapper: when symbol_conf.field_sep is set, occurrences of the
 * separator character in the formatted output are located with strchr and
 * replaced so CSV-style output stays parseable.
 * NOTE(review): this listing is elided here (va_start/va_end and the
 * replacement loop body are not visible) — confirm against the full file.
 */
30 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
36 n = vsnprintf(bf, size, fmt, ap);
37 if (symbol_conf.field_sep && n > 0) {
41 sep = strchr(sep, *symbol_conf.field_sep);
/* Three-way compare for the case where one or both sort keys are NULL. */
54 static int64_t cmp_null(const void *l, const void *r)
/* --sort pid: order hist entries by owning thread tid. */
67 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
69 return right->thread->tid - left->thread->tid;
/* Print "  tid:comm"; width is clamped to >= 7 then reduced for the tid field. */
72 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
73 size_t size, unsigned int width)
75 const char *comm = thread__comm_str(he->thread);
77 width = max(7U, width) - 6;
78 return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
79 width, width, comm ?: "");
82 struct sort_entry sort_thread = {
83 .se_header = " Pid:Command",
84 .se_cmp = sort__thread_cmp,
85 .se_snprintf = hist_entry__thread_snprintf,
86 .se_width_idx = HISTC_THREAD,
/* --sort comm: cmp/collapse/sort all compare the comm strings. */
92 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
/* NOTE(review): comment says "addr" but the code compares comm *strings* — stale comment. */
94 /* Compare the addr that should be unique among comm */
95 return strcmp(comm__str(right->comm), comm__str(left->comm));
99 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
101 /* Compare the addr that should be unique among comm */
102 return strcmp(comm__str(right->comm), comm__str(left->comm));
106 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
108 return strcmp(comm__str(right->comm), comm__str(left->comm));
/* Left-justified, width-bounded comm string. */
111 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
112 size_t size, unsigned int width)
114 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
117 struct sort_entry sort_comm = {
118 .se_header = "Command",
119 .se_cmp = sort__comm_cmp,
120 .se_collapse = sort__comm_collapse,
121 .se_sort = sort__comm_sort,
122 .se_snprintf = hist_entry__comm_snprintf,
123 .se_width_idx = HISTC_COMM,
/*
 * Compare two maps by their DSO name.  NULL maps/DSOs sort via cmp_null;
 * long vs short name selection (verbose-dependent) is elided in this listing.
 */
128 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
130 struct dso *dso_l = map_l ? map_l->dso : NULL;
131 struct dso *dso_r = map_r ? map_r->dso : NULL;
132 const char *dso_name_l, *dso_name_r;
134 if (!dso_l || !dso_r)
135 return cmp_null(dso_r, dso_l);
138 dso_name_l = dso_l->long_name;
139 dso_name_r = dso_r->long_name;
141 dso_name_l = dso_l->short_name;
142 dso_name_r = dso_r->short_name;
145 return strcmp(dso_name_l, dso_name_r);
/* --sort dso */
149 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
151 return _sort__dso_cmp(right->ms.map, left->ms.map);
/* Print DSO short name (long name when verbose — continuation elided); "[unknown]" otherwise. */
154 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
155 size_t size, unsigned int width)
157 if (map && map->dso) {
158 const char *dso_name = !verbose ? map->dso->short_name :
160 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
163 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
166 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
167 size_t size, unsigned int width)
169 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
172 struct sort_entry sort_dso = {
173 .se_header = "Shared Object",
174 .se_cmp = sort__dso_cmp,
175 .se_snprintf = hist_entry__dso_snprintf,
176 .se_width_idx = HISTC_DSO,
/* --sort symbol: raw-address fallback comparator (u64 diff cast to int64). */
181 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
183 return (int64_t)(right_ip - left_ip);
/* Compare two symbols by start then end address; NULL handled via cmp_null. */
186 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
188 if (!sym_l || !sym_r)
189 return cmp_null(sym_l, sym_r);
194 if (sym_l->start != sym_r->start)
195 return (int64_t)(sym_r->start - sym_l->start);
197 return (int64_t)(sym_r->end - sym_l->end);
201 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
205 if (!left->ms.sym && !right->ms.sym)
206 return _sort__addr_cmp(left->ip, right->ip);
209 * comparing symbol address alone is not enough since it's a
210 * relative address within a dso.
/* When "dso" is not already a sort key, fold the dso comparison in first. */
212 if (!sort__has_dso) {
213 ret = sort__dso_cmp(left, right);
218 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
/* Secondary sort by symbol name for display ordering. */
222 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
224 if (!left->ms.sym || !right->ms.sym)
225 return cmp_null(left->ms.sym, right->ms.sym);
227 return strcmp(right->ms.sym->name, left->ms.sym->name);
/*
 * Format "[addr origin] [level] name+off" depending on verbosity/map type.
 * NOTE(review): several branches are elided in this listing; width/precision
 * arguments on the elided continuation lines are not visible.
 */
230 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
231 u64 ip, char level, char *bf, size_t size,
237 char o = map ? dso__symtab_origin(map->dso) : '!';
238 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
239 BITS_PER_LONG / 4 + 2, ip, o);
242 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
244 if (map->type == MAP__VARIABLE) {
245 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
246 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
247 ip - map->unmap_ip(map, sym->start));
248 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
251 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
256 size_t len = BITS_PER_LONG / 4;
257 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
259 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
269 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
270 size_t size, unsigned int width)
272 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
273 he->level, bf, size, width);
276 struct sort_entry sort_sym = {
277 .se_header = "Symbol",
278 .se_cmp = sort__sym_cmp,
279 .se_sort = sort__sym_sort,
280 .se_snprintf = hist_entry__sym_snprintf,
281 .se_width_idx = HISTC_SYMBOL,
/*
 * --sort srcline: lazily resolve and cache each entry's source line
 * ("file:line") the first time it is compared, then strcmp the strings.
 * NOTE(review): map is dereferenced below; the elided lines presumably
 * assign SRCLINE_UNKNOWN when ms.map is NULL — confirm in the full file.
 */
287 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
289 if (!left->srcline) {
291 left->srcline = SRCLINE_UNKNOWN;
293 struct map *map = left->ms.map;
294 left->srcline = get_srcline(map->dso,
295 map__rip_2objdump(map, left->ip),
299 if (!right->srcline) {
301 right->srcline = SRCLINE_UNKNOWN;
303 struct map *map = right->ms.map;
304 right->srcline = get_srcline(map->dso,
305 map__rip_2objdump(map, right->ip),
306 right->ms.sym, true);
309 return strcmp(right->srcline, left->srcline);
312 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
313 size_t size, unsigned int width)
315 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline);
318 struct sort_entry sort_srcline = {
319 .se_header = "Source:Line",
320 .se_cmp = sort__srcline_cmp,
321 .se_snprintf = hist_entry__srcline_snprintf,
322 .se_width_idx = HISTC_SRCLINE,
/* Sentinel ("" string) cached on entries whose source file cannot be resolved. */
327 static char no_srcfile[1];
/* Resolve the source file for an entry via __get_srcline (line info discarded). */
329 static char *get_srcfile(struct hist_entry *e)
332 struct map *map = e->ms.map;
334 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
335 e->ms.sym, false, true);
336 if (!strcmp(sf, SRCLINE_UNKNOWN))
/* --sort srcfile: lazy-cache then strcmp, mirroring sort__srcline_cmp. */
348 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
350 if (!left->srcfile) {
352 left->srcfile = no_srcfile;
354 left->srcfile = get_srcfile(left);
356 if (!right->srcfile) {
358 right->srcfile = no_srcfile;
360 right->srcfile = get_srcfile(right);
362 return strcmp(right->srcfile, left->srcfile);
365 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
366 size_t size, unsigned int width)
368 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile);
371 struct sort_entry sort_srcfile = {
372 .se_header = "Source File",
373 .se_cmp = sort__srcfile_cmp,
374 .se_snprintf = hist_entry__srcfile_snprintf,
375 .se_width_idx = HISTC_SRCFILE,
/* --sort parent: compare the resolved parent symbols by name; NULL via cmp_null. */
381 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
383 struct symbol *sym_l = left->parent;
384 struct symbol *sym_r = right->parent;
386 if (!sym_l || !sym_r)
387 return cmp_null(sym_l, sym_r);
389 return strcmp(sym_r->name, sym_l->name);
392 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
393 size_t size, unsigned int width)
395 return repsep_snprintf(bf, size, "%-*.*s", width, width,
396 he->parent ? he->parent->name : "[other]");
399 struct sort_entry sort_parent = {
400 .se_header = "Parent symbol",
401 .se_cmp = sort__parent_cmp,
402 .se_snprintf = hist_entry__parent_snprintf,
403 .se_width_idx = HISTC_PARENT,
/* --sort cpu: numeric compare on the sample's cpu. */
409 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
411 return right->cpu - left->cpu;
414 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
415 size_t size, unsigned int width)
417 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
420 struct sort_entry sort_cpu = {
422 .se_cmp = sort__cpu_cmp,
423 .se_snprintf = hist_entry__cpu_snprintf,
424 .se_width_idx = HISTC_CPU,
/* --sort socket: numeric compare on the processor socket id. */
430 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
432 return right->socket - left->socket;
435 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
436 size_t size, unsigned int width)
/* NOTE(review): width is unsigned, so width-3 wraps huge if width < 3 — verify callers guarantee width >= 3. */
438 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
441 struct sort_entry sort_socket = {
442 .se_header = "Socket",
443 .se_cmp = sort__socket_cmp,
444 .se_snprintf = hist_entry__socket_snprintf,
445 .se_width_idx = HISTC_SOCKET,
448 /* sort keys for branch stacks */
/* --sort dso_from: compare branch source maps; entries without branch info via cmp_null. */
451 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
453 if (!left->branch_info || !right->branch_info)
454 return cmp_null(left->branch_info, right->branch_info);
456 return _sort__dso_cmp(left->branch_info->from.map,
457 right->branch_info->from.map);
460 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
461 size_t size, unsigned int width)
464 return _hist_entry__dso_snprintf(he->branch_info->from.map,
467 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
/* --sort dso_to: same as above for the branch target map. */
471 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
473 if (!left->branch_info || !right->branch_info)
474 return cmp_null(left->branch_info, right->branch_info);
476 return _sort__dso_cmp(left->branch_info->to.map,
477 right->branch_info->to.map);
480 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
481 size_t size, unsigned int width)
484 return _hist_entry__dso_snprintf(he->branch_info->to.map,
487 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
491 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
493 struct addr_map_symbol *from_l = &left->branch_info->from;
494 struct addr_map_symbol *from_r = &right->branch_info->from;
496 if (!left->branch_info || !right->branch_info)
497 return cmp_null(left->branch_info, right->branch_info);
499 from_l = &left->branch_info->from;
500 from_r = &right->branch_info->from;
502 if (!from_l->sym && !from_r->sym)
503 return _sort__addr_cmp(from_l->addr, from_r->addr);
505 return _sort__sym_cmp(from_l->sym, from_r->sym);
/* --sort symbol_to: branch target symbol; NULL branch_info checked before use. */
509 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
511 struct addr_map_symbol *to_l, *to_r;
513 if (!left->branch_info || !right->branch_info)
514 return cmp_null(left->branch_info, right->branch_info);
516 to_l = &left->branch_info->to;
517 to_r = &right->branch_info->to;
519 if (!to_l->sym && !to_r->sym)
520 return _sort__addr_cmp(to_l->addr, to_r->addr);
522 return _sort__sym_cmp(to_l->sym, to_r->sym);
/* Print the branch source symbol, or "N/A" without branch info. */
525 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
526 size_t size, unsigned int width)
528 if (he->branch_info) {
529 struct addr_map_symbol *from = &he->branch_info->from;
531 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
532 he->level, bf, size, width);
535 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
/* Print the branch target symbol, or "N/A" without branch info. */
538 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
539 size_t size, unsigned int width)
541 if (he->branch_info) {
542 struct addr_map_symbol *to = &he->branch_info->to;
544 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
545 he->level, bf, size, width);
548 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
/* sort_entry descriptors for the branch-stack dso/symbol keys above. */
551 struct sort_entry sort_dso_from = {
552 .se_header = "Source Shared Object",
553 .se_cmp = sort__dso_from_cmp,
554 .se_snprintf = hist_entry__dso_from_snprintf,
555 .se_width_idx = HISTC_DSO_FROM,
558 struct sort_entry sort_dso_to = {
559 .se_header = "Target Shared Object",
560 .se_cmp = sort__dso_to_cmp,
561 .se_snprintf = hist_entry__dso_to_snprintf,
562 .se_width_idx = HISTC_DSO_TO,
565 struct sort_entry sort_sym_from = {
566 .se_header = "Source Symbol",
567 .se_cmp = sort__sym_from_cmp,
568 .se_snprintf = hist_entry__sym_from_snprintf,
569 .se_width_idx = HISTC_SYMBOL_FROM,
572 struct sort_entry sort_sym_to = {
573 .se_header = "Target Symbol",
574 .se_cmp = sort__sym_to_cmp,
575 .se_snprintf = hist_entry__sym_to_snprintf,
576 .se_width_idx = HISTC_SYMBOL_TO,
/*
 * --sort mispredict: compare the mispred/predicted flag pairs.
 * NOTE(review): the combination of mp and p into the return value is on
 * elided lines — confirm in the full file.
 */
580 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
584 if (!left->branch_info || !right->branch_info)
585 return cmp_null(left->branch_info, right->branch_info);
587 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
588 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
/* Print "N/A" without branch info; predicted/mispred labels set on elided lines. */
592 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
593 size_t size, unsigned int width){
594 static const char *out = "N/A";
596 if (he->branch_info) {
597 if (he->branch_info->flags.predicted)
599 else if (he->branch_info->flags.mispred)
603 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
607 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
609 return left->branch_info->flags.cycles -
610 right->branch_info->flags.cycles;
613 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
614 size_t size, unsigned int width)
616 if (he->branch_info->flags.cycles == 0)
617 return repsep_snprintf(bf, size, "%-*s", width, "-");
618 return repsep_snprintf(bf, size, "%-*hd", width,
619 he->branch_info->flags.cycles);
/* sort_entry descriptor for the branch cycles key. */
622 struct sort_entry sort_cycles = {
623 .se_header = "Basic Block Cycles",
624 .se_cmp = sort__cycles_cmp,
625 .se_snprintf = hist_entry__cycles_snprintf,
626 .se_width_idx = HISTC_CYCLES,
629 /* --sort daddr_sym */
/* Compare data-access addresses; entries without mem_info compare as 0. */
631 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
633 uint64_t l = 0, r = 0;
636 l = left->mem_info->daddr.addr;
638 r = right->mem_info->daddr.addr;
640 return (int64_t)(r - l);
/* Print the data address's symbol (elided guards presumably check mem_info). */
643 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
644 size_t size, unsigned int width)
647 struct map *map = NULL;
648 struct symbol *sym = NULL;
651 addr = he->mem_info->daddr.addr;
652 map = he->mem_info->daddr.map;
653 sym = he->mem_info->daddr.sym;
655 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
/* --sort symbol_iaddr: same scheme for the instruction address. */
660 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
662 uint64_t l = 0, r = 0;
665 l = left->mem_info->iaddr.addr;
667 r = right->mem_info->iaddr.addr;
669 return (int64_t)(r - l);
672 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
673 size_t size, unsigned int width)
676 struct map *map = NULL;
677 struct symbol *sym = NULL;
680 addr = he->mem_info->iaddr.addr;
681 map = he->mem_info->iaddr.map;
682 sym = he->mem_info->iaddr.sym;
684 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
/* --sort dso_daddr: compare the maps containing the data addresses. */
689 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
691 struct map *map_l = NULL;
692 struct map *map_r = NULL;
695 map_l = left->mem_info->daddr.map;
697 map_r = right->mem_info->daddr.map;
699 return _sort__dso_cmp(map_l, map_r);
702 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
703 size_t size, unsigned int width)
705 struct map *map = NULL;
708 map = he->mem_info->daddr.map;
710 return _hist_entry__dso_snprintf(map, bf, size, width);
/*
 * --sort locked/tlb/mem/snoop: each comparator copies the entry's
 * perf_mem_data_src (substituting the *_NA value when mem_info is absent
 * — the guards are on elided lines) and subtracts the relevant bitfield.
 */
714 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
716 union perf_mem_data_src data_src_l;
717 union perf_mem_data_src data_src_r;
720 data_src_l = left->mem_info->data_src;
722 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
725 data_src_r = right->mem_info->data_src;
727 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
729 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
732 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
733 size_t size, unsigned int width)
736 u64 mask = PERF_MEM_LOCK_NA;
739 mask = he->mem_info->data_src.mem_lock;
741 if (mask & PERF_MEM_LOCK_NA)
743 else if (mask & PERF_MEM_LOCK_LOCKED)
748 return repsep_snprintf(bf, size, "%-*s", width, out);
752 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
754 union perf_mem_data_src data_src_l;
755 union perf_mem_data_src data_src_r;
758 data_src_l = left->mem_info->data_src;
760 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
763 data_src_r = right->mem_info->data_src;
765 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
767 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
/* Bit-position-indexed label table for PERF_MEM_TLB_* access bits. */
770 static const char * const tlb_access[] = {
779 #define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
/*
 * Build a comma-separated TLB access string, then append " hit"/" miss".
 * NOTE(review): the "sz - l" strncat bound assumes l exactly tracks the
 * bytes appended so far; if an earlier strncat truncated, sz - l could
 * wrap — verify l's bookkeeping on the elided lines.
 */
781 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
782 size_t size, unsigned int width)
785 size_t sz = sizeof(out) - 1; /* -1 for null termination */
787 u64 m = PERF_MEM_TLB_NA;
793 m = he->mem_info->data_src.mem_dtlb;
795 hit = m & PERF_MEM_TLB_HIT;
796 miss = m & PERF_MEM_TLB_MISS;
798 /* already taken care of */
799 m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
801 for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
808 strncat(out, tlb_access[i], sz - l);
809 l += strlen(tlb_access[i]);
814 strncat(out, " hit", sz - l);
816 strncat(out, " miss", sz - l);
818 return repsep_snprintf(bf, size, "%-*s", width, out);
822 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
824 union perf_mem_data_src data_src_l;
825 union perf_mem_data_src data_src_r;
828 data_src_l = left->mem_info->data_src;
830 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
833 data_src_r = right->mem_info->data_src;
835 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
837 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
/* Labels for PERF_MEM_LVL_* memory-hierarchy bits. */
840 static const char * const mem_lvl[] = {
849 "Remote RAM (1 hop)",
850 "Remote RAM (2 hops)",
851 "Remote Cache (1 hop)",
852 "Remote Cache (2 hops)",
856 #define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
/* Same accumulation scheme as the TLB printer, over the mem_lvl bits. */
858 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
859 size_t size, unsigned int width)
862 size_t sz = sizeof(out) - 1; /* -1 for null termination */
864 u64 m = PERF_MEM_LVL_NA;
868 m = he->mem_info->data_src.mem_lvl;
872 hit = m & PERF_MEM_LVL_HIT;
873 miss = m & PERF_MEM_LVL_MISS;
875 /* already taken care of */
876 m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
878 for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
885 strncat(out, mem_lvl[i], sz - l);
886 l += strlen(mem_lvl[i]);
891 strncat(out, " hit", sz - l);
893 strncat(out, " miss", sz - l);
895 return repsep_snprintf(bf, size, "%-*s", width, out);
899 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
901 union perf_mem_data_src data_src_l;
902 union perf_mem_data_src data_src_r;
905 data_src_l = left->mem_info->data_src;
907 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
910 data_src_r = right->mem_info->data_src;
912 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
914 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
/* Labels for PERF_MEM_SNOOP_* bits. */
917 static const char * const snoop_access[] = {
924 #define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
926 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
927 size_t size, unsigned int width)
930 size_t sz = sizeof(out) - 1; /* -1 for null termination */
932 u64 m = PERF_MEM_SNOOP_NA;
937 m = he->mem_info->data_src.mem_snoop;
939 for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
946 strncat(out, snoop_access[i], sz - l);
947 l += strlen(snoop_access[i]);
953 return repsep_snprintf(bf, size, "%-*s", width, out);
/* Round an address down to its cache-line boundary (cacheline_size is a power of two). */
956 static inline u64 cl_address(u64 address)
958 /* return the cacheline of the address */
959 return (address & ~(cacheline_size - 1));
/*
 * --sort dcacheline: multi-level compare — cpumode, then the data map's
 * identity (maj/min/ino/ino_generation), then pid for userspace-anonymous
 * maps, finally the cache-line of al_addr.  The gotos that skip to the
 * address compare are on elided lines.
 */
963 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
966 struct map *l_map, *r_map;
968 if (!left->mem_info) return -1;
969 if (!right->mem_info) return 1;
971 /* group event types together */
972 if (left->cpumode > right->cpumode) return -1;
973 if (left->cpumode < right->cpumode) return 1;
975 l_map = left->mem_info->daddr.map;
976 r_map = right->mem_info->daddr.map;
978 /* if both are NULL, jump to sort on al_addr instead */
979 if (!l_map && !r_map)
982 if (!l_map) return -1;
983 if (!r_map) return 1;
985 if (l_map->maj > r_map->maj) return -1;
986 if (l_map->maj < r_map->maj) return 1;
988 if (l_map->min > r_map->min) return -1;
989 if (l_map->min < r_map->min) return 1;
991 if (l_map->ino > r_map->ino) return -1;
992 if (l_map->ino < r_map->ino) return 1;
994 if (l_map->ino_generation > r_map->ino_generation) return -1;
995 if (l_map->ino_generation < r_map->ino_generation) return 1;
998 * Addresses with no major/minor numbers are assumed to be
999 * anonymous in userspace. Sort those on pid then address.
1001 * The kernel and non-zero major/minor mapped areas are
1002 * assumed to be unity mapped. Sort those on address.
1005 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1006 (!(l_map->flags & MAP_SHARED)) &&
1007 !l_map->maj && !l_map->min && !l_map->ino &&
1008 !l_map->ino_generation) {
1009 /* userspace anonymous */
1011 if (left->thread->pid_ > right->thread->pid_) return -1;
1012 if (left->thread->pid_ < right->thread->pid_) return 1;
1016 /* al_addr does all the right addr - start + offset calculations */
1017 l = cl_address(left->mem_info->daddr.al_addr);
1018 r = cl_address(right->mem_info->daddr.al_addr);
1020 if (l > r) return -1;
1021 if (l < r) return 1;
/* Print the cache-line address via the symbol printer; "[S]" level for shared file-backed maps. */
1026 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1027 size_t size, unsigned int width)
1031 struct map *map = NULL;
1032 struct symbol *sym = NULL;
1033 char level = he->level;
1036 addr = cl_address(he->mem_info->daddr.al_addr);
1037 map = he->mem_info->daddr.map;
1038 sym = he->mem_info->daddr.sym;
1040 /* print [s] for shared data mmaps */
1041 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1042 map && (map->type == MAP__VARIABLE) &&
1043 (map->flags & MAP_SHARED) &&
1044 (map->maj || map->min || map->ino ||
1045 map->ino_generation))
1050 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
/* sort_entry descriptor for the mispredict key. */
1054 struct sort_entry sort_mispredict = {
1055 .se_header = "Branch Mispredicted",
1056 .se_cmp = sort__mispredict_cmp,
1057 .se_snprintf = hist_entry__mispredict_snprintf,
1058 .se_width_idx = HISTC_MISPREDICT,
/* Average weight per event; 0 when no events were accumulated (avoids div-by-zero). */
1061 static u64 he_weight(struct hist_entry *he)
1063 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
/* --sort local_weight: compare average weights (u64 difference, implicitly int64). */
1067 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1069 return he_weight(left) - he_weight(right);
1072 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1073 size_t size, unsigned int width)
1075 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1078 struct sort_entry sort_local_weight = {
1079 .se_header = "Local Weight",
1080 .se_cmp = sort__local_weight_cmp,
1081 .se_snprintf = hist_entry__local_weight_snprintf,
1082 .se_width_idx = HISTC_LOCAL_WEIGHT,
/* --sort weight: compare total accumulated weights. */
1086 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1088 return left->stat.weight - right->stat.weight;
1091 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1092 size_t size, unsigned int width)
1094 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1097 struct sort_entry sort_global_weight = {
1098 .se_header = "Weight",
1099 .se_cmp = sort__global_weight_cmp,
1100 .se_snprintf = hist_entry__global_weight_snprintf,
1101 .se_width_idx = HISTC_GLOBAL_WEIGHT,
/* sort_entry descriptors for the memory-mode keys. */
1104 struct sort_entry sort_mem_daddr_sym = {
1105 .se_header = "Data Symbol",
1106 .se_cmp = sort__daddr_cmp,
1107 .se_snprintf = hist_entry__daddr_snprintf,
1108 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1111 struct sort_entry sort_mem_iaddr_sym = {
1112 .se_header = "Code Symbol",
1113 .se_cmp = sort__iaddr_cmp,
1114 .se_snprintf = hist_entry__iaddr_snprintf,
1115 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1118 struct sort_entry sort_mem_daddr_dso = {
1119 .se_header = "Data Object",
1120 .se_cmp = sort__dso_daddr_cmp,
1121 .se_snprintf = hist_entry__dso_daddr_snprintf,
/* NOTE(review): reuses the daddr *symbol* width index for the dso column — verify intended. */
1122 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1125 struct sort_entry sort_mem_locked = {
1126 .se_header = "Locked",
1127 .se_cmp = sort__locked_cmp,
1128 .se_snprintf = hist_entry__locked_snprintf,
1129 .se_width_idx = HISTC_MEM_LOCKED,
1132 struct sort_entry sort_mem_tlb = {
1133 .se_header = "TLB access",
1134 .se_cmp = sort__tlb_cmp,
1135 .se_snprintf = hist_entry__tlb_snprintf,
1136 .se_width_idx = HISTC_MEM_TLB,
1139 struct sort_entry sort_mem_lvl = {
1140 .se_header = "Memory access",
1141 .se_cmp = sort__lvl_cmp,
1142 .se_snprintf = hist_entry__lvl_snprintf,
1143 .se_width_idx = HISTC_MEM_LVL,
1146 struct sort_entry sort_mem_snoop = {
1147 .se_header = "Snoop",
1148 .se_cmp = sort__snoop_cmp,
1149 .se_snprintf = hist_entry__snoop_snprintf,
1150 .se_width_idx = HISTC_MEM_SNOOP,
1153 struct sort_entry sort_mem_dcacheline = {
1154 .se_header = "Data Cacheline",
1155 .se_cmp = sort__dcacheline_cmp,
1156 .se_snprintf = hist_entry__dcacheline_snprintf,
1157 .se_width_idx = HISTC_MEM_DCACHELINE,
/* --sort abort: equality-only compare of the transaction-abort flag (0 or 1). */
1161 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1163 if (!left->branch_info || !right->branch_info)
1164 return cmp_null(left->branch_info, right->branch_info);
1166 return left->branch_info->flags.abort !=
1167 right->branch_info->flags.abort;
/* Print abort flag ("N/A" without branch info; labels set on elided lines). */
1170 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1171 size_t size, unsigned int width)
1173 static const char *out = "N/A";
1175 if (he->branch_info) {
1176 if (he->branch_info->flags.abort)
1182 return repsep_snprintf(bf, size, "%-*s", width, out);
1185 struct sort_entry sort_abort = {
1186 .se_header = "Transaction abort",
1187 .se_cmp = sort__abort_cmp,
1188 .se_snprintf = hist_entry__abort_snprintf,
1189 .se_width_idx = HISTC_ABORT,
/* --sort in_tx: equality-only compare of the in-transaction flag. */
1193 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1195 if (!left->branch_info || !right->branch_info)
1196 return cmp_null(left->branch_info, right->branch_info);
1198 return left->branch_info->flags.in_tx !=
1199 right->branch_info->flags.in_tx;
1202 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1203 size_t size, unsigned int width)
1205 static const char *out = "N/A";
1207 if (he->branch_info) {
1208 if (he->branch_info->flags.in_tx)
1214 return repsep_snprintf(bf, size, "%-*s", width, out);
1217 struct sort_entry sort_in_tx = {
1218 .se_header = "Branch in transaction",
1219 .se_cmp = sort__in_tx_cmp,
1220 .se_snprintf = hist_entry__in_tx_snprintf,
1221 .se_width_idx = HISTC_IN_TX,
/* --sort transaction: numeric compare of the raw transaction flag word. */
1225 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1227 return left->transaction - right->transaction;
/* Append str at p (caller guarantees space); returns the new end pointer. */
1230 static inline char *add_str(char *p, const char *str)
1233 return p + strlen(str)
1236 static struct txbit {
/* flag -> label table; skip_for_len==1 entries are excluded from the column width. */
1241 { PERF_TXN_ELISION, "EL ", 0 },
1242 { PERF_TXN_TRANSACTION, "TX ", 1 },
1243 { PERF_TXN_SYNC, "SYNC ", 1 },
1244 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1245 { PERF_TXN_RETRY, "RETRY ", 0 },
1246 { PERF_TXN_CONFLICT, "CON ", 0 },
1247 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1248 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
/* Worst-case width of the transaction column, from the non-skipped labels. */
1252 int hist_entry__transaction_len(void)
1257 for (i = 0; txbits[i].name; i++) {
1258 if (!txbits[i].skip_for_len)
1259 len += strlen(txbits[i].name);
1261 len += 4; /* :XX<space> */
/* Render each set txbit's label, "NEITHER " if neither sync nor async, then ":<abort-code>". */
1265 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1266 size_t size, unsigned int width)
1268 u64 t = he->transaction;
1274 for (i = 0; txbits[i].name; i++)
1275 if (txbits[i].flag & t)
1276 p = add_str(p, txbits[i].name);
1277 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1278 p = add_str(p, "NEITHER ");
1279 if (t & PERF_TXN_ABORT_MASK) {
1280 sprintf(p, ":%" PRIx64,
1281 (t & PERF_TXN_ABORT_MASK) >>
1282 PERF_TXN_ABORT_SHIFT);
1286 return repsep_snprintf(bf, size, "%-*s", width, buf);
1289 struct sort_entry sort_transaction = {
1290 .se_header = "Transaction ",
1291 .se_cmp = sort__transaction_cmp,
1292 .se_snprintf = hist_entry__transaction_snprintf,
1293 .se_width_idx = HISTC_TRANSACTION,
/* Maps a user-visible sort-key name onto its sort_entry implementation. */
1296 struct sort_dimension {
1298 struct sort_entry *entry;
/* NOTE(review): the #undef between the DIM redefinitions is on elided lines. */
1302 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1304 static struct sort_dimension common_sort_dimensions[] = {
1305 DIM(SORT_PID, "pid", sort_thread),
1306 DIM(SORT_COMM, "comm", sort_comm),
1307 DIM(SORT_DSO, "dso", sort_dso),
1308 DIM(SORT_SYM, "symbol", sort_sym),
1309 DIM(SORT_PARENT, "parent", sort_parent),
1310 DIM(SORT_CPU, "cpu", sort_cpu),
1311 DIM(SORT_SOCKET, "socket", sort_socket),
1312 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1313 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1314 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1315 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1316 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
/* Branch-stack dimensions, indexed relative to __SORT_BRANCH_STACK. */
1321 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1323 static struct sort_dimension bstack_sort_dimensions[] = {
1324 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1325 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1326 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1327 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1328 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1329 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1330 DIM(SORT_ABORT, "abort", sort_abort),
1331 DIM(SORT_CYCLES, "cycles", sort_cycles),
/* Memory-mode dimensions, indexed relative to __SORT_MEMORY_MODE. */
1336 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1338 static struct sort_dimension memory_sort_dimensions[] = {
1339 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1340 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1341 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1342 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1343 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1344 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1345 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1346 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
/* Output-field (hpp) dimensions: percentage/period columns. */
1351 struct hpp_dimension {
1353 struct perf_hpp_fmt *fmt;
1357 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1359 static struct hpp_dimension hpp_sort_dimensions[] = {
1360 DIM(PERF_HPP__OVERHEAD, "overhead"),
1361 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1362 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1363 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1364 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1365 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1366 DIM(PERF_HPP__SAMPLES, "sample"),
1367 DIM(PERF_HPP__PERIOD, "period"),
1372 struct hpp_sort_entry {
1373 struct perf_hpp_fmt hpp;
1374 struct sort_entry *se;
1377 bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1379 struct hpp_sort_entry *hse_a;
1380 struct hpp_sort_entry *hse_b;
1382 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1385 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1386 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1388 return hse_a->se == hse_b->se;
1391 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1393 struct hpp_sort_entry *hse;
1395 if (!perf_hpp__is_sort_entry(fmt))
1398 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1399 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
/*
 * hpp .header callback for sort-entry columns: print the column name
 * left-aligned into hpp->buf. Width comes from the user override
 * (fmt->user_len) when set, otherwise from the tracked column width
 * of this evsel's hists (the fallback branch condition is elided here).
 */
1402 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1403 struct perf_evsel *evsel)
1405 struct hpp_sort_entry *hse;
1406 size_t len = fmt->user_len;
1408 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1411 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1413 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
/*
 * hpp .width callback: report the column width for this sort entry,
 * preferring the user override. NOTE(review): the final return
 * statement is elided from this excerpt; presumably it returns len.
 */
1416 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1417 struct perf_hpp *hpp __maybe_unused,
1418 struct perf_evsel *evsel)
1420 struct hpp_sort_entry *hse;
1421 size_t len = fmt->user_len;
1423 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1426 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
/*
 * hpp .entry callback: format one hist_entry's value for this column
 * by delegating to the wrapped sort_entry's se_snprintf(), using the
 * user-requested width or the tracked column width of he->hists.
 */
1431 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1432 struct hist_entry *he)
1434 struct hpp_sort_entry *hse;
1435 size_t len = fmt->user_len;
1437 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1440 len = hists__col_len(he->hists, hse->se->se_width_idx);
1442 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
/* hpp .cmp callback: straight delegation to the sort_entry's se_cmp. */
1445 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1446 struct hist_entry *a, struct hist_entry *b)
1448 struct hpp_sort_entry *hse;
1450 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1451 return hse->se->se_cmp(a, b);
/*
 * hpp .collapse callback: use the sort_entry's dedicated se_collapse
 * when it provides one, otherwise fall back to se_cmp (GNU ?: elides
 * the repeated operand).
 */
1454 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1455 struct hist_entry *a, struct hist_entry *b)
1457 struct hpp_sort_entry *hse;
1458 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1460 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1461 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1462 return collapse_fn(a, b);
/*
 * hpp .sort callback: prefer the sort_entry's se_sort for final output
 * ordering, falling back to se_cmp when none is defined. Mirrors
 * __sort__hpp_collapse() above.
 */
1465 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1466 struct hist_entry *a, struct hist_entry *b)
1468 struct hpp_sort_entry *hse;
1469 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1471 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1472 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1473 return sort_fn(a, b);
/*
 * Allocate and fully initialize an hpp_sort_entry adapter for the
 * given sort dimension, wiring every perf_hpp_fmt callback to the
 * __sort__hpp_* trampolines above.
 * Returns the new entry; the allocation-failure branch and the final
 * return statement are elided from this excerpt (presumably returns
 * NULL on failure, hse on success — the pr_err suggests the NULL
 * path).
 */
1476 static struct hpp_sort_entry *
1477 __sort_dimension__alloc_hpp(struct sort_dimension *sd)
1479 struct hpp_sort_entry *hse;
1481 hse = malloc(sizeof(*hse));
1483 pr_err("Memory allocation failed\n");
1487 hse->se = sd->entry;
1488 hse->hpp.name = sd->entry->se_header;
1489 hse->hpp.header = __sort__hpp_header;
1490 hse->hpp.width = __sort__hpp_width;
1491 hse->hpp.entry = __sort__hpp_entry;
/* No .color: sort-entry columns are plain text. */
1492 hse->hpp.color = NULL;
1494 hse->hpp.cmp = __sort__hpp_cmp;
1495 hse->hpp.collapse = __sort__hpp_collapse;
1496 hse->hpp.sort = __sort__hpp_sort;
1498 INIT_LIST_HEAD(&hse->hpp.list);
1499 INIT_LIST_HEAD(&hse->hpp.sort_list);
1500 hse->hpp.elide = false;
/* 0 means "no user width override" for the __sort__hpp_* callbacks. */
1502 hse->hpp.user_len = 0;
/*
 * A format is a sort-entry adapter iff its header callback is our
 * trampoline — function-pointer identity is used as the type tag.
 */
1507 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1509 return format->header == __sort__hpp_header;
/*
 * Allocate an hpp adapter for sd and register it as a sort key.
 * NOTE(review): the NULL check on hse and the return value are elided
 * from this excerpt.
 */
1512 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1514 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1519 perf_hpp__register_sort_field(&hse->hpp);
/*
 * Same as __sort_dimension__add_hpp_sort() but registers sd as an
 * output column instead of a sort key.
 */
1523 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
1525 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1530 perf_hpp__column_register(&hse->hpp);
/*
 * Adapter for sorting/printing a raw tracepoint field ("event.field"
 * sort keys). Tied to one evsel; dynamic_len records the widest value
 * seen for FIELD_IS_DYNAMIC fields (grown in __sort__hde_cmp()).
 */
1534 struct hpp_dynamic_entry {
1535 struct perf_hpp_fmt hpp;
1536 struct perf_evsel *evsel;
1537 struct format_field *field;
1538 unsigned dynamic_len;
/*
 * Compute (and cache in hde->hpp.len) the column width for a dynamic
 * tracepoint field: considers the field-name length, the field size,
 * and the max dynamic length observed so far. The combining logic
 * between these candidates is elided from this excerpt.
 */
1541 static int hde_width(struct hpp_dynamic_entry *hde)
1543 if (!hde->hpp.len) {
1544 int len = hde->dynamic_len;
1545 int namelen = strlen(hde->field->name);
1546 int fieldlen = hde->field->size;
1551 if (!(hde->field->flags & FIELD_IS_STRING)) {
1552 /* length for print hex numbers */
/* Two hex digits per byte plus the "0x" prefix. */
1553 fieldlen = hde->field->size * 2 + 2;
1560 return hde->hpp.len;
/*
 * hpp .header callback for dynamic entries: print the raw field's name
 * right-aligned, using the user width override when set, otherwise the
 * computed hde_width().
 */
1563 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1564 struct perf_evsel *evsel __maybe_unused)
1566 struct hpp_dynamic_entry *hde;
1567 size_t len = fmt->user_len;
1569 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1572 len = hde_width(hde);
1574 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
/*
 * hpp .width callback for dynamic entries. NOTE(review): the final
 * return statement is elided from this excerpt; presumably it returns
 * len, mirroring __sort__hpp_width().
 */
1577 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1578 struct perf_hpp *hpp __maybe_unused,
1579 struct perf_evsel *evsel __maybe_unused)
1581 struct hpp_dynamic_entry *hde;
1582 size_t len = fmt->user_len;
1584 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1587 len = hde_width(hde);
/*
 * hpp .entry callback for dynamic entries: pretty-print the raw
 * tracepoint field from he->raw_data via libtraceevent into a
 * trace_seq, then copy it (right-aligned) into hpp->buf. Entries
 * belonging to a different evsel than this column print "N/A".
 */
1592 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1593 struct hist_entry *he)
1595 struct hpp_dynamic_entry *hde;
1596 size_t len = fmt->user_len;
1597 struct trace_seq seq;
1600 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1603 len = hde_width(hde);
1605 if (hists_to_evsel(he->hists) != hde->evsel)
1606 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, "N/A");
1608 trace_seq_init(&seq);
1609 pevent_print_field(&seq, he->raw_data, hde->field);
1610 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, seq.buffer);
/* trace_seq allocates its buffer; release it after copying out. */
1611 trace_seq_destroy(&seq);
/*
 * Compare two hist entries by the raw bytes of a tracepoint field.
 * For FIELD_IS_DYNAMIC fields the on-record descriptor packs the
 * payload location into 32 bits: low 16 bits = offset, next 16 bits
 * = size. The widest dynamic value seen is recorded so hde_width()
 * can size the output column.
 * NOTE(review): only a->hists is checked against hde->evsel here;
 * whether b is validated (and what is returned on mismatch) is in
 * elided lines — confirm b->raw_data is safe to read at this offset.
 * NOTE(review): the return value of pevent_read_number_field() is not
 * checked; a failure would leave 'dyn' unset.
 */
1615 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1616 struct hist_entry *a, struct hist_entry *b)
1618 struct hpp_dynamic_entry *hde;
1619 struct format_field *field;
1620 unsigned offset, size;
1622 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1624 if (hists_to_evsel(a->hists) != hde->evsel)
1628 if (field->flags & FIELD_IS_DYNAMIC) {
1629 unsigned long long dyn;
1631 pevent_read_number_field(field, a->raw_data, &dyn);
1632 offset = dyn & 0xffff;
1633 size = (dyn >> 16) & 0xffff;
1635 /* record max width for output */
1636 if (size > hde->dynamic_len)
1637 hde->dynamic_len = size;
/* Fixed-size field: location comes straight from the event format. */
1639 offset = field->offset;
1643 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
/*
 * Allocate and initialize a dynamic-field adapter for (evsel, field),
 * wiring all perf_hpp_fmt callbacks to the __sort__hde_* helpers.
 * The same compare routine serves cmp/collapse/sort since raw-byte
 * comparison needs no separate collapse semantics.
 * NOTE(review): the allocation-failure branch and the final return
 * are elided from this excerpt (the pr_debug suggests a NULL return
 * path).
 */
1646 static struct hpp_dynamic_entry *
1647 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
1649 struct hpp_dynamic_entry *hde;
1651 hde = malloc(sizeof(*hde));
1653 pr_debug("Memory allocation failed\n");
1659 hde->dynamic_len = 0;
1661 hde->hpp.name = field->name;
1662 hde->hpp.header = __sort__hde_header;
1663 hde->hpp.width = __sort__hde_width;
1664 hde->hpp.entry = __sort__hde_entry;
1665 hde->hpp.color = NULL;
1667 hde->hpp.cmp = __sort__hde_cmp;
1668 hde->hpp.collapse = __sort__hde_cmp;
1669 hde->hpp.sort = __sort__hde_cmp;
1671 INIT_LIST_HEAD(&hde->hpp.list);
1672 INIT_LIST_HEAD(&hde->hpp.sort_list);
1673 hde->hpp.elide = false;
/* 0 means "no user width override" for the __sort__hde_* callbacks. */
1675 hde->hpp.user_len = 0;
/*
 * Parse a sort token of the form "event.field", locate the matching
 * tracepoint evsel in the evlist and the named field in its format,
 * then register a dynamic sort entry for it.
 * Returns 0 on success, negative on error (exact error paths, string
 * duplication and cleanup lines are elided from this excerpt).
 */
1680 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
1682 char *str, *event_name, *field_name;
1683 struct perf_evsel *evsel, *pos;
1684 struct format_field *field;
1685 struct hpp_dynamic_entry *hde;
/* Split "event.field" at the first dot; both halves are required. */
1696 field_name = strchr(str, '.');
1697 if (field_name == NULL) {
1701 *field_name++ = '\0';
/* Find the evsel whose name matches the event part of the token. */
1704 evlist__for_each(evlist, pos) {
1705 if (!strcmp(pos->name, event_name)) {
1711 if (evsel == NULL) {
1712 pr_debug("Cannot find event: %s\n", event_name);
/* Raw-field sorting only makes sense for tracepoint events. */
1717 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
1718 pr_debug("%s is not a tracepoint event\n", event_name);
1723 field = pevent_find_any_field(evsel->tp_format, field_name);
1724 if (field == NULL) {
1725 pr_debug("Cannot find event field for %s.%s\n",
1726 event_name, field_name);
1731 hde = __alloc_dynamic_entry(evsel, field);
1737 perf_hpp__register_sort_field(&hde->hpp);
/*
 * Register sd as a sort key (via the hpp adapter) and note globally
 * that hist entries need a collapse pass when this key defines its
 * own collapse function. Guard conditions around these lines (e.g. a
 * "taken" check) are elided from this excerpt.
 */
1744 static int __sort_dimension__add(struct sort_dimension *sd)
1749 if (__sort_dimension__add_hpp_sort(sd) < 0)
1752 if (sd->entry->se_collapse)
1753 sort__need_collapse = 1;
/*
 * Register an hpp dimension (overhead/period style column) as a sort
 * key. Guard lines and the return value are elided from this excerpt.
 */
1760 static int __hpp_dimension__add(struct hpp_dimension *hd)
1765 perf_hpp__register_sort_field(hd->fmt);
/*
 * Register sd as an output column (--fields), mirroring
 * __sort_dimension__add() for the output side.
 */
1770 static int __sort_dimension__add_output(struct sort_dimension *sd)
1775 if (__sort_dimension__add_hpp_output(sd) < 0)
/*
 * Register an hpp dimension as an output column. Guard lines and the
 * return value are elided from this excerpt.
 */
1782 static int __hpp_dimension__add_output(struct hpp_dimension *hd)
1787 perf_hpp__column_register(hd->fmt);
/*
 * Public entry point: add the col'th hpp dimension as an output
 * column. The index must be a valid PERF_HPP__* value — out-of-range
 * input is a programming error, hence BUG_ON rather than -EINVAL.
 */
1792 int hpp_dimension__add_output(unsigned col)
1794 BUG_ON(col >= PERF_HPP__MAX_INDEX);
1795 return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
/*
 * Resolve one --sort token and register the matching dimension.
 * Tables are searched in order: common keys, hpp (overhead/period)
 * keys, branch-stack keys (branch mode only), memory keys (memory
 * mode only); as a last resort the token is tried as a dynamic
 * "event.field" tracepoint key. strncasecmp(tok, name, strlen(tok))
 * makes the match a case-insensitive prefix match, so abbreviations
 * of a key name are accepted.
 * NOTE(review): evlist is annotated __maybe_unused but IS used by the
 * add_dynamic_entry() call at the end — the annotation is stale.
 */
1798 static int sort_dimension__add(const char *tok,
1799 struct perf_evlist *evlist __maybe_unused)
1803 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
1804 struct sort_dimension *sd = &common_sort_dimensions[i];
1806 if (strncasecmp(tok, sd->name, strlen(tok)))
/* "parent" needs its regex compiled before it can be used. */
1809 if (sd->entry == &sort_parent) {
1810 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
1814 regerror(ret, &parent_regex, err, sizeof(err));
1815 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
1818 sort__has_parent = 1;
1819 } else if (sd->entry == &sort_sym) {
1822 * perf diff displays the performance difference amongst
1823 * two or more perf.data files. Those files could come
1824 * from different binaries. So we should not compare
1825 * their ips, but the name of symbol.
1827 if (sort__mode == SORT_MODE__DIFF)
1828 sd->entry->se_collapse = sort__sym_sort;
1830 } else if (sd->entry == &sort_dso) {
1832 } else if (sd->entry == &sort_socket) {
1833 sort__has_socket = 1;
1836 return __sort_dimension__add(sd);
1839 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
1840 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
1842 if (strncasecmp(tok, hd->name, strlen(tok)))
1845 return __hpp_dimension__add(hd);
1848 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
1849 struct sort_dimension *sd = &bstack_sort_dimensions[i];
1851 if (strncasecmp(tok, sd->name, strlen(tok)))
/* Branch-stack keys are only valid in branch sort mode. */
1854 if (sort__mode != SORT_MODE__BRANCH)
1857 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
1860 __sort_dimension__add(sd);
1864 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
1865 struct sort_dimension *sd = &memory_sort_dimensions[i];
1867 if (strncasecmp(tok, sd->name, strlen(tok)))
/* Memory keys are only valid in memory sort mode. */
1870 if (sort__mode != SORT_MODE__MEMORY)
1873 if (sd->entry == &sort_mem_daddr_sym)
1876 __sort_dimension__add(sd);
/* Last resort: interpret the token as "event.field". */
1880 if (!add_dynamic_entry(evlist, tok))
/*
 * Return the default --sort string for the current sort__mode; the
 * array is indexed directly by the SORT_MODE__* enum, so the order of
 * these initializers must match the enum. The entry for
 * SORT_MODE__NORMAL (presumably default_sort_order) is elided from
 * this excerpt.
 */
1886 static const char *get_default_sort_order(void)
1888 const char *default_sort_orders[] = {
1890 default_branch_sort_order,
1891 default_mem_sort_order,
1892 default_top_sort_order,
1893 default_diff_sort_order,
/* Any new SORT_MODE__* value must get a row above, or this trips. */
1896 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
1898 return default_sort_orders[sort__mode];
/*
 * Handle a "+key,..." --sort argument: splice the user's keys after
 * the mode's default sort order. A NULL or strict (no leading '+')
 * sort_order is left untouched. A bare "+" is rejected.
 */
1901 static int setup_sort_order(void)
1903 char *new_sort_order;
1906 * Append '+'-prefixed sort order to the default sort
1909 if (!sort_order || is_strict_order(sort_order))
1912 if (sort_order[1] == '\0') {
1913 error("Invalid --sort key: `+'");
1918 * We allocate new sort_order string, but we never free it,
1919 * because it's checked over the rest of the code.
1921 if (asprintf(&new_sort_order, "%s,%s",
1922 get_default_sort_order(), sort_order + 1) < 0) {
1923 error("Not enough memory to set up --sort");
1927 sort_order = new_sort_order;
/*
 * Build the sort-key list: normalize a '+'-prefixed sort_order, fall
 * back to the mode default when none was given (unless a strict
 * --fields order says to honor fields only), then tokenize on
 * comma/space and register each key via sort_dimension__add().
 * Cleanup of the strdup'd copy and the final return are elided from
 * this excerpt.
 */
1931 static int __setup_sorting(struct perf_evlist *evlist)
1933 char *tmp, *tok, *str;
1934 const char *sort_keys;
1937 ret = setup_sort_order();
1941 sort_keys = sort_order;
1942 if (sort_keys == NULL) {
1943 if (is_strict_order(field_order)) {
1945 * If user specified field order but no sort order,
1946 * we'll honor it and not add default sort orders.
1951 sort_keys = get_default_sort_order();
/* Work on a copy: strtok_r mutates the string it scans. */
1954 str = strdup(sort_keys);
1956 error("Not enough memory to setup sort keys");
1960 for (tok = strtok_r(str, ", ", &tmp);
1961 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1962 ret = sort_dimension__add(tok, evlist);
1963 if (ret == -EINVAL) {
1964 error("Invalid --sort key: `%s'", tok);
1966 } else if (ret == -ESRCH) {
1967 error("Unknown --sort key: `%s'", tok);
/*
 * Force the elide flag on every sort-entry format whose column-width
 * index matches idx (the flag assignment itself is elided from this
 * excerpt). Non-sort-entry formats are skipped.
 */
1976 void perf_hpp__set_elide(int idx, bool elide)
1978 struct perf_hpp_fmt *fmt;
1979 struct hpp_sort_entry *hse;
1981 perf_hpp__for_each_format(fmt) {
1982 if (!perf_hpp__is_sort_entry(fmt))
1985 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1986 if (hse->se->se_width_idx == idx) {
/*
 * A column is elidable when the user filtered it to exactly one value
 * (a single-entry strlist): the column would repeat that value on
 * every row, so print it once as a "# name: value" header instead.
 * The true/false returns are elided from this excerpt.
 */
1993 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
1995 if (list && strlist__nr_entries(list) == 1) {
1997 fprintf(fp, "# %s: %s\n", list_name,
1998 strlist__entry(list, 0)->s);
/*
 * Decide elision for the column identified by HISTC_* index idx by
 * consulting the matching symbol_conf filter list. Branch from/to
 * columns are only considered in branch sort mode. Several case
 * labels of the switch (and its braces) are elided from this excerpt.
 */
2004 static bool get_elide(int idx, FILE *output)
2008 return __get_elide(symbol_conf.sym_list, "symbol", output);
2010 return __get_elide(symbol_conf.dso_list, "dso", output);
2012 return __get_elide(symbol_conf.comm_list, "comm", output);
2017 if (sort__mode != SORT_MODE__BRANCH)
2021 case HISTC_SYMBOL_FROM:
2022 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2023 case HISTC_SYMBOL_TO:
2024 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2025 case HISTC_DSO_FROM:
2026 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2028 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
/*
 * Compute the elide flag for every sort-entry column, then make a
 * second pass so that not *all* columns end up elided (eliding
 * everything would leave nothing to display). The bodies of the
 * second and third loops are elided from this excerpt.
 */
2036 void sort__setup_elide(FILE *output)
2038 struct perf_hpp_fmt *fmt;
2039 struct hpp_sort_entry *hse;
2041 perf_hpp__for_each_format(fmt) {
2042 if (!perf_hpp__is_sort_entry(fmt))
2045 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2046 fmt->elide = get_elide(hse->se->se_width_idx, output);
2050 * It makes no sense to elide all of sort entries.
2051 * Just revert them to show up again.
2053 perf_hpp__for_each_format(fmt) {
2054 if (!perf_hpp__is_sort_entry(fmt))
2061 perf_hpp__for_each_format(fmt) {
2062 if (!perf_hpp__is_sort_entry(fmt))
/*
 * Resolve one --fields token: search the same four dimension tables
 * as sort_dimension__add() (case-insensitive prefix match) and
 * register the first hit as an output column. Unlike the sort path,
 * there is no dynamic "event.field" fallback here.
 */
2069 static int output_field_add(char *tok)
2073 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2074 struct sort_dimension *sd = &common_sort_dimensions[i];
2076 if (strncasecmp(tok, sd->name, strlen(tok)))
2079 return __sort_dimension__add_output(sd);
2082 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2083 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2085 if (strncasecmp(tok, hd->name, strlen(tok)))
2088 return __hpp_dimension__add_output(hd);
2091 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2092 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2094 if (strncasecmp(tok, sd->name, strlen(tok)))
2097 return __sort_dimension__add_output(sd);
2100 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2101 struct sort_dimension *sd = &memory_sort_dimensions[i];
2103 if (strncasecmp(tok, sd->name, strlen(tok)))
2106 return __sort_dimension__add_output(sd);
/*
 * Clear the "taken" flag on every dimension in all four tables so the
 * sort/output setup can be re-run from scratch (used when resetting
 * output fields).
 */
2112 static void reset_dimensions(void)
2116 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2117 common_sort_dimensions[i].taken = 0;
2119 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2120 hpp_sort_dimensions[i].taken = 0;
2122 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2123 bstack_sort_dimensions[i].taken = 0;
2125 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2126 memory_sort_dimensions[i].taken = 0;
/*
 * An order string is "strict" when it is non-NULL and does not start
 * with '+' (a leading '+' means "append to the defaults").
 */
2129 bool is_strict_order(const char *order)
2131 return order && (*order != '+');
/*
 * Parse the --fields string: duplicate it, skip a leading '+' (the
 * skip itself is in an elided line — strp presumably advances past
 * it), reject a bare "+", then tokenize on comma/space and register
 * each field via output_field_add(). Cleanup of the strdup'd copy and
 * the final return are elided from this excerpt.
 */
2134 static int __setup_output_field(void)
2136 char *tmp, *tok, *str, *strp;
2139 if (field_order == NULL)
2142 strp = str = strdup(field_order);
2144 error("Not enough memory to setup output fields");
2148 if (!is_strict_order(field_order))
2151 if (!strlen(strp)) {
2152 error("Invalid --fields key: `+'");
2156 for (tok = strtok_r(strp, ", ", &tmp);
2157 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2158 ret = output_field_add(tok);
2159 if (ret == -EINVAL) {
2160 error("Invalid --fields key: `%s'", tok);
2162 } else if (ret == -ESRCH) {
2163 error("Unknown --fields key: `%s'", tok);
/*
 * Top-level sorting setup: build the sort keys, implicitly add the
 * "parent" key when a non-default parent pattern was requested, set
 * up default output fields (skipped for perf diff, which manages its
 * own), parse --fields, then cross-copy: sort keys become output
 * columns and output columns are appended to the sort keys. Error
 * propagation between these steps is partially elided from this
 * excerpt.
 */
2173 int setup_sorting(struct perf_evlist *evlist)
2177 err = __setup_sorting(evlist);
2181 if (parent_pattern != default_parent_pattern) {
2182 err = sort_dimension__add("parent", evlist);
2190 * perf diff doesn't use default hpp output fields.
2192 if (sort__mode != SORT_MODE__DIFF)
2195 err = __setup_output_field();
2199 /* copy sort keys to output fields */
2200 perf_hpp__setup_output_field();
2201 /* and then copy output fields to sort keys */
2202 perf_hpp__append_sort_keys();
2207 void reset_output_field(void)
2209 sort__need_collapse = 0;
2210 sort__has_parent = 0;
2218 perf_hpp__reset_output_field();