1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <regex.h>
5 #include <stdlib.h>
6 #include <linux/mman.h>
7 #include <linux/time64.h>
8 #include "debug.h"
9 #include "dso.h"
10 #include "sort.h"
11 #include "hist.h"
12 #include "cacheline.h"
13 #include "comm.h"
14 #include "map.h"
15 #include "maps.h"
16 #include "symbol.h"
17 #include "map_symbol.h"
18 #include "branch.h"
19 #include "thread.h"
20 #include "evsel.h"
21 #include "evlist.h"
22 #include "srcline.h"
23 #include "strlist.h"
24 #include "strbuf.h"
25 #include "mem-events.h"
26 #include "mem-info.h"
27 #include "annotate.h"
28 #include "annotate-data.h"
29 #include "event.h"
30 #include "time-utils.h"
31 #include "cgroup.h"
32 #include "machine.h"
33 #include "trace-event.h"
34 #include <linux/kernel.h>
35 #include <linux/string.h>
36
37 #ifdef HAVE_LIBTRACEEVENT
38 #include <event-parse.h>
39 #endif
40
41 regex_t parent_regex;
42 const char default_parent_pattern[] = "^sys_|^do_page_fault";
43 const char *parent_pattern = default_parent_pattern;
44 const char *default_sort_order = "comm,dso,symbol";
45 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
46 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
47 const char default_top_sort_order[] = "dso,symbol";
48 const char default_diff_sort_order[] = "dso,symbol";
49 const char default_tracepoint_sort_order[] = "trace";
50 const char *sort_order;
51 const char *field_order;
52 regex_t ignore_callees_regex;
53 int have_ignore_callees = 0;
54 enum sort_mode sort__mode = SORT_MODE__NORMAL;
55 static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
56 static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
57
58 /*
59 * Some architectures have an Adjacent Cacheline Prefetch feature, which
60 * behaves as if the cacheline size were doubled. Enable this flag to
61 * check things at double-cacheline granularity.
62 */
63 bool chk_double_cl;
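/*
 * For example, assuming 64-byte cachelines, addresses 0x1000 and 0x1040 fall
 * in different cachelines but in the same double cacheline, so with
 * chk_double_cl set cl_address() groups them together.
 */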
64
65 /*
66 * Replaces all occurrences of the character passed via the:
67 *
68 * -t, --field-separator
69 *
70 * option, which selects a special separator character and disables padding
71 * with spaces. Every occurrence of that separator in symbol names (and other
72 * output) is replaced with a '.' character, so the separator stays unambiguous.
73 */
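/*
 * E.g. with -t ':' as the field separator, a symbol such as "ns::func" is
 * emitted as "ns..func", so a ':' in the output can only be a column break.
 */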
74 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
75 {
76 int n;
77 va_list ap;
78
79 va_start(ap, fmt);
80 n = vsnprintf(bf, size, fmt, ap);
81 if (symbol_conf.field_sep && n > 0) {
82 char *sep = bf;
83
84 while (1) {
85 sep = strchr(sep, *symbol_conf.field_sep);
86 if (sep == NULL)
87 break;
88 *sep = '.';
89 }
90 }
91 va_end(ap);
92
93 if (n >= (int)size)
94 return size - 1;
95 return n;
96 }
97
98 static int64_t cmp_null(const void *l, const void *r)
99 {
100 if (!l && !r)
101 return 0;
102 else if (!l)
103 return -1;
104 else
105 return 1;
106 }
107
108 /* --sort pid */
109
110 static int64_t
111 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
112 {
113 return thread__tid(right->thread) - thread__tid(left->thread);
114 }
115
116 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
117 size_t size, unsigned int width)
118 {
119 const char *comm = thread__comm_str(he->thread);
120
121 width = max(7U, width) - 8;
122 return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
123 width, width, comm ?: "");
124 }
125
126 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
127 {
128 const struct thread *th = arg;
129
130 if (type != HIST_FILTER__THREAD)
131 return -1;
132
133 return th && !RC_CHK_EQUAL(he->thread, th);
134 }
135
136 struct sort_entry sort_thread = {
137 .se_header = " Pid:Command",
138 .se_cmp = sort__thread_cmp,
139 .se_snprintf = hist_entry__thread_snprintf,
140 .se_filter = hist_entry__thread_filter,
141 .se_width_idx = HISTC_THREAD,
142 };
143
144 /* --sort simd */
145
146 static int64_t
147 sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
148 {
149 if (left->simd_flags.arch != right->simd_flags.arch)
150 return (int64_t) left->simd_flags.arch - right->simd_flags.arch;
151
152 return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
153 }
154
155 static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
156 {
157 u64 arch = simd_flags->arch;
158
159 if (arch & SIMD_OP_FLAGS_ARCH_SVE)
160 return "SVE";
161 else
162 return "n/a";
163 }
164
165 static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
166 size_t size, unsigned int width __maybe_unused)
167 {
168 const char *name;
169
170 if (!he->simd_flags.arch)
171 return repsep_snprintf(bf, size, "");
172
173 name = hist_entry__get_simd_name(&he->simd_flags);
174
175 if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
176 return repsep_snprintf(bf, size, "[e] %s", name);
177 else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
178 return repsep_snprintf(bf, size, "[p] %s", name);
179
180 return repsep_snprintf(bf, size, "[.] %s", name);
181 }
182
183 struct sort_entry sort_simd = {
184 .se_header = "Simd ",
185 .se_cmp = sort__simd_cmp,
186 .se_snprintf = hist_entry__simd_snprintf,
187 .se_width_idx = HISTC_SIMD,
188 };
189
190 /* --sort comm */
191
192 /*
193 * We can't use pointer comparison in functions below,
194 * because it gives different results based on pointer
195 * values, which could break some sorting assumptions.
196 */
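/*
 * E.g. two entries whose threads both ran as "bash" must compare equal here
 * even if they reference different struct comm instances.
 */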
197 static int64_t
198 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
199 {
200 return strcmp(comm__str(right->comm), comm__str(left->comm));
201 }
202
203 static int64_t
204 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
205 {
206 return strcmp(comm__str(right->comm), comm__str(left->comm));
207 }
208
209 static int64_t
210 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
211 {
212 return strcmp(comm__str(right->comm), comm__str(left->comm));
213 }
214
215 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
216 size_t size, unsigned int width)
217 {
218 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
219 }
220
221 struct sort_entry sort_comm = {
222 .se_header = "Command",
223 .se_cmp = sort__comm_cmp,
224 .se_collapse = sort__comm_collapse,
225 .se_sort = sort__comm_sort,
226 .se_snprintf = hist_entry__comm_snprintf,
227 .se_filter = hist_entry__thread_filter,
228 .se_width_idx = HISTC_COMM,
229 };
230
231 /* --sort dso */
232
233 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
234 {
235 struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
236 struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
237 const char *dso_name_l, *dso_name_r;
238
239 if (!dso_l || !dso_r)
240 return cmp_null(dso_r, dso_l);
241
242 if (verbose > 0) {
243 dso_name_l = dso__long_name(dso_l);
244 dso_name_r = dso__long_name(dso_r);
245 } else {
246 dso_name_l = dso__short_name(dso_l);
247 dso_name_r = dso__short_name(dso_r);
248 }
249
250 return strcmp(dso_name_l, dso_name_r);
251 }
252
253 static int64_t
254 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
255 {
256 return _sort__dso_cmp(right->ms.map, left->ms.map);
257 }
258
259 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
260 size_t size, unsigned int width)
261 {
262 const struct dso *dso = map ? map__dso(map) : NULL;
263 const char *dso_name = "[unknown]";
264
265 if (dso)
266 dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso);
267
268 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
269 }
270
271 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
272 size_t size, unsigned int width)
273 {
274 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
275 }
276
277 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
278 {
279 const struct dso *dso = arg;
280
281 if (type != HIST_FILTER__DSO)
282 return -1;
283
284 return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
285 }
286
287 struct sort_entry sort_dso = {
288 .se_header = "Shared Object",
289 .se_cmp = sort__dso_cmp,
290 .se_snprintf = hist_entry__dso_snprintf,
291 .se_filter = hist_entry__dso_filter,
292 .se_width_idx = HISTC_DSO,
293 };
294
295 /* --sort symbol */
296
297 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
298 {
299 return (int64_t)(right_ip - left_ip);
300 }
301
302 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
303 {
304 if (!sym_l || !sym_r)
305 return cmp_null(sym_l, sym_r);
306
307 if (sym_l == sym_r)
308 return 0;
309
310 if (sym_l->inlined || sym_r->inlined) {
311 int ret = strcmp(sym_l->name, sym_r->name);
312
313 if (ret)
314 return ret;
315 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
316 return 0;
317 }
318
319 if (sym_l->start != sym_r->start)
320 return (int64_t)(sym_r->start - sym_l->start);
321
322 return (int64_t)(sym_r->end - sym_l->end);
323 }
324
325 static int64_t
326 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
327 {
328 int64_t ret;
329
330 if (!left->ms.sym && !right->ms.sym)
331 return _sort__addr_cmp(left->ip, right->ip);
332
333 /*
334 * comparing symbol address alone is not enough since it's a
335 * relative address within a dso.
336 */
337 if (!hists__has(left->hists, dso)) {
338 ret = sort__dso_cmp(left, right);
339 if (ret != 0)
340 return ret;
341 }
342
343 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
344 }
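/*
 * Note: without the dso comparison above, symbols from two different DSOs
 * that happen to share the same relative start address could be merged.
 */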
345
346 static int64_t
347 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
348 {
349 if (!left->ms.sym || !right->ms.sym)
350 return cmp_null(left->ms.sym, right->ms.sym);
351
352 return strcmp(right->ms.sym->name, left->ms.sym->name);
353 }
354
355 static int _hist_entry__sym_snprintf(struct map_symbol *ms,
356 u64 ip, char level, char *bf, size_t size,
357 unsigned int width)
358 {
359 struct symbol *sym = ms->sym;
360 struct map *map = ms->map;
361 size_t ret = 0;
362
363 if (verbose > 0) {
364 struct dso *dso = map ? map__dso(map) : NULL;
365 char o = dso ? dso__symtab_origin(dso) : '!';
366 u64 rip = ip;
367
368 if (dso && dso__kernel(dso) && dso__adjust_symbols(dso))
369 rip = map__unmap_ip(map, ip);
370
371 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
372 BITS_PER_LONG / 4 + 2, rip, o);
373 }
374
375 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
376 if (sym && map) {
377 if (sym->type == STT_OBJECT) {
378 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
379 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
380 ip - map__unmap_ip(map, sym->start));
381 } else {
382 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
383 width - ret,
384 sym->name);
385 if (sym->inlined)
386 ret += repsep_snprintf(bf + ret, size - ret,
387 " (inlined)");
388 }
389 } else {
390 size_t len = BITS_PER_LONG / 4;
391 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
392 len, ip);
393 }
394
395 return ret;
396 }
397
398 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
399 {
400 return _hist_entry__sym_snprintf(&he->ms, he->ip,
401 he->level, bf, size, width);
402 }
403
404 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
405 {
406 const char *sym = arg;
407
408 if (type != HIST_FILTER__SYMBOL)
409 return -1;
410
411 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
412 }
413
414 struct sort_entry sort_sym = {
415 .se_header = "Symbol",
416 .se_cmp = sort__sym_cmp,
417 .se_sort = sort__sym_sort,
418 .se_snprintf = hist_entry__sym_snprintf,
419 .se_filter = hist_entry__sym_filter,
420 .se_width_idx = HISTC_SYMBOL,
421 };
422
423 /* --sort symoff */
424
425 static int64_t
426 sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
427 {
428 int64_t ret;
429
430 ret = sort__sym_cmp(left, right);
431 if (ret)
432 return ret;
433
434 return left->ip - right->ip;
435 }
436
437 static int64_t
438 sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
439 {
440 int64_t ret;
441
442 ret = sort__sym_sort(left, right);
443 if (ret)
444 return ret;
445
446 return left->ip - right->ip;
447 }
448
449 static int
450 hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
451 {
452 struct symbol *sym = he->ms.sym;
453
454 if (sym == NULL)
455 return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);
456
457 return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
458 }
459
460 struct sort_entry sort_sym_offset = {
461 .se_header = "Symbol Offset",
462 .se_cmp = sort__symoff_cmp,
463 .se_sort = sort__symoff_sort,
464 .se_snprintf = hist_entry__symoff_snprintf,
465 .se_filter = hist_entry__sym_filter,
466 .se_width_idx = HISTC_SYMBOL_OFFSET,
467 };
468
469 /* --sort srcline */
470
471 char *hist_entry__srcline(struct hist_entry *he)
472 {
473 return map__srcline(he->ms.map, he->ip, he->ms.sym);
474 }
475
476 static int64_t
477 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
478 {
479 int64_t ret;
480
481 ret = _sort__addr_cmp(left->ip, right->ip);
482 if (ret)
483 return ret;
484
485 return sort__dso_cmp(left, right);
486 }
487
488 static int64_t
489 sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
490 {
491 if (!left->srcline)
492 left->srcline = hist_entry__srcline(left);
493 if (!right->srcline)
494 right->srcline = hist_entry__srcline(right);
495
496 return strcmp(right->srcline, left->srcline);
497 }
498
499 static int64_t
500 sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
501 {
502 return sort__srcline_collapse(left, right);
503 }
504
505 static void
506 sort__srcline_init(struct hist_entry *he)
507 {
508 if (!he->srcline)
509 he->srcline = hist_entry__srcline(he);
510 }
511
512 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
513 size_t size, unsigned int width)
514 {
515 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
516 }
517
518 struct sort_entry sort_srcline = {
519 .se_header = "Source:Line",
520 .se_cmp = sort__srcline_cmp,
521 .se_collapse = sort__srcline_collapse,
522 .se_sort = sort__srcline_sort,
523 .se_init = sort__srcline_init,
524 .se_snprintf = hist_entry__srcline_snprintf,
525 .se_width_idx = HISTC_SRCLINE,
526 };
527
528 /* --sort srcline_from */
529
530 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
531 {
532 return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
533 }
534
535 static int64_t
536 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
537 {
538 return left->branch_info->from.addr - right->branch_info->from.addr;
539 }
540
541 static int64_t
542 sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
543 {
544 if (!left->branch_info->srcline_from)
545 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
546
547 if (!right->branch_info->srcline_from)
548 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
549
550 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
551 }
552
553 static int64_t
554 sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
555 {
556 return sort__srcline_from_collapse(left, right);
557 }
558
559 static void sort__srcline_from_init(struct hist_entry *he)
560 {
561 if (!he->branch_info->srcline_from)
562 he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
563 }
564
565 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
566 size_t size, unsigned int width)
567 {
568 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
569 }
570
571 struct sort_entry sort_srcline_from = {
572 .se_header = "From Source:Line",
573 .se_cmp = sort__srcline_from_cmp,
574 .se_collapse = sort__srcline_from_collapse,
575 .se_sort = sort__srcline_from_sort,
576 .se_init = sort__srcline_from_init,
577 .se_snprintf = hist_entry__srcline_from_snprintf,
578 .se_width_idx = HISTC_SRCLINE_FROM,
579 };
580
581 /* --sort srcline_to */
582
583 static int64_t
584 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
585 {
586 return left->branch_info->to.addr - right->branch_info->to.addr;
587 }
588
589 static int64_t
590 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
591 {
592 if (!left->branch_info->srcline_to)
593 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
594
595 if (!right->branch_info->srcline_to)
596 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
597
598 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
599 }
600
601 static int64_t
602 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
603 {
604 return sort__srcline_to_collapse(left, right);
605 }
606
607 static void sort__srcline_to_init(struct hist_entry *he)
608 {
609 if (!he->branch_info->srcline_to)
610 he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
611 }
612
613 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
614 size_t size, unsigned int width)
615 {
616 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
617 }
618
619 struct sort_entry sort_srcline_to = {
620 .se_header = "To Source:Line",
621 .se_cmp = sort__srcline_to_cmp,
622 .se_collapse = sort__srcline_to_collapse,
623 .se_sort = sort__srcline_to_sort,
624 .se_init = sort__srcline_to_init,
625 .se_snprintf = hist_entry__srcline_to_snprintf,
626 .se_width_idx = HISTC_SRCLINE_TO,
627 };
628
629 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
630 size_t size, unsigned int width)
631 {
632
633 struct symbol *sym = he->ms.sym;
634 struct annotated_branch *branch;
635 double ipc = 0.0, coverage = 0.0;
636 char tmp[64];
637
638 if (!sym)
639 return repsep_snprintf(bf, size, "%-*s", width, "-");
640
641 branch = symbol__annotation(sym)->branch;
642
643 if (branch && branch->hit_cycles)
644 ipc = branch->hit_insn / ((double)branch->hit_cycles);
645
646 if (branch && branch->total_insn) {
647 coverage = branch->cover_insn * 100.0 /
648 ((double)branch->total_insn);
649 }
650
651 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
652 return repsep_snprintf(bf, size, "%-*s", width, tmp);
653 }
654
655 struct sort_entry sort_sym_ipc = {
656 .se_header = "IPC [IPC Coverage]",
657 .se_cmp = sort__sym_cmp,
658 .se_snprintf = hist_entry__sym_ipc_snprintf,
659 .se_width_idx = HISTC_SYMBOL_IPC,
660 };
661
662 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
663 __maybe_unused,
664 char *bf, size_t size,
665 unsigned int width)
666 {
667 char tmp[64];
668
669 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
670 return repsep_snprintf(bf, size, "%-*s", width, tmp);
671 }
672
673 struct sort_entry sort_sym_ipc_null = {
674 .se_header = "IPC [IPC Coverage]",
675 .se_cmp = sort__sym_cmp,
676 .se_snprintf = hist_entry__sym_ipc_null_snprintf,
677 .se_width_idx = HISTC_SYMBOL_IPC,
678 };
679
680 /* --sort callchain_branch_predicted */
681
682 static int64_t
683 sort__callchain_branch_predicted_cmp(struct hist_entry *left __maybe_unused,
684 struct hist_entry *right __maybe_unused)
685 {
686 return 0;
687 }
688
689 static int hist_entry__callchain_branch_predicted_snprintf(
690 struct hist_entry *he, char *bf, size_t size, unsigned int width)
691 {
692 u64 branch_count, predicted_count;
693 double percent = 0.0;
694 char str[32];
695
696 callchain_branch_counts(he->callchain, &branch_count,
697 &predicted_count, NULL, NULL);
698
699 if (branch_count)
700 percent = predicted_count * 100.0 / branch_count;
701
702 snprintf(str, sizeof(str), "%.1f%%", percent);
703 return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
704 }
705
706 struct sort_entry sort_callchain_branch_predicted = {
707 .se_header = "Predicted",
708 .se_cmp = sort__callchain_branch_predicted_cmp,
709 .se_snprintf = hist_entry__callchain_branch_predicted_snprintf,
710 .se_width_idx = HISTC_CALLCHAIN_BRANCH_PREDICTED,
711 };
712
713 /* --sort callchain_branch_abort */
714
715 static int64_t
716 sort__callchain_branch_abort_cmp(struct hist_entry *left __maybe_unused,
717 struct hist_entry *right __maybe_unused)
718 {
719 return 0;
720 }
721
722 static int hist_entry__callchain_branch_abort_snprintf(struct hist_entry *he,
723 char *bf, size_t size,
724 unsigned int width)
725 {
726 u64 branch_count, abort_count;
727 char str[32];
728
729 callchain_branch_counts(he->callchain, &branch_count,
730 NULL, &abort_count, NULL);
731
732 snprintf(str, sizeof(str), "%" PRId64, abort_count);
733 return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
734 }
735
736 struct sort_entry sort_callchain_branch_abort = {
737 .se_header = "Abort",
738 .se_cmp = sort__callchain_branch_abort_cmp,
739 .se_snprintf = hist_entry__callchain_branch_abort_snprintf,
740 .se_width_idx = HISTC_CALLCHAIN_BRANCH_ABORT,
741 };
742
743 /* --sort callchain_branch_cycles */
744
745 static int64_t
746 sort__callchain_branch_cycles_cmp(struct hist_entry *left __maybe_unused,
747 struct hist_entry *right __maybe_unused)
748 {
749 return 0;
750 }
751
752 static int hist_entry__callchain_branch_cycles_snprintf(struct hist_entry *he,
753 char *bf, size_t size,
754 unsigned int width)
755 {
756 u64 branch_count, cycles_count, cycles = 0;
757 char str[32];
758
759 callchain_branch_counts(he->callchain, &branch_count,
760 NULL, NULL, &cycles_count);
761
762 if (branch_count)
763 cycles = cycles_count / branch_count;
764
765 snprintf(str, sizeof(str), "%" PRId64 "", cycles);
766 return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
767 }
768
769 struct sort_entry sort_callchain_branch_cycles = {
770 .se_header = "Cycles",
771 .se_cmp = sort__callchain_branch_cycles_cmp,
772 .se_snprintf = hist_entry__callchain_branch_cycles_snprintf,
773 .se_width_idx = HISTC_CALLCHAIN_BRANCH_CYCLES,
774 };
775
776 /* --sort srcfile */
777
778 static char no_srcfile[1];
779
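/*
 * hist_entry__get_srcfile() truncates the "file:line" string returned by
 * __get_srcline() at the ':' (e.g. "sort.c:123" becomes "sort.c").
 */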
780 static char *hist_entry__get_srcfile(struct hist_entry *e)
781 {
782 char *sf, *p;
783 struct map *map = e->ms.map;
784
785 if (!map)
786 return no_srcfile;
787
788 sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
789 e->ms.sym, false, true, true, e->ip);
790 if (sf == SRCLINE_UNKNOWN)
791 return no_srcfile;
792 p = strchr(sf, ':');
793 if (p && *sf) {
794 *p = 0;
795 return sf;
796 }
797 free(sf);
798 return no_srcfile;
799 }
800
801 static int64_t
802 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
803 {
804 return sort__srcline_cmp(left, right);
805 }
806
807 static int64_t
808 sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
809 {
810 if (!left->srcfile)
811 left->srcfile = hist_entry__get_srcfile(left);
812 if (!right->srcfile)
813 right->srcfile = hist_entry__get_srcfile(right);
814
815 return strcmp(right->srcfile, left->srcfile);
816 }
817
818 static int64_t
819 sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
820 {
821 return sort__srcfile_collapse(left, right);
822 }
823
824 static void sort__srcfile_init(struct hist_entry *he)
825 {
826 if (!he->srcfile)
827 he->srcfile = hist_entry__get_srcfile(he);
828 }
829
830 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
831 size_t size, unsigned int width)
832 {
833 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
834 }
835
836 struct sort_entry sort_srcfile = {
837 .se_header = "Source File",
838 .se_cmp = sort__srcfile_cmp,
839 .se_collapse = sort__srcfile_collapse,
840 .se_sort = sort__srcfile_sort,
841 .se_init = sort__srcfile_init,
842 .se_snprintf = hist_entry__srcfile_snprintf,
843 .se_width_idx = HISTC_SRCFILE,
844 };
845
846 /* --sort parent */
847
848 static int64_t
849 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
850 {
851 struct symbol *sym_l = left->parent;
852 struct symbol *sym_r = right->parent;
853
854 if (!sym_l || !sym_r)
855 return cmp_null(sym_l, sym_r);
856
857 return strcmp(sym_r->name, sym_l->name);
858 }
859
860 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
861 size_t size, unsigned int width)
862 {
863 return repsep_snprintf(bf, size, "%-*.*s", width, width,
864 he->parent ? he->parent->name : "[other]");
865 }
866
867 struct sort_entry sort_parent = {
868 .se_header = "Parent symbol",
869 .se_cmp = sort__parent_cmp,
870 .se_snprintf = hist_entry__parent_snprintf,
871 .se_width_idx = HISTC_PARENT,
872 };
873
874 /* --sort cpu */
875
876 static int64_t
877 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
878 {
879 return right->cpu - left->cpu;
880 }
881
882 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
883 size_t size, unsigned int width)
884 {
885 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
886 }
887
888 struct sort_entry sort_cpu = {
889 .se_header = "CPU",
890 .se_cmp = sort__cpu_cmp,
891 .se_snprintf = hist_entry__cpu_snprintf,
892 .se_width_idx = HISTC_CPU,
893 };
894
895 /* --sort cgroup_id */
896
897 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
898 {
899 return (int64_t)(right_dev - left_dev);
900 }
901
902 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
903 {
904 return (int64_t)(right_ino - left_ino);
905 }
906
907 static int64_t
908 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
909 {
910 int64_t ret;
911
912 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
913 if (ret != 0)
914 return ret;
915
916 return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
917 left->cgroup_id.ino);
918 }
919
920 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
921 char *bf, size_t size,
922 unsigned int width __maybe_unused)
923 {
924 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
925 he->cgroup_id.ino);
926 }
927
928 struct sort_entry sort_cgroup_id = {
929 .se_header = "cgroup id (dev/inode)",
930 .se_cmp = sort__cgroup_id_cmp,
931 .se_snprintf = hist_entry__cgroup_id_snprintf,
932 .se_width_idx = HISTC_CGROUP_ID,
933 };
934
935 /* --sort cgroup */
936
937 static int64_t
938 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
939 {
940 return right->cgroup - left->cgroup;
941 }
942
943 static int hist_entry__cgroup_snprintf(struct hist_entry *he,
944 char *bf, size_t size,
945 unsigned int width __maybe_unused)
946 {
947 const char *cgrp_name = "N/A";
948
949 if (he->cgroup) {
950 struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
951 he->cgroup);
952 if (cgrp != NULL)
953 cgrp_name = cgrp->name;
954 else
955 cgrp_name = "unknown";
956 }
957
958 return repsep_snprintf(bf, size, "%s", cgrp_name);
959 }
960
961 struct sort_entry sort_cgroup = {
962 .se_header = "Cgroup",
963 .se_cmp = sort__cgroup_cmp,
964 .se_snprintf = hist_entry__cgroup_snprintf,
965 .se_width_idx = HISTC_CGROUP,
966 };
967
968 /* --sort socket */
969
970 static int64_t
971 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
972 {
973 return right->socket - left->socket;
974 }
975
976 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
977 size_t size, unsigned int width)
978 {
979 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
980 }
981
982 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
983 {
984 int sk = *(const int *)arg;
985
986 if (type != HIST_FILTER__SOCKET)
987 return -1;
988
989 return sk >= 0 && he->socket != sk;
990 }
991
992 struct sort_entry sort_socket = {
993 .se_header = "Socket",
994 .se_cmp = sort__socket_cmp,
995 .se_snprintf = hist_entry__socket_snprintf,
996 .se_filter = hist_entry__socket_filter,
997 .se_width_idx = HISTC_SOCKET,
998 };
999
1000 /* --sort time */
1001
1002 static int64_t
1003 sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
1004 {
1005 return right->time - left->time;
1006 }
1007
1008 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
1009 size_t size, unsigned int width)
1010 {
1011 char he_time[32];
1012
1013 if (symbol_conf.nanosecs)
1014 timestamp__scnprintf_nsec(he->time, he_time,
1015 sizeof(he_time));
1016 else
1017 timestamp__scnprintf_usec(he->time, he_time,
1018 sizeof(he_time));
1019
1020 return repsep_snprintf(bf, size, "%-.*s", width, he_time);
1021 }
1022
1023 struct sort_entry sort_time = {
1024 .se_header = "Time",
1025 .se_cmp = sort__time_cmp,
1026 .se_snprintf = hist_entry__time_snprintf,
1027 .se_width_idx = HISTC_TIME,
1028 };
1029
1030 /* --sort trace */
1031
1032 #ifdef HAVE_LIBTRACEEVENT
1033 static char *get_trace_output(struct hist_entry *he)
1034 {
1035 struct trace_seq seq;
1036 struct evsel *evsel;
1037 struct tep_record rec = {
1038 .data = he->raw_data,
1039 .size = he->raw_size,
1040 };
1041 struct tep_event *tp_format;
1042
1043 evsel = hists_to_evsel(he->hists);
1044
1045 trace_seq_init(&seq);
1046 tp_format = evsel__tp_format(evsel);
1047 if (tp_format) {
1048 if (symbol_conf.raw_trace)
1049 tep_print_fields(&seq, he->raw_data, he->raw_size, tp_format);
1050 else
1051 tep_print_event(tp_format->tep, &seq, &rec, "%s", TEP_PRINT_INFO);
1052 }
1053
1054 /*
1055 * Trim the buffer, it starts at 4KB and we're not going to
1056 * add anything more to this buffer.
1057 */
1058 return realloc(seq.buffer, seq.len + 1);
1059 }
1060
1061 static int64_t
1062 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
1063 {
1064 struct evsel *evsel;
1065
1066 evsel = hists_to_evsel(left->hists);
1067 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1068 return 0;
1069
1070 if (left->trace_output == NULL)
1071 left->trace_output = get_trace_output(left);
1072 if (right->trace_output == NULL)
1073 right->trace_output = get_trace_output(right);
1074
1075 return strcmp(right->trace_output, left->trace_output);
1076 }
1077
1078 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
1079 size_t size, unsigned int width)
1080 {
1081 struct evsel *evsel;
1082
1083 evsel = hists_to_evsel(he->hists);
1084 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1085 return scnprintf(bf, size, "%-.*s", width, "N/A");
1086
1087 if (he->trace_output == NULL)
1088 he->trace_output = get_trace_output(he);
1089 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
1090 }
1091
1092 struct sort_entry sort_trace = {
1093 .se_header = "Trace output",
1094 .se_cmp = sort__trace_cmp,
1095 .se_snprintf = hist_entry__trace_snprintf,
1096 .se_width_idx = HISTC_TRACE,
1097 };
1098 #endif /* HAVE_LIBTRACEEVENT */
1099
1100 /* sort keys for branch stacks */
1101
1102 static int64_t
1103 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
1104 {
1105 if (!left->branch_info || !right->branch_info)
1106 return cmp_null(left->branch_info, right->branch_info);
1107
1108 return _sort__dso_cmp(left->branch_info->from.ms.map,
1109 right->branch_info->from.ms.map);
1110 }
1111
1112 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
1113 size_t size, unsigned int width)
1114 {
1115 if (he->branch_info)
1116 return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
1117 bf, size, width);
1118 else
1119 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1120 }
1121
1122 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
1123 const void *arg)
1124 {
1125 const struct dso *dso = arg;
1126
1127 if (type != HIST_FILTER__DSO)
1128 return -1;
1129
1130 return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
1131 map__dso(he->branch_info->from.ms.map) != dso);
1132 }
1133
1134 static int64_t
1135 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
1136 {
1137 if (!left->branch_info || !right->branch_info)
1138 return cmp_null(left->branch_info, right->branch_info);
1139
1140 return _sort__dso_cmp(left->branch_info->to.ms.map,
1141 right->branch_info->to.ms.map);
1142 }
1143
1144 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
1145 size_t size, unsigned int width)
1146 {
1147 if (he->branch_info)
1148 return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
1149 bf, size, width);
1150 else
1151 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1152 }
1153
1154 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
1155 const void *arg)
1156 {
1157 const struct dso *dso = arg;
1158
1159 if (type != HIST_FILTER__DSO)
1160 return -1;
1161
1162 return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
1163 map__dso(he->branch_info->to.ms.map) != dso);
1164 }
1165
1166 static int64_t
1167 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
1168 {
1169 struct addr_map_symbol *from_l, *from_r;
1170
1171 if (!left->branch_info || !right->branch_info)
1172 return cmp_null(left->branch_info, right->branch_info);
1173
1174 from_l = &left->branch_info->from;
1175 from_r = &right->branch_info->from;
1176
1177 if (!from_l->ms.sym && !from_r->ms.sym)
1178 return _sort__addr_cmp(from_l->addr, from_r->addr);
1179
1180 return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
1181 }
1182
1183 static int64_t
1184 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
1185 {
1186 struct addr_map_symbol *to_l, *to_r;
1187
1188 if (!left->branch_info || !right->branch_info)
1189 return cmp_null(left->branch_info, right->branch_info);
1190
1191 to_l = &left->branch_info->to;
1192 to_r = &right->branch_info->to;
1193
1194 if (!to_l->ms.sym && !to_r->ms.sym)
1195 return _sort__addr_cmp(to_l->addr, to_r->addr);
1196
1197 return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1198 }
1199
1200 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1201 size_t size, unsigned int width)
1202 {
1203 if (he->branch_info) {
1204 struct addr_map_symbol *from = &he->branch_info->from;
1205
1206 return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1207 from->al_level, bf, size, width);
1208 }
1209
1210 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1211 }
1212
1213 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1214 size_t size, unsigned int width)
1215 {
1216 if (he->branch_info) {
1217 struct addr_map_symbol *to = &he->branch_info->to;
1218
1219 return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1220 to->al_level, bf, size, width);
1221 }
1222
1223 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1224 }
1225
1226 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
1227 const void *arg)
1228 {
1229 const char *sym = arg;
1230
1231 if (type != HIST_FILTER__SYMBOL)
1232 return -1;
1233
1234 return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
1235 strstr(he->branch_info->from.ms.sym->name, sym));
1236 }
1237
1238 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
1239 const void *arg)
1240 {
1241 const char *sym = arg;
1242
1243 if (type != HIST_FILTER__SYMBOL)
1244 return -1;
1245
1246 return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
1247 strstr(he->branch_info->to.ms.sym->name, sym));
1248 }
1249
1250 struct sort_entry sort_dso_from = {
1251 .se_header = "Source Shared Object",
1252 .se_cmp = sort__dso_from_cmp,
1253 .se_snprintf = hist_entry__dso_from_snprintf,
1254 .se_filter = hist_entry__dso_from_filter,
1255 .se_width_idx = HISTC_DSO_FROM,
1256 };
1257
1258 struct sort_entry sort_dso_to = {
1259 .se_header = "Target Shared Object",
1260 .se_cmp = sort__dso_to_cmp,
1261 .se_snprintf = hist_entry__dso_to_snprintf,
1262 .se_filter = hist_entry__dso_to_filter,
1263 .se_width_idx = HISTC_DSO_TO,
1264 };
1265
1266 struct sort_entry sort_sym_from = {
1267 .se_header = "Source Symbol",
1268 .se_cmp = sort__sym_from_cmp,
1269 .se_snprintf = hist_entry__sym_from_snprintf,
1270 .se_filter = hist_entry__sym_from_filter,
1271 .se_width_idx = HISTC_SYMBOL_FROM,
1272 };
1273
1274 struct sort_entry sort_sym_to = {
1275 .se_header = "Target Symbol",
1276 .se_cmp = sort__sym_to_cmp,
1277 .se_snprintf = hist_entry__sym_to_snprintf,
1278 .se_filter = hist_entry__sym_to_filter,
1279 .se_width_idx = HISTC_SYMBOL_TO,
1280 };
1281
1282 static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1283 u64 ip, char level, char *bf, size_t size,
1284 unsigned int width)
1285 {
1286 struct symbol *sym = ms->sym;
1287 struct map *map = ms->map;
1288 size_t ret = 0, offs;
1289
1290 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1291 if (sym && map) {
1292 if (sym->type == STT_OBJECT) {
1293 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1294 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1295 ip - map__unmap_ip(map, sym->start));
1296 } else {
1297 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1298 width - ret,
1299 sym->name);
1300 offs = ip - sym->start;
1301 if (offs)
1302 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1303 }
1304 } else {
1305 size_t len = BITS_PER_LONG / 4;
1306 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1307 len, ip);
1308 }
1309
1310 return ret;
1311 }
1312
1313 static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1314 size_t size, unsigned int width)
1315 {
1316 if (he->branch_info) {
1317 struct addr_map_symbol *from = &he->branch_info->from;
1318
1319 return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1320 he->level, bf, size, width);
1321 }
1322
1323 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1324 }
1325
1326 static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1327 size_t size, unsigned int width)
1328 {
1329 if (he->branch_info) {
1330 struct addr_map_symbol *to = &he->branch_info->to;
1331
1332 return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1333 he->level, bf, size, width);
1334 }
1335
1336 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1337 }
1338
1339 static int64_t
1340 sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1341 {
1342 struct addr_map_symbol *from_l;
1343 struct addr_map_symbol *from_r;
1344 int64_t ret;
1345
1346 if (!left->branch_info || !right->branch_info)
1347 return cmp_null(left->branch_info, right->branch_info);
1348
1349 from_l = &left->branch_info->from;
1350 from_r = &right->branch_info->from;
1351
1352 /*
1353 * comparing symbol address alone is not enough since it's a
1354 * relative address within a dso.
1355 */
1356 ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1357 if (ret != 0)
1358 return ret;
1359
1360 return _sort__addr_cmp(from_l->addr, from_r->addr);
1361 }
1362
1363 static int64_t
1364 sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1365 {
1366 struct addr_map_symbol *to_l;
1367 struct addr_map_symbol *to_r;
1368 int64_t ret;
1369
1370 if (!left->branch_info || !right->branch_info)
1371 return cmp_null(left->branch_info, right->branch_info);
1372
1373 to_l = &left->branch_info->to;
1374 to_r = &right->branch_info->to;
1375
1376 /*
1377 * comparing symbol address alone is not enough since it's a
1378 * relative address within a dso.
1379 */
1380 ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1381 if (ret != 0)
1382 return ret;
1383
1384 return _sort__addr_cmp(to_l->addr, to_r->addr);
1385 }
1386
1387 struct sort_entry sort_addr_from = {
1388 .se_header = "Source Address",
1389 .se_cmp = sort__addr_from_cmp,
1390 .se_snprintf = hist_entry__addr_from_snprintf,
1391 .se_filter = hist_entry__sym_from_filter, /* shared with sym_from */
1392 .se_width_idx = HISTC_ADDR_FROM,
1393 };
1394
1395 struct sort_entry sort_addr_to = {
1396 .se_header = "Target Address",
1397 .se_cmp = sort__addr_to_cmp,
1398 .se_snprintf = hist_entry__addr_to_snprintf,
1399 .se_filter = hist_entry__sym_to_filter, /* shared with sym_to */
1400 .se_width_idx = HISTC_ADDR_TO,
1401 };
1402
1403
1404 static int64_t
1405 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
1406 {
1407 unsigned char mp, p;
1408
1409 if (!left->branch_info || !right->branch_info)
1410 return cmp_null(left->branch_info, right->branch_info);
1411
1412 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1413 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
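/*
 * Only equality matters here: a non-zero result just keeps entries with
 * differing predicted/mispredicted flags separate; it does not define a
 * meaningful ordering between them.
 */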
1414 return mp || p;
1415 }
1416
1417 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1418 size_t size, unsigned int width){
1419 static const char *out = "N/A";
1420
1421 if (he->branch_info) {
1422 if (he->branch_info->flags.predicted)
1423 out = "N";
1424 else if (he->branch_info->flags.mispred)
1425 out = "Y";
1426 }
1427
1428 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1429 }
1430
1431 static int64_t
1432 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1433 {
1434 if (!left->branch_info || !right->branch_info)
1435 return cmp_null(left->branch_info, right->branch_info);
1436
1437 return left->branch_info->flags.cycles -
1438 right->branch_info->flags.cycles;
1439 }
1440
1441 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1442 size_t size, unsigned int width)
1443 {
1444 if (!he->branch_info)
1445 return scnprintf(bf, size, "%-.*s", width, "N/A");
1446 if (he->branch_info->flags.cycles == 0)
1447 return repsep_snprintf(bf, size, "%-*s", width, "-");
1448 return repsep_snprintf(bf, size, "%-*hd", width,
1449 he->branch_info->flags.cycles);
1450 }
1451
1452 struct sort_entry sort_cycles = {
1453 .se_header = "Basic Block Cycles",
1454 .se_cmp = sort__cycles_cmp,
1455 .se_snprintf = hist_entry__cycles_snprintf,
1456 .se_width_idx = HISTC_CYCLES,
1457 };
1458
1459 /* --sort daddr_sym */
1460 int64_t
1461 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1462 {
1463 uint64_t l = 0, r = 0;
1464
1465 if (left->mem_info)
1466 l = mem_info__daddr(left->mem_info)->addr;
1467 if (right->mem_info)
1468 r = mem_info__daddr(right->mem_info)->addr;
1469
1470 return (int64_t)(r - l);
1471 }
1472
1473 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1474 size_t size, unsigned int width)
1475 {
1476 uint64_t addr = 0;
1477 struct map_symbol *ms = NULL;
1478
1479 if (he->mem_info) {
1480 addr = mem_info__daddr(he->mem_info)->addr;
1481 ms = &mem_info__daddr(he->mem_info)->ms;
1482 }
1483 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1484 }
1485
1486 int64_t
1487 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1488 {
1489 uint64_t l = 0, r = 0;
1490
1491 if (left->mem_info)
1492 l = mem_info__iaddr(left->mem_info)->addr;
1493 if (right->mem_info)
1494 r = mem_info__iaddr(right->mem_info)->addr;
1495
1496 return (int64_t)(r - l);
1497 }
1498
1499 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1500 size_t size, unsigned int width)
1501 {
1502 uint64_t addr = 0;
1503 struct map_symbol *ms = NULL;
1504
1505 if (he->mem_info) {
1506 addr = mem_info__iaddr(he->mem_info)->addr;
1507 ms = &mem_info__iaddr(he->mem_info)->ms;
1508 }
1509 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1510 }
1511
1512 static int64_t
1513 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1514 {
1515 struct map *map_l = NULL;
1516 struct map *map_r = NULL;
1517
1518 if (left->mem_info)
1519 map_l = mem_info__daddr(left->mem_info)->ms.map;
1520 if (right->mem_info)
1521 map_r = mem_info__daddr(right->mem_info)->ms.map;
1522
1523 return _sort__dso_cmp(map_l, map_r);
1524 }
1525
1526 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1527 size_t size, unsigned int width)
1528 {
1529 struct map *map = NULL;
1530
1531 if (he->mem_info)
1532 map = mem_info__daddr(he->mem_info)->ms.map;
1533
1534 return _hist_entry__dso_snprintf(map, bf, size, width);
1535 }
1536
1537 static int64_t
1538 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1539 {
1540 union perf_mem_data_src data_src_l;
1541 union perf_mem_data_src data_src_r;
1542
1543 if (left->mem_info)
1544 data_src_l = *mem_info__data_src(left->mem_info);
1545 else
1546 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1547
1548 if (right->mem_info)
1549 data_src_r = *mem_info__data_src(right->mem_info);
1550 else
1551 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1552
1553 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1554 }
1555
1556 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1557 size_t size, unsigned int width)
1558 {
1559 char out[10];
1560
1561 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1562 return repsep_snprintf(bf, size, "%.*s", width, out);
1563 }
1564
1565 static int64_t
1566 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1567 {
1568 union perf_mem_data_src data_src_l;
1569 union perf_mem_data_src data_src_r;
1570
1571 if (left->mem_info)
1572 data_src_l = *mem_info__data_src(left->mem_info);
1573 else
1574 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1575
1576 if (right->mem_info)
1577 data_src_r = *mem_info__data_src(right->mem_info);
1578 else
1579 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1580
1581 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1582 }
1583
1584 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1585 size_t size, unsigned int width)
1586 {
1587 char out[64];
1588
1589 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1590 return repsep_snprintf(bf, size, "%-*s", width, out);
1591 }
1592
1593 static int64_t
1594 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1595 {
1596 union perf_mem_data_src data_src_l;
1597 union perf_mem_data_src data_src_r;
1598
1599 if (left->mem_info)
1600 data_src_l = *mem_info__data_src(left->mem_info);
1601 else
1602 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1603
1604 if (right->mem_info)
1605 data_src_r = *mem_info__data_src(right->mem_info);
1606 else
1607 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1608
1609 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1610 }
1611
1612 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1613 size_t size, unsigned int width)
1614 {
1615 char out[64];
1616
1617 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1618 return repsep_snprintf(bf, size, "%-*s", width, out);
1619 }
1620
1621 static int64_t
1622 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1623 {
1624 union perf_mem_data_src data_src_l;
1625 union perf_mem_data_src data_src_r;
1626
1627 if (left->mem_info)
1628 data_src_l = *mem_info__data_src(left->mem_info);
1629 else
1630 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1631
1632 if (right->mem_info)
1633 data_src_r = *mem_info__data_src(right->mem_info);
1634 else
1635 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1636
1637 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1638 }
1639
1640 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1641 size_t size, unsigned int width)
1642 {
1643 char out[64];
1644
1645 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1646 return repsep_snprintf(bf, size, "%-*s", width, out);
1647 }
1648
1649 int64_t
1650 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1651 {
1652 u64 l, r;
1653 struct map *l_map, *r_map;
1654 struct dso *l_dso, *r_dso;
1655 int rc;
1656
1657 if (!left->mem_info) return -1;
1658 if (!right->mem_info) return 1;
1659
1660 /* group event types together */
1661 if (left->cpumode > right->cpumode) return -1;
1662 if (left->cpumode < right->cpumode) return 1;
1663
1664 l_map = mem_info__daddr(left->mem_info)->ms.map;
1665 r_map = mem_info__daddr(right->mem_info)->ms.map;
1666
1667 /* if both are NULL, jump to sort on al_addr instead */
1668 if (!l_map && !r_map)
1669 goto addr;
1670
1671 if (!l_map) return -1;
1672 if (!r_map) return 1;
1673
1674 l_dso = map__dso(l_map);
1675 r_dso = map__dso(r_map);
1676 rc = dso__cmp_id(l_dso, r_dso);
1677 if (rc)
1678 return rc;
1679 /*
1680 * Addresses with no major/minor numbers are assumed to be
1681 * anonymous in userspace. Sort those on pid then address.
1682 *
1683 * The kernel and non-zero major/minor mapped areas are
1684 * assumed to be unity mapped. Sort those on address.
1685 */
1686
1687 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1688 (!(map__flags(l_map) & MAP_SHARED)) && !dso__id(l_dso)->maj && !dso__id(l_dso)->min &&
1689 !dso__id(l_dso)->ino && !dso__id(l_dso)->ino_generation) {
1690 /* userspace anonymous */
1691
1692 if (thread__pid(left->thread) > thread__pid(right->thread))
1693 return -1;
1694 if (thread__pid(left->thread) < thread__pid(right->thread))
1695 return 1;
1696 }
1697
1698 addr:
1699 /* al_addr does all the right addr - start + offset calculations */
1700 l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
1701 r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);
1702
1703 if (l > r) return -1;
1704 if (l < r) return 1;
1705
1706 return 0;
1707 }
1708
1709 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1710 size_t size, unsigned int width)
1711 {
1712
1713 uint64_t addr = 0;
1714 struct map_symbol *ms = NULL;
1715 char level = he->level;
1716
1717 if (he->mem_info) {
1718 struct map *map = mem_info__daddr(he->mem_info)->ms.map;
1719 struct dso *dso = map ? map__dso(map) : NULL;
1720
1721 addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
1722 ms = &mem_info__daddr(he->mem_info)->ms;
1723
1724 /* print [s] for shared data mmaps */
1725 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1726 map && !(map__prot(map) & PROT_EXEC) &&
1727 (map__flags(map) & MAP_SHARED) &&
1728 (dso__id(dso)->maj || dso__id(dso)->min || dso__id(dso)->ino ||
1729 dso__id(dso)->ino_generation))
1730 level = 's';
1731 else if (!map)
1732 level = 'X';
1733 }
1734 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1735 }
1736
1737 struct sort_entry sort_mispredict = {
1738 .se_header = "Branch Mispredicted",
1739 .se_cmp = sort__mispredict_cmp,
1740 .se_snprintf = hist_entry__mispredict_snprintf,
1741 .se_width_idx = HISTC_MISPREDICT,
1742 };
1743
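/*
 * --sort local_weight, weight
 *
 * 'local_weight' shows the sample weight as recorded for the entry, while
 * 'weight' scales it by the number of aggregated events (he->stat.nr_events).
 * Both keys share the comparison function below.
 */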
1744 static int64_t
1745 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1746 {
1747 return left->weight - right->weight;
1748 }
1749
1750 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1751 size_t size, unsigned int width)
1752 {
1753 return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1754 }
1755
1756 struct sort_entry sort_local_weight = {
1757 .se_header = "Local Weight",
1758 .se_cmp = sort__weight_cmp,
1759 .se_snprintf = hist_entry__local_weight_snprintf,
1760 .se_width_idx = HISTC_LOCAL_WEIGHT,
1761 };
1762
1763 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1764 size_t size, unsigned int width)
1765 {
1766 return repsep_snprintf(bf, size, "%-*llu", width,
1767 he->weight * he->stat.nr_events);
1768 }
1769
1770 struct sort_entry sort_global_weight = {
1771 .se_header = "Weight",
1772 .se_cmp = sort__weight_cmp,
1773 .se_snprintf = hist_entry__global_weight_snprintf,
1774 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1775 };
1776
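/*
 * --sort local_ins_lat, ins_lat
 *
 * Instruction latency taken from the sample: 'local_ins_lat' prints the
 * per-entry value, 'ins_lat' multiplies it by the number of events.
 */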
1777 static int64_t
1778 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1779 {
1780 return left->ins_lat - right->ins_lat;
1781 }
1782
1783 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1784 size_t size, unsigned int width)
1785 {
1786 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1787 }
1788
1789 struct sort_entry sort_local_ins_lat = {
1790 .se_header = "Local INSTR Latency",
1791 .se_cmp = sort__ins_lat_cmp,
1792 .se_snprintf = hist_entry__local_ins_lat_snprintf,
1793 .se_width_idx = HISTC_LOCAL_INS_LAT,
1794 };
1795
1796 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1797 size_t size, unsigned int width)
1798 {
1799 return repsep_snprintf(bf, size, "%-*u", width,
1800 he->ins_lat * he->stat.nr_events);
1801 }
1802
1803 struct sort_entry sort_global_ins_lat = {
1804 .se_header = "INSTR Latency",
1805 .se_cmp = sort__ins_lat_cmp,
1806 .se_snprintf = hist_entry__global_ins_lat_snprintf,
1807 .se_width_idx = HISTC_GLOBAL_INS_LAT,
1808 };
1809
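/*
 * --sort local_p_stage_cyc, p_stage_cyc
 *
 * Pipeline stage cycles; the same sort entries also back the
 * 'local_retire_lat' and 'retire_lat' keys (see common_sort_dimensions[]).
 */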
1810 static int64_t
1811 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1812 {
1813 return left->p_stage_cyc - right->p_stage_cyc;
1814 }
1815
1816 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1817 size_t size, unsigned int width)
1818 {
1819 return repsep_snprintf(bf, size, "%-*u", width,
1820 he->p_stage_cyc * he->stat.nr_events);
1821 }
1822
1823
1824 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1825 size_t size, unsigned int width)
1826 {
1827 return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1828 }
1829
1830 struct sort_entry sort_local_p_stage_cyc = {
1831 .se_header = "Local Pipeline Stage Cycle",
1832 .se_cmp = sort__p_stage_cyc_cmp,
1833 .se_snprintf = hist_entry__p_stage_cyc_snprintf,
1834 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC,
1835 };
1836
1837 struct sort_entry sort_global_p_stage_cyc = {
1838 .se_header = "Pipeline Stage Cycle",
1839 .se_cmp = sort__p_stage_cyc_cmp,
1840 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf,
1841 .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC,
1842 };
1843
1844 struct sort_entry sort_mem_daddr_sym = {
1845 .se_header = "Data Symbol",
1846 .se_cmp = sort__daddr_cmp,
1847 .se_snprintf = hist_entry__daddr_snprintf,
1848 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1849 };
1850
1851 struct sort_entry sort_mem_iaddr_sym = {
1852 .se_header = "Code Symbol",
1853 .se_cmp = sort__iaddr_cmp,
1854 .se_snprintf = hist_entry__iaddr_snprintf,
1855 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1856 };
1857
1858 struct sort_entry sort_mem_daddr_dso = {
1859 .se_header = "Data Object",
1860 .se_cmp = sort__dso_daddr_cmp,
1861 .se_snprintf = hist_entry__dso_daddr_snprintf,
1862 .se_width_idx = HISTC_MEM_DADDR_DSO,
1863 };
1864
1865 struct sort_entry sort_mem_locked = {
1866 .se_header = "Locked",
1867 .se_cmp = sort__locked_cmp,
1868 .se_snprintf = hist_entry__locked_snprintf,
1869 .se_width_idx = HISTC_MEM_LOCKED,
1870 };
1871
1872 struct sort_entry sort_mem_tlb = {
1873 .se_header = "TLB access",
1874 .se_cmp = sort__tlb_cmp,
1875 .se_snprintf = hist_entry__tlb_snprintf,
1876 .se_width_idx = HISTC_MEM_TLB,
1877 };
1878
1879 struct sort_entry sort_mem_lvl = {
1880 .se_header = "Memory access",
1881 .se_cmp = sort__lvl_cmp,
1882 .se_snprintf = hist_entry__lvl_snprintf,
1883 .se_width_idx = HISTC_MEM_LVL,
1884 };
1885
1886 struct sort_entry sort_mem_snoop = {
1887 .se_header = "Snoop",
1888 .se_cmp = sort__snoop_cmp,
1889 .se_snprintf = hist_entry__snoop_snprintf,
1890 .se_width_idx = HISTC_MEM_SNOOP,
1891 };
1892
1893 struct sort_entry sort_mem_dcacheline = {
1894 .se_header = "Data Cacheline",
1895 .se_cmp = sort__dcacheline_cmp,
1896 .se_snprintf = hist_entry__dcacheline_snprintf,
1897 .se_width_idx = HISTC_MEM_DCACHELINE,
1898 };
1899
1900 static int64_t
1901 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1902 {
1903 union perf_mem_data_src data_src_l;
1904 union perf_mem_data_src data_src_r;
1905
1906 if (left->mem_info)
1907 data_src_l = *mem_info__data_src(left->mem_info);
1908 else
1909 data_src_l.mem_blk = PERF_MEM_BLK_NA;
1910
1911 if (right->mem_info)
1912 data_src_r = *mem_info__data_src(right->mem_info);
1913 else
1914 data_src_r.mem_blk = PERF_MEM_BLK_NA;
1915
1916 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1917 }
1918
1919 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1920 size_t size, unsigned int width)
1921 {
1922 char out[16];
1923
1924 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1925 return repsep_snprintf(bf, size, "%.*s", width, out);
1926 }
1927
1928 struct sort_entry sort_mem_blocked = {
1929 .se_header = "Blocked",
1930 .se_cmp = sort__blocked_cmp,
1931 .se_snprintf = hist_entry__blocked_snprintf,
1932 .se_width_idx = HISTC_MEM_BLOCKED,
1933 };
1934
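/* --sort phys_daddr: physical address of the sampled data access. */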
1935 static int64_t
1936 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1937 {
1938 uint64_t l = 0, r = 0;
1939
1940 if (left->mem_info)
1941 l = mem_info__daddr(left->mem_info)->phys_addr;
1942 if (right->mem_info)
1943 r = mem_info__daddr(right->mem_info)->phys_addr;
1944
1945 return (int64_t)(r - l);
1946 }
1947
1948 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1949 size_t size, unsigned int width)
1950 {
1951 uint64_t addr = 0;
1952 size_t ret = 0;
1953 size_t len = BITS_PER_LONG / 4;
1954
1955 addr = mem_info__daddr(he->mem_info)->phys_addr;
1956
1957 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1958
1959 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1960
1961 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1962
1963 if (ret > width)
1964 bf[width] = '\0';
1965
1966 return width;
1967 }
1968
1969 struct sort_entry sort_mem_phys_daddr = {
1970 .se_header = "Data Physical Address",
1971 .se_cmp = sort__phys_daddr_cmp,
1972 .se_snprintf = hist_entry__phys_daddr_snprintf,
1973 .se_width_idx = HISTC_MEM_PHYS_DADDR,
1974 };
1975
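/* --sort data_page_size: page size backing the sampled data address. */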
1976 static int64_t
1977 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1978 {
1979 uint64_t l = 0, r = 0;
1980
1981 if (left->mem_info)
1982 l = mem_info__daddr(left->mem_info)->data_page_size;
1983 if (right->mem_info)
1984 r = mem_info__daddr(right->mem_info)->data_page_size;
1985
1986 return (int64_t)(r - l);
1987 }
1988
1989 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1990 size_t size, unsigned int width)
1991 {
1992 char str[PAGE_SIZE_NAME_LEN];
1993
1994 return repsep_snprintf(bf, size, "%-*s", width,
1995 get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
1996 }
1997
1998 struct sort_entry sort_mem_data_page_size = {
1999 .se_header = "Data Page Size",
2000 .se_cmp = sort__data_page_size_cmp,
2001 .se_snprintf = hist_entry__data_page_size_snprintf,
2002 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE,
2003 };
2004
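/* --sort code_page_size: page size backing the sampled instruction address. */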
2005 static int64_t
2006 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
2007 {
2008 uint64_t l = left->code_page_size;
2009 uint64_t r = right->code_page_size;
2010
2011 return (int64_t)(r - l);
2012 }
2013
2014 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
2015 size_t size, unsigned int width)
2016 {
2017 char str[PAGE_SIZE_NAME_LEN];
2018
2019 return repsep_snprintf(bf, size, "%-*s", width,
2020 get_page_size_name(he->code_page_size, str));
2021 }
2022
2023 struct sort_entry sort_code_page_size = {
2024 .se_header = "Code Page Size",
2025 .se_cmp = sort__code_page_size_cmp,
2026 .se_snprintf = hist_entry__code_page_size_snprintf,
2027 .se_width_idx = HISTC_CODE_PAGE_SIZE,
2028 };
2029
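/* --sort abort: whether the branch was part of an aborted transaction. */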
2030 static int64_t
2031 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
2032 {
2033 if (!left->branch_info || !right->branch_info)
2034 return cmp_null(left->branch_info, right->branch_info);
2035
2036 return left->branch_info->flags.abort !=
2037 right->branch_info->flags.abort;
2038 }
2039
2040 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
2041 size_t size, unsigned int width)
2042 {
2043 static const char *out = "N/A";
2044
2045 if (he->branch_info) {
2046 if (he->branch_info->flags.abort)
2047 out = "A";
2048 else
2049 out = ".";
2050 }
2051
2052 return repsep_snprintf(bf, size, "%-*s", width, out);
2053 }
2054
2055 struct sort_entry sort_abort = {
2056 .se_header = "Transaction abort",
2057 .se_cmp = sort__abort_cmp,
2058 .se_snprintf = hist_entry__abort_snprintf,
2059 .se_width_idx = HISTC_ABORT,
2060 };
2061
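/* --sort in_tx: whether the branch happened inside a transaction. */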
2062 static int64_t
2063 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
2064 {
2065 if (!left->branch_info || !right->branch_info)
2066 return cmp_null(left->branch_info, right->branch_info);
2067
2068 return left->branch_info->flags.in_tx !=
2069 right->branch_info->flags.in_tx;
2070 }
2071
2072 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
2073 size_t size, unsigned int width)
2074 {
2075 static const char *out = "N/A";
2076
2077 if (he->branch_info) {
2078 if (he->branch_info->flags.in_tx)
2079 out = "T";
2080 else
2081 out = ".";
2082 }
2083
2084 return repsep_snprintf(bf, size, "%-*s", width, out);
2085 }
2086
2087 struct sort_entry sort_in_tx = {
2088 .se_header = "Branch in transaction",
2089 .se_cmp = sort__in_tx_cmp,
2090 .se_snprintf = hist_entry__in_tx_snprintf,
2091 .se_width_idx = HISTC_IN_TX,
2092 };
2093
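/*
 * --sort transaction
 *
 * Decodes the transaction flags of a sample into a short string built from
 * the txbits[] names below, plus the abort code (":%x") when present.
 */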
2094 static int64_t
2095 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
2096 {
2097 return left->transaction - right->transaction;
2098 }
2099
2100 static inline char *add_str(char *p, const char *str)
2101 {
2102 strcpy(p, str);
2103 return p + strlen(str);
2104 }
2105
2106 static struct txbit {
2107 unsigned flag;
2108 const char *name;
2109 int skip_for_len;
2110 } txbits[] = {
2111 { PERF_TXN_ELISION, "EL ", 0 },
2112 { PERF_TXN_TRANSACTION, "TX ", 1 },
2113 { PERF_TXN_SYNC, "SYNC ", 1 },
2114 { PERF_TXN_ASYNC, "ASYNC ", 0 },
2115 { PERF_TXN_RETRY, "RETRY ", 0 },
2116 { PERF_TXN_CONFLICT, "CON ", 0 },
2117 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
2118 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
2119 { 0, NULL, 0 }
2120 };
2121
2122 int hist_entry__transaction_len(void)
2123 {
2124 int i;
2125 int len = 0;
2126
2127 for (i = 0; txbits[i].name; i++) {
2128 if (!txbits[i].skip_for_len)
2129 len += strlen(txbits[i].name);
2130 }
2131 len += 4; /* :XX<space> */
2132 return len;
2133 }
2134
2135 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
2136 size_t size, unsigned int width)
2137 {
2138 u64 t = he->transaction;
2139 char buf[128];
2140 char *p = buf;
2141 int i;
2142
2143 buf[0] = 0;
2144 for (i = 0; txbits[i].name; i++)
2145 if (txbits[i].flag & t)
2146 p = add_str(p, txbits[i].name);
2147 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
2148 p = add_str(p, "NEITHER ");
2149 if (t & PERF_TXN_ABORT_MASK) {
2150 sprintf(p, ":%" PRIx64,
2151 (t & PERF_TXN_ABORT_MASK) >>
2152 PERF_TXN_ABORT_SHIFT);
2153 p += strlen(p);
2154 }
2155
2156 return repsep_snprintf(bf, size, "%-*s", width, buf);
2157 }
2158
2159 struct sort_entry sort_transaction = {
2160 .se_header = "Transaction ",
2161 .se_cmp = sort__transaction_cmp,
2162 .se_snprintf = hist_entry__transaction_snprintf,
2163 .se_width_idx = HISTC_TRANSACTION,
2164 };
2165
2166 /* --sort symbol_size */
2167
2168 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
2169 {
2170 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
2171 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
2172
2173 return size_l < size_r ? -1 :
2174 size_l == size_r ? 0 : 1;
2175 }
2176
2177 static int64_t
2178 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
2179 {
2180 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
2181 }
2182
2183 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
2184 size_t bf_size, unsigned int width)
2185 {
2186 if (sym)
2187 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
2188
2189 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2190 }
2191
2192 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
2193 size_t size, unsigned int width)
2194 {
2195 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
2196 }
2197
2198 struct sort_entry sort_sym_size = {
2199 .se_header = "Symbol size",
2200 .se_cmp = sort__sym_size_cmp,
2201 .se_snprintf = hist_entry__sym_size_snprintf,
2202 .se_width_idx = HISTC_SYM_SIZE,
2203 };
2204
2205 /* --sort dso_size */
2206
2207 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2208 {
2209 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2210 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2211
2212 return size_l < size_r ? -1 :
2213 size_l == size_r ? 0 : 1;
2214 }
2215
2216 static int64_t
2217 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2218 {
2219 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2220 }
2221
2222 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2223 size_t bf_size, unsigned int width)
2224 {
2225 if (map && map__dso(map))
2226 return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
2227
2228 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2229 }
2230
2231 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2232 size_t size, unsigned int width)
2233 {
2234 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2235 }
2236
2237 struct sort_entry sort_dso_size = {
2238 .se_header = "DSO size",
2239 .se_cmp = sort__dso_size_cmp,
2240 .se_snprintf = hist_entry__dso_size_snprintf,
2241 .se_width_idx = HISTC_DSO_SIZE,
2242 };
2243
2244 /* --sort addr */
2245
2246 static int64_t
2247 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2248 {
2249 u64 left_ip = left->ip;
2250 u64 right_ip = right->ip;
2251 struct map *left_map = left->ms.map;
2252 struct map *right_map = right->ms.map;
2253
2254 if (left_map)
2255 left_ip = map__unmap_ip(left_map, left_ip);
2256 if (right_map)
2257 right_ip = map__unmap_ip(right_map, right_ip);
2258
2259 return _sort__addr_cmp(left_ip, right_ip);
2260 }
2261
2262 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2263 size_t size, unsigned int width)
2264 {
2265 u64 ip = he->ip;
2266 struct map *map = he->ms.map;
2267
2268 if (map)
2269 ip = map__unmap_ip(map, ip);
2270
2271 return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2272 }
2273
2274 struct sort_entry sort_addr = {
2275 .se_header = "Address",
2276 .se_cmp = sort__addr_cmp,
2277 .se_snprintf = hist_entry__addr_snprintf,
2278 .se_width_idx = HISTC_ADDR,
2279 };
2280
2281 /* --sort type */
2282
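/*
 * Fallback data type used when hist_entry__get_data_type() cannot resolve
 * the type of a sampled access; see sort__type_init() below.
 */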
2283 struct annotated_data_type unknown_type = {
2284 .self = {
2285 .type_name = (char *)"(unknown)",
2286 .children = LIST_HEAD_INIT(unknown_type.self.children),
2287 },
2288 };
2289
2290 static int64_t
2291 sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
2292 {
2293 return sort__addr_cmp(left, right);
2294 }
2295
2296 static void sort__type_init(struct hist_entry *he)
2297 {
2298 if (he->mem_type)
2299 return;
2300
2301 he->mem_type = hist_entry__get_data_type(he);
2302 if (he->mem_type == NULL) {
2303 he->mem_type = &unknown_type;
2304 he->mem_type_off = 0;
2305 }
2306 }
2307
2308 static int64_t
2309 sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
2310 {
2311 struct annotated_data_type *left_type = left->mem_type;
2312 struct annotated_data_type *right_type = right->mem_type;
2313
2314 if (!left_type) {
2315 sort__type_init(left);
2316 left_type = left->mem_type;
2317 }
2318
2319 if (!right_type) {
2320 sort__type_init(right);
2321 right_type = right->mem_type;
2322 }
2323
2324 return strcmp(left_type->self.type_name, right_type->self.type_name);
2325 }
2326
2327 static int64_t
2328 sort__type_sort(struct hist_entry *left, struct hist_entry *right)
2329 {
2330 return sort__type_collapse(left, right);
2331 }
2332
2333 static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
2334 size_t size, unsigned int width)
2335 {
2336 return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
2337 }
2338
2339 struct sort_entry sort_type = {
2340 .se_header = "Data Type",
2341 .se_cmp = sort__type_cmp,
2342 .se_collapse = sort__type_collapse,
2343 .se_sort = sort__type_sort,
2344 .se_init = sort__type_init,
2345 .se_snprintf = hist_entry__type_snprintf,
2346 .se_width_idx = HISTC_TYPE,
2347 };
2348
2349 /* --sort typeoff */
2350
2351 static int64_t
2352 sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
2353 {
2354 struct annotated_data_type *left_type = left->mem_type;
2355 struct annotated_data_type *right_type = right->mem_type;
2356 int64_t ret;
2357
2358 if (!left_type) {
2359 sort__type_init(left);
2360 left_type = left->mem_type;
2361 }
2362
2363 if (!right_type) {
2364 sort__type_init(right);
2365 right_type = right->mem_type;
2366 }
2367
2368 ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2369 if (ret)
2370 return ret;
2371 return left->mem_type_off - right->mem_type_off;
2372 }
2373
2374 static void fill_member_name(char *buf, size_t sz, struct annotated_member *m,
2375 int offset, bool first)
2376 {
2377 struct annotated_member *child;
2378
2379 if (list_empty(&m->children))
2380 return;
2381
2382 list_for_each_entry(child, &m->children, node) {
2383 if (child->offset <= offset && offset < child->offset + child->size) {
2384 int len = 0;
2385
2386 /* It can have anonymous struct/union members */
2387 if (child->var_name) {
2388 len = scnprintf(buf, sz, "%s%s",
2389 first ? "" : ".", child->var_name);
2390 first = false;
2391 }
2392
2393 fill_member_name(buf + len, sz - len, child, offset, first);
2394 return;
2395 }
2396 }
2397 }
2398
2399 static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
2400 size_t size, unsigned int width __maybe_unused)
2401 {
2402 struct annotated_data_type *he_type = he->mem_type;
2403 char buf[4096];
2404
2405 buf[0] = '\0';
2406 if (list_empty(&he_type->self.children))
2407 snprintf(buf, sizeof(buf), "no field");
2408 else
2409 fill_member_name(buf, sizeof(buf), &he_type->self,
2410 he->mem_type_off, true);
2411 buf[4095] = '\0';
2412
2413 return repsep_snprintf(bf, size, "%s +%#x (%s)", he_type->self.type_name,
2414 he->mem_type_off, buf);
2415 }
2416
2417 struct sort_entry sort_type_offset = {
2418 .se_header = "Data Type Offset",
2419 .se_cmp = sort__type_cmp,
2420 .se_collapse = sort__typeoff_sort,
2421 .se_sort = sort__typeoff_sort,
2422 .se_init = sort__type_init,
2423 .se_snprintf = hist_entry__typeoff_snprintf,
2424 .se_width_idx = HISTC_TYPE_OFFSET,
2425 };
2426
2427 /* --sort typecln */
2428
2429 /* TODO: use actual value in the system */
2430 #define TYPE_CACHELINE_SIZE 64
2431
2432 static int64_t
2433 sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
2434 {
2435 struct annotated_data_type *left_type = left->mem_type;
2436 struct annotated_data_type *right_type = right->mem_type;
2437 int64_t left_cln, right_cln;
2438 int64_t ret;
2439
2440 if (!left_type) {
2441 sort__type_init(left);
2442 left_type = left->mem_type;
2443 }
2444
2445 if (!right_type) {
2446 sort__type_init(right);
2447 right_type = right->mem_type;
2448 }
2449
2450 ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2451 if (ret)
2452 return ret;
2453
2454 left_cln = left->mem_type_off / TYPE_CACHELINE_SIZE;
2455 right_cln = right->mem_type_off / TYPE_CACHELINE_SIZE;
2456 return left_cln - right_cln;
2457 }
2458
2459 static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf,
2460 size_t size, unsigned int width __maybe_unused)
2461 {
2462 struct annotated_data_type *he_type = he->mem_type;
2463
2464 return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
2465 he->mem_type_off / TYPE_CACHELINE_SIZE);
2466 }
2467
2468 struct sort_entry sort_type_cacheline = {
2469 .se_header = "Data Type Cacheline",
2470 .se_cmp = sort__type_cmp,
2471 .se_collapse = sort__typecln_sort,
2472 .se_sort = sort__typecln_sort,
2473 .se_init = sort__type_init,
2474 .se_snprintf = hist_entry__typecln_snprintf,
2475 .se_width_idx = HISTC_TYPE_CACHELINE,
2476 };
2477
2478
2479 struct sort_dimension {
2480 const char *name;
2481 struct sort_entry *entry;
2482 int taken;
2483 };
2484
2485 int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2486 {
2487 return 0;
2488 }
2489
2490 const char * __weak arch_perf_header_entry(const char *se_header)
2491 {
2492 return se_header;
2493 }
2494
2495 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2496 {
2497 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2498 }
2499
2500 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
2501
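/*
 * Sort keys available in every sort mode; the names below are the tokens
 * accepted in the sort order string (e.g. --sort) and are matched in
 * sort_dimension__add().
 */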
2502 static struct sort_dimension common_sort_dimensions[] = {
2503 DIM(SORT_PID, "pid", sort_thread),
2504 DIM(SORT_COMM, "comm", sort_comm),
2505 DIM(SORT_DSO, "dso", sort_dso),
2506 DIM(SORT_SYM, "symbol", sort_sym),
2507 DIM(SORT_PARENT, "parent", sort_parent),
2508 DIM(SORT_CPU, "cpu", sort_cpu),
2509 DIM(SORT_SOCKET, "socket", sort_socket),
2510 DIM(SORT_SRCLINE, "srcline", sort_srcline),
2511 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2512 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2513 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2514 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2515 #ifdef HAVE_LIBTRACEEVENT
2516 DIM(SORT_TRACE, "trace", sort_trace),
2517 #endif
2518 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2519 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2520 DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2521 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2522 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2523 DIM(SORT_TIME, "time", sort_time),
2524 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2525 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2526 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2527 DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2528 DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2529 DIM(SORT_ADDR, "addr", sort_addr),
2530 DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
2531 DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
2532 DIM(SORT_SIMD, "simd", sort_simd),
2533 DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
2534 DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
2535 DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
2536 DIM(SORT_ANNOTATE_DATA_TYPE_CACHELINE, "typecln", sort_type_cacheline),
2537 };
2538
2539 #undef DIM
2540
2541 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
2542
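/* Sort keys specific to branch stack sampling (offset by __SORT_BRANCH_STACK). */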
2543 static struct sort_dimension bstack_sort_dimensions[] = {
2544 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2545 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2546 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2547 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2548 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2549 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2550 DIM(SORT_ABORT, "abort", sort_abort),
2551 DIM(SORT_CYCLES, "cycles", sort_cycles),
2552 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2553 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2554 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2555 DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2556 DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2557 DIM(SORT_CALLCHAIN_BRANCH_PREDICTED,
2558 "callchain_branch_predicted",
2559 sort_callchain_branch_predicted),
2560 DIM(SORT_CALLCHAIN_BRANCH_ABORT,
2561 "callchain_branch_abort",
2562 sort_callchain_branch_abort),
2563 DIM(SORT_CALLCHAIN_BRANCH_CYCLES,
2564 "callchain_branch_cycles",
2565 sort_callchain_branch_cycles)
2566 };
2567
2568 #undef DIM
2569
2570 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2571
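/* Sort keys specific to memory access profiling (offset by __SORT_MEMORY_MODE). */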
2572 static struct sort_dimension memory_sort_dimensions[] = {
2573 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2574 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2575 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2576 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2577 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2578 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2579 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2580 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2581 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2582 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2583 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
2584 };
2585
2586 #undef DIM
2587
2588 struct hpp_dimension {
2589 const char *name;
2590 struct perf_hpp_fmt *fmt;
2591 int taken;
2592 };
2593
2594 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2595
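/*
 * Output columns implemented as generic perf_hpp formats (overhead, period,
 * weights) rather than sort entries; the last three names alias the weight
 * columns.
 */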
2596 static struct hpp_dimension hpp_sort_dimensions[] = {
2597 DIM(PERF_HPP__OVERHEAD, "overhead"),
2598 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2599 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2600 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2601 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2602 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2603 DIM(PERF_HPP__SAMPLES, "sample"),
2604 DIM(PERF_HPP__PERIOD, "period"),
2605 DIM(PERF_HPP__WEIGHT1, "weight1"),
2606 DIM(PERF_HPP__WEIGHT2, "weight2"),
2607 DIM(PERF_HPP__WEIGHT3, "weight3"),
2608 /* aliases for weight_struct */
2609 DIM(PERF_HPP__WEIGHT2, "ins_lat"),
2610 DIM(PERF_HPP__WEIGHT3, "retire_lat"),
2611 DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
2612 };
2613
2614 #undef DIM
2615
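/*
 * Wraps a classic sort_entry in a perf_hpp_fmt so a sort key can also act as
 * an output column; the __sort__hpp_*() callbacks below delegate to the
 * embedded sort_entry.
 */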
2616 struct hpp_sort_entry {
2617 struct perf_hpp_fmt hpp;
2618 struct sort_entry *se;
2619 };
2620
2621 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2622 {
2623 struct hpp_sort_entry *hse;
2624
2625 if (!perf_hpp__is_sort_entry(fmt))
2626 return;
2627
2628 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2629 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2630 }
2631
2632 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2633 struct hists *hists, int line __maybe_unused,
2634 int *span __maybe_unused)
2635 {
2636 struct hpp_sort_entry *hse;
2637 size_t len = fmt->user_len;
2638
2639 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2640
2641 if (!len)
2642 len = hists__col_len(hists, hse->se->se_width_idx);
2643
2644 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
2645 }
2646
2647 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2648 struct perf_hpp *hpp __maybe_unused,
2649 struct hists *hists)
2650 {
2651 struct hpp_sort_entry *hse;
2652 size_t len = fmt->user_len;
2653
2654 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2655
2656 if (!len)
2657 len = hists__col_len(hists, hse->se->se_width_idx);
2658
2659 return len;
2660 }
2661
2662 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2663 struct hist_entry *he)
2664 {
2665 struct hpp_sort_entry *hse;
2666 size_t len = fmt->user_len;
2667
2668 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2669
2670 if (!len)
2671 len = hists__col_len(he->hists, hse->se->se_width_idx);
2672
2673 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2674 }
2675
2676 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2677 struct hist_entry *a, struct hist_entry *b)
2678 {
2679 struct hpp_sort_entry *hse;
2680
2681 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2682 return hse->se->se_cmp(a, b);
2683 }
2684
2685 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2686 struct hist_entry *a, struct hist_entry *b)
2687 {
2688 struct hpp_sort_entry *hse;
2689 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2690
2691 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2692 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2693 return collapse_fn(a, b);
2694 }
2695
2696 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2697 struct hist_entry *a, struct hist_entry *b)
2698 {
2699 struct hpp_sort_entry *hse;
2700 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2701
2702 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2703 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2704 return sort_fn(a, b);
2705 }
2706
2707 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2708 {
2709 return format->header == __sort__hpp_header;
2710 }
2711
2712 #define MK_SORT_ENTRY_CHK(key) \
2713 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
2714 { \
2715 struct hpp_sort_entry *hse; \
2716 \
2717 if (!perf_hpp__is_sort_entry(fmt)) \
2718 return false; \
2719 \
2720 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
2721 return hse->se == &sort_ ## key ; \
2722 }
2723
2724 #ifdef HAVE_LIBTRACEEVENT
2725 MK_SORT_ENTRY_CHK(trace)
2726 #else
2727 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2728 {
2729 return false;
2730 }
2731 #endif
2732 MK_SORT_ENTRY_CHK(srcline)
2733 MK_SORT_ENTRY_CHK(srcfile)
2734 MK_SORT_ENTRY_CHK(thread)
2735 MK_SORT_ENTRY_CHK(comm)
2736 MK_SORT_ENTRY_CHK(dso)
2737 MK_SORT_ENTRY_CHK(sym)
2738
2739
2740 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2741 {
2742 struct hpp_sort_entry *hse_a;
2743 struct hpp_sort_entry *hse_b;
2744
2745 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2746 return false;
2747
2748 hse_a = container_of(a, struct hpp_sort_entry, hpp);
2749 hse_b = container_of(b, struct hpp_sort_entry, hpp);
2750
2751 return hse_a->se == hse_b->se;
2752 }
2753
2754 static void hse_free(struct perf_hpp_fmt *fmt)
2755 {
2756 struct hpp_sort_entry *hse;
2757
2758 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2759 free(hse);
2760 }
2761
2762 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2763 {
2764 struct hpp_sort_entry *hse;
2765
2766 if (!perf_hpp__is_sort_entry(fmt))
2767 return;
2768
2769 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2770
2771 if (hse->se->se_init)
2772 hse->se->se_init(he);
2773 }
2774
2775 static struct hpp_sort_entry *
2776 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2777 {
2778 struct hpp_sort_entry *hse;
2779
2780 hse = malloc(sizeof(*hse));
2781 if (hse == NULL) {
2782 pr_err("Memory allocation failed\n");
2783 return NULL;
2784 }
2785
2786 hse->se = sd->entry;
2787 hse->hpp.name = sd->entry->se_header;
2788 hse->hpp.header = __sort__hpp_header;
2789 hse->hpp.width = __sort__hpp_width;
2790 hse->hpp.entry = __sort__hpp_entry;
2791 hse->hpp.color = NULL;
2792
2793 hse->hpp.cmp = __sort__hpp_cmp;
2794 hse->hpp.collapse = __sort__hpp_collapse;
2795 hse->hpp.sort = __sort__hpp_sort;
2796 hse->hpp.equal = __sort__hpp_equal;
2797 hse->hpp.free = hse_free;
2798 hse->hpp.init = hse_init;
2799
2800 INIT_LIST_HEAD(&hse->hpp.list);
2801 INIT_LIST_HEAD(&hse->hpp.sort_list);
2802 hse->hpp.elide = false;
2803 hse->hpp.len = 0;
2804 hse->hpp.user_len = 0;
2805 hse->hpp.level = level;
2806
2807 return hse;
2808 }
2809
2810 static void hpp_free(struct perf_hpp_fmt *fmt)
2811 {
2812 free(fmt);
2813 }
2814
2815 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2816 int level)
2817 {
2818 struct perf_hpp_fmt *fmt;
2819
2820 fmt = memdup(hd->fmt, sizeof(*fmt));
2821 if (fmt) {
2822 INIT_LIST_HEAD(&fmt->list);
2823 INIT_LIST_HEAD(&fmt->sort_list);
2824 fmt->free = hpp_free;
2825 fmt->level = level;
2826 }
2827
2828 return fmt;
2829 }
2830
2831 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2832 {
2833 struct perf_hpp_fmt *fmt;
2834 struct hpp_sort_entry *hse;
2835 int ret = -1;
2836 int r;
2837
2838 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2839 if (!perf_hpp__is_sort_entry(fmt))
2840 continue;
2841
2842 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2843 if (hse->se->se_filter == NULL)
2844 continue;
2845
2846 /*
2847 	 * A hist entry is filtered if any of the sort keys in the hpp list
2848 	 * applies, but non-matching filter types should be skipped.
2849 */
2850 r = hse->se->se_filter(he, type, arg);
2851 if (r >= 0) {
2852 if (ret < 0)
2853 ret = 0;
2854 ret |= r;
2855 }
2856 }
2857
2858 return ret;
2859 }
2860
2861 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2862 struct perf_hpp_list *list,
2863 int level)
2864 {
2865 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2866
2867 if (hse == NULL)
2868 return -1;
2869
2870 perf_hpp_list__register_sort_field(list, &hse->hpp);
2871 return 0;
2872 }
2873
2874 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2875 struct perf_hpp_list *list)
2876 {
2877 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2878
2879 if (hse == NULL)
2880 return -1;
2881
2882 perf_hpp_list__column_register(list, &hse->hpp);
2883 return 0;
2884 }
2885
2886 #ifndef HAVE_LIBTRACEEVENT
2887 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2888 {
2889 return false;
2890 }
2891 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2892 struct hists *hists __maybe_unused)
2893 {
2894 return false;
2895 }
2896 #else
2897 struct hpp_dynamic_entry {
2898 struct perf_hpp_fmt hpp;
2899 struct evsel *evsel;
2900 struct tep_format_field *field;
2901 unsigned dynamic_len;
2902 bool raw_trace;
2903 };
2904
2905 static int hde_width(struct hpp_dynamic_entry *hde)
2906 {
2907 if (!hde->hpp.len) {
2908 int len = hde->dynamic_len;
2909 int namelen = strlen(hde->field->name);
2910 int fieldlen = hde->field->size;
2911
2912 if (namelen > len)
2913 len = namelen;
2914
2915 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2916 			/* length needed to print hex numbers */
2917 fieldlen = hde->field->size * 2 + 2;
2918 }
2919 if (fieldlen > len)
2920 len = fieldlen;
2921
2922 hde->hpp.len = len;
2923 }
2924 return hde->hpp.len;
2925 }
2926
2927 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2928 struct hist_entry *he)
2929 {
2930 char *str, *pos;
2931 struct tep_format_field *field = hde->field;
2932 size_t namelen;
2933 bool last = false;
2934
2935 if (hde->raw_trace)
2936 return;
2937
2938 /* parse pretty print result and update max length */
2939 if (!he->trace_output)
2940 he->trace_output = get_trace_output(he);
2941
2942 namelen = strlen(field->name);
2943 str = he->trace_output;
2944
2945 while (str) {
2946 pos = strchr(str, ' ');
2947 if (pos == NULL) {
2948 last = true;
2949 pos = str + strlen(str);
2950 }
2951
2952 if (!strncmp(str, field->name, namelen)) {
2953 size_t len;
2954
2955 str += namelen + 1;
2956 len = pos - str;
2957
2958 if (len > hde->dynamic_len)
2959 hde->dynamic_len = len;
2960 break;
2961 }
2962
2963 if (last)
2964 str = NULL;
2965 else
2966 str = pos + 1;
2967 }
2968 }
2969
2970 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2971 struct hists *hists __maybe_unused,
2972 int line __maybe_unused,
2973 int *span __maybe_unused)
2974 {
2975 struct hpp_dynamic_entry *hde;
2976 size_t len = fmt->user_len;
2977
2978 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2979
2980 if (!len)
2981 len = hde_width(hde);
2982
2983 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2984 }
2985
2986 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2987 struct perf_hpp *hpp __maybe_unused,
2988 struct hists *hists __maybe_unused)
2989 {
2990 struct hpp_dynamic_entry *hde;
2991 size_t len = fmt->user_len;
2992
2993 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2994
2995 if (!len)
2996 len = hde_width(hde);
2997
2998 return len;
2999 }
3000
3001 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
3002 {
3003 struct hpp_dynamic_entry *hde;
3004
3005 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3006
3007 return hists_to_evsel(hists) == hde->evsel;
3008 }
3009
3010 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
3011 struct hist_entry *he)
3012 {
3013 struct hpp_dynamic_entry *hde;
3014 size_t len = fmt->user_len;
3015 char *str, *pos;
3016 struct tep_format_field *field;
3017 size_t namelen;
3018 bool last = false;
3019 int ret;
3020
3021 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3022
3023 if (!len)
3024 len = hde_width(hde);
3025
3026 if (hde->raw_trace)
3027 goto raw_field;
3028
3029 if (!he->trace_output)
3030 he->trace_output = get_trace_output(he);
3031
3032 field = hde->field;
3033 namelen = strlen(field->name);
3034 str = he->trace_output;
3035
3036 while (str) {
3037 pos = strchr(str, ' ');
3038 if (pos == NULL) {
3039 last = true;
3040 pos = str + strlen(str);
3041 }
3042
3043 if (!strncmp(str, field->name, namelen)) {
3044 str += namelen + 1;
3045 str = strndup(str, pos - str);
3046
3047 if (str == NULL)
3048 return scnprintf(hpp->buf, hpp->size,
3049 "%*.*s", len, len, "ERROR");
3050 break;
3051 }
3052
3053 if (last)
3054 str = NULL;
3055 else
3056 str = pos + 1;
3057 }
3058
3059 if (str == NULL) {
3060 struct trace_seq seq;
3061 raw_field:
3062 trace_seq_init(&seq);
3063 tep_print_field(&seq, he->raw_data, hde->field);
3064 str = seq.buffer;
3065 }
3066
3067 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
3068 free(str);
3069 return ret;
3070 }
3071
3072 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
3073 struct hist_entry *a, struct hist_entry *b)
3074 {
3075 struct hpp_dynamic_entry *hde;
3076 struct tep_format_field *field;
3077 unsigned offset, size;
3078
3079 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3080
3081 field = hde->field;
3082 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
3083 unsigned long long dyn;
3084
3085 tep_read_number_field(field, a->raw_data, &dyn);
3086 offset = dyn & 0xffff;
3087 size = (dyn >> 16) & 0xffff;
3088 if (tep_field_is_relative(field->flags))
3089 offset += field->offset + field->size;
3090 /* record max width for output */
3091 if (size > hde->dynamic_len)
3092 hde->dynamic_len = size;
3093 } else {
3094 offset = field->offset;
3095 size = field->size;
3096 }
3097
3098 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
3099 }
3100
3101 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
3102 {
3103 return fmt->cmp == __sort__hde_cmp;
3104 }
3105
3106 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
3107 {
3108 struct hpp_dynamic_entry *hde_a;
3109 struct hpp_dynamic_entry *hde_b;
3110
3111 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
3112 return false;
3113
3114 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
3115 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
3116
3117 return hde_a->field == hde_b->field;
3118 }
3119
3120 static void hde_free(struct perf_hpp_fmt *fmt)
3121 {
3122 struct hpp_dynamic_entry *hde;
3123
3124 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3125 free(hde);
3126 }
3127
3128 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
3129 {
3130 struct hpp_dynamic_entry *hde;
3131
3132 if (!perf_hpp__is_dynamic_entry(fmt))
3133 return;
3134
3135 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3136 update_dynamic_len(hde, he);
3137 }
3138
3139 static struct hpp_dynamic_entry *
3140 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
3141 int level)
3142 {
3143 struct hpp_dynamic_entry *hde;
3144
3145 hde = malloc(sizeof(*hde));
3146 if (hde == NULL) {
3147 pr_debug("Memory allocation failed\n");
3148 return NULL;
3149 }
3150
3151 hde->evsel = evsel;
3152 hde->field = field;
3153 hde->dynamic_len = 0;
3154
3155 hde->hpp.name = field->name;
3156 hde->hpp.header = __sort__hde_header;
3157 hde->hpp.width = __sort__hde_width;
3158 hde->hpp.entry = __sort__hde_entry;
3159 hde->hpp.color = NULL;
3160
3161 hde->hpp.init = __sort__hde_init;
3162 hde->hpp.cmp = __sort__hde_cmp;
3163 hde->hpp.collapse = __sort__hde_cmp;
3164 hde->hpp.sort = __sort__hde_cmp;
3165 hde->hpp.equal = __sort__hde_equal;
3166 hde->hpp.free = hde_free;
3167
3168 INIT_LIST_HEAD(&hde->hpp.list);
3169 INIT_LIST_HEAD(&hde->hpp.sort_list);
3170 hde->hpp.elide = false;
3171 hde->hpp.len = 0;
3172 hde->hpp.user_len = 0;
3173 hde->hpp.level = level;
3174
3175 return hde;
3176 }
3177 #endif /* HAVE_LIBTRACEEVENT */
3178
3179 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
3180 {
3181 struct perf_hpp_fmt *new_fmt = NULL;
3182
3183 if (perf_hpp__is_sort_entry(fmt)) {
3184 struct hpp_sort_entry *hse, *new_hse;
3185
3186 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3187 new_hse = memdup(hse, sizeof(*hse));
3188 if (new_hse)
3189 new_fmt = &new_hse->hpp;
3190 #ifdef HAVE_LIBTRACEEVENT
3191 } else if (perf_hpp__is_dynamic_entry(fmt)) {
3192 struct hpp_dynamic_entry *hde, *new_hde;
3193
3194 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3195 new_hde = memdup(hde, sizeof(*hde));
3196 if (new_hde)
3197 new_fmt = &new_hde->hpp;
3198 #endif
3199 } else {
3200 new_fmt = memdup(fmt, sizeof(*fmt));
3201 }
3202
3203 INIT_LIST_HEAD(&new_fmt->list);
3204 INIT_LIST_HEAD(&new_fmt->sort_list);
3205
3206 return new_fmt;
3207 }
3208
3209 static int parse_field_name(char *str, char **event, char **field, char **opt)
3210 {
3211 char *event_name, *field_name, *opt_name;
3212
3213 event_name = str;
3214 field_name = strchr(str, '.');
3215
3216 if (field_name) {
3217 *field_name++ = '\0';
3218 } else {
3219 event_name = NULL;
3220 field_name = str;
3221 }
3222
3223 opt_name = strchr(field_name, '/');
3224 if (opt_name)
3225 *opt_name++ = '\0';
3226
3227 *event = event_name;
3228 *field = field_name;
3229 *opt = opt_name;
3230
3231 return 0;
3232 }
3233
3234 /* Find a matching evsel using a given event name. The event name can be:
3235 * 1. '%' + event index (e.g. '%1' for first event)
3236 * 2. full event name (e.g. sched:sched_switch)
3237 * 3. partial event name (should not contain ':')
3238 */
3239 static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
3240 {
3241 struct evsel *evsel = NULL;
3242 struct evsel *pos;
3243 bool full_name;
3244
3245 /* case 1 */
3246 if (event_name[0] == '%') {
3247 int nr = strtol(event_name+1, NULL, 0);
3248
3249 if (nr > evlist->core.nr_entries)
3250 return NULL;
3251
3252 evsel = evlist__first(evlist);
3253 while (--nr > 0)
3254 evsel = evsel__next(evsel);
3255
3256 return evsel;
3257 }
3258
3259 full_name = !!strchr(event_name, ':');
3260 evlist__for_each_entry(evlist, pos) {
3261 /* case 2 */
3262 if (full_name && evsel__name_is(pos, event_name))
3263 return pos;
3264 /* case 3 */
3265 if (!full_name && strstr(pos->name, event_name)) {
3266 if (evsel) {
3267 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
3268 event_name, evsel->name, pos->name);
3269 return NULL;
3270 }
3271 evsel = pos;
3272 }
3273 }
3274
3275 return evsel;
3276 }
3277
3278 #ifdef HAVE_LIBTRACEEVENT
3279 static int __dynamic_dimension__add(struct evsel *evsel,
3280 struct tep_format_field *field,
3281 bool raw_trace, int level)
3282 {
3283 struct hpp_dynamic_entry *hde;
3284
3285 hde = __alloc_dynamic_entry(evsel, field, level);
3286 if (hde == NULL)
3287 return -ENOMEM;
3288
3289 hde->raw_trace = raw_trace;
3290
3291 perf_hpp__register_sort_field(&hde->hpp);
3292 return 0;
3293 }
3294
3295 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
3296 {
3297 int ret;
3298 struct tep_event *tp_format = evsel__tp_format(evsel);
3299 struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL;
3300 while (field) {
3301 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3302 if (ret < 0)
3303 return ret;
3304
3305 field = field->next;
3306 }
3307 return 0;
3308 }
3309
3310 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
3311 int level)
3312 {
3313 int ret;
3314 struct evsel *evsel;
3315
3316 evlist__for_each_entry(evlist, evsel) {
3317 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3318 continue;
3319
3320 ret = add_evsel_fields(evsel, raw_trace, level);
3321 if (ret < 0)
3322 return ret;
3323 }
3324 return 0;
3325 }
3326
3327 static int add_all_matching_fields(struct evlist *evlist,
3328 char *field_name, bool raw_trace, int level)
3329 {
3330 int ret = -ESRCH;
3331 struct evsel *evsel;
3332
3333 evlist__for_each_entry(evlist, evsel) {
3334 struct tep_event *tp_format;
3335 struct tep_format_field *field;
3336
3337 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3338 continue;
3339
3340 tp_format = evsel__tp_format(evsel);
3341 if (tp_format == NULL)
3342 continue;
3343
3344 field = tep_find_any_field(tp_format, field_name);
3345 if (field == NULL)
3346 continue;
3347
3348 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3349 if (ret < 0)
3350 break;
3351 }
3352 return ret;
3353 }
3354 #endif /* HAVE_LIBTRACEEVENT */
3355
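/*
 * Add a dynamic (tracepoint field) sort key.  The token has the form
 * [<event>.]<field>[/raw], e.g. "sched:sched_switch.prev_comm"; the special
 * field names "trace_fields" and "*" add all fields of the matching event(s).
 */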
3356 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
3357 int level)
3358 {
3359 char *str, *event_name, *field_name, *opt_name;
3360 struct evsel *evsel;
3361 bool raw_trace = symbol_conf.raw_trace;
3362 int ret = 0;
3363
3364 if (evlist == NULL)
3365 return -ENOENT;
3366
3367 str = strdup(tok);
3368 if (str == NULL)
3369 return -ENOMEM;
3370
3371 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
3372 ret = -EINVAL;
3373 goto out;
3374 }
3375
3376 if (opt_name) {
3377 if (strcmp(opt_name, "raw")) {
3378 pr_debug("unsupported field option %s\n", opt_name);
3379 ret = -EINVAL;
3380 goto out;
3381 }
3382 raw_trace = true;
3383 }
3384
3385 #ifdef HAVE_LIBTRACEEVENT
3386 if (!strcmp(field_name, "trace_fields")) {
3387 ret = add_all_dynamic_fields(evlist, raw_trace, level);
3388 goto out;
3389 }
3390
3391 if (event_name == NULL) {
3392 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
3393 goto out;
3394 }
3395 #else
3396 evlist__for_each_entry(evlist, evsel) {
3397 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
3398 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
3399 ret = -ENOTSUP;
3400 }
3401 }
3402
3403 if (ret) {
3404 pr_err("\n");
3405 goto out;
3406 }
3407 #endif
3408
3409 evsel = find_evsel(evlist, event_name);
3410 if (evsel == NULL) {
3411 pr_debug("Cannot find event: %s\n", event_name);
3412 ret = -ENOENT;
3413 goto out;
3414 }
3415
3416 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3417 pr_debug("%s is not a tracepoint event\n", event_name);
3418 ret = -EINVAL;
3419 goto out;
3420 }
3421
3422 #ifdef HAVE_LIBTRACEEVENT
3423 if (!strcmp(field_name, "*")) {
3424 ret = add_evsel_fields(evsel, raw_trace, level);
3425 } else {
3426 struct tep_event *tp_format = evsel__tp_format(evsel);
3427 struct tep_format_field *field =
3428 tp_format ? tep_find_any_field(tp_format, field_name) : NULL;
3429
3430 if (field == NULL) {
3431 pr_debug("Cannot find event field for %s.%s\n",
3432 event_name, field_name);
3433 			ret = -ENOENT;
			goto out;
3434 }
3435
3436 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3437 }
3438 #else
3439 (void)level;
3440 (void)raw_trace;
3441 #endif /* HAVE_LIBTRACEEVENT */
3442
3443 out:
3444 free(str);
3445 return ret;
3446 }
3447
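/*
 * Add 'sd' to 'list' as a sort key.  The 'taken' flag guards against
 * registering the same dimension twice.
 */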
3448 static int __sort_dimension__add(struct sort_dimension *sd,
3449 struct perf_hpp_list *list,
3450 int level)
3451 {
3452 if (sd->taken)
3453 return 0;
3454
3455 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3456 return -1;
3457
3458 if (sd->entry->se_collapse)
3459 list->need_collapse = 1;
3460
3461 sd->taken = 1;
3462
3463 return 0;
3464 }
3465
3466 static int __hpp_dimension__add(struct hpp_dimension *hd,
3467 struct perf_hpp_list *list,
3468 int level)
3469 {
3470 struct perf_hpp_fmt *fmt;
3471
3472 if (hd->taken)
3473 return 0;
3474
3475 fmt = __hpp_dimension__alloc_hpp(hd, level);
3476 if (!fmt)
3477 return -1;
3478
3479 hd->taken = 1;
3480 perf_hpp_list__register_sort_field(list, fmt);
3481 return 0;
3482 }
3483
3484 static int __sort_dimension__add_output(struct perf_hpp_list *list,
3485 struct sort_dimension *sd)
3486 {
3487 if (sd->taken)
3488 return 0;
3489
3490 if (__sort_dimension__add_hpp_output(sd, list) < 0)
3491 return -1;
3492
3493 sd->taken = 1;
3494 return 0;
3495 }
3496
3497 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3498 struct hpp_dimension *hd)
3499 {
3500 struct perf_hpp_fmt *fmt;
3501
3502 if (hd->taken)
3503 return 0;
3504
3505 fmt = __hpp_dimension__alloc_hpp(hd, 0);
3506 if (!fmt)
3507 return -1;
3508
3509 hd->taken = 1;
3510 perf_hpp_list__column_register(list, fmt);
3511 return 0;
3512 }
3513
3514 int hpp_dimension__add_output(unsigned col)
3515 {
3516 BUG_ON(col >= PERF_HPP__MAX_INDEX);
3517 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
3518 }
3519
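/*
 * Resolve a single --sort token against the common, hpp, branch-stack
 * and memory dimension tables, falling back to a dynamic tracepoint
 * field.  Returns -EINVAL for keys that cannot be used here (e.g. a
 * memory key outside of memory sort mode) and -ESRCH for unknown keys.
 */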
3520 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
3521 struct evlist *evlist,
3522 int level)
3523 {
3524 unsigned int i, j;
3525
3526 /*
3527 	 * Check whether the token is an arch-specific sort key
3528 	 * that the current architecture does not support.
3529 	 * If so, skip it so that it is not displayed
3530 	 * in the output fields.
3531 */
3532 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
3533 if (!strcmp(arch_specific_sort_keys[j], tok) &&
3534 !arch_support_sort_key(tok)) {
3535 return 0;
3536 }
3537 }
3538
3539 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3540 struct sort_dimension *sd = &common_sort_dimensions[i];
3541
3542 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3543 continue;
3544
3545 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
3546 if (sd->name && !strcmp(dynamic_headers[j], sd->name))
3547 sort_dimension_add_dynamic_header(sd);
3548 }
3549
3550 if (sd->entry == &sort_parent && parent_pattern) {
3551 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
3552 if (ret) {
3553 char err[BUFSIZ];
3554
3555 regerror(ret, &parent_regex, err, sizeof(err));
3556 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
3557 return -EINVAL;
3558 }
3559 list->parent = 1;
3560 } else if (sd->entry == &sort_sym) {
3561 list->sym = 1;
3562 /*
3563 			 * perf diff displays the performance difference between
3564 			 * two or more perf.data files.  Those files could come
3565 			 * from different binaries, so we should not compare
3566 			 * their ips, but their symbol names.
3567 */
3568 if (sort__mode == SORT_MODE__DIFF)
3569 sd->entry->se_collapse = sort__sym_sort;
3570
3571 } else if (sd->entry == &sort_dso) {
3572 list->dso = 1;
3573 } else if (sd->entry == &sort_socket) {
3574 list->socket = 1;
3575 } else if (sd->entry == &sort_thread) {
3576 list->thread = 1;
3577 } else if (sd->entry == &sort_comm) {
3578 list->comm = 1;
3579 } else if (sd->entry == &sort_type_offset) {
3580 symbol_conf.annotate_data_member = true;
3581 }
3582
3583 return __sort_dimension__add(sd, list, level);
3584 }
3585
3586 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3587 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3588
3589 if (strncasecmp(tok, hd->name, strlen(tok)))
3590 continue;
3591
3592 return __hpp_dimension__add(hd, list, level);
3593 }
3594
3595 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3596 struct sort_dimension *sd = &bstack_sort_dimensions[i];
3597
3598 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3599 continue;
3600
3601 if ((sort__mode != SORT_MODE__BRANCH) &&
3602 strncasecmp(tok, "callchain_branch_predicted",
3603 strlen(tok)) &&
3604 strncasecmp(tok, "callchain_branch_abort",
3605 strlen(tok)) &&
3606 strncasecmp(tok, "callchain_branch_cycles",
3607 strlen(tok)))
3608 return -EINVAL;
3609
3610 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
3611 list->sym = 1;
3612
3613 __sort_dimension__add(sd, list, level);
3614 return 0;
3615 }
3616
3617 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3618 struct sort_dimension *sd = &memory_sort_dimensions[i];
3619
3620 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3621 continue;
3622
3623 if (sort__mode != SORT_MODE__MEMORY)
3624 return -EINVAL;
3625
3626 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
3627 return -EINVAL;
3628
3629 if (sd->entry == &sort_mem_daddr_sym)
3630 list->sym = 1;
3631
3632 __sort_dimension__add(sd, list, level);
3633 return 0;
3634 }
3635
3636 if (!add_dynamic_entry(evlist, tok, level))
3637 return 0;
3638
3639 return -ESRCH;
3640 }
3641
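/*
 * Parse the sort key string.  Keys are separated by ',' or ' ', and a
 * '{key1,key2}' group keeps its members on a single level, which is
 * used when building the --hierarchy output.
 */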
3642 static int setup_sort_list(struct perf_hpp_list *list, char *str,
3643 struct evlist *evlist)
3644 {
3645 char *tmp, *tok;
3646 int ret = 0;
3647 int level = 0;
3648 int next_level = 1;
3649 bool in_group = false;
3650
3651 do {
3652 tok = str;
3653 tmp = strpbrk(str, "{}, ");
3654 if (tmp) {
3655 if (in_group)
3656 next_level = level;
3657 else
3658 next_level = level + 1;
3659
3660 if (*tmp == '{')
3661 in_group = true;
3662 else if (*tmp == '}')
3663 in_group = false;
3664
3665 *tmp = '\0';
3666 str = tmp + 1;
3667 }
3668
3669 if (*tok) {
3670 ret = sort_dimension__add(list, tok, evlist, level);
3671 if (ret == -EINVAL) {
3672 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
3673 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
3674 else
3675 ui__error("Invalid --sort key: `%s'", tok);
3676 break;
3677 } else if (ret == -ESRCH) {
3678 ui__error("Unknown --sort key: `%s'", tok);
3679 break;
3680 }
3681 }
3682
3683 level = next_level;
3684 } while (tmp);
3685
3686 return ret;
3687 }
3688
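/*
 * Pick the default sort order for the current sort mode.  If every
 * event in the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT
 * (and use "trace_fields" when raw trace output was requested).
 */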
3689 static const char *get_default_sort_order(struct evlist *evlist)
3690 {
3691 const char *default_sort_orders[] = {
3692 default_sort_order,
3693 default_branch_sort_order,
3694 default_mem_sort_order,
3695 default_top_sort_order,
3696 default_diff_sort_order,
3697 default_tracepoint_sort_order,
3698 };
3699 bool use_trace = true;
3700 struct evsel *evsel;
3701
3702 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
3703
3704 if (evlist == NULL || evlist__empty(evlist))
3705 goto out_no_evlist;
3706
3707 evlist__for_each_entry(evlist, evsel) {
3708 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3709 use_trace = false;
3710 break;
3711 }
3712 }
3713
3714 if (use_trace) {
3715 sort__mode = SORT_MODE__TRACEPOINT;
3716 if (symbol_conf.raw_trace)
3717 return "trace_fields";
3718 }
3719 out_no_evlist:
3720 return default_sort_orders[sort__mode];
3721 }
3722
3723 static int setup_sort_order(struct evlist *evlist)
3724 {
3725 char *new_sort_order;
3726
3727 /*
3728 * Append '+'-prefixed sort order to the default sort
3729 * order string.
3730 */
3731 if (!sort_order || is_strict_order(sort_order))
3732 return 0;
3733
3734 if (sort_order[1] == '\0') {
3735 ui__error("Invalid --sort key: `+'");
3736 return -EINVAL;
3737 }
3738
3739 /*
3740 	 * We allocate a new sort_order string but never free it,
3741 	 * because it is referenced throughout the rest of the code.
3742 */
3743 if (asprintf(&new_sort_order, "%s,%s",
3744 get_default_sort_order(evlist), sort_order + 1) < 0) {
3745 pr_err("Not enough memory to set up --sort");
3746 return -ENOMEM;
3747 }
3748
3749 sort_order = new_sort_order;
3750 return 0;
3751 }
3752
3753 /*
3754  * Adds the 'pre,' prefix to 'str' if 'pre' is
3755  * not already part of 'str'.
3756 */
3757 static char *prefix_if_not_in(const char *pre, char *str)
3758 {
3759 char *n;
3760
3761 if (!str || strstr(str, pre))
3762 return str;
3763
3764 if (asprintf(&n, "%s,%s", pre, str) < 0)
3765 n = NULL;
3766
3767 free(str);
3768 return n;
3769 }
3770
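/*
 * Make sure the sort keys start with "overhead" (and with
 * "overhead_children" when callchain accumulation is enabled).  perf
 * diff sets up its own columns, so its keys are left untouched.
 */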
3771 static char *setup_overhead(char *keys)
3772 {
3773 if (sort__mode == SORT_MODE__DIFF)
3774 return keys;
3775
3776 keys = prefix_if_not_in("overhead", keys);
3777
3778 if (symbol_conf.cumulate_callchain)
3779 keys = prefix_if_not_in("overhead_children", keys);
3780
3781 return keys;
3782 }
3783
3784 static int __setup_sorting(struct evlist *evlist)
3785 {
3786 char *str;
3787 const char *sort_keys;
3788 int ret = 0;
3789
3790 ret = setup_sort_order(evlist);
3791 if (ret)
3792 return ret;
3793
3794 sort_keys = sort_order;
3795 if (sort_keys == NULL) {
3796 if (is_strict_order(field_order)) {
3797 /*
3798 * If user specified field order but no sort order,
3799 			 * If the user specified a field order but no sort order,
3800 			 * honor it and do not add the default sort order.
3801 return 0;
3802 }
3803
3804 sort_keys = get_default_sort_order(evlist);
3805 }
3806
3807 str = strdup(sort_keys);
3808 if (str == NULL) {
3809 pr_err("Not enough memory to setup sort keys");
3810 return -ENOMEM;
3811 }
3812
3813 /*
3814 * Prepend overhead fields for backward compatibility.
3815 */
3816 if (!is_strict_order(field_order)) {
3817 str = setup_overhead(str);
3818 if (str == NULL) {
3819 pr_err("Not enough memory to setup overhead keys");
3820 return -ENOMEM;
3821 }
3822 }
3823
3824 ret = setup_sort_list(&perf_hpp_list, str, evlist);
3825
3826 free(str);
3827 return ret;
3828 }
3829
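/* Force the elide state of the column whose sort entry matches 'idx'. */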
3830 void perf_hpp__set_elide(int idx, bool elide)
3831 {
3832 struct perf_hpp_fmt *fmt;
3833 struct hpp_sort_entry *hse;
3834
3835 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3836 if (!perf_hpp__is_sort_entry(fmt))
3837 continue;
3838
3839 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3840 if (hse->se->se_width_idx == idx) {
3841 fmt->elide = elide;
3842 break;
3843 }
3844 }
3845 }
3846
3847 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3848 {
3849 if (list && strlist__nr_entries(list) == 1) {
3850 if (fp != NULL)
3851 fprintf(fp, "# %s: %s\n", list_name,
3852 strlist__entry(list, 0)->s);
3853 return true;
3854 }
3855 return false;
3856 }
3857
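/*
 * A column is elided when its corresponding filter list has exactly
 * one entry, since every line would show the same value anyway.
 */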
3858 static bool get_elide(int idx, FILE *output)
3859 {
3860 switch (idx) {
3861 case HISTC_SYMBOL:
3862 return __get_elide(symbol_conf.sym_list, "symbol", output);
3863 case HISTC_DSO:
3864 return __get_elide(symbol_conf.dso_list, "dso", output);
3865 case HISTC_COMM:
3866 return __get_elide(symbol_conf.comm_list, "comm", output);
3867 default:
3868 break;
3869 }
3870
3871 if (sort__mode != SORT_MODE__BRANCH)
3872 return false;
3873
3874 switch (idx) {
3875 case HISTC_SYMBOL_FROM:
3876 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
3877 case HISTC_SYMBOL_TO:
3878 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
3879 case HISTC_DSO_FROM:
3880 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
3881 case HISTC_DSO_TO:
3882 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
3883 case HISTC_ADDR_FROM:
3884 return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
3885 case HISTC_ADDR_TO:
3886 return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
3887 default:
3888 break;
3889 }
3890
3891 return false;
3892 }
3893
3894 void sort__setup_elide(FILE *output)
3895 {
3896 struct perf_hpp_fmt *fmt;
3897 struct hpp_sort_entry *hse;
3898
3899 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3900 if (!perf_hpp__is_sort_entry(fmt))
3901 continue;
3902
3903 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3904 fmt->elide = get_elide(hse->se->se_width_idx, output);
3905 }
3906
3907 /*
3908 	 * It makes no sense to elide all of the sort entries.
3909 	 * Just revert them so they show up again.
3910 */
3911 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3912 if (!perf_hpp__is_sort_entry(fmt))
3913 continue;
3914
3915 if (!fmt->elide)
3916 return;
3917 }
3918
3919 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3920 if (!perf_hpp__is_sort_entry(fmt))
3921 continue;
3922
3923 fmt->elide = false;
3924 }
3925 }
3926
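/*
 * Resolve a single --fields token against the hpp, common, branch-stack
 * and memory dimension tables.  Branch and memory keys are only valid
 * in their respective sort modes; unknown keys return -ESRCH.
 */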
3927 int output_field_add(struct perf_hpp_list *list, const char *tok)
3928 {
3929 unsigned int i;
3930
3931 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3932 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3933
3934 if (strncasecmp(tok, hd->name, strlen(tok)))
3935 continue;
3936
3937 if (!strcasecmp(tok, "weight"))
3938 ui__warning("--fields weight shows the average value unlike in the --sort key.\n");
3939
3940 return __hpp_dimension__add_output(list, hd);
3941 }
3942
3943 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3944 struct sort_dimension *sd = &common_sort_dimensions[i];
3945
3946 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3947 continue;
3948
3949 return __sort_dimension__add_output(list, sd);
3950 }
3951
3952 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3953 struct sort_dimension *sd = &bstack_sort_dimensions[i];
3954
3955 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3956 continue;
3957
3958 if (sort__mode != SORT_MODE__BRANCH)
3959 return -EINVAL;
3960
3961 return __sort_dimension__add_output(list, sd);
3962 }
3963
3964 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3965 struct sort_dimension *sd = &memory_sort_dimensions[i];
3966
3967 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3968 continue;
3969
3970 if (sort__mode != SORT_MODE__MEMORY)
3971 return -EINVAL;
3972
3973 return __sort_dimension__add_output(list, sd);
3974 }
3975
3976 return -ESRCH;
3977 }
3978
3979 static int setup_output_list(struct perf_hpp_list *list, char *str)
3980 {
3981 char *tmp, *tok;
3982 int ret = 0;
3983
3984 for (tok = strtok_r(str, ", ", &tmp);
3985 tok; tok = strtok_r(NULL, ", ", &tmp)) {
3986 ret = output_field_add(list, tok);
3987 if (ret == -EINVAL) {
3988 ui__error("Invalid --fields key: `%s'", tok);
3989 break;
3990 } else if (ret == -ESRCH) {
3991 ui__error("Unknown --fields key: `%s'", tok);
3992 break;
3993 }
3994 }
3995
3996 return ret;
3997 }
3998
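/* Clear the 'taken' flags so dimensions can be added again, e.g. as output fields. */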
3999 void reset_dimensions(void)
4000 {
4001 unsigned int i;
4002
4003 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
4004 common_sort_dimensions[i].taken = 0;
4005
4006 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
4007 hpp_sort_dimensions[i].taken = 0;
4008
4009 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
4010 bstack_sort_dimensions[i].taken = 0;
4011
4012 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
4013 memory_sort_dimensions[i].taken = 0;
4014 }
4015
4016 bool is_strict_order(const char *order)
4017 {
4018 return order && (*order != '+');
4019 }
4020
4021 static int __setup_output_field(void)
4022 {
4023 char *str, *strp;
4024 int ret = -EINVAL;
4025
4026 if (field_order == NULL)
4027 return 0;
4028
4029 strp = str = strdup(field_order);
4030 if (str == NULL) {
4031 pr_err("Not enough memory to setup output fields");
4032 return -ENOMEM;
4033 }
4034
4035 if (!is_strict_order(field_order))
4036 strp++;
4037
4038 if (!strlen(strp)) {
4039 ui__error("Invalid --fields key: `+'");
4040 goto out;
4041 }
4042
4043 ret = setup_output_list(&perf_hpp_list, strp);
4044
4045 out:
4046 free(str);
4047 return ret;
4048 }
4049
4050 int setup_sorting(struct evlist *evlist)
4051 {
4052 int err;
4053
4054 err = __setup_sorting(evlist);
4055 if (err < 0)
4056 return err;
4057
4058 if (parent_pattern != default_parent_pattern) {
4059 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
4060 if (err < 0)
4061 return err;
4062 }
4063
4064 reset_dimensions();
4065
4066 /*
4067 * perf diff doesn't use default hpp output fields.
4068 */
4069 if (sort__mode != SORT_MODE__DIFF)
4070 perf_hpp__init();
4071
4072 err = __setup_output_field();
4073 if (err < 0)
4074 return err;
4075
4076 /* copy sort keys to output fields */
4077 perf_hpp__setup_output_field(&perf_hpp_list);
4078 /* and then copy output fields to sort keys */
4079 perf_hpp__append_sort_keys(&perf_hpp_list);
4080
4081 /* setup hists-specific output fields */
4082 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
4083 return -1;
4084
4085 return 0;
4086 }
4087
4088 void reset_output_field(void)
4089 {
4090 perf_hpp_list.need_collapse = 0;
4091 perf_hpp_list.parent = 0;
4092 perf_hpp_list.sym = 0;
4093 perf_hpp_list.dso = 0;
4094
4095 field_order = NULL;
4096 sort_order = NULL;
4097
4098 reset_dimensions();
4099 perf_hpp__reset_output_field(&perf_hpp_list);
4100 }
4101
4102 #define INDENT (3*8 + 1)
4103
4104 static void add_key(struct strbuf *sb, const char *str, int *llen)
4105 {
4106 if (!str)
4107 return;
4108
4109 if (*llen >= 75) {
4110 strbuf_addstr(sb, "\n\t\t\t ");
4111 *llen = INDENT;
4112 }
4113 strbuf_addf(sb, " %s", str);
4114 *llen += strlen(str) + 1;
4115 }
4116
4117 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
4118 int *llen)
4119 {
4120 int i;
4121
4122 for (i = 0; i < n; i++)
4123 add_key(sb, s[i].name, llen);
4124 }
4125
4126 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
4127 int *llen)
4128 {
4129 int i;
4130
4131 for (i = 0; i < n; i++)
4132 add_key(sb, s[i].name, llen);
4133 }
4134
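/*
 * Build a help string listing the sort keys available for 'mode',
 * wrapping long lines so they stay readable in the usage output.
 */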
4135 char *sort_help(const char *prefix, enum sort_mode mode)
4136 {
4137 struct strbuf sb;
4138 char *s;
4139 int len = strlen(prefix) + INDENT;
4140
4141 strbuf_init(&sb, 300);
4142 strbuf_addstr(&sb, prefix);
4143 add_hpp_sort_string(&sb, hpp_sort_dimensions,
4144 ARRAY_SIZE(hpp_sort_dimensions), &len);
4145 add_sort_string(&sb, common_sort_dimensions,
4146 ARRAY_SIZE(common_sort_dimensions), &len);
4147 if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__BRANCH)
4148 add_sort_string(&sb, bstack_sort_dimensions,
4149 ARRAY_SIZE(bstack_sort_dimensions), &len);
4150 if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__MEMORY)
4151 add_sort_string(&sb, memory_sort_dimensions,
4152 ARRAY_SIZE(memory_sort_dimensions), &len);
4153 s = strbuf_detach(&sb, NULL);
4154 strbuf_release(&sb);
4155 return s;
4156 }
4157