1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * builtin-report.c
4 *
5 * Builtin report command: Analyze the perf.data input file,
6 * look up and read DSOs and symbol information and display
7 * a histogram of results, along various sorting keys.
8 */
9 #include "builtin.h"
10
11 #include "util/config.h"
12
13 #include "util/annotate.h"
14 #include "util/color.h"
15 #include "util/dso.h"
16 #include <linux/list.h>
17 #include <linux/rbtree.h>
18 #include <linux/err.h>
19 #include <linux/zalloc.h>
20 #include "util/map.h"
21 #include "util/symbol.h"
22 #include "util/map_symbol.h"
23 #include "util/mem-events.h"
24 #include "util/branch.h"
25 #include "util/callchain.h"
26 #include "util/values.h"
27
28 #include "perf.h"
29 #include "util/debug.h"
30 #include "util/evlist.h"
31 #include "util/evsel.h"
32 #include "util/evswitch.h"
33 #include "util/header.h"
34 #include "util/mem-info.h"
35 #include "util/session.h"
36 #include "util/srcline.h"
37 #include "util/tool.h"
38
39 #include <subcmd/parse-options.h>
40 #include <subcmd/exec-cmd.h>
41 #include "util/parse-events.h"
42
43 #include "util/thread.h"
44 #include "util/sort.h"
45 #include "util/hist.h"
46 #include "util/data.h"
47 #include "arch/common.h"
48 #include "util/time-utils.h"
49 #include "util/auxtrace.h"
50 #include "util/units.h"
51 #include "util/util.h" // perf_tip()
52 #include "ui/ui.h"
53 #include "ui/progress.h"
54 #include "util/block-info.h"
55
56 #include <dlfcn.h>
57 #include <errno.h>
58 #include <inttypes.h>
59 #include <regex.h>
60 #include <linux/ctype.h>
61 #include <signal.h>
62 #include <linux/bitmap.h>
63 #include <linux/list_sort.h>
64 #include <linux/string.h>
65 #include <linux/stringify.h>
66 #include <linux/time64.h>
67 #include <sys/types.h>
68 #include <sys/stat.h>
69 #include <unistd.h>
70 #include <linux/mman.h>
71
72 #ifdef HAVE_LIBTRACEEVENT
73 #include <event-parse.h>
74 #endif
75
76 struct report {
77 struct perf_tool tool;
78 struct perf_session *session;
79 struct evswitch evswitch;
80 #ifdef HAVE_SLANG_SUPPORT
81 bool use_tui;
82 #endif
83 #ifdef HAVE_GTK2_SUPPORT
84 bool use_gtk;
85 #endif
86 bool use_stdio;
87 bool show_full_info;
88 bool show_threads;
89 bool inverted_callchain;
90 bool mem_mode;
91 bool stats_mode;
92 bool tasks_mode;
93 bool mmaps_mode;
94 bool header;
95 bool header_only;
96 bool nonany_branch_mode;
97 bool group_set;
98 bool stitch_lbr;
99 bool disable_order;
100 bool skip_empty;
101 bool data_type;
102 int max_stack;
103 struct perf_read_values show_threads_values;
104 const char *pretty_printing_style;
105 const char *cpu_list;
106 const char *symbol_filter_str;
107 const char *time_str;
108 struct perf_time_interval *ptime_range;
109 int range_size;
110 int range_num;
111 float min_percent;
112 u64 nr_entries;
113 u64 queue_size;
114 u64 total_cycles;
115 int socket_filter;
116 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
117 struct branch_type_stat brtype_stat;
118 bool symbol_ipc;
119 bool total_cycles_mode;
120 struct block_report *block_reports;
121 int nr_block_reports;
122 };
123
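/*
 * Handle the "report." variables from the perfconfig file(s):
 * report.group, report.percent-limit, report.children, report.queue-size,
 * report.sort_order and report.skip-empty. Unknown keys are only reported
 * at debug level and otherwise ignored.
 */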
static int report__config(const char *var, const char *value, void *cb)
125 {
126 struct report *rep = cb;
127
128 if (!strcmp(var, "report.group")) {
129 symbol_conf.event_group = perf_config_bool(var, value);
130 return 0;
131 }
132 if (!strcmp(var, "report.percent-limit")) {
133 double pcnt = strtof(value, NULL);
134
135 rep->min_percent = pcnt;
136 callchain_param.min_percent = pcnt;
137 return 0;
138 }
139 if (!strcmp(var, "report.children")) {
140 symbol_conf.cumulate_callchain = perf_config_bool(var, value);
141 return 0;
142 }
143 if (!strcmp(var, "report.queue-size"))
144 return perf_config_u64(&rep->queue_size, var, value);
145
146 if (!strcmp(var, "report.sort_order")) {
147 default_sort_order = strdup(value);
148 if (!default_sort_order) {
149 pr_err("Not enough memory for report.sort_order\n");
150 return -1;
151 }
152 return 0;
153 }
154
155 if (!strcmp(var, "report.skip-empty")) {
156 rep->skip_empty = perf_config_bool(var, value);
157 return 0;
158 }
159
160 pr_debug("%s variable unknown, ignoring...", var);
161 return 0;
162 }
163
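/*
 * Per-entry callback invoked after a hist entry has been added: bump the
 * annotation/IPC sample counters for the addresses involved (branch
 * source/target in branch mode, data address in mem mode, the sampled
 * address otherwise). A no-op when neither annotation nor IPC output is
 * in use.
 */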
static int hist_iter__report_callback(struct hist_entry_iter *iter,
165 struct addr_location *al, bool single,
166 void *arg)
167 {
168 int err = 0;
169 struct report *rep = arg;
170 struct hist_entry *he = iter->he;
171 struct evsel *evsel = iter->evsel;
172 struct perf_sample *sample = iter->sample;
173 struct mem_info *mi;
174 struct branch_info *bi;
175
176 if (!ui__has_annotation() && !rep->symbol_ipc)
177 return 0;
178
179 if (sort__mode == SORT_MODE__BRANCH) {
180 bi = he->branch_info;
181 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
182 if (err)
183 goto out;
184
185 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
186
187 } else if (rep->mem_mode) {
188 mi = he->mem_info;
189 err = addr_map_symbol__inc_samples(mem_info__daddr(mi), sample, evsel);
190 if (err)
191 goto out;
192
193 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
194
195 } else if (symbol_conf.cumulate_callchain) {
196 if (single)
197 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
198 } else {
199 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
200 }
201
202 out:
203 return err;
204 }
205
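/*
 * Branch-mode variant of the callback above: account the branch type for
 * the summary printed at the end and, when annotation/IPC is active, bump
 * the sample counters for both the branch source and target.
 */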
static int hist_iter__branch_callback(struct hist_entry_iter *iter,
207 struct addr_location *al __maybe_unused,
208 bool single __maybe_unused,
209 void *arg)
210 {
211 struct hist_entry *he = iter->he;
212 struct report *rep = arg;
213 struct branch_info *bi = he->branch_info;
214 struct perf_sample *sample = iter->sample;
215 struct evsel *evsel = iter->evsel;
216 int err;
217
218 branch_type_count(&rep->brtype_stat, &bi->flags,
219 bi->from.addr, bi->to.addr);
220
221 if (!ui__has_annotation() && !rep->symbol_ipc)
222 return 0;
223
224 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
225 if (err)
226 goto out;
227
228 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
229
230 out:
231 return err;
232 }
233
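/*
 * With --group, force all events under a single group leader so their
 * histograms are presented together even if the events were not recorded
 * as a group.
 */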
static void setup_forced_leader(struct report *report,
235 struct evlist *evlist)
236 {
237 if (report->group_set)
238 evlist__force_leader(evlist);
239 }
240
static int process_feature_event(struct perf_session *session,
242 union perf_event *event)
243 {
244 struct report *rep = container_of(session->tool, struct report, tool);
245
246 if (event->feat.feat_id < HEADER_LAST_FEATURE)
247 return perf_event__process_feature(session, event);
248
249 if (event->feat.feat_id != HEADER_LAST_FEATURE) {
250 pr_err("failed: wrong feature ID: %" PRI_lu64 "\n",
251 event->feat.feat_id);
252 return -1;
253 } else if (rep->header_only) {
254 session_done = 1;
255 }
256
257 /*
258 * (feat_id = HEADER_LAST_FEATURE) is the end marker which
259 * means all features are received, now we can force the
260 * group if needed.
261 */
262 setup_forced_leader(rep, session->evlist);
263 return 0;
264 }
265
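/*
 * Main PERF_RECORD_SAMPLE handler: apply the --time, event-switch and
 * --cpu filters, resolve the sample to a thread/map/symbol and feed it to
 * the histogram code, picking the hist_entry_iter ops that match the
 * current branch/mem/callchain mode.
 */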
static int process_sample_event(const struct perf_tool *tool,
267 union perf_event *event,
268 struct perf_sample *sample,
269 struct evsel *evsel,
270 struct machine *machine)
271 {
272 struct report *rep = container_of(tool, struct report, tool);
273 struct addr_location al;
274 struct hist_entry_iter iter = {
275 .evsel = evsel,
276 .sample = sample,
277 .hide_unresolved = symbol_conf.hide_unresolved,
278 .add_entry_cb = hist_iter__report_callback,
279 };
280 int ret = 0;
281
282 if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num,
283 sample->time)) {
284 return 0;
285 }
286
287 if (evswitch__discard(&rep->evswitch, evsel))
288 return 0;
289
290 addr_location__init(&al);
291 if (machine__resolve(machine, &al, sample) < 0) {
292 pr_debug("problem processing %d event, skipping it.\n",
293 event->header.type);
294 ret = -1;
295 goto out_put;
296 }
297
298 if (rep->stitch_lbr)
299 thread__set_lbr_stitch_enable(al.thread, true);
300
301 if (symbol_conf.hide_unresolved && al.sym == NULL)
302 goto out_put;
303
304 if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
305 goto out_put;
306
307 if (sort__mode == SORT_MODE__BRANCH) {
308 /*
309 * A non-synthesized event might not have a branch stack if
310 * branch stacks have been synthesized (using itrace options).
311 */
312 if (!sample->branch_stack)
313 goto out_put;
314
315 iter.add_entry_cb = hist_iter__branch_callback;
316 iter.ops = &hist_iter_branch;
317 } else if (rep->mem_mode) {
318 iter.ops = &hist_iter_mem;
319 } else if (symbol_conf.cumulate_callchain) {
320 iter.ops = &hist_iter_cumulative;
321 } else {
322 iter.ops = &hist_iter_normal;
323 }
324
325 if (al.map != NULL)
326 dso__set_hit(map__dso(al.map));
327
328 if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
329 hist__account_cycles(sample->branch_stack, &al, sample,
330 rep->nonany_branch_mode,
331 &rep->total_cycles, evsel);
332 }
333
334 ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
335 if (ret < 0)
336 pr_debug("problem adding hist entry, skipping event\n");
337 out_put:
338 addr_location__exit(&al);
339 return ret;
340 }
341
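/*
 * PERF_RECORD_READ handler: with -T/--threads, accumulate the per-thread
 * counter values so they can be printed after the histograms.
 */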
static int process_read_event(const struct perf_tool *tool,
343 union perf_event *event,
344 struct perf_sample *sample __maybe_unused,
345 struct evsel *evsel,
346 struct machine *machine __maybe_unused)
347 {
348 struct report *rep = container_of(tool, struct report, tool);
349
350 if (rep->show_threads) {
351 int err = perf_read_values_add_value(&rep->show_threads_values,
352 event->read.pid, event->read.tid,
353 evsel,
354 event->read.value);
355
356 if (err)
357 return err;
358 }
359
360 return 0;
361 }
362
363 /* For pipe mode, sample_type is not currently set */
static int report__setup_sample_type(struct report *rep)
365 {
366 struct perf_session *session = rep->session;
367 u64 sample_type = evlist__combined_sample_type(session->evlist);
368 bool is_pipe = perf_data__is_pipe(session->data);
369 struct evsel *evsel;
370
371 if (session->itrace_synth_opts->callchain ||
372 session->itrace_synth_opts->add_callchain ||
373 (!is_pipe &&
374 perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
375 !session->itrace_synth_opts->set))
376 sample_type |= PERF_SAMPLE_CALLCHAIN;
377
378 if (session->itrace_synth_opts->last_branch ||
379 session->itrace_synth_opts->add_last_branch)
380 sample_type |= PERF_SAMPLE_BRANCH_STACK;
381
382 if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
383 if (perf_hpp_list.parent) {
384 ui__error("Selected --sort parent, but no "
385 "callchain data. Did you call "
386 "'perf record' without -g?\n");
387 return -EINVAL;
388 }
389 if (symbol_conf.use_callchain &&
390 !symbol_conf.show_branchflag_count) {
391 ui__error("Selected -g or --branch-history.\n"
392 "But no callchain or branch data.\n"
393 "Did you call 'perf record' without -g or -b?\n");
394 return -1;
395 }
396 } else if (!callchain_param.enabled &&
397 callchain_param.mode != CHAIN_NONE &&
398 !symbol_conf.use_callchain) {
399 symbol_conf.use_callchain = true;
400 if (callchain_register_param(&callchain_param) < 0) {
401 ui__error("Can't register callchain params.\n");
402 return -EINVAL;
403 }
404 }
405
406 if (symbol_conf.cumulate_callchain) {
407 /* Silently ignore if callchain is missing */
408 if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
409 symbol_conf.cumulate_callchain = false;
410 perf_hpp__cancel_cumulate();
411 }
412 }
413
414 if (sort__mode == SORT_MODE__BRANCH) {
415 if (!is_pipe &&
416 !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
417 ui__error("Selected -b but no branch data. "
418 "Did you call perf record without -b?\n");
419 return -1;
420 }
421 }
422
423 if (sort__mode == SORT_MODE__MEMORY) {
424 /*
* FIXUP: prior to kernel 5.18, Arm SPE failed to set the
426 * PERF_SAMPLE_DATA_SRC bit in sample type. For backward
427 * compatibility, set the bit if it's an old perf data file.
428 */
429 evlist__for_each_entry(session->evlist, evsel) {
430 if (strstr(evsel__name(evsel), "arm_spe") &&
431 !(sample_type & PERF_SAMPLE_DATA_SRC)) {
432 evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
433 sample_type |= PERF_SAMPLE_DATA_SRC;
434 }
435 }
436
437 if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
438 ui__error("Selected --mem-mode but no mem data. "
439 "Did you call perf record without -d?\n");
440 return -1;
441 }
442 }
443
444 callchain_param_setup(sample_type, perf_env__arch(&rep->session->header.env));
445
446 if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
447 ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
448 "Please apply --call-graph lbr when recording.\n");
449 rep->stitch_lbr = false;
450 }
451
452 /* ??? handle more cases than just ANY? */
453 if (!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY))
454 rep->nonany_branch_mode = true;
455
456 #if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_LIBDW_SUPPORT)
457 if (dwarf_callchain_users) {
458 ui__warning("Please install libunwind or libdw "
459 "development packages during the perf build.\n");
460 }
461 #endif
462
463 return 0;
464 }
465
static void sig_handler(int sig __maybe_unused)
467 {
468 session_done = 1;
469 }
470
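/*
 * Print the "# Samples: ... of event ..." header block above each
 * event's histogram in stdio mode, folding in group members, the time
 * slices, the total period/weight and any socket filter in effect.
 */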
static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep,
472 const char *evname, FILE *fp)
473 {
474 size_t ret;
475 char unit;
476 unsigned long nr_samples = hists->stats.nr_samples;
477 u64 nr_events = hists->stats.total_period;
478 struct evsel *evsel = hists_to_evsel(hists);
479 char buf[512];
480 size_t size = sizeof(buf);
481 int socked_id = hists->socket_filter;
482
483 if (quiet)
484 return 0;
485
486 if (symbol_conf.filter_relative) {
487 nr_samples = hists->stats.nr_non_filtered_samples;
488 nr_events = hists->stats.total_non_filtered_period;
489 }
490
491 if (evsel__is_group_event(evsel)) {
492 struct evsel *pos;
493
494 evsel__group_desc(evsel, buf, size);
495 evname = buf;
496
497 for_each_group_member(pos, evsel) {
498 const struct hists *pos_hists = evsel__hists(pos);
499
500 if (symbol_conf.filter_relative) {
501 nr_samples += pos_hists->stats.nr_non_filtered_samples;
502 nr_events += pos_hists->stats.total_non_filtered_period;
503 } else {
504 nr_samples += pos_hists->stats.nr_samples;
505 nr_events += pos_hists->stats.total_period;
506 }
507 }
508 }
509
510 nr_samples = convert_unit(nr_samples, &unit);
511 ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
512 if (evname != NULL) {
513 ret += fprintf(fp, " of event%s '%s'",
514 evsel->core.nr_members > 1 ? "s" : "", evname);
515 }
516
517 if (rep->time_str)
518 ret += fprintf(fp, " (time slices: %s)", rep->time_str);
519
520 if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) {
521 ret += fprintf(fp, ", show reference callgraph");
522 }
523
524 if (rep->mem_mode) {
525 ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
526 ret += fprintf(fp, "\n# Sort order : %s", sort_order ? : default_mem_sort_order);
527 } else
528 ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
529
530 if (socked_id > -1)
531 ret += fprintf(fp, "\n# Processor Socket: %d", socked_id);
532
533 return ret + fprintf(fp, "\n#\n");
534 }
535
static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *rep)
537 {
538 struct evsel *pos;
539 int i = 0, ret;
540
541 evlist__for_each_entry(evlist, pos) {
542 ret = report__browse_block_hists(&rep->block_reports[i++].hist,
543 rep->min_percent, pos,
544 &rep->session->header.env);
545 if (ret != 0)
546 return ret;
547 }
548
549 return 0;
550 }
551
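/*
 * stdio output: print the header and histogram for every event (or group
 * leader), the block reports in --total-cycles mode, the per-thread
 * values with -T and the branch type statistics when sorting by branch.
 */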
static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, const char *help)
553 {
554 struct evsel *pos;
555 int i = 0;
556
557 if (!quiet) {
558 fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n",
559 evlist->stats.total_lost_samples);
560 }
561
562 evlist__for_each_entry(evlist, pos) {
563 struct hists *hists = evsel__hists(pos);
564 const char *evname = evsel__name(pos);
565
566 i++;
567 if (symbol_conf.event_group && !evsel__is_group_leader(pos))
568 continue;
569
570 if (rep->skip_empty && !hists->stats.nr_samples)
571 continue;
572
573 hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
574
575 if (rep->total_cycles_mode) {
576 char *buf;
577
578 if (!annotation_br_cntr_abbr_list(&buf, pos, true)) {
579 fprintf(stdout, "%s", buf);
580 fprintf(stdout, "#\n");
581 free(buf);
582 }
583 report__browse_block_hists(&rep->block_reports[i - 1].hist,
584 rep->min_percent, pos, NULL);
585 continue;
586 }
587
588 hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
589 !(symbol_conf.use_callchain ||
590 symbol_conf.show_branchflag_count));
591 fprintf(stdout, "\n\n");
592 }
593
594 if (!quiet)
595 fprintf(stdout, "#\n# (%s)\n#\n", help);
596
597 if (rep->show_threads) {
598 bool style = !strcmp(rep->pretty_printing_style, "raw");
599 perf_read_values_display(stdout, &rep->show_threads_values,
600 style);
601 perf_read_values_destroy(&rep->show_threads_values);
602 }
603
604 if (sort__mode == SORT_MODE__BRANCH)
605 branch_type_stat_display(stdout, &rep->brtype_stat);
606
607 return 0;
608 }
609
static void report__warn_kptr_restrict(const struct report *rep)
611 {
612 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
613 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
614
615 if (evlist__exclude_kernel(rep->session->evlist))
616 return;
617
618 if (kernel_map == NULL ||
619 (dso__hit(map__dso(kernel_map)) &&
620 (kernel_kmap->ref_reloc_sym == NULL ||
621 kernel_kmap->ref_reloc_sym->addr == 0))) {
622 const char *desc =
623 "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
624 "can't be resolved.";
625
626 if (kernel_map && map__has_symbols(kernel_map)) {
627 desc = "If some relocation was applied (e.g. "
628 "kexec) symbols may be misresolved.";
629 }
630
631 ui__warning(
632 "Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
633 "Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
634 "Samples in kernel modules can't be resolved as well.\n\n",
635 desc);
636 }
637 }
638
static int report__gtk_browse_hists(struct report *rep, const char *help)
640 {
641 int (*hist_browser)(struct evlist *evlist, const char *help,
642 struct hist_browser_timer *timer, float min_pcnt);
643
644 hist_browser = dlsym(perf_gtk_handle, "evlist__gtk_browse_hists");
645
646 if (hist_browser == NULL) {
647 ui__error("GTK browser not found!\n");
648 return -1;
649 }
650
651 return hist_browser(rep->session->evlist, help, NULL, rep->min_percent);
652 }
653
static int report__browse_hists(struct report *rep)
655 {
656 int ret;
657 struct perf_session *session = rep->session;
658 struct evlist *evlist = session->evlist;
659 char *help = NULL, *path = NULL;
660
661 path = system_path(TIPDIR);
662 if (perf_tip(&help, path) || help == NULL) {
663 /* fallback for people who don't install perf ;-) */
664 free(path);
665 path = system_path(DOCDIR);
666 if (perf_tip(&help, path) || help == NULL)
667 help = strdup("Cannot load tips.txt file, please install perf!");
668 }
669 free(path);
670
671 switch (use_browser) {
672 case 1:
673 if (rep->total_cycles_mode) {
674 ret = evlist__tui_block_hists_browse(evlist, rep);
675 break;
676 }
677
678 ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
679 &session->header.env, true);
680 /*
681 * Usually "ret" is the last pressed key, and we only
682 * care if the key notifies us to switch data file.
683 */
684 if (ret != K_SWITCH_INPUT_DATA && ret != K_RELOAD)
685 ret = 0;
686 break;
687 case 2:
688 ret = report__gtk_browse_hists(rep, help);
689 break;
690 default:
691 ret = evlist__tty_browse_hists(evlist, rep, help);
692 break;
693 }
694 free(help);
695 return ret;
696 }
697
static int report__collapse_hists(struct report *rep)
699 {
700 struct perf_session *session = rep->session;
701 struct evlist *evlist = session->evlist;
702 struct ui_progress prog;
703 struct evsel *pos;
704 int ret = 0;
705
706 /*
707 * The pipe data needs to setup hierarchy hpp formats now, because it
708 * cannot know about evsels in the data before reading the data. The
709 * normal file data saves the event (attribute) info in the header
710 * section, but pipe does not have the luxury.
711 */
712 if (perf_data__is_pipe(session->data)) {
713 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) {
714 ui__error("Failed to setup hierarchy output formats\n");
715 return -1;
716 }
717 }
718
719 ui_progress__init(&prog, rep->nr_entries, "Merging related events...");
720
721 evlist__for_each_entry(rep->session->evlist, pos) {
722 struct hists *hists = evsel__hists(pos);
723
724 if (pos->core.idx == 0)
725 hists->symbol_filter_str = rep->symbol_filter_str;
726
727 hists->socket_filter = rep->socket_filter;
728
729 ret = hists__collapse_resort(hists, &prog);
730 if (ret < 0)
731 break;
732
733 /* Non-group events are considered as leader */
734 if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
735 struct hists *leader_hists = evsel__hists(evsel__leader(pos));
736
737 hists__match(leader_hists, hists);
738 hists__link(leader_hists, hists);
739 }
740 }
741
742 ui_progress__finish();
743 return ret;
744 }
745
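/*
 * Per-entry callback for the output resort: when IPC columns were
 * requested, make sure the symbol is annotated so cycle/IPC data is
 * available.
 */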
static int hists__resort_cb(struct hist_entry *he, void *arg)
747 {
748 struct report *rep = arg;
749 struct symbol *sym = he->ms.sym;
750
751 if (rep->symbol_ipc && sym && !sym->annotate2) {
752 struct evsel *evsel = hists_to_evsel(he->hists);
753
754 symbol__annotate2(&he->ms, evsel, NULL);
755 }
756
757 return 0;
758 }
759
static void report__output_resort(struct report *rep)
761 {
762 struct ui_progress prog;
763 struct evsel *pos;
764
765 ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
766
767 evlist__for_each_entry(rep->session->evlist, pos) {
768 evsel__output_resort_cb(pos, &prog, hists__resort_cb, rep);
769 }
770
771 ui_progress__finish();
772 }
773
static int count_sample_event(const struct perf_tool *tool __maybe_unused,
775 union perf_event *event __maybe_unused,
776 struct perf_sample *sample __maybe_unused,
777 struct evsel *evsel,
778 struct machine *machine __maybe_unused)
779 {
780 struct hists *hists = evsel__hists(evsel);
781
782 hists__inc_nr_events(hists);
783 return 0;
784 }
785
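/*
 * Account PERF_RECORD_LOST_SAMPLES against the evsel it belongs to,
 * separating samples dropped by a BPF filter from samples lost by the
 * kernel.
 */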
static int count_lost_samples_event(const struct perf_tool *tool,
787 union perf_event *event,
788 struct perf_sample *sample,
789 struct machine *machine __maybe_unused)
790 {
791 struct report *rep = container_of(tool, struct report, tool);
792 struct evsel *evsel;
793
794 evsel = evlist__id2evsel(rep->session->evlist, sample->id);
795 if (evsel) {
796 struct hists *hists = evsel__hists(evsel);
797 u32 count = event->lost_samples.lost;
798
799 if (event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF)
800 hists__inc_nr_dropped_samples(hists, count);
801 else
802 hists__inc_nr_lost_samples(hists, count);
803 }
804 return 0;
805 }
806
807 static int process_attr(const struct perf_tool *tool __maybe_unused,
808 union perf_event *event,
809 struct evlist **pevlist);
810
static void stats_setup(struct report *rep)
812 {
813 perf_tool__init(&rep->tool, /*ordered_events=*/false);
814 rep->tool.attr = process_attr;
815 rep->tool.sample = count_sample_event;
816 rep->tool.lost_samples = count_lost_samples_event;
817 rep->tool.event_update = perf_event__process_event_update;
818 rep->tool.no_warn = true;
819 }
820
static int stats_print(struct report *rep)
822 {
823 struct perf_session *session = rep->session;
824
825 perf_session__fprintf_nr_events(session, stdout);
826 evlist__fprintf_nr_events(session->evlist, stdout);
827 return 0;
828 }
829
static void tasks_setup(struct report *rep)
831 {
832 perf_tool__init(&rep->tool, /*ordered_events=*/true);
833 if (rep->mmaps_mode) {
834 rep->tool.mmap = perf_event__process_mmap;
835 rep->tool.mmap2 = perf_event__process_mmap2;
836 }
837 rep->tool.attr = process_attr;
838 rep->tool.comm = perf_event__process_comm;
839 rep->tool.exit = perf_event__process_exit;
840 rep->tool.fork = perf_event__process_fork;
841 rep->tool.no_warn = true;
842 }
843
844 struct maps__fprintf_task_args {
845 int indent;
846 FILE *fp;
847 size_t printed;
848 };
849
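/* Print a single map in a /proc/<pid>/maps like format. */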
static int maps__fprintf_task_cb(struct map *map, void *data)
851 {
852 struct maps__fprintf_task_args *args = data;
853 const struct dso *dso = map__dso(map);
854 u32 prot = map__prot(map);
855 int ret;
856
857 ret = fprintf(args->fp,
858 "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
859 args->indent, "", map__start(map), map__end(map),
860 prot & PROT_READ ? 'r' : '-',
861 prot & PROT_WRITE ? 'w' : '-',
862 prot & PROT_EXEC ? 'x' : '-',
863 map__flags(map) ? 's' : 'p',
864 map__pgoff(map),
865 dso__id_const(dso)->ino, dso__name(dso));
866
867 if (ret < 0)
868 return ret;
869
870 args->printed += ret;
871 return 0;
872 }
873
static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
875 {
876 struct maps__fprintf_task_args args = {
877 .indent = indent,
878 .fp = fp,
879 .printed = 0,
880 };
881
882 maps__for_each_map(maps, maps__fprintf_task_cb, &args);
883
884 return args.printed;
885 }
886
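/*
 * Depth of a thread in the process tree: 0 for the machine's root/idle
 * thread, 1 for threads with no known parent, otherwise one more than
 * the parent's level.
 */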
static int thread_level(struct machine *machine, const struct thread *thread)
888 {
889 struct thread *parent_thread;
890 int res;
891
892 if (thread__tid(thread) <= 0)
893 return 0;
894
895 if (thread__ppid(thread) <= 0)
896 return 1;
897
898 parent_thread = machine__find_thread(machine, -1, thread__ppid(thread));
899 if (!parent_thread) {
900 pr_err("Missing parent thread of %d\n", thread__tid(thread));
901 return 0;
902 }
903 res = 1 + thread_level(machine, parent_thread);
904 thread__put(parent_thread);
905 return res;
906 }
907
static void task__print_level(struct machine *machine, struct thread *thread, FILE *fp)
909 {
910 int level = thread_level(machine, thread);
911 int comm_indent = fprintf(fp, " %8d %8d %8d |%*s",
912 thread__pid(thread), thread__tid(thread),
913 thread__ppid(thread), level, "");
914
915 fprintf(fp, "%s\n", thread__comm_str(thread));
916
917 maps__fprintf_task(thread__maps(thread), comm_indent, fp);
918 }
919
920 /*
921 * Sort two thread list nodes such that they form a tree. The first node is the
922 * root of the tree, its children are ordered numerically after it. If a child
923 * has children itself then they appear immediately after their parent. For
924 * example, the 4 threads in the order they'd appear in the list:
925 * - init with a TID 1 and a parent of 0
926 * - systemd with a TID 3000 and a parent of init/1
927 * - systemd child thread with TID 4000, the parent is 3000
928 * - NetworkManager is a child of init with a TID of 3500.
929 */
static int task_list_cmp(void *priv, const struct list_head *la, const struct list_head *lb)
931 {
932 struct machine *machine = priv;
933 struct thread_list *task_a = list_entry(la, struct thread_list, list);
934 struct thread_list *task_b = list_entry(lb, struct thread_list, list);
935 struct thread *a = task_a->thread;
936 struct thread *b = task_b->thread;
937 int level_a, level_b, res;
938
939 /* Same thread? */
940 if (thread__tid(a) == thread__tid(b))
941 return 0;
942
943 /* Compare a and b to root. */
944 if (thread__tid(a) == 0)
945 return -1;
946
947 if (thread__tid(b) == 0)
948 return 1;
949
950 /* If parents match sort by tid. */
951 if (thread__ppid(a) == thread__ppid(b))
952 return thread__tid(a) < thread__tid(b) ? -1 : 1;
953
954 /*
955 * Find a and b such that if they are a child of each other a and b's
956 * tid's match, otherwise a and b have a common parent and distinct
957 * tid's to sort by. First make the depths of the threads match.
958 */
959 level_a = thread_level(machine, a);
960 level_b = thread_level(machine, b);
961 a = thread__get(a);
962 b = thread__get(b);
963 for (int i = level_a; i > level_b; i--) {
964 struct thread *parent = machine__find_thread(machine, -1, thread__ppid(a));
965
966 thread__put(a);
967 if (!parent) {
968 pr_err("Missing parent thread of %d\n", thread__tid(a));
969 thread__put(b);
970 return -1;
971 }
972 a = parent;
973 }
974 for (int i = level_b; i > level_a; i--) {
975 struct thread *parent = machine__find_thread(machine, -1, thread__ppid(b));
976
977 thread__put(b);
978 if (!parent) {
979 pr_err("Missing parent thread of %d\n", thread__tid(b));
980 thread__put(a);
981 return 1;
982 }
983 b = parent;
984 }
985 /* Search up to a common parent. */
986 while (thread__ppid(a) != thread__ppid(b)) {
987 struct thread *parent;
988
989 parent = machine__find_thread(machine, -1, thread__ppid(a));
990 thread__put(a);
991 if (!parent)
992 pr_err("Missing parent thread of %d\n", thread__tid(a));
993 a = parent;
994 parent = machine__find_thread(machine, -1, thread__ppid(b));
995 thread__put(b);
996 if (!parent)
997 pr_err("Missing parent thread of %d\n", thread__tid(b));
998 b = parent;
999 if (!a || !b) {
1000 /* Handle missing parent (unexpected) with some sanity. */
1001 thread__put(a);
1002 thread__put(b);
1003 return !a && !b ? 0 : (!a ? -1 : 1);
1004 }
1005 }
1006 if (thread__tid(a) == thread__tid(b)) {
1007 /* a is a child of b or vice-versa, deeper levels appear later. */
1008 res = level_a < level_b ? -1 : (level_a > level_b ? 1 : 0);
1009 } else {
1010 /* Sort by tid now the parent is the same. */
1011 res = thread__tid(a) < thread__tid(b) ? -1 : 1;
1012 }
1013 thread__put(a);
1014 thread__put(b);
1015 return res;
1016 }
1017
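/*
 * --tasks/--mmaps output: list every thread found in the data file as a
 * process tree, one line per task, followed by its memory maps (only
 * populated when --mmaps is used).
 */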
static int tasks_print(struct report *rep, FILE *fp)
1019 {
1020 struct machine *machine = &rep->session->machines.host;
1021 LIST_HEAD(tasks);
1022 int ret;
1023
1024 ret = machine__thread_list(machine, &tasks);
1025 if (!ret) {
1026 struct thread_list *task;
1027
1028 list_sort(machine, &tasks, task_list_cmp);
1029
1030 fprintf(fp, "# %8s %8s %8s %s\n", "pid", "tid", "ppid", "comm");
1031
1032 list_for_each_entry(task, &tasks, list)
1033 task__print_level(machine, task->thread, fp);
1034 }
1035 thread_list__delete(&tasks);
1036 return ret;
1037 }
1038
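/*
 * The actual report run: process all events from the data file, collapse
 * and resort the histograms and hand them to the chosen browser (TUI, GTK
 * or stdio). --stats and --tasks short-circuit right after event
 * processing.
 */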
static int __cmd_report(struct report *rep)
1040 {
1041 int ret;
1042 struct perf_session *session = rep->session;
1043 struct evsel *pos;
1044 struct perf_data *data = session->data;
1045
1046 signal(SIGINT, sig_handler);
1047
1048 if (rep->cpu_list) {
1049 ret = perf_session__cpu_bitmap(session, rep->cpu_list,
1050 rep->cpu_bitmap);
1051 if (ret) {
1052 ui__error("failed to set cpu bitmap\n");
1053 return ret;
1054 }
1055 session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap;
1056 }
1057
1058 if (rep->show_threads) {
1059 ret = perf_read_values_init(&rep->show_threads_values);
1060 if (ret)
1061 return ret;
1062 }
1063
1064 ret = report__setup_sample_type(rep);
1065 if (ret) {
1066 /* report__setup_sample_type() already showed error message */
1067 return ret;
1068 }
1069
1070 if (rep->stats_mode)
1071 stats_setup(rep);
1072
1073 if (rep->tasks_mode)
1074 tasks_setup(rep);
1075
1076 ret = perf_session__process_events(session);
1077 if (ret) {
1078 ui__error("failed to process sample\n");
1079 return ret;
1080 }
1081
1082 evlist__check_mem_load_aux(session->evlist);
1083
1084 if (rep->stats_mode)
1085 return stats_print(rep);
1086
1087 if (rep->tasks_mode)
1088 return tasks_print(rep, stdout);
1089
1090 report__warn_kptr_restrict(rep);
1091
1092 evlist__for_each_entry(session->evlist, pos)
1093 rep->nr_entries += evsel__hists(pos)->nr_entries;
1094
1095 if (use_browser == 0) {
1096 if (verbose > 3)
1097 perf_session__fprintf(session, stdout);
1098
1099 if (verbose > 2)
1100 perf_session__fprintf_dsos(session, stdout);
1101
1102 if (dump_trace) {
1103 stats_print(rep);
1104 return 0;
1105 }
1106 }
1107
1108 ret = report__collapse_hists(rep);
1109 if (ret) {
1110 ui__error("failed to process hist entry\n");
1111 return ret;
1112 }
1113
1114 if (session_done())
1115 return 0;
1116
1117 /*
1118 * recalculate number of entries after collapsing since it
1119 * might be changed during the collapse phase.
1120 */
1121 rep->nr_entries = 0;
1122 evlist__for_each_entry(session->evlist, pos)
1123 rep->nr_entries += evsel__hists(pos)->nr_entries;
1124
1125 if (rep->nr_entries == 0) {
1126 ui__error("The %s data has no samples!\n", data->path);
1127 return 0;
1128 }
1129
1130 report__output_resort(rep);
1131
1132 if (rep->total_cycles_mode) {
1133 int nr_hpps = 4;
1134 int block_hpps[PERF_HPP_REPORT__BLOCK_MAX_INDEX] = {
1135 PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT,
1136 PERF_HPP_REPORT__BLOCK_LBR_CYCLES,
1137 PERF_HPP_REPORT__BLOCK_CYCLES_PCT,
1138 PERF_HPP_REPORT__BLOCK_AVG_CYCLES,
1139 };
1140
1141 if (session->evlist->nr_br_cntr > 0)
1142 block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_BRANCH_COUNTER;
1143
1144 block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_RANGE;
1145 block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_DSO;
1146
1147 rep->block_reports = block_info__create_report(session->evlist,
1148 rep->total_cycles,
1149 block_hpps, nr_hpps,
1150 &rep->nr_block_reports);
1151 if (!rep->block_reports)
1152 return -1;
1153 }
1154
1155 return report__browse_hists(rep);
1156 }
1157
1158 static int
report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
1160 {
1161 struct callchain_param *callchain = opt->value;
1162
1163 callchain->enabled = !unset;
1164 /*
1165 * --no-call-graph
1166 */
1167 if (unset) {
1168 symbol_conf.use_callchain = false;
1169 callchain->mode = CHAIN_NONE;
1170 return 0;
1171 }
1172
1173 return parse_callchain_report_opt(arg);
1174 }
1175
1176 static int
parse_time_quantum(const struct option *opt, const char *arg,
1178 int unset __maybe_unused)
1179 {
1180 unsigned long *time_q = opt->value;
1181 char *end;
1182
1183 *time_q = strtoul(arg, &end, 0);
1184 if (end == arg)
1185 goto parse_err;
1186 if (*time_q == 0) {
1187 pr_err("time quantum cannot be 0");
1188 return -1;
1189 }
1190 end = skip_spaces(end);
1191 if (*end == 0)
1192 return 0;
1193 if (!strcmp(end, "s")) {
1194 *time_q *= NSEC_PER_SEC;
1195 return 0;
1196 }
1197 if (!strcmp(end, "ms")) {
1198 *time_q *= NSEC_PER_MSEC;
1199 return 0;
1200 }
1201 if (!strcmp(end, "us")) {
1202 *time_q *= NSEC_PER_USEC;
1203 return 0;
1204 }
1205 if (!strcmp(end, "ns"))
1206 return 0;
1207 parse_err:
1208 pr_err("Cannot parse time quantum `%s'\n", arg);
1209 return -1;
1210 }
1211
1212 int
report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
1214 const char *arg, int unset __maybe_unused)
1215 {
1216 if (arg) {
1217 int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED);
1218 if (err) {
1219 char buf[BUFSIZ];
1220 regerror(err, &ignore_callees_regex, buf, sizeof(buf));
1221 pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf);
1222 return -1;
1223 }
1224 have_ignore_callees = 1;
1225 }
1226
1227 return 0;
1228 }
1229
1230 static int
parse_branch_mode(const struct option *opt,
1232 const char *str __maybe_unused, int unset)
1233 {
1234 int *branch_mode = opt->value;
1235
1236 *branch_mode = !unset;
1237 return 0;
1238 }
1239
1240 static int
parse_percent_limit(const struct option *opt, const char *str,
1242 int unset __maybe_unused)
1243 {
1244 struct report *rep = opt->value;
1245 double pcnt = strtof(str, NULL);
1246
1247 rep->min_percent = pcnt;
1248 callchain_param.min_percent = pcnt;
1249 return 0;
1250 }
1251
static int process_attr(const struct perf_tool *tool __maybe_unused,
1253 union perf_event *event,
1254 struct evlist **pevlist)
1255 {
1256 u64 sample_type;
1257 int err;
1258
1259 err = perf_event__process_attr(tool, event, pevlist);
1260 if (err)
1261 return err;
1262
1263 /*
1264 * Check if we need to enable callchains based
1265 * on events sample_type.
1266 */
1267 sample_type = evlist__combined_sample_type(*pevlist);
1268 callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env));
1269 return 0;
1270 }
1271
1272 #define CALLCHAIN_BRANCH_SORT_ORDER \
1273 "srcline,symbol,dso,callchain_branch_predicted," \
1274 "callchain_branch_abort,callchain_branch_cycles"
1275
int cmd_report(int argc, const char **argv)
1277 {
1278 struct perf_session *session;
1279 struct itrace_synth_opts itrace_synth_opts = { .set = 0, };
1280 struct stat st;
1281 bool has_br_stack = false;
1282 int branch_mode = -1;
1283 int last_key = 0;
1284 bool branch_call_mode = false;
1285 #define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
1286 static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
1287 CALLCHAIN_REPORT_HELP
1288 "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
1289 char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
1290 const char * const report_usage[] = {
1291 "perf report [<options>]",
1292 NULL
1293 };
1294 struct report report = {
1295 .max_stack = PERF_MAX_STACK_DEPTH,
1296 .pretty_printing_style = "normal",
1297 .socket_filter = -1,
1298 .skip_empty = true,
1299 };
1300 char *sort_order_help = sort_help("sort by key(s):", SORT_MODE__NORMAL);
1301 char *field_order_help = sort_help("output field(s):", SORT_MODE__NORMAL);
1302 const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
1303 const struct option options[] = {
1304 OPT_STRING('i', "input", &input_name, "file",
1305 "input file name"),
1306 OPT_INCR('v', "verbose", &verbose,
1307 "be more verbose (show symbol address, etc)"),
1308 OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
1309 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1310 "dump raw trace in ASCII"),
1311 OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
1312 OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display recorded tasks"),
1313 OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"),
1314 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1315 "file", "vmlinux pathname"),
1316 OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
1317 "don't load vmlinux even if found"),
1318 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1319 "file", "kallsyms pathname"),
1320 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
1321 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
1322 "load module symbols - WARNING: use only with -k and LIVE kernel"),
1323 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
1324 "Show a column with the number of samples"),
1325 OPT_BOOLEAN('T', "threads", &report.show_threads,
1326 "Show per-thread event counters"),
1327 OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
1328 "pretty printing style key: normal raw"),
1329 #ifdef HAVE_SLANG_SUPPORT
1330 OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
1331 #endif
1332 #ifdef HAVE_GTK2_SUPPORT
1333 OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
1334 #endif
1335 OPT_BOOLEAN(0, "stdio", &report.use_stdio,
1336 "Use the stdio interface"),
1337 OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
1338 OPT_BOOLEAN(0, "header-only", &report.header_only,
1339 "Show only data header."),
1340 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1341 sort_order_help),
1342 OPT_STRING('F', "fields", &field_order, "key[,keys...]",
1343 field_order_help),
1344 OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
1345 "Show sample percentage for different cpu modes"),
1346 OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
1347 "Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN),
1348 OPT_STRING('p', "parent", &parent_pattern, "regex",
1349 "regex filter to identify parent, see: '--sort parent'"),
1350 OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
1351 "Only display entries with parent-match"),
1352 OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
1353 "print_type,threshold[,print_limit],order,sort_key[,branch],value",
1354 report_callchain_help, &report_parse_callchain_opt,
1355 callchain_default_opt),
1356 OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
1357 "Accumulate callchains of children and show total overhead as well. "
1358 "Enabled by default, use --no-children to disable."),
1359 OPT_INTEGER(0, "max-stack", &report.max_stack,
1360 "Set the maximum stack depth when parsing the callchain, "
1361 "anything beyond the specified depth will be ignored. "
1362 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
1363 OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
1364 "alias for inverted call graph"),
1365 OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
1366 "ignore callees of these functions in call graphs",
1367 report_parse_ignore_callees_opt),
1368 OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
1369 "only consider symbols in these dsos"),
1370 OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
1371 "only consider symbols in these comms"),
1372 OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
1373 "only consider symbols in these pids"),
1374 OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
1375 "only consider symbols in these tids"),
1376 OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
1377 "only consider these symbols"),
1378 OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
1379 "only show symbols that (partially) match with this filter"),
1380 OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
1381 "width[,width...]",
1382 "don't try to adjust column width, use these fixed values"),
1383 OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
1384 "separator for columns, no spaces will be added between "
1385 "columns '.' is reserved."),
1386 OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
1387 "Only display entries resolved to a symbol"),
1388 OPT_CALLBACK(0, "symfs", NULL, "directory",
1389 "Look for files with symbols relative to this directory",
1390 symbol__config_symfs),
1391 OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
1392 "list of cpus to profile"),
1393 OPT_BOOLEAN('I', "show-info", &report.show_full_info,
1394 "Display extended information about perf.data file"),
1395 OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
1396 "Interleave source code with assembly code (default)"),
1397 OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
1398 "Display raw encoding of assembly instructions (default)"),
1399 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
1400 "Specify disassembler style (e.g. -M intel for intel syntax)"),
1401 OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
1402 "Add prefix to source file path names in programs (with --prefix-strip)"),
1403 OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
1404 "Strip first N entries of source file path name in programs (with --prefix)"),
1405 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
1406 "Show a column with the sum of periods"),
1407 OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set,
1408 "Show event group information together"),
1409 OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
1410 "Sort the output by the event at the index n in group. "
1411 "If n is invalid, sort by the first event. "
1412 "WARNING: should be used on grouped events."),
1413 OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "",
1414 "use branch records for per branch histogram filling",
1415 parse_branch_mode),
1416 OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
1417 "add last branch records to call history"),
1418 OPT_STRING(0, "objdump", &objdump_path, "path",
1419 "objdump binary to use for disassembly and annotations"),
1420 OPT_STRING(0, "addr2line", &addr2line_path, "path",
1421 "addr2line binary to use for line numbers"),
1422 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
1423 "Symbol demangling. Enabled by default, use --no-demangle to disable."),
1424 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
1425 "Enable kernel symbol demangling"),
1426 OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
1427 OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
1428 "Number of samples to save per histogram entry for individual browsing"),
1429 OPT_CALLBACK(0, "percent-limit", &report, "percent",
1430 "Don't show entries under that percent", parse_percent_limit),
1431 OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
1432 "how to display percentage of filtered entries", parse_filter_percentage),
1433 OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
1434 "Instruction Tracing options\n" ITRACE_HELP,
1435 itrace_parse_synth_opts),
1436 OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
1437 "Show full source file name path for source lines"),
1438 OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
1439 "Show callgraph from reference event"),
1440 OPT_BOOLEAN(0, "stitch-lbr", &report.stitch_lbr,
1441 "Enable LBR callgraph stitching approach"),
1442 OPT_INTEGER(0, "socket-filter", &report.socket_filter,
1443 "only show processor socket that match with this filter"),
1444 OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
1445 "Show raw trace event output (do not use print fmt or plugins)"),
1446 OPT_BOOLEAN('H', "hierarchy", &symbol_conf.report_hierarchy,
1447 "Show entries in a hierarchy"),
1448 OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
1449 "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
1450 stdio__config_color, "always"),
1451 OPT_STRING(0, "time", &report.time_str, "str",
1452 "Time span of interest (start,stop)"),
1453 OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
1454 "Show inline function"),
1455 OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period",
1456 "Set percent type local/global-period/hits",
1457 annotate_parse_percent_type),
1458 OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
1459 OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
1460 "Set time quantum for time sort key (default 100ms)",
1461 parse_time_quantum),
1462 OPTS_EVSWITCH(&report.evswitch),
1463 OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode,
1464 "Sort all blocks by 'Sampled Cycles%'"),
1465 OPT_BOOLEAN(0, "disable-order", &report.disable_order,
1466 "Disable raw trace ordering"),
1467 OPT_BOOLEAN(0, "skip-empty", &report.skip_empty,
1468 "Do not display empty (or dummy) events in the output"),
1469 OPT_END()
1470 };
1471 struct perf_data data = {
1472 .mode = PERF_DATA_MODE_READ,
1473 };
1474 int ret = hists__init();
1475 char sort_tmp[128];
1476 bool ordered_events = true;
1477
1478 if (ret < 0)
1479 goto exit;
1480
1481 /*
* tasks_mode requires access to exited threads to list those that are in
1483 * the data file. Off-cpu events are synthesized after other events and
1484 * reference exited threads.
1485 */
1486 symbol_conf.keep_exited_threads = true;
1487
1488 annotation_options__init();
1489
1490 ret = perf_config(report__config, &report);
1491 if (ret)
1492 goto exit;
1493
1494 argc = parse_options(argc, argv, options, report_usage, 0);
1495 if (argc) {
1496 /*
1497 * Special case: if there's an argument left then assume that
1498 * it's a symbol filter:
1499 */
1500 if (argc > 1)
1501 usage_with_options(report_usage, options);
1502
1503 report.symbol_filter_str = argv[0];
1504 }
1505
1506 if (disassembler_style) {
1507 annotate_opts.disassembler_style = strdup(disassembler_style);
1508 if (!annotate_opts.disassembler_style)
1509 return -ENOMEM;
1510 }
1511 if (objdump_path) {
1512 annotate_opts.objdump_path = strdup(objdump_path);
1513 if (!annotate_opts.objdump_path)
1514 return -ENOMEM;
1515 }
1516 if (addr2line_path) {
1517 symbol_conf.addr2line_path = strdup(addr2line_path);
1518 if (!symbol_conf.addr2line_path)
1519 return -ENOMEM;
1520 }
1521
1522 if (annotate_check_args() < 0) {
1523 ret = -EINVAL;
1524 goto exit;
1525 }
1526
1527 if (report.mmaps_mode)
1528 report.tasks_mode = true;
1529
1530 if (dump_trace && report.disable_order)
1531 ordered_events = false;
1532
1533 if (quiet)
1534 perf_quiet_option();
1535
1536 ret = symbol__validate_sym_arguments();
1537 if (ret)
1538 goto exit;
1539
1540 if (report.inverted_callchain)
1541 callchain_param.order = ORDER_CALLER;
1542 if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
1543 callchain_param.order = ORDER_CALLER;
1544
1545 if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
1546 (int)itrace_synth_opts.callchain_sz > report.max_stack)
1547 report.max_stack = itrace_synth_opts.callchain_sz;
1548
1549 if (!input_name || !strlen(input_name)) {
1550 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
1551 input_name = "-";
1552 else
1553 input_name = "perf.data";
1554 }
1555
1556 repeat:
1557 data.path = input_name;
1558 data.force = symbol_conf.force;
1559
1560 symbol_conf.skip_empty = report.skip_empty;
1561
1562 perf_tool__init(&report.tool, ordered_events);
1563 report.tool.sample = process_sample_event;
1564 report.tool.mmap = perf_event__process_mmap;
1565 report.tool.mmap2 = perf_event__process_mmap2;
1566 report.tool.comm = perf_event__process_comm;
1567 report.tool.namespaces = perf_event__process_namespaces;
1568 report.tool.cgroup = perf_event__process_cgroup;
1569 report.tool.exit = perf_event__process_exit;
1570 report.tool.fork = perf_event__process_fork;
1571 report.tool.lost = perf_event__process_lost;
1572 report.tool.read = process_read_event;
1573 report.tool.attr = process_attr;
1574 #ifdef HAVE_LIBTRACEEVENT
1575 report.tool.tracing_data = perf_event__process_tracing_data;
1576 #endif
1577 report.tool.build_id = perf_event__process_build_id;
1578 report.tool.id_index = perf_event__process_id_index;
1579 report.tool.auxtrace_info = perf_event__process_auxtrace_info;
1580 report.tool.auxtrace = perf_event__process_auxtrace;
1581 report.tool.event_update = perf_event__process_event_update;
1582 report.tool.feature = process_feature_event;
1583 report.tool.ordering_requires_timestamps = true;
1584
1585 session = perf_session__new(&data, &report.tool);
1586 if (IS_ERR(session)) {
1587 ret = PTR_ERR(session);
1588 goto exit;
1589 }
1590
1591 ret = evswitch__init(&report.evswitch, session->evlist, stderr);
1592 if (ret)
1593 goto exit;
1594
1595 if (zstd_init(&(session->zstd_data), 0) < 0)
1596 pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
1597
1598 if (report.queue_size) {
1599 ordered_events__set_alloc_size(&session->ordered_events,
1600 report.queue_size);
1601 }
1602
1603 session->itrace_synth_opts = &itrace_synth_opts;
1604
1605 report.session = session;
1606
1607 has_br_stack = perf_header__has_feat(&session->header,
1608 HEADER_BRANCH_STACK);
1609 if (evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER)
1610 has_br_stack = false;
1611
1612 setup_forced_leader(&report, session->evlist);
1613
1614 if (symbol_conf.group_sort_idx && evlist__nr_groups(session->evlist) == 0) {
1615 parse_options_usage(NULL, options, "group-sort-idx", 0);
1616 ret = -EINVAL;
1617 goto error;
1618 }
1619
1620 if (itrace_synth_opts.last_branch || itrace_synth_opts.add_last_branch)
1621 has_br_stack = true;
1622
1623 if (has_br_stack && branch_call_mode)
1624 symbol_conf.show_branchflag_count = true;
1625
1626 memset(&report.brtype_stat, 0, sizeof(struct branch_type_stat));
1627
1628 /*
1629 * Branch mode is a tristate:
1630 * -1 means default, so decide based on the file having branch data.
1631 * 0/1 means the user chose a mode.
1632 */
1633 if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) &&
1634 !branch_call_mode) {
1635 sort__mode = SORT_MODE__BRANCH;
1636 symbol_conf.cumulate_callchain = false;
1637 }
1638 if (branch_call_mode) {
1639 callchain_param.key = CCKEY_ADDRESS;
1640 callchain_param.branch_callstack = true;
1641 symbol_conf.use_callchain = true;
1642 callchain_register_param(&callchain_param);
1643 if (sort_order == NULL)
1644 sort_order = CALLCHAIN_BRANCH_SORT_ORDER;
1645 }
1646
1647 if (report.mem_mode) {
1648 if (sort__mode == SORT_MODE__BRANCH) {
1649 pr_err("branch and mem mode incompatible\n");
1650 goto error;
1651 }
1652 sort__mode = SORT_MODE__MEMORY;
1653 symbol_conf.cumulate_callchain = false;
1654 }
1655
1656 if (symbol_conf.report_hierarchy) {
1657 /* disable incompatible options */
1658 symbol_conf.cumulate_callchain = false;
1659
1660 if (field_order) {
1661 pr_err("Error: --hierarchy and --fields options cannot be used together\n");
1662 parse_options_usage(report_usage, options, "F", 1);
1663 parse_options_usage(NULL, options, "hierarchy", 0);
1664 goto error;
1665 }
1666
1667 perf_hpp_list.need_collapse = true;
1668 }
1669
1670 if (report.use_stdio)
1671 use_browser = 0;
1672 #ifdef HAVE_SLANG_SUPPORT
1673 else if (report.use_tui)
1674 use_browser = 1;
1675 #endif
1676 #ifdef HAVE_GTK2_SUPPORT
1677 else if (report.use_gtk)
1678 use_browser = 2;
1679 #endif
1680
1681 /* Force tty output for header output and per-thread stat. */
1682 if (report.header || report.header_only || report.show_threads)
1683 use_browser = 0;
1684 if (report.header || report.header_only)
1685 report.tool.show_feat_hdr = SHOW_FEAT_HEADER;
1686 if (report.show_full_info)
1687 report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
1688 if (report.stats_mode || report.tasks_mode)
1689 use_browser = 0;
1690 if (report.stats_mode && report.tasks_mode) {
1691 pr_err("Error: --tasks and --mmaps can't be used together with --stats\n");
1692 goto error;
1693 }
1694
1695 if (report.total_cycles_mode) {
1696 if (sort__mode != SORT_MODE__BRANCH)
1697 report.total_cycles_mode = false;
1698 else
1699 sort_order = NULL;
1700 }
1701
1702 if (sort_order && strstr(sort_order, "type")) {
1703 report.data_type = true;
1704 annotate_opts.annotate_src = false;
1705
1706 #ifndef HAVE_LIBDW_SUPPORT
1707 pr_err("Error: Data type profiling is disabled due to missing DWARF support\n");
1708 goto error;
1709 #endif
1710 }
1711
1712 if (strcmp(input_name, "-") != 0)
1713 setup_browser(true);
1714 else
1715 use_browser = 0;
1716
1717 if (report.data_type && use_browser == 1) {
1718 symbol_conf.annotate_data_member = true;
1719 symbol_conf.annotate_data_sample = true;
1720 }
1721
1722 if (last_key != K_SWITCH_INPUT_DATA) {
1723 if (sort_order && strstr(sort_order, "ipc")) {
1724 parse_options_usage(report_usage, options, "s", 1);
1725 goto error;
1726 }
1727
1728 if (sort_order && strstr(sort_order, "symbol")) {
1729 if (sort__mode == SORT_MODE__BRANCH) {
1730 snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
1731 sort_order, "ipc_lbr");
1732 report.symbol_ipc = true;
1733 } else {
1734 snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
1735 sort_order, "ipc_null");
1736 }
1737
1738 sort_order = sort_tmp;
1739 }
1740 }
1741
1742 if ((last_key != K_SWITCH_INPUT_DATA && last_key != K_RELOAD) &&
1743 (setup_sorting(session->evlist) < 0)) {
1744 if (sort_order)
1745 parse_options_usage(report_usage, options, "s", 1);
1746 if (field_order)
1747 parse_options_usage(sort_order ? NULL : report_usage,
1748 options, "F", 1);
1749 goto error;
1750 }
1751
1752 if ((report.header || report.header_only) && !quiet) {
1753 perf_session__fprintf_info(session, stdout,
1754 report.show_full_info);
1755 if (report.header_only) {
1756 if (data.is_pipe) {
1757 /*
1758 * we need to process first few records
1759 * which contains PERF_RECORD_HEADER_FEATURE.
1760 */
1761 perf_session__process_events(session);
1762 }
1763 ret = 0;
1764 goto error;
1765 }
1766 } else if (use_browser == 0 && !quiet &&
1767 !report.stats_mode && !report.tasks_mode) {
1768 fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
1769 stdout);
1770 }
1771
1772 /*
1773 * Only in the TUI browser we are doing integrated annotation,
1774 * so don't allocate extra space that won't be used in the stdio
1775 * implementation.
1776 */
1777 if (ui__has_annotation() || report.symbol_ipc || report.data_type ||
1778 report.total_cycles_mode) {
1779 ret = symbol__annotation_init();
1780 if (ret < 0)
1781 goto error;
1782 /*
* For searching by name on the "Browse map details" screen,
* provide it only in verbose mode so as not to bloat
* struct symbol too much.
1786 */
1787 if (verbose > 0) {
1788 /*
1789 * XXX: Need to provide a less kludgy way to ask for
1790 * more space per symbol, the u32 is for the index on
1791 * the ui browser.
1792 * See symbol__browser_index.
1793 */
1794 symbol_conf.priv_size += sizeof(u32);
1795 }
1796 annotation_config__init();
1797 }
1798
1799 if (symbol__init(&session->header.env) < 0)
1800 goto error;
1801
1802 if (report.time_str) {
1803 ret = perf_time__parse_for_ranges(report.time_str, session,
1804 &report.ptime_range,
1805 &report.range_size,
1806 &report.range_num);
1807 if (ret < 0)
1808 goto error;
1809
1810 itrace_synth_opts__set_time_range(&itrace_synth_opts,
1811 report.ptime_range,
1812 report.range_num);
1813 }
1814
1815 #ifdef HAVE_LIBTRACEEVENT
1816 if (session->tevent.pevent &&
1817 tep_set_function_resolver(session->tevent.pevent,
1818 machine__resolve_kernel_addr,
1819 &session->machines.host) < 0) {
1820 pr_err("%s: failed to set libtraceevent function resolver\n",
1821 __func__);
1822 return -1;
1823 }
1824 #endif
1825 sort__setup_elide(stdout);
1826
1827 ret = __cmd_report(&report);
1828 if (ret == K_SWITCH_INPUT_DATA || ret == K_RELOAD) {
1829 perf_session__delete(session);
1830 last_key = K_SWITCH_INPUT_DATA;
1831 goto repeat;
1832 } else
1833 ret = 0;
1834
1835 if (!use_browser && (verbose > 2 || debug_kmaps))
1836 perf_session__dump_kmaps(session);
1837 error:
1838 if (report.ptime_range) {
1839 itrace_synth_opts__clear_time_range(&itrace_synth_opts);
1840 zfree(&report.ptime_range);
1841 }
1842
1843 if (report.block_reports) {
1844 block_info__free_report(report.block_reports,
1845 report.nr_block_reports);
1846 report.block_reports = NULL;
1847 }
1848
1849 zstd_fini(&(session->zstd_data));
1850 perf_session__delete(session);
1851 exit:
1852 annotation_options__exit();
1853 free(sort_order_help);
1854 free(field_order_help);
1855 return ret;
1856 }
1857