Lines Matching +full:static +full:- +full:trace +full:- +full:id
2 * builtin-trace.c
4 * Builtin 'trace' command:
6 * Display a continuously updated trace of any workload, CPU, specific PID,
8 * event may be specified using --event.
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
39 #include "util/synthetic-events.h"
44 #include <subcmd/exec-cmd.h>
51 #include <subcmd/parse-options.h>
58 #include "trace/beauty/beauty.h"
59 #include "trace-event.h"
60 #include "util/parse-events.h"
91 #include <event-parse.h>
142 struct trace { struct
224 static void trace__load_vmlinux_btf(struct trace *trace __maybe_unused) in trace__load_vmlinux_btf() argument
227 if (trace->btf != NULL) in trace__load_vmlinux_btf()
230 trace->btf = btf__load_vmlinux_btf(); in trace__load_vmlinux_btf()
232 fprintf(trace->output, trace->btf ? "vmlinux BTF loaded\n" : in trace__load_vmlinux_btf()
247 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
250 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
260 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
263 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
271 static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap) in __tp_field__init_uint()
273 field->offset = offset; in __tp_field__init_uint()
277 field->integer = tp_field__u8; in __tp_field__init_uint()
280 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16; in __tp_field__init_uint()
283 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32; in __tp_field__init_uint()
286 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64; in __tp_field__init_uint()
289 return -1; in __tp_field__init_uint()
295 static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap) in tp_field__init_uint()
297 return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap); in tp_field__init_uint()
300 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample) in tp_field__ptr()
302 return sample->raw_data + field->offset; in tp_field__ptr()
305 static int __tp_field__init_ptr(struct tp_field *field, int offset) in __tp_field__init_ptr()
307 field->offset = offset; in __tp_field__init_ptr()
308 field->pointer = tp_field__ptr; in __tp_field__init_ptr()
312 static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field) in tp_field__init_ptr()
314 return __tp_field__init_ptr(field, format_field->offset); in tp_field__init_ptr()
318 struct tp_field id; member
325 * The evsel->priv as used by 'perf trace'
334 static struct evsel_trace *evsel_trace__new(void) in evsel_trace__new()
339 static void evsel_trace__delete(struct evsel_trace *et) in evsel_trace__delete()
344 zfree(&et->fmt); in evsel_trace__delete()
352 static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel) in __evsel__syscall_tp()
354 struct evsel_trace *et = evsel->priv; in __evsel__syscall_tp()
356 return &et->sc; in __evsel__syscall_tp()
359 static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel) in evsel__syscall_tp()
361 if (evsel->priv == NULL) { in evsel__syscall_tp()
362 evsel->priv = evsel_trace__new(); in evsel__syscall_tp()
363 if (evsel->priv == NULL) in evsel__syscall_tp()
373 static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel) in __evsel__syscall_arg_fmt()
375 struct evsel_trace *et = evsel->priv; in __evsel__syscall_arg_fmt()
377 return et->fmt; in __evsel__syscall_arg_fmt()
380 static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel) in evsel__syscall_arg_fmt()
382 struct evsel_trace *et = evsel->priv; in evsel__syscall_arg_fmt()
384 if (evsel->priv == NULL) { in evsel__syscall_arg_fmt()
385 et = evsel->priv = evsel_trace__new(); in evsel__syscall_arg_fmt()
391 if (et->fmt == NULL) { in evsel__syscall_arg_fmt()
397 et->fmt = calloc(tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt)); in evsel__syscall_arg_fmt()
398 if (et->fmt == NULL) in evsel__syscall_arg_fmt()
405 evsel_trace__delete(evsel->priv); in evsel__syscall_arg_fmt()
406 evsel->priv = NULL; in evsel__syscall_arg_fmt()
410 static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name) in evsel__init_tp_uint_field()
415 return -1; in evsel__init_tp_uint_field()
417 return tp_field__init_uint(field, format_field, evsel->needs_swap); in evsel__init_tp_uint_field()
422 evsel__init_tp_uint_field(evsel, &sc->name, #name); })
424 static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name) in evsel__init_tp_ptr_field()
429 return -1; in evsel__init_tp_ptr_field()
436 evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
438 static void evsel__delete_priv(struct evsel *evsel) in evsel__delete_priv()
440 zfree(&evsel->priv); in evsel__delete_priv()
444 static int evsel__init_syscall_tp(struct evsel *evsel) in evsel__init_syscall_tp()
449 if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") && in evsel__init_syscall_tp()
450 evsel__init_tp_uint_field(evsel, &sc->id, "nr")) in evsel__init_syscall_tp()
451 return -ENOENT; in evsel__init_syscall_tp()
456 return -ENOMEM; in evsel__init_syscall_tp()
459 static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp) in evsel__init_augmented_syscall_tp()
464 struct tep_format_field *syscall_id = evsel__field(tp, "id"); in evsel__init_augmented_syscall_tp()
468 __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap)) in evsel__init_augmented_syscall_tp()
469 return -EINVAL; in evsel__init_augmented_syscall_tp()
474 return -ENOMEM; in evsel__init_augmented_syscall_tp()
477 static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel) in evsel__init_augmented_syscall_tp_args()
481 return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)); in evsel__init_augmented_syscall_tp_args()
484 static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel) in evsel__init_augmented_syscall_tp_ret()
488 return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap); in evsel__init_augmented_syscall_tp_ret()
491 static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler) in evsel__init_raw_syscall_tp()
494 if (perf_evsel__init_sc_tp_uint_field(evsel, id)) in evsel__init_raw_syscall_tp()
495 return -ENOENT; in evsel__init_raw_syscall_tp()
497 evsel->handler = handler; in evsel__init_raw_syscall_tp()
501 return -ENOMEM; in evsel__init_raw_syscall_tp()
504 static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler) in perf_evsel__raw_syscall_newtp()
527 fields->name.integer(&fields->name, sample); })
531 fields->name.pointer(&fields->name, sample); })
535 int idx = val - sa->offset; in strarray__scnprintf_suffix()
537 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) { in strarray__scnprintf_suffix()
540 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix); in strarray__scnprintf_suffix()
544 return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : ""); in strarray__scnprintf_suffix()
549 int idx = val - sa->offset; in strarray__scnprintf()
551 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) { in strarray__scnprintf()
554 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix); in strarray__scnprintf()
558 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]); in strarray__scnprintf()
561 static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size, in __syscall_arg__scnprintf_strarray()
565 return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val); in __syscall_arg__scnprintf_strarray()
568 static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size, in syscall_arg__scnprintf_strarray()
578 return strarray__strtoul(arg->parm, bf, size, ret); in syscall_arg__strtoul_strarray()
583 return strarray__strtoul_flags(arg->parm, bf, size, ret); in syscall_arg__strtoul_strarray_flags()
588 return strarrays__strtoul(arg->parm, bf, size, ret); in syscall_arg__strtoul_strarrays()
593 return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val); in syscall_arg__scnprintf_strarray_flags()
601 for (i = 0; i < sas->nr_entries; ++i) { in strarrays__scnprintf()
602 struct strarray *sa = sas->entries[i]; in strarrays__scnprintf()
603 int idx = val - sa->offset; in strarrays__scnprintf()
605 if (idx >= 0 && idx < sa->nr_entries) { in strarrays__scnprintf()
606 if (sa->entries[idx] == NULL) in strarrays__scnprintf()
608 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]); in strarrays__scnprintf()
614 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix); in strarrays__scnprintf()
622 for (i = 0; i < sa->nr_entries; ++i) { in strarray__strtoul()
623 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') { in strarray__strtoul()
624 *ret = sa->offset + i; in strarray__strtoul()
644 size -= sep - tok + 1; in strarray__strtoul_flags()
646 end = sep - 1; in strarray__strtoul_flags()
648 --end; in strarray__strtoul_flags()
650 toklen = end - tok + 1; in strarray__strtoul_flags()
662 *ret |= (1 << (val - 1)); in strarray__strtoul_flags()
676 for (i = 0; i < sas->nr_entries; ++i) { in strarrays__strtoul()
677 struct strarray *sa = sas->entries[i]; in strarrays__strtoul()
689 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val); in syscall_arg__scnprintf_strarrays()
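A minimal, self-contained sketch of the strarray idea used by the helpers above: an array of names plus a common prefix and an offset maps a syscall argument value to a readable constant, and unknown values fall back to hex. The struct and the example table here are simplified stand-ins, not the perf definitions.

#include <stdbool.h>
#include <stdio.h>

struct str_array {                      /* stand-in for struct strarray */
	int offset;                     /* value that entries[0] corresponds to */
	int nr_entries;
	const char *prefix;             /* e.g. "ITIMER_" */
	const char **entries;
};

static int strarray_snprintf(const struct str_array *sa, char *bf, size_t size,
			     bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL)
		return snprintf(bf, size, "%#x /* unknown %s value */", val, sa->prefix);

	return snprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}

int main(void)
{
	const char *itimers[] = { "REAL", "VIRTUAL", "PROF" };
	struct str_array sa = { .offset = 0, .nr_entries = 3, .prefix = "ITIMER_", .entries = itimers };
	char bf[64];

	strarray_snprintf(&sa, bf, sizeof(bf), true, 1);    /* -> "ITIMER_VIRTUAL" */
	puts(bf);
	strarray_snprintf(&sa, bf, sizeof(bf), true, 7);    /* out of range, printed as hex */
	puts(bf);
	return 0;
}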
693 #define AT_FDCWD -100
696 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size, in syscall_arg__scnprintf_fd_at()
699 int fd = arg->val; in syscall_arg__scnprintf_fd_at()
703 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD"); in syscall_arg__scnprintf_fd_at()
710 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
717 return scnprintf(bf, size, "%#lx", arg->val); in syscall_arg__scnprintf_hex()
722 if (arg->val == 0) in syscall_arg__scnprintf_ptr()
729 return scnprintf(bf, size, "%d", arg->val); in syscall_arg__scnprintf_int()
734 return scnprintf(bf, size, "%ld", arg->val); in syscall_arg__scnprintf_long()
737 static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg) in syscall_arg__scnprintf_char_array()
742 return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val); in syscall_arg__scnprintf_char_array()
747 static const char *bpf_cmd[] = {
759 static DEFINE_STRARRAY(bpf_cmd, "BPF_");
761 static const char *fsmount_flags[] = {
764 static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");
766 #include "trace/beauty/generated/fsconfig_arrays.c"
768 static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");
770 static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
771 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
773 static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
774 static DEFINE_STRARRAY(itimers, "ITIMER_");
776 static const char *keyctl_options[] = {
783 static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");
785 static const char *whences[] = { "SET", "CUR", "END",
793 static DEFINE_STRARRAY(whences, "SEEK_");
795 static const char *fcntl_cmds[] = {
801 static DEFINE_STRARRAY(fcntl_cmds, "F_");
803 static const char *fcntl_linux_specific_cmds[] = {
809 static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);
811 static struct strarray *fcntl_cmds_arrays[] = {
816 static DEFINE_STRARRAYS(fcntl_cmds_arrays);
818 static const char *rlimit_resources[] = {
823 static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
825 static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
826 static DEFINE_STRARRAY(sighow, "SIG_");
828 static const char *clockid[] = {
833 static DEFINE_STRARRAY(clockid, "CLOCK_");
835 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size, in syscall_arg__scnprintf_access_mode()
838 bool show_prefix = arg->show_string_prefix; in syscall_arg__scnprintf_access_mode()
841 int mode = arg->val; in syscall_arg__scnprintf_access_mode()
847 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \ in syscall_arg__scnprintf_access_mode()
857 printed += scnprintf(bf + printed, size - printed, "|%#x", mode); in syscall_arg__scnprintf_access_mode()
864 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
874 static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg);
878 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size, in syscall_arg__scnprintf_pipe_flags()
881 bool show_prefix = arg->show_string_prefix; in syscall_arg__scnprintf_pipe_flags()
883 int printed = 0, flags = arg->val; in syscall_arg__scnprintf_pipe_flags()
887 …printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? pre… in syscall_arg__scnprintf_pipe_flags()
896 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); in syscall_arg__scnprintf_pipe_flags()
910 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size, in syscall_arg__scnprintf_getrandom_flags()
913 bool show_prefix = arg->show_string_prefix; in syscall_arg__scnprintf_getrandom_flags()
915 int printed = 0, flags = arg->val; in syscall_arg__scnprintf_getrandom_flags()
919 …printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? pre… in syscall_arg__scnprintf_getrandom_flags()
928 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); in syscall_arg__scnprintf_getrandom_flags()
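The pipe2/getrandom printers above follow one pattern: test each known bit, append its name with a "|" separator, clear it, and dump whatever is left as hex. A hedged stand-alone version of that pattern; the flag names and values here are illustrative, not the real GRND_*/O_* definitions.

#include <stdio.h>

/* illustrative flag bits, not the kernel's definitions */
#define XFLAG_NONBLOCK  (1 << 0)
#define XFLAG_RANDOM    (1 << 1)

static int flags_snprintf(char *bf, size_t size, int flags)
{
	int printed = 0;

	if (flags == 0)
		return snprintf(bf, size, "0");
#define P_FLAG(n) \
	if (flags & XFLAG_##n) { \
		printed += snprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~XFLAG_##n; \
	}
	P_FLAG(NONBLOCK);
	P_FLAG(RANDOM);
#undef P_FLAG
	if (flags)      /* keep unknown bits visible as hex */
		printed += snprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
	return printed;
}

int main(void)
{
	char bf[64];

	flags_snprintf(bf, sizeof(bf), XFLAG_NONBLOCK | XFLAG_RANDOM | (1 << 5));
	puts(bf);               /* NONBLOCK|RANDOM|0x20 */
	return 0;
}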
936 static void syscall_arg_fmt__cache_btf_enum(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type) in syscall_arg_fmt__cache_btf_enum()
938 int id; in syscall_arg_fmt__cache_btf_enum() local
946 id = btf__find_by_name(btf, type); in syscall_arg_fmt__cache_btf_enum()
947 if (id < 0) in syscall_arg_fmt__cache_btf_enum()
950 arg_fmt->type = btf__type_by_id(btf, id); in syscall_arg_fmt__cache_btf_enum()
953 static bool syscall_arg__strtoul_btf_enum(char *bf, size_t size, struct syscall_arg *arg, u64 *val) in syscall_arg__strtoul_btf_enum()
955 const struct btf_type *bt = arg->fmt->type; in syscall_arg__strtoul_btf_enum()
956 struct btf *btf = arg->trace->btf; in syscall_arg__strtoul_btf_enum()
960 const char *name = btf__name_by_offset(btf, be->name_off); in syscall_arg__strtoul_btf_enum()
964 *val = be->val; in syscall_arg__strtoul_btf_enum()
972 static bool syscall_arg__strtoul_btf_type(char *bf, size_t size, struct syscall_arg *arg, u64 *val) in syscall_arg__strtoul_btf_type()
975 char *type = arg->type_name; in syscall_arg__strtoul_btf_type()
978 trace__load_vmlinux_btf(arg->trace); in syscall_arg__strtoul_btf_type()
980 btf = arg->trace->btf; in syscall_arg__strtoul_btf_type()
984 if (arg->fmt->type == NULL) { in syscall_arg__strtoul_btf_type()
986 syscall_arg_fmt__cache_btf_enum(arg->fmt, btf, type); in syscall_arg__strtoul_btf_type()
990 bt = arg->fmt->type; in syscall_arg__strtoul_btf_type()
995 if (btf_is_enum(arg->fmt->type)) in syscall_arg__strtoul_btf_type()
1001 static size_t btf_enum_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, int val) in btf_enum_scnprintf()
1007 if (be->val == val) { in btf_enum_scnprintf()
1009 btf__name_by_offset(btf, be->name_off)); in btf_enum_scnprintf()
1021 static void trace__btf_dump_snprintf(void *vctx, const char *fmt, va_list args) in trace__btf_dump_snprintf()
1025 ctx->printed += vscnprintf(ctx->bf + ctx->printed, ctx->size - ctx->printed, fmt, args); in trace__btf_dump_snprintf()
1028 static size_t btf_struct_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, struct syscall_arg *arg) in btf_struct_scnprintf()
1034 struct augmented_arg *augmented_arg = arg->augmented.args; in btf_struct_scnprintf()
1035 int type_id = arg->fmt->type_id, consumed; in btf_struct_scnprintf()
1041 if (arg == NULL || arg->augmented.args == NULL) in btf_struct_scnprintf()
1045 dump_data_opts.skip_names = !arg->trace->show_arg_names; in btf_struct_scnprintf()
1052 …if (btf_dump__dump_type_data(btf_dump, type_id, arg->augmented.args->value, type->size, &dump_data… in btf_struct_scnprintf()
1055 consumed = sizeof(*augmented_arg) + augmented_arg->size; in btf_struct_scnprintf()
1056 arg->augmented.args = ((void *)arg->augmented.args) + consumed; in btf_struct_scnprintf()
1057 arg->augmented.size -= consumed; in btf_struct_scnprintf()
1064 static size_t trace__btf_scnprintf(struct trace *trace, struct syscall_arg *arg, char *bf, in trace__btf_scnprintf() argument
1067 struct syscall_arg_fmt *arg_fmt = arg->fmt; in trace__btf_scnprintf()
1069 if (trace->btf == NULL) in trace__btf_scnprintf()
1072 if (arg_fmt->type == NULL) { in trace__btf_scnprintf()
1074 syscall_arg_fmt__cache_btf_enum(arg_fmt, trace->btf, type); in trace__btf_scnprintf()
1078 if (arg_fmt->type == NULL) in trace__btf_scnprintf()
1081 if (btf_is_enum(arg_fmt->type)) in trace__btf_scnprintf()
1082 return btf_enum_scnprintf(arg_fmt->type, trace->btf, bf, size, val); in trace__btf_scnprintf()
1083 else if (btf_is_struct(arg_fmt->type) || btf_is_union(arg_fmt->type)) in trace__btf_scnprintf()
1084 return btf_struct_scnprintf(arg_fmt->type, trace->btf, bf, size, arg); in trace__btf_scnprintf()
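btf_enum_scnprintf() above walks the members of a BTF enum type looking for the member whose value matches. Roughly the same lookup can be done against the kernel's vmlinux BTF with public libbpf calls; a sketch, assuming libbpf is available and /sys/kernel/btf/vmlinux exists, and using "pid_type" only as an example enum name (build with -lbpf).

#include <bpf/btf.h>
#include <stdio.h>

static const char *enum_val_to_name(struct btf *btf, const char *type, int val)
{
	int id = btf__find_by_name(btf, type);
	const struct btf_type *t;
	const struct btf_enum *e;

	if (id < 0)
		return NULL;

	t = btf__type_by_id(btf, id);
	if (!t || !btf_is_enum(t))
		return NULL;

	e = btf_enum(t);
	for (int i = 0; i < btf_vlen(t); i++, e++) {
		if (e->val == val)
			return btf__name_by_offset(btf, e->name_off);
	}
	return NULL;
}

int main(void)
{
	struct btf *btf = btf__load_vmlinux_btf();      /* needs /sys/kernel/btf/vmlinux */

	if (!btf)
		return 1;
	printf("%s\n", enum_val_to_name(btf, "pid_type", 0) ?: "<not found>");
	btf__free(btf);
	return 0;
}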
1090 static size_t trace__btf_scnprintf(struct trace *trace __maybe_unused, struct syscall_arg *arg __ma… in trace__btf_scnprintf()
1097 static bool syscall_arg__strtoul_btf_type(char *bf __maybe_unused, size_t size __maybe_unused, in syscall_arg__strtoul_btf_type()
1116 #include "trace/beauty/eventfd.c"
1117 #include "trace/beauty/futex_op.c"
1118 #include "trace/beauty/futex_val3.c"
1119 #include "trace/beauty/mmap.c"
1120 #include "trace/beauty/mode_t.c"
1121 #include "trace/beauty/msg_flags.c"
1122 #include "trace/beauty/open_flags.c"
1123 #include "trace/beauty/perf_event_open.c"
1124 #include "trace/beauty/pid.c"
1125 #include "trace/beauty/sched_policy.c"
1126 #include "trace/beauty/seccomp.c"
1127 #include "trace/beauty/signum.c"
1128 #include "trace/beauty/socket_type.c"
1129 #include "trace/beauty/waitid_options.c"
1131 static const struct syscall_fmt syscall_fmts[] = {
1410 static int syscall_fmt__cmp(const void *name, const void *fmtp) in syscall_fmt__cmp()
1413 return strcmp(name, fmt->name); in syscall_fmt__cmp()
1416 static const struct syscall_fmt *__syscall_fmt__find(const struct syscall_fmt *fmts, in __syscall_fmt__find()
1423 static const struct syscall_fmt *syscall_fmt__find(const char *name) in syscall_fmt__find()
1429 static const struct syscall_fmt *__syscall_fmt__find_by_alias(const struct syscall_fmt *fmts, in __syscall_fmt__find_by_alias()
1442 static const struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias) in syscall_fmt__find_by_alias()
1452 * nonexistent: Just a hole in the syscall table, syscall id not allocated
1479 static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp) in fprintf_duration()
1525 static struct thread_trace *thread_trace__new(void) in thread_trace__new()
1530 ttrace->files.max = -1; in thread_trace__new()
1531 ttrace->syscall_stats = intlist__new(NULL); in thread_trace__new()
1537 static void thread_trace__free_files(struct thread_trace *ttrace);
1539 static void thread_trace__delete(void *pttrace) in thread_trace__delete()
1546 intlist__delete(ttrace->syscall_stats); in thread_trace__delete()
1547 ttrace->syscall_stats = NULL; in thread_trace__delete()
1549 zfree(&ttrace->entry_str); in thread_trace__delete()
1553 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp) in thread__trace()
1567 ++ttrace->nr_events; in thread__trace()
1580 struct thread_trace *ttrace = thread__priv(arg->thread); in syscall_arg__set_ret_scnprintf()
1582 ttrace->ret_scnprintf = ret_scnprintf; in syscall_arg__set_ret_scnprintf()
1588 static const size_t trace__entry_str_size = 2048;
1590 static void thread_trace__free_files(struct thread_trace *ttrace) in thread_trace__free_files()
1592 for (int i = 0; i < ttrace->files.max; ++i) { in thread_trace__free_files()
1593 struct file *file = ttrace->files.table + i; in thread_trace__free_files()
1594 zfree(&file->pathname); in thread_trace__free_files()
1597 zfree(&ttrace->files.table); in thread_trace__free_files()
1598 ttrace->files.max = -1; in thread_trace__free_files()
1601 static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd) in thread_trace__files_entry()
1606 if (fd > ttrace->files.max) { in thread_trace__files_entry()
1607 struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file)); in thread_trace__files_entry()
1612 if (ttrace->files.max != -1) { in thread_trace__files_entry()
1613 memset(nfiles + ttrace->files.max + 1, 0, in thread_trace__files_entry()
1614 (fd - ttrace->files.max) * sizeof(struct file)); in thread_trace__files_entry()
1619 ttrace->files.table = nfiles; in thread_trace__files_entry()
1620 ttrace->files.max = fd; in thread_trace__files_entry()
1623 return ttrace->files.table + fd; in thread_trace__files_entry()
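thread_trace__files_entry() above grows the per-thread fd -> file table on demand: realloc up to the new highest fd, zero only the freshly added slots so stale pathnames are never printed or freed, then index directly by fd. A simplified sketch of that pattern; the entry struct is a stand-in, not perf's struct file.

#include <stdlib.h>
#include <string.h>

struct fd_entry {               /* stand-in for perf's struct file */
	char *pathname;
};

struct fd_table {
	struct fd_entry *table;
	int max;                /* highest valid index, -1 when empty */
};

static struct fd_entry *fd_table__entry(struct fd_table *ft, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ft->max) {
		struct fd_entry *nt = realloc(ft->table, (fd + 1) * sizeof(*nt));

		if (nt == NULL)
			return NULL;
		/* zero only the slots that did not exist before */
		memset(nt + ft->max + 1, 0, (fd - ft->max) * sizeof(*nt));
		ft->table = nt;
		ft->max = fd;
	}
	return ft->table + fd;
}

int main(void)
{
	struct fd_table ft = { .table = NULL, .max = -1 };
	struct fd_entry *e = fd_table__entry(&ft, 5);   /* allocates slots 0..5, all zeroed */

	if (e)
		e->pathname = strdup("/tmp/example");
	/* cleanup omitted for brevity */
	return 0;
}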
1631 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname) in trace__set_fd_pathname()
1639 file->dev_maj = major(st.st_rdev); in trace__set_fd_pathname()
1640 file->pathname = strdup(pathname); in trace__set_fd_pathname()
1641 if (file->pathname) in trace__set_fd_pathname()
1645 return -1; in trace__set_fd_pathname()
1648 static int thread__read_fd_path(struct thread *thread, int fd) in thread__read_fd_path()
1664 return -1; in thread__read_fd_path()
1669 return -1; in thread__read_fd_path()
1675 static const char *thread__fd_path(struct thread *thread, int fd, in thread__fd_path()
1676 struct trace *trace) in thread__fd_path() argument
1680 if (ttrace == NULL || trace->fd_path_disabled) in thread__fd_path()
1686 if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) { in thread__fd_path()
1687 if (!trace->live) in thread__fd_path()
1689 ++trace->stats.proc_getname; in thread__fd_path()
1694 return ttrace->files.table[fd].pathname; in thread__fd_path()
1699 int fd = arg->val; in syscall_arg__scnprintf_fd()
1701 const char *path = thread__fd_path(arg->thread, fd, arg->trace); in syscall_arg__scnprintf_fd()
1704 printed += scnprintf(bf + printed, size - printed, "<%s>", path); in syscall_arg__scnprintf_fd()
1709 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size) in pid__scnprintf_fd() argument
1712 struct thread *thread = machine__find_thread(trace->host, pid, pid); in pid__scnprintf_fd()
1715 const char *path = thread__fd_path(thread, fd, trace); in pid__scnprintf_fd()
1718 printed += scnprintf(bf + printed, size - printed, "<%s>", path); in pid__scnprintf_fd()
1726 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size, in syscall_arg__scnprintf_close_fd()
1729 int fd = arg->val; in syscall_arg__scnprintf_close_fd()
1731 struct thread_trace *ttrace = thread__priv(arg->thread); in syscall_arg__scnprintf_close_fd()
1733 if (ttrace && fd >= 0 && fd <= ttrace->files.max) in syscall_arg__scnprintf_close_fd()
1734 zfree(&ttrace->files.table[fd].pathname); in syscall_arg__scnprintf_close_fd()
1739 static void thread__set_filename_pos(struct thread *thread, const char *bf, in thread__set_filename_pos()
1744 ttrace->filename.ptr = ptr; in thread__set_filename_pos()
1745 ttrace->filename.entry_str_pos = bf - ttrace->entry_str; in thread__set_filename_pos()
1748 static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size) in syscall_arg__scnprintf_augmented_string()
1750 struct augmented_arg *augmented_arg = arg->augmented.args; in syscall_arg__scnprintf_augmented_string()
1751 size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value); in syscall_arg__scnprintf_augmented_string()
1756 int consumed = sizeof(*augmented_arg) + augmented_arg->size; in syscall_arg__scnprintf_augmented_string()
1758 arg->augmented.args = ((void *)arg->augmented.args) + consumed; in syscall_arg__scnprintf_augmented_string()
1759 arg->augmented.size -= consumed; in syscall_arg__scnprintf_augmented_string()
1764 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size, in syscall_arg__scnprintf_filename()
1767 unsigned long ptr = arg->val; in syscall_arg__scnprintf_filename()
1769 if (arg->augmented.args) in syscall_arg__scnprintf_filename()
1772 if (!arg->trace->vfs_getname) in syscall_arg__scnprintf_filename()
1775 thread__set_filename_pos(arg->thread, bf, ptr); in syscall_arg__scnprintf_filename()
1782 static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg) in syscall_arg__scnprintf_buf()
1784 struct augmented_arg *augmented_arg = arg->augmented.args; in syscall_arg__scnprintf_buf()
1785 unsigned char *orig = (unsigned char *)augmented_arg->value; in syscall_arg__scnprintf_buf()
1792 for (int j = 0; j < augmented_arg->size; ++j) { in syscall_arg__scnprintf_buf()
1794 /* print control characters (0~31 and 127), and non-ascii characters in \(digits) */ in syscall_arg__scnprintf_buf()
1795 printed += scnprintf(bf + printed, size - printed, control_char ? "\\%d" : "%c", (int)orig[j]); in syscall_arg__scnprintf_buf()
1798 consumed = sizeof(*augmented_arg) + augmented_arg->size; in syscall_arg__scnprintf_buf()
1799 arg->augmented.args = ((void *)arg->augmented.args) + consumed; in syscall_arg__scnprintf_buf()
1800 arg->augmented.size -= consumed; in syscall_arg__scnprintf_buf()
1805 static bool trace__filter_duration(struct trace *trace, double t) in trace__filter_duration() argument
1807 return t < (trace->duration_filter * NSEC_PER_MSEC); in trace__filter_duration()
1810 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) in __trace__fprintf_tstamp() argument
1812 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; in __trace__fprintf_tstamp()
1819 * using ttrace->entry_time for a thread that receives a sys_exit without
1823 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) in trace__fprintf_tstamp() argument
1826 return __trace__fprintf_tstamp(trace, tstamp, fp); in trace__fprintf_tstamp()
1831 static pid_t workload_pid = -1;
1832 static volatile sig_atomic_t done = false;
1833 static volatile sig_atomic_t interrupted = false;
1835 static void sighandler_interrupt(int sig __maybe_unused) in sighandler_interrupt()
1840 static void sighandler_chld(int sig __maybe_unused, siginfo_t *info, in sighandler_chld()
1843 if (info->si_pid == workload_pid) in sighandler_chld()
1847 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp) in trace__fprintf_comm_tid() argument
1851 if (trace->multiple_threads) { in trace__fprintf_comm_tid()
1852 if (trace->show_comm) in trace__fprintf_comm_tid()
1860 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread, in trace__fprintf_entry_head() argument
1865 if (trace->show_tstamp) in trace__fprintf_entry_head()
1866 printed = trace__fprintf_tstamp(trace, tstamp, fp); in trace__fprintf_entry_head()
1867 if (trace->show_duration) in trace__fprintf_entry_head()
1869 return printed + trace__fprintf_comm_tid(trace, thread, fp); in trace__fprintf_entry_head()
1872 static int trace__process_event(struct trace *trace, struct machine *machine, in trace__process_event() argument
1877 switch (event->header.type) { in trace__process_event()
1879 color_fprintf(trace->output, PERF_COLOR_RED, in trace__process_event()
1880 "LOST %" PRIu64 " events!\n", (u64)event->lost.lost); in trace__process_event()
1891 static int trace__tool_process(const struct perf_tool *tool, in trace__tool_process()
1896 struct trace *trace = container_of(tool, struct trace, tool); in trace__tool_process() local
1897 return trace__process_event(trace, machine, event, sample); in trace__tool_process()
1900 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **… in trace__machine__resolve_kernel_addr()
1904 if (machine->kptr_restrict_warned) in trace__machine__resolve_kernel_addr()
1911 machine->kptr_restrict_warned = true; in trace__machine__resolve_kernel_addr()
1918 static int trace__symbols_init(struct trace *trace, struct evlist *evlist) in trace__symbols_init() argument
1925 trace->host = machine__new_host(); in trace__symbols_init()
1926 if (trace->host == NULL) in trace__symbols_init()
1927 return -ENOMEM; in trace__symbols_init()
1931 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); in trace__symbols_init()
1935 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, in trace__symbols_init()
1936 evlist->core.threads, trace__tool_process, in trace__symbols_init()
1945 static void trace__symbols__exit(struct trace *trace) in trace__symbols__exit() argument
1947 machine__exit(trace->host); in trace__symbols__exit()
1948 trace->host = NULL; in trace__symbols__exit()
1953 static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args) in syscall__alloc_arg_fmts()
1957 if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0) in syscall__alloc_arg_fmts()
1958 nr_args = sc->fmt->nr_args; in syscall__alloc_arg_fmts()
1960 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt)); in syscall__alloc_arg_fmts()
1961 if (sc->arg_fmt == NULL) in syscall__alloc_arg_fmts()
1962 return -1; in syscall__alloc_arg_fmts()
1965 if (sc->fmt) in syscall__alloc_arg_fmts()
1966 sc->arg_fmt[idx] = sc->fmt->arg[idx]; in syscall__alloc_arg_fmts()
1969 sc->nr_args = nr_args; in syscall__alloc_arg_fmts()
1973 static const struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
1978 static int syscall_arg_fmt__cmp(const void *name, const void *fmtp) in syscall_arg_fmt__cmp()
1981 return strcmp(name, fmt->name); in syscall_arg_fmt__cmp()
1984 static const struct syscall_arg_fmt *
1991 static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name) in syscall_arg_fmt__find_by_name()
1997 static struct tep_format_field *
2004 for (; field; field = field->next, ++arg) { in syscall_arg_fmt__init_array()
2007 if (arg->scnprintf) in syscall_arg_fmt__init_array()
2010 len = strlen(field->name); in syscall_arg_fmt__init_array()
2013 if ((field->flags & TEP_FIELD_IS_POINTER) && strstarts(field->type, "const ")) in syscall_arg_fmt__init_array()
2014 arg->from_user = true; in syscall_arg_fmt__init_array()
2016 if (strcmp(field->type, "const char *") == 0 && in syscall_arg_fmt__init_array()
2017 ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) || in syscall_arg_fmt__init_array()
2018 strstr(field->name, "path") != NULL)) { in syscall_arg_fmt__init_array()
2019 arg->scnprintf = SCA_FILENAME; in syscall_arg_fmt__init_array()
2020 } else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr")) in syscall_arg_fmt__init_array()
2021 arg->scnprintf = SCA_PTR; in syscall_arg_fmt__init_array()
2022 else if (strcmp(field->type, "pid_t") == 0) in syscall_arg_fmt__init_array()
2023 arg->scnprintf = SCA_PID; in syscall_arg_fmt__init_array()
2024 else if (strcmp(field->type, "umode_t") == 0) in syscall_arg_fmt__init_array()
2025 arg->scnprintf = SCA_MODE_T; in syscall_arg_fmt__init_array()
2026 else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) { in syscall_arg_fmt__init_array()
2027 arg->scnprintf = SCA_CHAR_ARRAY; in syscall_arg_fmt__init_array()
2028 arg->nr_entries = field->arraylen; in syscall_arg_fmt__init_array()
2029 } else if ((strcmp(field->type, "int") == 0 || in syscall_arg_fmt__init_array()
2030 strcmp(field->type, "unsigned int") == 0 || in syscall_arg_fmt__init_array()
2031 strcmp(field->type, "long") == 0) && in syscall_arg_fmt__init_array()
2032 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) { in syscall_arg_fmt__init_array()
2035 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c in syscall_arg_fmt__init_array()
2040 arg->scnprintf = SCA_FD; in syscall_arg_fmt__init_array()
2041 } else if (strstr(field->type, "enum") && use_btf != NULL) { in syscall_arg_fmt__init_array()
2043 arg->strtoul = STUL_BTF_TYPE; in syscall_arg_fmt__init_array()
2046 syscall_arg_fmt__find_by_name(field->name); in syscall_arg_fmt__init_array()
2049 arg->scnprintf = fmt->scnprintf; in syscall_arg_fmt__init_array()
2050 arg->strtoul = fmt->strtoul; in syscall_arg_fmt__init_array()
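syscall_arg_fmt__init_array() above picks a pretty-printer per argument from nothing but the tracefs field type and name: "const char *" fields named *name or containing "path" become filenames, pointers become hex, pid_t a pid, integer fields ending in "fd" a file descriptor, enums go through BTF, and so on. A hedged sketch of that kind of name/type-driven dispatch; the formatter enum is invented for the example.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum arg_formatter {    /* illustrative, not perf's SCA_* printers */
	FMT_DEFAULT, FMT_FILENAME, FMT_PTR, FMT_PID, FMT_FD, FMT_BTF_ENUM,
};

static bool ends_with(const char *s, const char *suffix)
{
	size_t n = strlen(s), m = strlen(suffix);

	return n >= m && strcmp(s + n - m, suffix) == 0;
}

static enum arg_formatter pick_formatter(const char *type, const char *name)
{
	if (strcmp(type, "const char *") == 0 &&
	    (ends_with(name, "name") || strstr(name, "path")))
		return FMT_FILENAME;
	if (strchr(type, '*') || strstr(name, "addr"))
		return FMT_PTR;
	if (strcmp(type, "pid_t") == 0)
		return FMT_PID;
	if ((strcmp(type, "int") == 0 || strcmp(type, "unsigned int") == 0) && ends_with(name, "fd"))
		return FMT_FD;
	if (strstr(type, "enum"))
		return FMT_BTF_ENUM;
	return FMT_DEFAULT;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_formatter("const char *", "filename"),    /* FMT_FILENAME */
	       pick_formatter("unsigned int", "fd"),          /* FMT_FD */
	       pick_formatter("pid_t", "pid"));               /* FMT_PID */
	return 0;
}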
2058 static int syscall__set_arg_fmts(struct syscall *sc) in syscall__set_arg_fmts()
2060 struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args, in syscall__set_arg_fmts()
2061 &sc->use_btf); in syscall__set_arg_fmts()
2064 sc->args_size = last_field->offset + last_field->size; in syscall__set_arg_fmts()
2069 static int trace__read_syscall_info(struct trace *trace, int id) in trace__read_syscall_info() argument
2073 const char *name = syscalltbl__name(trace->sctbl, id); in trace__read_syscall_info()
2076 if (trace->syscalls.table == NULL) { in trace__read_syscall_info()
2077 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc)); in trace__read_syscall_info()
2078 if (trace->syscalls.table == NULL) in trace__read_syscall_info()
2079 return -ENOMEM; in trace__read_syscall_info()
2081 sc = trace->syscalls.table + id; in trace__read_syscall_info()
2082 if (sc->nonexistent) in trace__read_syscall_info()
2083 return -EEXIST; in trace__read_syscall_info()
2086 sc->nonexistent = true; in trace__read_syscall_info()
2087 return -EEXIST; in trace__read_syscall_info()
2090 sc->name = name; in trace__read_syscall_info()
2091 sc->fmt = syscall_fmt__find(sc->name); in trace__read_syscall_info()
2093 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name); in trace__read_syscall_info()
2094 sc->tp_format = trace_event__tp_format("syscalls", tp_name); in trace__read_syscall_info()
2096 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) { in trace__read_syscall_info()
2097 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias); in trace__read_syscall_info()
2098 sc->tp_format = trace_event__tp_format("syscalls", tp_name); in trace__read_syscall_info()
2102 * Fails to read trace point format via sysfs node, so the trace point in trace__read_syscall_info()
2105 if (IS_ERR(sc->tp_format)) { in trace__read_syscall_info()
2106 sc->nonexistent = true; in trace__read_syscall_info()
2107 return PTR_ERR(sc->tp_format); in trace__read_syscall_info()
2114 if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? in trace__read_syscall_info()
2115 RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields - 1)) in trace__read_syscall_info()
2116 return -ENOMEM; in trace__read_syscall_info()
2118 sc->args = sc->tp_format->format.fields; in trace__read_syscall_info()
2124 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) { in trace__read_syscall_info()
2125 sc->args = sc->args->next; in trace__read_syscall_info()
2126 --sc->nr_args; in trace__read_syscall_info()
2129 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit"); in trace__read_syscall_info()
2130 sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat"); in trace__read_syscall_info()
2135 if (sc->use_btf) in trace__read_syscall_info()
2136 trace__load_vmlinux_btf(trace); in trace__read_syscall_info()
2141 static int evsel__init_tp_arg_scnprintf(struct evsel *evsel, bool *use_btf) in evsel__init_tp_arg_scnprintf()
2149 syscall_arg_fmt__init_array(fmt, tp_format->format.fields, use_btf); in evsel__init_tp_arg_scnprintf()
2154 return -ENOMEM; in evsel__init_tp_arg_scnprintf()
2157 static int intcmp(const void *a, const void *b) in intcmp()
2161 return *one - *another; in intcmp()
2164 static int trace__validate_ev_qualifier(struct trace *trace) in trace__validate_ev_qualifier() argument
2169 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier); in trace__validate_ev_qualifier()
2171 trace->ev_qualifier_ids.entries = malloc(nr_allocated * in trace__validate_ev_qualifier()
2172 sizeof(trace->ev_qualifier_ids.entries[0])); in trace__validate_ev_qualifier()
2174 if (trace->ev_qualifier_ids.entries == NULL) { in trace__validate_ev_qualifier()
2176 trace->output); in trace__validate_ev_qualifier()
2177 err = -EINVAL; in trace__validate_ev_qualifier()
2181 strlist__for_each_entry(pos, trace->ev_qualifier) { in trace__validate_ev_qualifier()
2182 const char *sc = pos->s; in trace__validate_ev_qualifier()
2183 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1; in trace__validate_ev_qualifier() local
2185 if (id < 0) { in trace__validate_ev_qualifier()
2186 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next); in trace__validate_ev_qualifier()
2187 if (id >= 0) in trace__validate_ev_qualifier()
2201 trace->ev_qualifier_ids.entries[nr_used++] = id; in trace__validate_ev_qualifier()
2202 if (match_next == -1) in trace__validate_ev_qualifier()
2206 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next); in trace__validate_ev_qualifier()
2207 if (id < 0) in trace__validate_ev_qualifier()
2213 entries = realloc(trace->ev_qualifier_ids.entries, in trace__validate_ev_qualifier()
2214 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0])); in trace__validate_ev_qualifier()
2216 err = -ENOMEM; in trace__validate_ev_qualifier()
2217 fputs("\nError:\t Not enough memory for parsing\n", trace->output); in trace__validate_ev_qualifier()
2220 trace->ev_qualifier_ids.entries = entries; in trace__validate_ev_qualifier()
2222 trace->ev_qualifier_ids.entries[nr_used++] = id; in trace__validate_ev_qualifier()
2226 trace->ev_qualifier_ids.nr = nr_used; in trace__validate_ev_qualifier()
2227 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp); in trace__validate_ev_qualifier()
2233 zfree(&trace->ev_qualifier_ids.entries); in trace__validate_ev_qualifier()
2234 trace->ev_qualifier_ids.nr = 0; in trace__validate_ev_qualifier()
2238 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id) in trace__syscall_enabled() argument
2242 if (trace->ev_qualifier_ids.nr == 0) in trace__syscall_enabled()
2245 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, in trace__syscall_enabled()
2246 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL; in trace__syscall_enabled()
2249 return !trace->not_ev_qualifier; in trace__syscall_enabled()
2251 return trace->not_ev_qualifier; in trace__syscall_enabled()
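trace__validate_ev_qualifier() collects the syscall ids named on the command line into an int array, sorts it with qsort() and intcmp(), and trace__syscall_enabled() then answers membership with bsearch(), optionally negated. A tiny standalone version of that sorted-array membership check; the syscall numbers are just example values.

#include <stdio.h>
#include <stdlib.h>

static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;

	return *one - *another;
}

static int id_enabled(const int *ids, size_t nr, int id, int negated)
{
	int found = bsearch(&id, ids, nr, sizeof(int), intcmp) != NULL;

	return negated ? !found : found;
}

int main(void)
{
	int ids[] = { 221, 3, 57, 0 };  /* e.g. ids gathered from the -e qualifier */

	qsort(ids, sizeof(ids) / sizeof(ids[0]), sizeof(int), intcmp);
	printf("%d %d\n",
	       id_enabled(ids, 4, 57, 0),   /* 1: in the qualifier list */
	       id_enabled(ids, 4, 42, 0));  /* 0: not listed */
	return 0;
}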
2256 * 8-byte unaligned accesses. args points to raw_data within the event
2257 * and raw_data is guaranteed to be 8-byte unaligned because it is
2265 unsigned char *p = arg->args + sizeof(unsigned long) * idx; in syscall_arg__val()
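The comment above is the reason syscall_arg__val() copies through a temporary instead of dereferencing: raw_data follows a u32 size field, so the longs in args may start on a 4-byte boundary only. A minimal sketch of that memcpy-based read; the buffer contents are made up for the example.

#include <stdio.h>
#include <string.h>

/* read the idx-th unsigned long from a possibly 8-byte-unaligned buffer */
static unsigned long read_arg(const unsigned char *args, unsigned int idx)
{
	unsigned long val;

	memcpy(&val, args + sizeof(unsigned long) * idx, sizeof(val));
	return val;
}

int main(void)
{
	unsigned char raw[4 + 3 * sizeof(unsigned long)] = { 0 };
	unsigned long three = 3;

	/* args start 4 bytes in, mimicking raw_data preceded by a u32 raw_size */
	memcpy(raw + 4 + sizeof(unsigned long), &three, sizeof(three));
	printf("arg1 = %lu\n", read_arg(raw + 4, 1));   /* prints 3 */
	return 0;
}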
2271 static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size, in syscall__scnprintf_name()
2274 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name) in syscall__scnprintf_name()
2275 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name); in syscall__scnprintf_name()
2277 return scnprintf(bf, size, "arg%d: ", arg->idx); in syscall__scnprintf_name()
2283 * in tools/perf/trace/beauty/mount_flags.c
2285 static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val) in syscall_arg_fmt__mask_val()
2287 if (fmt && fmt->mask_val) in syscall_arg_fmt__mask_val()
2288 return fmt->mask_val(arg, val); in syscall_arg_fmt__mask_val()
2293 static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size, in syscall_arg_fmt__scnprintf_val()
2296 if (fmt && fmt->scnprintf) { in syscall_arg_fmt__scnprintf_val()
2297 arg->val = val; in syscall_arg_fmt__scnprintf_val()
2298 if (fmt->parm) in syscall_arg_fmt__scnprintf_val()
2299 arg->parm = fmt->parm; in syscall_arg_fmt__scnprintf_val()
2300 return fmt->scnprintf(bf, size, arg); in syscall_arg_fmt__scnprintf_val()
2305 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size, in syscall__scnprintf_args()
2307 struct trace *trace, struct thread *thread) in syscall__scnprintf_args() argument
2320 .trace = trace, in syscall__scnprintf_args()
2322 .show_string_prefix = trace->show_string_prefix, in syscall__scnprintf_args()
2332 ttrace->ret_scnprintf = NULL; in syscall__scnprintf_args()
2334 if (sc->args != NULL) { in syscall__scnprintf_args()
2337 for (field = sc->args; field; in syscall__scnprintf_args()
2338 field = field->next, ++arg.idx, bit <<= 1) { in syscall__scnprintf_args()
2342 arg.fmt = &sc->arg_fmt[arg.idx]; in syscall__scnprintf_args()
2348 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val); in syscall__scnprintf_args()
2357 if (val == 0 && !trace->show_zeros && in syscall__scnprintf_args()
2358 !(sc->arg_fmt && sc->arg_fmt[arg.idx].show_zero) && in syscall__scnprintf_args()
2359 !(sc->arg_fmt && sc->arg_fmt[arg.idx].strtoul == STUL_BTF_TYPE)) in syscall__scnprintf_args()
2362 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); in syscall__scnprintf_args()
2364 if (trace->show_arg_names) in syscall__scnprintf_args()
2365 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); in syscall__scnprintf_args()
2367 default_scnprintf = sc->arg_fmt[arg.idx].scnprintf; in syscall__scnprintf_args()
2369 if (trace->force_btf || default_scnprintf == NULL || default_scnprintf == SCA_PTR) { in syscall__scnprintf_args()
2370 btf_printed = trace__btf_scnprintf(trace, &arg, bf + printed, in syscall__scnprintf_args()
2371 size - printed, val, field->type); in syscall__scnprintf_args()
2378 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], in syscall__scnprintf_args()
2379 bf + printed, size - printed, &arg, val); in syscall__scnprintf_args()
2381 } else if (IS_ERR(sc->tp_format)) { in syscall__scnprintf_args()
2387 while (arg.idx < sc->nr_args) { in syscall__scnprintf_args()
2392 printed += scnprintf(bf + printed, size - printed, ", "); in syscall__scnprintf_args()
2393 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg); in syscall__scnprintf_args()
2394 …printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &ar… in syscall__scnprintf_args()
2404 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2408 static struct syscall *trace__syscall_info(struct trace *trace, in trace__syscall_info() argument
2409 struct evsel *evsel, int id) in trace__syscall_info() argument
2413 if (id < 0) { in trace__syscall_info()
2421 * grep "NR -1 " /t/trace_pipe in trace__syscall_info()
2426 static u64 n; in trace__syscall_info()
2427 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n", in trace__syscall_info()
2428 id, evsel__name(evsel), ++n); in trace__syscall_info()
2433 err = -EINVAL; in trace__syscall_info()
2435 if (id > trace->sctbl->syscalls.max_id) { in trace__syscall_info()
2439 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) && in trace__syscall_info()
2440 (err = trace__read_syscall_info(trace, id)) != 0) in trace__syscall_info()
2443 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent) in trace__syscall_info()
2446 return &trace->syscalls.table[id]; in trace__syscall_info()
2451 …fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, s… in trace__syscall_info()
2452 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL) in trace__syscall_info()
2453 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name); in trace__syscall_info()
2454 fputs(" information\n", trace->output); in trace__syscall_info()
2466 static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace, in thread__update_stats()
2467 int id, struct perf_sample *sample, long err, bool errno_summary) in thread__update_stats() argument
2473 inode = intlist__findnew(ttrace->syscall_stats, id); in thread__update_stats()
2477 stats = inode->priv; in thread__update_stats()
2483 init_stats(&stats->stats); in thread__update_stats()
2484 inode->priv = stats; in thread__update_stats()
2487 if (ttrace->entry_time && sample->time > ttrace->entry_time) in thread__update_stats()
2488 duration = sample->time - ttrace->entry_time; in thread__update_stats()
2490 update_stats(&stats->stats, duration); in thread__update_stats()
2493 ++stats->nr_failures; in thread__update_stats()
2498 err = -err; in thread__update_stats()
2499 if (err > stats->max_errno) { in thread__update_stats()
2500 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32)); in thread__update_stats()
2503 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32)); in thread__update_stats()
2511 stats->errnos = new_errnos; in thread__update_stats()
2512 stats->max_errno = err; in thread__update_stats()
2515 ++stats->errnos[err - 1]; in thread__update_stats()
2519 static int trace__printf_interrupted_entry(struct trace *trace) in trace__printf_interrupted_entry() argument
2525 if (trace->failure_only || trace->current == NULL) in trace__printf_interrupted_entry()
2528 ttrace = thread__priv(trace->current); in trace__printf_interrupted_entry()
2530 if (!ttrace->entry_pending) in trace__printf_interrupted_entry()
2533 …printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->o… in trace__printf_interrupted_entry()
2534 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str); in trace__printf_interrupted_entry()
2536 if (len < trace->args_alignment - 4) in trace__printf_interrupted_entry()
2537 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " "); in trace__printf_interrupted_entry()
2539 printed += fprintf(trace->output, " ...\n"); in trace__printf_interrupted_entry()
2541 ttrace->entry_pending = false; in trace__printf_interrupted_entry()
2542 ++trace->nr_events_printed; in trace__printf_interrupted_entry()
2547 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel, in trace__fprintf_sample() argument
2552 if (trace->print_sample) { in trace__fprintf_sample()
2553 double ts = (double)sample->time / NSEC_PER_MSEC; in trace__fprintf_sample()
2555 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n", in trace__fprintf_sample()
2558 sample->pid, sample->tid, sample->cpu); in trace__fprintf_sample()
2564 static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented… in syscall__augmented_args()
2571 * sc->args_size but always after the full raw_syscalls:sys_enter payload, in syscall__augmented_args()
2574 * We'll revisit this later to pass s->args_size to the BPF augmenter in syscall__augmented_args()
2580 int args_size = raw_augmented_args_size ?: sc->args_size; in syscall__augmented_args()
2582 *augmented_args_size = sample->raw_size - args_size; in syscall__augmented_args()
2584 static uintptr_t argbuf[1024]; /* assuming single-threaded */ in syscall__augmented_args()
2590 * The perf ring-buffer is 8-byte aligned but sample->raw_data in syscall__augmented_args()
2594 * into a static buffer as it's single-threaded for now. in syscall__augmented_args()
2596 memcpy(argbuf, sample->raw_data + args_size, *augmented_args_size); in syscall__augmented_args()
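syscall__augmented_args() and the scnprintf helpers above consume the augmented payload as a sequence of size-prefixed records appended after the raw syscall arguments, advancing by sizeof(header) + size each time, after first copying the payload into an aligned static buffer. A hedged sketch of walking such a layout; the record struct is a simplified stand-in for the BPF-side struct augmented_arg (which also carries an err field).

#include <stdio.h>
#include <string.h>

struct aug_rec {                /* simplified stand-in for struct augmented_arg */
	unsigned int size;      /* length of value[] that follows */
	char value[];
};

static void walk_augmented(const unsigned char *buf, int remaining)
{
	const unsigned char *p = buf;

	while (remaining > (int)sizeof(struct aug_rec)) {
		const struct aug_rec *rec = (const void *)p;
		int consumed = sizeof(*rec) + rec->size;

		printf("record: \"%.*s\"\n", (int)rec->size, rec->value);
		p += consumed;          /* advance past header + payload, as the helpers above do */
		remaining -= consumed;
	}
}

int main(void)
{
	union {                 /* force alignment, like copying into the static argbuf above */
		unsigned long align;
		unsigned char bytes[64];
	} buf;
	unsigned int len = 4;

	/* two records packed back to back: a 4-byte length header, then the bytes */
	memcpy(buf.bytes, &len, 4);
	memcpy(buf.bytes + 4, "/tmp", 4);
	len = 5;
	memcpy(buf.bytes + 8, &len, 4);
	memcpy(buf.bytes + 12, "hello", 5);
	walk_augmented(buf.bytes, 17);
	return 0;
}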
2603 static void syscall__exit(struct syscall *sc) in syscall__exit()
2608 zfree(&sc->arg_fmt); in syscall__exit()
2611 static int trace__sys_enter(struct trace *trace, struct evsel *evsel, in trace__sys_enter() argument
2619 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; in trace__sys_enter() local
2622 struct syscall *sc = trace__syscall_info(trace, evsel, id); in trace__sys_enter()
2626 return -1; in trace__sys_enter()
2628 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__sys_enter()
2629 ttrace = thread__trace(thread, trace->output); in trace__sys_enter()
2633 trace__fprintf_sample(trace, evsel, sample, thread); in trace__sys_enter()
2637 if (ttrace->entry_str == NULL) { in trace__sys_enter()
2638 ttrace->entry_str = malloc(trace__entry_str_size); in trace__sys_enter()
2639 if (!ttrace->entry_str) in trace__sys_enter()
2643 if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) in trace__sys_enter()
2644 trace__printf_interrupted_entry(trace); in trace__sys_enter()
2649 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file, in trace__sys_enter()
2655 if (evsel != trace->syscalls.events.sys_enter) in trace__sys_enter()
2656 …augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_sy… in trace__sys_enter()
2657 ttrace->entry_time = sample->time; in trace__sys_enter()
2658 msg = ttrace->entry_str; in trace__sys_enter()
2659 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name); in trace__sys_enter()
2661 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed, in trace__sys_enter()
2662 args, augmented_args, augmented_args_size, trace, thread); in trace__sys_enter()
2664 if (sc->is_exit) { in trace__sys_enter()
2665 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) { in trace__sys_enter()
2668 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output); in trace__sys_enter()
2669 printed = fprintf(trace->output, "%s)", ttrace->entry_str); in trace__sys_enter()
2670 if (trace->args_alignment > printed) in trace__sys_enter()
2671 alignment = trace->args_alignment - printed; in trace__sys_enter()
2672 fprintf(trace->output, "%*s= ?\n", alignment, " "); in trace__sys_enter()
2675 ttrace->entry_pending = true; in trace__sys_enter()
2677 ttrace->filename.pending_open = false; in trace__sys_enter()
2680 if (trace->current != thread) { in trace__sys_enter()
2681 thread__put(trace->current); in trace__sys_enter()
2682 trace->current = thread__get(thread); in trace__sys_enter()
2690 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel, in trace__fprintf_sys_enter() argument
2695 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; in trace__fprintf_sys_enter() local
2696 struct syscall *sc = trace__syscall_info(trace, evsel, id); in trace__fprintf_sys_enter()
2703 return -1; in trace__fprintf_sys_enter()
2705 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__fprintf_sys_enter()
2706 ttrace = thread__trace(thread, trace->output); in trace__fprintf_sys_enter()
2715 …augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_sy… in trace__fprintf_sys_enter()
2716 …ll__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread); in trace__fprintf_sys_enter()
2717 fprintf(trace->output, "%.*s", (int)printed, msg); in trace__fprintf_sys_enter()
2724 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel, in trace__resolve_callchain() argument
2729 int max_stack = evsel->core.attr.sample_max_stack ? in trace__resolve_callchain()
2730 evsel->core.attr.sample_max_stack : in trace__resolve_callchain()
2731 trace->max_stack; in trace__resolve_callchain()
2732 int err = -1; in trace__resolve_callchain()
2735 if (machine__resolve(trace->host, &al, sample) < 0) in trace__resolve_callchain()
2744 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) in trace__fprintf_callchain() argument
2746 /* TODO: user-configurable print_opts */ in trace__fprintf_callchain()
2751 …chain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output); in trace__fprintf_callchain()
2754 static const char *errno_to_name(struct evsel *evsel, int err) in errno_to_name()
2761 static int trace__sys_exit(struct trace *trace, struct evsel *evsel, in trace__sys_exit() argument
2769 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0; in trace__sys_exit() local
2770 int alignment = trace->args_alignment; in trace__sys_exit()
2771 struct syscall *sc = trace__syscall_info(trace, evsel, id); in trace__sys_exit()
2775 return -1; in trace__sys_exit()
2777 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__sys_exit()
2778 ttrace = thread__trace(thread, trace->output); in trace__sys_exit()
2782 trace__fprintf_sample(trace, evsel, sample, thread); in trace__sys_exit()
2786 if (trace->summary) in trace__sys_exit()
2787 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary); in trace__sys_exit()
2789 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) { in trace__sys_exit()
2790 trace__set_fd_pathname(thread, ret, ttrace->filename.name); in trace__sys_exit()
2791 ttrace->filename.pending_open = false; in trace__sys_exit()
2792 ++trace->stats.vfs_getname; in trace__sys_exit()
2795 if (ttrace->entry_time) { in trace__sys_exit()
2796 duration = sample->time - ttrace->entry_time; in trace__sys_exit()
2797 if (trace__filter_duration(trace, duration)) in trace__sys_exit()
2800 } else if (trace->duration_filter) in trace__sys_exit()
2803 if (sample->callchain) { in trace__sys_exit()
2806 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); in trace__sys_exit()
2808 if (cursor->nr < trace->min_stack) in trace__sys_exit()
2814 if (trace->summary_only || (ret >= 0 && trace->failure_only)) in trace__sys_exit()
2817 …trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace-… in trace__sys_exit()
2819 if (ttrace->entry_pending) { in trace__sys_exit()
2820 printed = fprintf(trace->output, "%s", ttrace->entry_str); in trace__sys_exit()
2822 printed += fprintf(trace->output, " ... ["); in trace__sys_exit()
2823 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); in trace__sys_exit()
2825 printed += fprintf(trace->output, "]: %s()", sc->name); in trace__sys_exit()
2831 alignment -= printed; in trace__sys_exit()
2835 fprintf(trace->output, ")%*s= ", alignment, " "); in trace__sys_exit()
2837 if (sc->fmt == NULL) { in trace__sys_exit()
2841 fprintf(trace->output, "%ld", ret); in trace__sys_exit()
2845 const char *emsg = str_error_r(-ret, bf, sizeof(bf)), in trace__sys_exit()
2846 *e = errno_to_name(evsel, -ret); in trace__sys_exit()
2848 fprintf(trace->output, "-1 %s (%s)", e, emsg); in trace__sys_exit()
2850 } else if (ret == 0 && sc->fmt->timeout) in trace__sys_exit()
2851 fprintf(trace->output, "0 (Timeout)"); in trace__sys_exit()
2852 else if (ttrace->ret_scnprintf) { in trace__sys_exit()
2857 .trace = trace, in trace__sys_exit()
2859 ttrace->ret_scnprintf(bf, sizeof(bf), &arg); in trace__sys_exit()
2860 ttrace->ret_scnprintf = NULL; in trace__sys_exit()
2861 fprintf(trace->output, "%s", bf); in trace__sys_exit()
2862 } else if (sc->fmt->hexret) in trace__sys_exit()
2863 fprintf(trace->output, "%#lx", ret); in trace__sys_exit()
2864 else if (sc->fmt->errpid) { in trace__sys_exit()
2865 struct thread *child = machine__find_thread(trace->host, ret, ret); in trace__sys_exit()
2868 fprintf(trace->output, "%ld", ret); in trace__sys_exit()
2870 fprintf(trace->output, " (%s)", thread__comm_str(child)); in trace__sys_exit()
2876 fputc('\n', trace->output); in trace__sys_exit()
2879 * We only consider an 'event' for the sake of --max-events a non-filtered in trace__sys_exit()
2882 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX) in trace__sys_exit()
2886 trace__fprintf_callchain(trace, sample); in trace__sys_exit()
2890 ttrace->entry_pending = false; in trace__sys_exit()
2897 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, in trace__vfs_getname() argument
2901 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__vfs_getname()
2919 if (ttrace->filename.namelen < filename_len) { in trace__vfs_getname()
2920 char *f = realloc(ttrace->filename.name, filename_len + 1); in trace__vfs_getname()
2925 ttrace->filename.namelen = filename_len; in trace__vfs_getname()
2926 ttrace->filename.name = f; in trace__vfs_getname()
2929 strcpy(ttrace->filename.name, filename); in trace__vfs_getname()
2930 ttrace->filename.pending_open = true; in trace__vfs_getname()
2932 if (!ttrace->filename.ptr) in trace__vfs_getname()
2935 entry_str_len = strlen(ttrace->entry_str); in trace__vfs_getname()
2936 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */ in trace__vfs_getname()
2941 filename += filename_len - remaining_space; in trace__vfs_getname()
2945 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */ in trace__vfs_getname()
2946 pos = ttrace->entry_str + ttrace->filename.entry_str_pos; in trace__vfs_getname()
2950 ttrace->filename.ptr = 0; in trace__vfs_getname()
2951 ttrace->filename.entry_str_pos = 0; in trace__vfs_getname()
2958 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel, in trace__sched_stat_runtime() argument
2964 struct thread *thread = machine__findnew_thread(trace->host, in trace__sched_stat_runtime()
2965 sample->pid, in trace__sched_stat_runtime()
2966 sample->tid); in trace__sched_stat_runtime()
2967 struct thread_trace *ttrace = thread__trace(thread, trace->output); in trace__sched_stat_runtime()
2972 ttrace->runtime_ms += runtime_ms; in trace__sched_stat_runtime()
2973 trace->runtime_ms += runtime_ms; in trace__sched_stat_runtime()
2979 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n", in trace__sched_stat_runtime()
2980 evsel->name, in trace__sched_stat_runtime()
2988 static int bpf_output__printer(enum binary_printer_ops op, in bpf_output__printer()
3012 static void bpf_output__fprintf(struct trace *trace, in bpf_output__fprintf() argument
3015 binary__fprintf(sample->raw_data, sample->raw_size, 8, in bpf_output__fprintf()
3016 bpf_output__printer, NULL, trace->output); in bpf_output__fprintf()
3017 ++trace->nr_events_printed; in bpf_output__fprintf()
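binary__fprintf() is a perf helper driven by the bpf_output__printer() callback above; for the bpf-output payload it amounts to a hex dump, eight bytes per row. A self-contained approximation:

#include <stdio.h>
#include <stddef.h>

/* Rough stand-in for the binary__fprintf() call above. */
static void sketch_hexdump(FILE *out, const unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		fprintf(out, "%02x%c", buf[i],
			(i % 8 == 7 || i + 1 == len) ? '\n' : ' ');
}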
3020 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample… in trace__fprintf_tp_fields() argument
3026 struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL; in trace__fprintf_tp_fields()
3038 .trace = trace, in trace__fprintf_tp_fields()
3040 .show_string_prefix = trace->show_string_prefix, in trace__fprintf_tp_fields()
3043 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) { in trace__fprintf_tp_fields()
3049 if (field->flags & TEP_FIELD_IS_ARRAY) { in trace__fprintf_tp_fields()
3050 int offset = field->offset; in trace__fprintf_tp_fields()
3052 if (field->flags & TEP_FIELD_IS_DYNAMIC) { in trace__fprintf_tp_fields()
3053 offset = format_field__intval(field, sample, evsel->needs_swap); in trace__fprintf_tp_fields()
3056 if (tep_field_is_relative(field->flags)) in trace__fprintf_tp_fields()
3057 offset += field->offset + field->size; in trace__fprintf_tp_fields()
3060 val = (uintptr_t)(sample->raw_data + offset); in trace__fprintf_tp_fields()
3062 val = format_field__intval(field, sample, evsel->needs_swap); in trace__fprintf_tp_fields()
3070 if (val == 0 && !trace->show_zeros && !arg->show_zero && arg->strtoul != STUL_BTF_TYPE) in trace__fprintf_tp_fields()
3073 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); in trace__fprintf_tp_fields()
3075 if (trace->show_arg_names) in trace__fprintf_tp_fields()
3076 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); in trace__fprintf_tp_fields()
3078 …btf_printed = trace__btf_scnprintf(trace, &syscall_arg, bf + printed, size - printed, val, field->… in trace__fprintf_tp_fields()
3084 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val); in trace__fprintf_tp_fields()
3087 return printed + fprintf(trace->output, "%.*s", (int)printed, bf); in trace__fprintf_tp_fields()
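The TEP_FIELD_IS_DYNAMIC handling above relies on the common tracepoint __data_loc layout, where a 32-bit value packs the payload length in the high 16 bits and its offset into raw_data in the low 16 bits (the relative variant additionally adds field->offset + field->size). A minimal decoder written against that assumption:

/* Sketch of the assumed __data_loc encoding. */
static const void *sketch_dyn_field(const void *raw_data, unsigned int loc,
				    unsigned int *lenp)
{
	*lenp = loc >> 16;				/* length */
	return (const char *)raw_data + (loc & 0xffff);	/* offset */
}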
3090 static int trace__event_handler(struct trace *trace, struct evsel *evsel, in trace__event_handler() argument
3097 if (evsel->nr_events_printed >= evsel->max_events) in trace__event_handler()
3100 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__event_handler()
3102 if (sample->callchain) { in trace__event_handler()
3105 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); in trace__event_handler()
3107 if (cursor->nr < trace->min_stack) in trace__event_handler()
3113 trace__printf_interrupted_entry(trace); in trace__event_handler()
3114 trace__fprintf_tstamp(trace, sample->time, trace->output); in trace__event_handler()
3116 if (trace->trace_syscalls && trace->show_duration) in trace__event_handler()
3117 fprintf(trace->output, "( ): "); in trace__event_handler()
3120 trace__fprintf_comm_tid(trace, thread, trace->output); in trace__event_handler()
3122 if (evsel == trace->syscalls.events.bpf_output) { in trace__event_handler()
3123 int id = perf_evsel__sc_tp_uint(evsel, id, sample); in trace__event_handler() local
3124 struct syscall *sc = trace__syscall_info(trace, evsel, id); in trace__event_handler()
3127 fprintf(trace->output, "%s(", sc->name); in trace__event_handler()
3128 trace__fprintf_sys_enter(trace, evsel, sample); in trace__event_handler()
3129 fputc(')', trace->output); in trace__event_handler()
3140 fprintf(trace->output, "%s(", evsel->name); in trace__event_handler()
3143 bpf_output__fprintf(trace, sample); in trace__event_handler()
3147 if (tp_format && (strncmp(tp_format->name, "sys_enter_", 10) || in trace__event_handler()
3148 trace__fprintf_sys_enter(trace, evsel, sample))) { in trace__event_handler()
3149 if (trace->libtraceevent_print) { in trace__event_handler()
3150 event_format__fprintf(tp_format, sample->cpu, in trace__event_handler()
3151 sample->raw_data, sample->raw_size, in trace__event_handler()
3152 trace->output); in trace__event_handler()
3154 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0); in trace__event_handler()
3160 fprintf(trace->output, ")\n"); in trace__event_handler()
3163 trace__fprintf_callchain(trace, sample); in trace__event_handler()
3167 ++trace->nr_events_printed; in trace__event_handler()
3169 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) { in trace__event_handler()
3178 static void print_location(FILE *f, struct perf_sample *sample, in print_location()
3183 if ((verbose > 0 || print_dso) && al->map) in print_location()
3184 fprintf(f, "%s@", dso__long_name(map__dso(al->map))); in print_location()
3186 if ((verbose > 0 || print_sym) && al->sym) in print_location()
3187 fprintf(f, "%s+0x%" PRIx64, al->sym->name, in print_location()
3188 al->addr - al->sym->start); in print_location()
3189 else if (al->map) in print_location()
3190 fprintf(f, "0x%" PRIx64, al->addr); in print_location()
3192 fprintf(f, "0x%" PRIx64, sample->addr); in print_location()
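/*
 * Illustrative print_location() output (hypothetical addresses and symbols):
 * with DSO and symbol printing enabled a resolved address renders as
 * "libc.so.6@__memmove_avx_unaligned+0x1a"; with only a map it falls back to
 * "0x7f2a4c0d31a0", and with nothing resolved the raw sample->addr is printed.
 */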
3195 static int trace__pgfault(struct trace *trace, in trace__pgfault() argument
3204 int err = -1; in trace__pgfault()
3208 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__pgfault()
3210 if (sample->callchain) { in trace__pgfault()
3213 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); in trace__pgfault()
3215 if (cursor->nr < trace->min_stack) in trace__pgfault()
3221 ttrace = thread__trace(thread, trace->output); in trace__pgfault()
3225 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ) in trace__pgfault()
3226 ttrace->pfmaj++; in trace__pgfault()
3228 ttrace->pfmin++; in trace__pgfault()
3230 if (trace->summary_only) in trace__pgfault()
3233 thread__find_symbol(thread, sample->cpumode, sample->ip, &al); in trace__pgfault()
3235 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); in trace__pgfault()
3237 fprintf(trace->output, "%sfault [", in trace__pgfault()
3238 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ? in trace__pgfault()
3241 print_location(trace->output, sample, &al, false, true); in trace__pgfault()
3243 fprintf(trace->output, "] => "); in trace__pgfault()
3245 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); in trace__pgfault()
3248 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); in trace__pgfault()
3256 print_location(trace->output, sample, &al, true, false); in trace__pgfault()
3258 fprintf(trace->output, " (%c%c)\n", map_type, al.level); in trace__pgfault()
3261 trace__fprintf_callchain(trace, sample); in trace__pgfault()
3265 ++trace->nr_events_printed; in trace__pgfault()
3274 static void trace__set_base_time(struct trace *trace, in trace__set_base_time() argument
3280 * and don't use sample->time unconditionally, we may end up having in trace__set_base_time()
3286 if (trace->base_time == 0 && !trace->full_time && in trace__set_base_time()
3287 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) in trace__set_base_time()
3288 trace->base_time = sample->time; in trace__set_base_time()
3291 static int trace__process_sample(const struct perf_tool *tool, in trace__process_sample()
3297 struct trace *trace = container_of(tool, struct trace, tool); in trace__process_sample() local
3301 tracepoint_handler handler = evsel->handler; in trace__process_sample()
3303 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__process_sample()
3307 trace__set_base_time(trace, evsel, sample); in trace__process_sample()
3310 ++trace->nr_events; in trace__process_sample()
3311 handler(trace, evsel, event, sample); in trace__process_sample()
3318 static int trace__record(struct trace *trace, int argc, const char **argv) in trace__record() argument
3324 "-R", in trace__record()
3325 "-m", "1024", in trace__record()
3326 "-c", "1", in trace__record()
3330 const char * const sc_args[] = { "-e", }; in trace__record()
3332 const char * const majpf_args[] = { "-e", "major-faults" }; in trace__record()
3334 const char * const minpf_args[] = { "-e", "minor-faults" }; in trace__record()
3336 int err = -1; in trace__record()
3350 if (trace->trace_syscalls) { in trace__record()
3354 /* event string may be different for older kernels - e.g., RHEL6 */ in trace__record()
3365 rec_argv[j++] = "--filter"; in trace__record()
3368 if (trace->trace_pgfaults & TRACE_PFMAJ) in trace__record()
3372 if (trace->trace_pgfaults & TRACE_PFMIN) in trace__record()
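For illustration, the rec_argv assembled above approximates the array below before it is handed to cmd_record(); the syscall event string and the fault events vary with the kernel and the requested --pf mode, so the literals are hypothetical:

static const char *sketch_rec_argv[] = {
	"record", "-R", "-m", "1024", "-c", "1",
	"-e", "raw_syscalls:sys_enter,raw_syscalls:sys_exit",
	"-e", "major-faults",
	"-e", "minor-faults",
	/* ...the traced workload's argv is appended here... */
};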
3386 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3388 static bool evlist__add_vfs_getname(struct evlist *evlist) in evlist__add_vfs_getname()
3406 evsel->handler = trace__vfs_getname; in evlist__add_vfs_getname()
3411 list_del_init(&evsel->core.node); in evlist__add_vfs_getname()
3412 evsel->evlist = NULL; in evlist__add_vfs_getname()
3419 static struct evsel *evsel__new_pgfault(u64 config) in evsel__new_pgfault()
3434 evsel->handler = trace__pgfault; in evsel__new_pgfault()
3439 static void evlist__free_syscall_tp_fields(struct evlist *evlist) in evlist__free_syscall_tp_fields()
3444 evsel_trace__delete(evsel->priv); in evlist__free_syscall_tp_fields()
3445 evsel->priv = NULL; in evlist__free_syscall_tp_fields()
3449 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *s… in trace__handle_event() argument
3451 const u32 type = event->header.type; in trace__handle_event()
3455 trace__process_event(trace, trace->host, event, sample); in trace__handle_event()
3459 evsel = evlist__id2evsel(trace->evlist, sample->id); in trace__handle_event()
3461 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id); in trace__handle_event()
3465 if (evswitch__discard(&trace->evswitch, evsel)) in trace__handle_event()
3468 trace__set_base_time(trace, evsel, sample); in trace__handle_event()
3470 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && in trace__handle_event()
3471 sample->raw_data == NULL) { in trace__handle_event()
3472 …fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", in trace__handle_event()
3473 evsel__name(evsel), sample->tid, in trace__handle_event()
3474 sample->cpu, sample->raw_size); in trace__handle_event()
3476 tracepoint_handler handler = evsel->handler; in trace__handle_event()
3477 handler(trace, evsel, event, sample); in trace__handle_event()
3480 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX) in trace__handle_event()
3484 static int trace__add_syscall_newtp(struct trace *trace) in trace__add_syscall_newtp() argument
3486 int ret = -1; in trace__add_syscall_newtp()
3487 struct evlist *evlist = trace->evlist; in trace__add_syscall_newtp()
3504 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param); in trace__add_syscall_newtp()
3505 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param); in trace__add_syscall_newtp()
3510 if (callchain_param.enabled && !trace->kernel_syscallchains) { in trace__add_syscall_newtp()
3514 * debugging reasons using --kernel_syscall_callchains in trace__add_syscall_newtp()
3516 sys_exit->core.attr.exclude_callchain_kernel = 1; in trace__add_syscall_newtp()
3519 trace->syscalls.events.sys_enter = sys_enter; in trace__add_syscall_newtp()
3520 trace->syscalls.events.sys_exit = sys_exit; in trace__add_syscall_newtp()
3533 static int trace__set_ev_qualifier_tp_filter(struct trace *trace) in trace__set_ev_qualifier_tp_filter() argument
3535 int err = -1; in trace__set_ev_qualifier_tp_filter()
3537 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier, in trace__set_ev_qualifier_tp_filter()
3538 trace->ev_qualifier_ids.nr, in trace__set_ev_qualifier_tp_filter()
3539 trace->ev_qualifier_ids.entries); in trace__set_ev_qualifier_tp_filter()
3544 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) { in trace__set_ev_qualifier_tp_filter()
3545 sys_exit = trace->syscalls.events.sys_exit; in trace__set_ev_qualifier_tp_filter()
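The qualifier is pushed down as an ordinary tracepoint filter over the 'id' field; the exact string comes from asprintf_expr_inout_ints(), but an expression of that general shape (an assumption, not the helper's verbatim output) can be built standalone:

#include <stdio.h>

/* Hypothetical builder: nr=2, ids={0, 257} -> "id == 0 || id == 257". */
static char *sketch_id_filter(const char *var, int nr, const int *ids)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *m = open_memstream(&buf, &len);

	if (m == NULL)
		return NULL;
	for (int i = 0; i < nr; i++)
		fprintf(m, "%s%s == %d", i ? " || " : "", var, ids[i]);
	fclose(m);
	return buf;		/* caller frees */
}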
3558 static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char… in syscall_arg_fmt__cache_btf_struct()
3560 int id; in syscall_arg_fmt__cache_btf_struct() local
3562 if (arg_fmt->type != NULL) in syscall_arg_fmt__cache_btf_struct()
3563 return -1; in syscall_arg_fmt__cache_btf_struct()
3565 id = btf__find_by_name(btf, type); in syscall_arg_fmt__cache_btf_struct()
3566 if (id < 0) in syscall_arg_fmt__cache_btf_struct()
3567 return -1; in syscall_arg_fmt__cache_btf_struct()
3569 arg_fmt->type = btf__type_by_id(btf, id); in syscall_arg_fmt__cache_btf_struct()
3570 arg_fmt->type_id = id; in syscall_arg_fmt__cache_btf_struct()
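btf__find_by_name() and btf__type_by_id() are plain libbpf BTF API, so the lookup above can be reproduced standalone, e.g. to resolve a named type's size from an already loaded BTF object:

#include <bpf/btf.h>

/* Returns the size in bytes, or -1 if absent; assumes a sized kind
 * (struct/union/enum/int), as the cache above does for struct arguments. */
static long sketch_btf_type_size(struct btf *btf, const char *name)
{
	__s32 id = btf__find_by_name(btf, name);

	if (id < 0)
		return -1;
	return (long)btf__type_by_id(btf, id)->size;
}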
3575 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) in trace__find_bpf_program_by_title() argument
3580 if (trace->skel->obj == NULL) in trace__find_bpf_program_by_title()
3583 bpf_object__for_each_program(pos, trace->skel->obj) { in trace__find_bpf_program_by_title()
3594 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, in trace__find_syscall_bpf_prog() argument
3601 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name); in trace__find_syscall_bpf_prog()
3602 prog = trace__find_bpf_program_by_title(trace, default_prog_name); in trace__find_syscall_bpf_prog()
3605 if (sc->fmt && sc->fmt->alias) { in trace__find_syscall_bpf_prog()
3606 …scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->al… in trace__find_syscall_bpf_prog()
3607 prog = trace__find_bpf_program_by_title(trace, default_prog_name); in trace__find_syscall_bpf_prog()
3614 prog = trace__find_bpf_program_by_title(trace, prog_name); in trace__find_syscall_bpf_prog()
3622 prog_name, type, sc->name); in trace__find_syscall_bpf_prog()
3624 return trace->skel->progs.syscall_unaugmented; in trace__find_syscall_bpf_prog()
3627 static void trace__init_syscall_bpf_progs(struct trace *trace, int id) in trace__init_syscall_bpf_progs() argument
3629 struct syscall *sc = trace__syscall_info(trace, NULL, id); in trace__init_syscall_bpf_progs()
3634 …sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.… in trace__init_syscall_bpf_progs()
3635 …sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.… in trace__init_syscall_bpf_progs()
3638 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id) in trace__bpf_prog_sys_enter_fd() argument
3640 struct syscall *sc = trace__syscall_info(trace, NULL, id); in trace__bpf_prog_sys_enter_fd()
3641 …return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_u… in trace__bpf_prog_sys_enter_fd()
3644 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id) in trace__bpf_prog_sys_exit_fd() argument
3646 struct syscall *sc = trace__syscall_info(trace, NULL, id); in trace__bpf_prog_sys_exit_fd()
3647 …return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_un… in trace__bpf_prog_sys_exit_fd()
3650 static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int key, unsigned int *beauty_array) in trace__bpf_sys_enter_beauty_map() argument
3653 struct syscall *sc = trace__syscall_info(trace, NULL, key); in trace__bpf_sys_enter_beauty_map()
3660 return -1; in trace__bpf_sys_enter_beauty_map()
3662 trace__load_vmlinux_btf(trace); in trace__bpf_sys_enter_beauty_map()
3663 if (trace->btf == NULL) in trace__bpf_sys_enter_beauty_map()
3664 return -1; in trace__bpf_sys_enter_beauty_map()
3666 for (i = 0, field = sc->args; field; ++i, field = field->next) { in trace__bpf_sys_enter_beauty_map()
3668 if (!sc->arg_fmt[i].from_user) in trace__bpf_sys_enter_beauty_map()
3671 struct_offset = strstr(field->type, "struct "); in trace__bpf_sys_enter_beauty_map()
3673 struct_offset = strstr(field->type, "union "); in trace__bpf_sys_enter_beauty_map()
3677 …if (field->flags & TEP_FIELD_IS_POINTER && struct_offset) { /* struct or union (think BPF's attr a… in trace__bpf_sys_enter_beauty_map()
3688 if (syscall_arg_fmt__cache_btf_struct(&sc->arg_fmt[i], trace->btf, name)) in trace__bpf_sys_enter_beauty_map()
3691 bt = sc->arg_fmt[i].type; in trace__bpf_sys_enter_beauty_map()
3692 beauty_array[i] = bt->size; in trace__bpf_sys_enter_beauty_map()
3694 } else if (field->flags & TEP_FIELD_IS_POINTER && /* string */ in trace__bpf_sys_enter_beauty_map()
3695 strcmp(field->type, "const char *") == 0 && in trace__bpf_sys_enter_beauty_map()
3696 (strstr(field->name, "name") || in trace__bpf_sys_enter_beauty_map()
3697 strstr(field->name, "path") || in trace__bpf_sys_enter_beauty_map()
3698 strstr(field->name, "file") || in trace__bpf_sys_enter_beauty_map()
3699 strstr(field->name, "root") || in trace__bpf_sys_enter_beauty_map()
3700 strstr(field->name, "key") || in trace__bpf_sys_enter_beauty_map()
3701 strstr(field->name, "special") || in trace__bpf_sys_enter_beauty_map()
3702 strstr(field->name, "type") || in trace__bpf_sys_enter_beauty_map()
3703 strstr(field->name, "description"))) { in trace__bpf_sys_enter_beauty_map()
3706 } else if (field->flags & TEP_FIELD_IS_POINTER && /* buffer */ in trace__bpf_sys_enter_beauty_map()
3707 strstr(field->type, "char *") && in trace__bpf_sys_enter_beauty_map()
3708 (strstr(field->name, "buf") || in trace__bpf_sys_enter_beauty_map()
3709 strstr(field->name, "val") || in trace__bpf_sys_enter_beauty_map()
3710 strstr(field->name, "msg"))) { in trace__bpf_sys_enter_beauty_map()
3715 for (j = 0, field_tmp = sc->args; field_tmp; ++j, field_tmp = field_tmp->next) { in trace__bpf_sys_enter_beauty_map()
3716 if (!(field_tmp->flags & TEP_FIELD_IS_POINTER) && /* only integers */ in trace__bpf_sys_enter_beauty_map()
3717 (strstr(field_tmp->name, "count") || in trace__bpf_sys_enter_beauty_map()
3718 strstr(field_tmp->name, "siz") || /* size, bufsiz */ in trace__bpf_sys_enter_beauty_map()
3719 (strstr(field_tmp->name, "len") && strcmp(field_tmp->name, "filename")))) { in trace__bpf_sys_enter_beauty_map()
3721 beauty_array[i] = -(j + 1); in trace__bpf_sys_enter_beauty_map()
3732 return -1; in trace__bpf_sys_enter_beauty_map()
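Taken together, the branches above leave one descriptor per pointer argument in beauty_array; the decoder below is written against that assumed encoding: 1 means NUL-terminated string, a value greater than 1 is a fixed struct/union size, and a negative value says the length is carried in another syscall argument.

/* Sketch of consuming one beauty_array entry; args[] are the six syscall args. */
static unsigned long sketch_copy_len(int entry, const unsigned long *args)
{
	if (entry > 1)
		return (unsigned long)entry;	/* fixed-size struct/union */
	if (entry == 1)
		return 0;			/* string: copy up to the NUL */
	if (entry < 0)
		return args[-entry - 1];	/* length held in arg (-entry - 1) */
	return 0;				/* not augmented */
}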
3735 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *s… in trace__find_usable_bpf_prog_entry() argument
3741 for (field = sc->args; field; field = field->next) { in trace__find_usable_bpf_prog_entry()
3742 if (field->flags & TEP_FIELD_IS_POINTER) in trace__find_usable_bpf_prog_entry()
3749 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { in trace__find_usable_bpf_prog_entry()
3750 int id = syscalltbl__id_at_idx(trace->sctbl, i); in trace__find_usable_bpf_prog_entry() local
3751 struct syscall *pair = trace__syscall_info(trace, NULL, id); in trace__find_usable_bpf_prog_entry()
3756 pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented) in trace__find_usable_bpf_prog_entry()
3759 for (field = sc->args, candidate_field = pair->args; in trace__find_usable_bpf_prog_entry()
3760 field && candidate_field; field = field->next, candidate_field = candidate_field->next) { in trace__find_usable_bpf_prog_entry()
3761 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER, in trace__find_usable_bpf_prog_entry()
3762 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER; in trace__find_usable_bpf_prog_entry()
3777 if (strcmp(field->type, candidate_field->type)) in trace__find_usable_bpf_prog_entry()
3785 if (strcmp(field->type, "const char *") == 0 && in trace__find_usable_bpf_prog_entry()
3786 !(strstr(field->name, "name") || in trace__find_usable_bpf_prog_entry()
3787 strstr(field->name, "path") || in trace__find_usable_bpf_prog_entry()
3788 strstr(field->name, "file") || in trace__find_usable_bpf_prog_entry()
3789 strstr(field->name, "root") || in trace__find_usable_bpf_prog_entry()
3790 strstr(field->name, "description"))) in trace__find_usable_bpf_prog_entry()
3805 …for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->… in trace__find_usable_bpf_prog_entry()
3806 if (candidate_field->flags & TEP_FIELD_IS_POINTER) in trace__find_usable_bpf_prog_entry()
3810 pair_prog = pair->bpf_prog.sys_enter; in trace__find_usable_bpf_prog_entry()
3815 * program for a filtered syscall on a non-filtered one. in trace__find_usable_bpf_prog_entry()
3821 …pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_ent… in trace__find_usable_bpf_prog_entry()
3822 if (pair_prog == trace->skel->progs.syscall_unaugmented) in trace__find_usable_bpf_prog_entry()
3826 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name); in trace__find_usable_bpf_prog_entry()
3835 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace) in trace__init_syscalls_bpf_prog_array_maps() argument
3837 int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter); in trace__init_syscalls_bpf_prog_array_maps()
3838 int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit); in trace__init_syscalls_bpf_prog_array_maps()
3839 int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter); in trace__init_syscalls_bpf_prog_array_maps()
3843 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { in trace__init_syscalls_bpf_prog_array_maps()
3844 int prog_fd, key = syscalltbl__id_at_idx(trace->sctbl, i); in trace__init_syscalls_bpf_prog_array_maps()
3846 if (!trace__syscall_enabled(trace, key)) in trace__init_syscalls_bpf_prog_array_maps()
3849 trace__init_syscall_bpf_progs(trace, key); in trace__init_syscalls_bpf_prog_array_maps()
3852 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key); in trace__init_syscalls_bpf_prog_array_maps()
3856 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key); in trace__init_syscalls_bpf_prog_array_maps()
3863 err = trace__bpf_sys_enter_beauty_map(trace, key, (unsigned int *)beauty_array); in trace__init_syscalls_bpf_prog_array_maps()
3874 * syscall with an augmenter so that we can auto-reuse it. in trace__init_syscalls_bpf_prog_array_maps()
3899 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { in trace__init_syscalls_bpf_prog_array_maps()
3900 int key = syscalltbl__id_at_idx(trace->sctbl, i); in trace__init_syscalls_bpf_prog_array_maps()
3901 struct syscall *sc = trace__syscall_info(trace, NULL, key); in trace__init_syscalls_bpf_prog_array_maps()
3905 if (sc == NULL || sc->bpf_prog.sys_enter == NULL) in trace__init_syscalls_bpf_prog_array_maps()
3912 if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented) in trace__init_syscalls_bpf_prog_array_maps()
3919 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc); in trace__init_syscalls_bpf_prog_array_maps()
3923 sc->bpf_prog.sys_enter = pair_prog; in trace__init_syscalls_bpf_prog_array_maps()
3929 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter); in trace__init_syscalls_bpf_prog_array_maps()
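The elided map updates presumably go through the regular bpf(2) wrappers; installing one per-syscall program fd into a BPF_MAP_TYPE_PROG_ARRAY slot, so the generic sys_enter/sys_exit programs can tail-call it by syscall id, is a one-liner with libbpf:

#include <bpf/bpf.h>

/* Sketch: map_fd refers to a PROG_ARRAY keyed by syscall id. */
static int sketch_install_augmenter(int map_fd, int syscall_id, int prog_fd)
{
	return bpf_map_update_elem(map_fd, &syscall_id, &prog_fd, BPF_ANY);
}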
3939 static int trace__set_ev_qualifier_filter(struct trace *trace) in trace__set_ev_qualifier_filter() argument
3941 if (trace->syscalls.events.sys_enter) in trace__set_ev_qualifier_filter()
3942 return trace__set_ev_qualifier_tp_filter(trace); in trace__set_ev_qualifier_filter()
3946 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused, in bpf_map__set_filter_pids()
3964 static int trace__set_filter_loop_pids(struct trace *trace) in trace__set_filter_loop_pids() argument
3970 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]); in trace__set_filter_loop_pids()
3973 struct thread *parent = machine__find_thread(trace->host, in trace__set_filter_loop_pids()
3981 strstarts(thread__comm_str(parent), "gnome-terminal")) { in trace__set_filter_loop_pids()
3988 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids); in trace__set_filter_loop_pids()
3989 if (!err && trace->filter_pids.map) in trace__set_filter_loop_pids()
3990 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids); in trace__set_filter_loop_pids()
3995 static int trace__set_filter_pids(struct trace *trace) in trace__set_filter_pids() argument
4004 if (trace->filter_pids.nr > 0) { in trace__set_filter_pids()
4005 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr, in trace__set_filter_pids()
4006 trace->filter_pids.entries); in trace__set_filter_pids()
4007 if (!err && trace->filter_pids.map) { in trace__set_filter_pids()
4008 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr, in trace__set_filter_pids()
4009 trace->filter_pids.entries); in trace__set_filter_pids()
4011 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) { in trace__set_filter_pids()
4012 err = trace__set_filter_loop_pids(trace); in trace__set_filter_pids()
4018 static int __trace__deliver_event(struct trace *trace, union perf_event *event) in __trace__deliver_event() argument
4020 struct evlist *evlist = trace->evlist; in __trace__deliver_event()
4025 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); in __trace__deliver_event()
4027 trace__handle_event(trace, event, &sample); in __trace__deliver_event()
4032 static int __trace__flush_events(struct trace *trace) in __trace__flush_events() argument
4034 u64 first = ordered_events__first_time(&trace->oe.data); in __trace__flush_events()
4035 u64 flush = trace->oe.last - NSEC_PER_SEC; in __trace__flush_events()
4039 return ordered_events__flush_time(&trace->oe.data, flush); in __trace__flush_events()
4044 static int trace__flush_events(struct trace *trace) in trace__flush_events() argument
4046 return !trace->sort_events ? 0 : __trace__flush_events(trace); in trace__flush_events()
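/*
 * Worked example for the flush window above: with --sort-events, if the
 * newest queued timestamp is 12.4s then only events up to 11.4s
 * (oe.last - NSEC_PER_SEC) are emitted on this pass, so ordered output can
 * trail the live stream by roughly one second.
 */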
4049 static int trace__deliver_event(struct trace *trace, union perf_event *event) in trace__deliver_event() argument
4053 if (!trace->sort_events) in trace__deliver_event()
4054 return __trace__deliver_event(trace, event); in trace__deliver_event()
4056 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last); in trace__deliver_event()
4057 if (err && err != -1) in trace__deliver_event()
4060 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL); in trace__deliver_event()
4064 return trace__flush_events(trace); in trace__deliver_event()
4067 static int ordered_events__deliver_event(struct ordered_events *oe, in ordered_events__deliver_event()
4070 struct trace *trace = container_of(oe, struct trace, oe.data); in ordered_events__deliver_event() local
4072 return __trace__deliver_event(trace, event->event); in ordered_events__deliver_event()
4075 static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg, in evsel__find_syscall_arg_fmt_by_name()
4088 for (const struct tep_format_field *field = tp_format->format.fields; field; in evsel__find_syscall_arg_fmt_by_name()
4089 field = field->next, ++fmt) { in evsel__find_syscall_arg_fmt_by_name()
4090 if (strcmp(field->name, arg) == 0) { in evsel__find_syscall_arg_fmt_by_name()
4091 *type = field->type; in evsel__find_syscall_arg_fmt_by_name()
4099 static int trace__expand_filter(struct trace *trace, struct evsel *evsel) in trace__expand_filter() argument
4101 char *tok, *left = evsel->filter, *new_filter = evsel->filter; in trace__expand_filter()
4130 int left_size = tok - left, in trace__expand_filter()
4131 right_size = right_end - right; in trace__expand_filter()
4134 while (isspace(left[left_size - 1])) in trace__expand_filter()
4135 --left_size; in trace__expand_filter()
4142 arg, evsel->name, evsel->filter); in trace__expand_filter()
4143 return -1; in trace__expand_filter()
4146 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ", in trace__expand_filter()
4147 arg, (int)(right - tok), tok, right_size, right); in trace__expand_filter()
4149 if (fmt->strtoul) { in trace__expand_filter()
4152 .trace = trace, in trace__expand_filter()
4155 .parm = fmt->parm, in trace__expand_filter()
4158 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) { in trace__expand_filter()
4161 int expansion_offset = right - new_filter; in trace__expand_filter()
4168 return -1; in trace__expand_filter()
4170 if (new_filter != evsel->filter) in trace__expand_filter()
4176 right_size, right, arg, evsel->name, evsel->filter); in trace__expand_filter()
4177 return -1; in trace__expand_filter()
4181 arg, evsel->name, evsel->filter); in trace__expand_filter()
4182 return -1; in trace__expand_filter()
4191 if (new_filter != evsel->filter) { in trace__expand_filter()
4192 pr_debug("New filter for %s: %s\n", evsel->name, new_filter); in trace__expand_filter()
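/*
 * Hypothetical expansion, assuming the argument has a ->strtoul handler: a
 * qualifier written as "flags==RENAME_NOREPLACE" would be rewritten with the
 * flag's numeric value before being installed as a kernel tracepoint filter,
 * which only understands numbers.
 */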
4200 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel) in trace__expand_filters() argument
4202 struct evlist *evlist = trace->evlist; in trace__expand_filters()
4206 if (evsel->filter == NULL) in trace__expand_filters()
4209 if (trace__expand_filter(trace, evsel)) { in trace__expand_filters()
4211 return -1; in trace__expand_filters()
4218 static int trace__run(struct trace *trace, int argc, const char **argv) in trace__run() argument
4220 struct evlist *evlist = trace->evlist; in trace__run()
4222 int err = -1, i; in trace__run()
4227 trace->live = true; in trace__run()
4229 if (!trace->raw_augmented_syscalls) { in trace__run()
4230 if (trace->trace_syscalls && trace__add_syscall_newtp(trace)) in trace__run()
4233 if (trace->trace_syscalls) in trace__run()
4234 trace->vfs_getname = evlist__add_vfs_getname(evlist); in trace__run()
4237 if ((trace->trace_pgfaults & TRACE_PFMAJ)) { in trace__run()
4241 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param); in trace__run()
4245 if ((trace->trace_pgfaults & TRACE_PFMIN)) { in trace__run()
4249 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param); in trace__run()
4253 /* Enable ignoring missing threads when -u/-p option is defined. */ in trace__run()
4254 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid; in trace__run()
4256 if (trace->sched && in trace__run()
4263 * trace -G A -e sched:*switch in trace__run()
4268 * trace -e sched:*switch -G A in trace__run()
4276 * trace -G A -e sched:*switch -G B in trace__run()
4282 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL. in trace__run()
4284 if (trace->cgroup) in trace__run()
4285 evlist__set_default_cgroup(trace->evlist, trace->cgroup); in trace__run()
4287 err = evlist__create_maps(evlist, &trace->opts.target); in trace__run()
4289 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n"); in trace__run()
4293 err = trace__symbols_init(trace, evlist); in trace__run()
4295 fprintf(trace->output, "Problems initializing symbol libraries!\n"); in trace__run()
4299 evlist__config(evlist, &trace->opts, &callchain_param); in trace__run()
4302 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL); in trace__run()
4304 fprintf(trace->output, "Couldn't run the workload!\n"); in trace__run()
4307 workload_pid = evlist->workload.pid; in trace__run()
4314 if (trace->syscalls.events.bpf_output) { in trace__run()
4319 * CPU the bpf-output event's file descriptor. in trace__run()
4321 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) { in trace__run()
4322 bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__, in trace__run()
4324 xyarray__entry(trace->syscalls.events.bpf_output->core.fd, in trace__run()
4330 if (trace->skel) in trace__run()
4331 trace->filter_pids.map = trace->skel->maps.pids_filtered; in trace__run()
4333 err = trace__set_filter_pids(trace); in trace__run()
4338 if (trace->skel && trace->skel->progs.sys_enter) in trace__run()
4339 trace__init_syscalls_bpf_prog_array_maps(trace); in trace__run()
4342 if (trace->ev_qualifier_ids.nr > 0) { in trace__run()
4343 err = trace__set_ev_qualifier_filter(trace); in trace__run()
4347 if (trace->syscalls.events.sys_exit) { in trace__run()
4349 trace->syscalls.events.sys_exit->filter); in trace__run()
4356 * fd->pathname table and were ending up showing the last value set by in trace__run()
4364 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close")); in trace__run()
4366 err = trace__expand_filters(trace, &evsel); in trace__run()
4369 err = evlist__apply_filters(evlist, &evsel, &trace->opts.target); in trace__run()
4373 err = evlist__mmap(evlist, trace->opts.mmap_pages); in trace__run()
4377 if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay) in trace__run()
4383 if (trace->opts.target.initial_delay) { in trace__run()
4384 usleep(trace->opts.target.initial_delay * 1000); in trace__run()
4388 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 || in trace__run()
4389 perf_thread_map__nr(evlist->core.threads) > 1 || in trace__run()
4390 evlist__first(evlist)->core.attr.inherit; in trace__run()
4393 * Now that we already used evsel->core.attr to ask the kernel to setup the in trace__run()
4394 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in in trace__run()
4395 * trace__resolve_callchain(), allowing per-event max-stack settings in trace__run()
4396 * to override an explicitly set --max-stack global setting. in trace__run()
4400 evsel->core.attr.sample_max_stack == 0) in trace__run()
4401 evsel->core.attr.sample_max_stack = trace->max_stack; in trace__run()
4404 before = trace->nr_events; in trace__run()
4406 for (i = 0; i < evlist->core.nr_mmaps; i++) { in trace__run()
4410 md = &evlist->mmap[i]; in trace__run()
4411 if (perf_mmap__read_init(&md->core) < 0) in trace__run()
4414 while ((event = perf_mmap__read_event(&md->core)) != NULL) { in trace__run()
4415 ++trace->nr_events; in trace__run()
4417 err = trace__deliver_event(trace, event); in trace__run()
4421 perf_mmap__consume(&md->core); in trace__run()
4431 perf_mmap__read_done(&md->core); in trace__run()
4434 if (trace->nr_events == before) { in trace__run()
4435 int timeout = done ? 100 : -1; in trace__run()
4443 if (trace__flush_events(trace)) in trace__run()
4451 thread__zput(trace->current); in trace__run()
4455 if (trace->sort_events) in trace__run()
4456 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL); in trace__run()
4459 if (trace->summary) in trace__run()
4460 trace__fprintf_thread_summary(trace, trace->output); in trace__run()
4462 if (trace->show_tool_stats) { in trace__run()
4463 fprintf(trace->output, "Stats:\n " in trace__run()
4466 trace->stats.vfs_getname, in trace__run()
4467 trace->stats.proc_getname); in trace__run()
4472 trace__symbols__exit(trace); in trace__run()
4475 cgroup__put(trace->cgroup); in trace__run()
4476 trace->evlist = NULL; in trace__run()
4477 trace->live = false; in trace__run()
4498 fprintf(trace->output, "%s\n", errbuf); in trace__run()
4502 fprintf(trace->output, in trace__run()
4504 evsel->filter, evsel__name(evsel), errno, in trace__run()
4509 fprintf(trace->output, "Not enough memory to run!\n"); in trace__run()
4513 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno)); in trace__run()
4517 static int trace__replay(struct trace *trace) in trace__replay() argument
4525 .force = trace->force, in trace__replay()
4529 int err = -1; in trace__replay()
4531 trace->tool.sample = trace__process_sample; in trace__replay()
4532 trace->tool.mmap = perf_event__process_mmap; in trace__replay()
4533 trace->tool.mmap2 = perf_event__process_mmap2; in trace__replay()
4534 trace->tool.comm = perf_event__process_comm; in trace__replay()
4535 trace->tool.exit = perf_event__process_exit; in trace__replay()
4536 trace->tool.fork = perf_event__process_fork; in trace__replay()
4537 trace->tool.attr = perf_event__process_attr; in trace__replay()
4538 trace->tool.tracing_data = perf_event__process_tracing_data; in trace__replay()
4539 trace->tool.build_id = perf_event__process_build_id; in trace__replay()
4540 trace->tool.namespaces = perf_event__process_namespaces; in trace__replay()
4542 trace->tool.ordered_events = true; in trace__replay()
4543 trace->tool.ordering_requires_timestamps = true; in trace__replay()
4546 trace->multiple_threads = true; in trace__replay()
4548 session = perf_session__new(&data, &trace->tool); in trace__replay()
4552 if (trace->opts.target.pid) in trace__replay()
4553 symbol_conf.pid_list_str = strdup(trace->opts.target.pid); in trace__replay()
4555 if (trace->opts.target.tid) in trace__replay()
4556 symbol_conf.tid_list_str = strdup(trace->opts.target.tid); in trace__replay()
4558 if (symbol__init(&session->header.env) < 0) in trace__replay()
4561 trace->host = &session->machines.host; in trace__replay()
4567 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter"); in trace__replay()
4568 trace->syscalls.events.sys_enter = evsel; in trace__replay()
4571 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter"); in trace__replay()
4580 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit"); in trace__replay()
4581 trace->syscalls.events.sys_exit = evsel; in trace__replay()
4583 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit"); in trace__replay()
4591 evlist__for_each_entry(session->evlist, evsel) { in trace__replay()
4592 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && in trace__replay()
4593 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ || in trace__replay()
4594 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN || in trace__replay()
4595 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS)) in trace__replay()
4596 evsel->handler = trace__pgfault; in trace__replay()
4605 else if (trace->summary) in trace__replay()
4606 trace__fprintf_thread_summary(trace, trace->output); in trace__replay()
4614 static size_t trace__fprintf_threads_header(FILE *fp) in trace__fprintf_threads_header()
4623 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
4630 struct syscall_stats *stats = source->priv;
4632 entry->syscall = source->i;
4633 entry->stats = stats;
4634 entry->msecs = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
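/*
 * Worked example for the msecs sort key above: n = 1000 calls averaging
 * 250000 ns each gives 1000 * (250000 / NSEC_PER_MSEC) = 250, so the
 * per-thread summary lists syscalls by total time spent, largest first.
 */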
4637 static size_t thread__dump_stats(struct thread_trace *ttrace, in thread__dump_stats()
4638 struct trace *trace, FILE *fp) in thread__dump_stats() argument
4643 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats); in thread__dump_stats()
4652 …printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- ---------… in thread__dump_stats()
4655 struct syscall_stats *stats = syscall_stats_entry->stats; in thread__dump_stats()
4657 double min = (double)(stats->stats.min) / NSEC_PER_MSEC; in thread__dump_stats()
4658 double max = (double)(stats->stats.max) / NSEC_PER_MSEC; in thread__dump_stats()
4659 double avg = avg_stats(&stats->stats); in thread__dump_stats()
4661 u64 n = (u64)stats->stats.n; in thread__dump_stats()
4663 pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0; in thread__dump_stats()
4666 sc = &trace->syscalls.table[syscall_stats_entry->syscall]; in thread__dump_stats()
4667 printed += fprintf(fp, " %-15s", sc->name); in thread__dump_stats()
4669 n, stats->nr_failures, syscall_stats_entry->msecs, min, avg); in thread__dump_stats()
4672 if (trace->errno_summary && stats->nr_failures) { in thread__dump_stats()
4675 for (e = 0; e < stats->max_errno; ++e) { in thread__dump_stats()
4676 if (stats->errnos[e] != 0) in thread__dump_stats()
4677 …fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]… in thread__dump_stats()
4689 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace) in trace__fprintf_thread() argument
4698 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0; in trace__fprintf_thread()
4701 printed += fprintf(fp, "%lu events, ", ttrace->nr_events); in trace__fprintf_thread()
4703 if (ttrace->pfmaj) in trace__fprintf_thread()
4704 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj); in trace__fprintf_thread()
4705 if (ttrace->pfmin) in trace__fprintf_thread()
4706 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin); in trace__fprintf_thread()
4707 if (trace->sched) in trace__fprintf_thread()
4708 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms); in trace__fprintf_thread()
4712 printed += thread__dump_stats(ttrace, trace, fp); in trace__fprintf_thread()
4717 static unsigned long thread__nr_events(struct thread_trace *ttrace) in thread__nr_events()
4719 return ttrace ? ttrace->nr_events : 0; in thread__nr_events()
4722 static int trace_nr_events_cmp(void *priv __maybe_unused, in trace_nr_events_cmp()
4728 unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread)); in trace_nr_events_cmp()
4729 unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread)); in trace_nr_events_cmp()
4732 return a_nr_events < b_nr_events ? -1 : 1; in trace_nr_events_cmp()
4735 return thread__tid(a->thread) < thread__tid(b->thread) in trace_nr_events_cmp()
4736 ? -1 in trace_nr_events_cmp()
4737 : (thread__tid(a->thread) > thread__tid(b->thread) ? 1 : 0); in trace_nr_events_cmp()
4740 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) in trace__fprintf_thread_summary() argument
4745 if (machine__thread_list(trace->host, &threads) == 0) { in trace__fprintf_thread_summary()
4751 printed += trace__fprintf_thread(fp, pos->thread, trace); in trace__fprintf_thread_summary()
4757 static int trace__set_duration(const struct option *opt, const char *str, in trace__set_duration()
4760 struct trace *trace = opt->value; in trace__set_duration() local
4762 trace->duration_filter = atof(str); in trace__set_duration()
4766 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str, in trace__set_filter_pids_from_option()
4769 int ret = -1; in trace__set_filter_pids_from_option()
4771 struct trace *trace = opt->value; in trace__set_filter_pids_from_option() local
4779 return -1; in trace__set_filter_pids_from_option()
4781 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1; in trace__set_filter_pids_from_option()
4782 trace->filter_pids.entries = calloc(i, sizeof(pid_t)); in trace__set_filter_pids_from_option()
4784 if (trace->filter_pids.entries == NULL) in trace__set_filter_pids_from_option()
4787 trace->filter_pids.entries[0] = getpid(); in trace__set_filter_pids_from_option()
4789 for (i = 1; i < trace->filter_pids.nr; ++i) in trace__set_filter_pids_from_option()
4790 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i; in trace__set_filter_pids_from_option()
4798 static int trace__open_output(struct trace *trace, const char *filename) in trace__open_output() argument
4810 trace->output = fopen(filename, "w"); in trace__open_output()
4812 return trace->output == NULL ? -errno : 0; in trace__open_output()
4815 static int parse_pagefaults(const struct option *opt, const char *str, in parse_pagefaults()
4818 int *trace_pgfaults = opt->value; in parse_pagefaults()
4827 return -1; in parse_pagefaults()
4832 static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler) in evlist__set_default_evsel_handler()
4837 if (evsel->handler == NULL) in evlist__set_default_evsel_handler()
4838 evsel->handler = handler; in evlist__set_default_evsel_handler()
4842 static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name) in evsel__set_syscall_arg_fmt()
4855 if (strcmp(tp_format->format.fields->name, "__syscall_nr") == 0 || in evsel__set_syscall_arg_fmt()
4856 strcmp(tp_format->format.fields->name, "nr") == 0) in evsel__set_syscall_arg_fmt()
4859 memcpy(fmt + skip, scfmt->arg, in evsel__set_syscall_arg_fmt()
4860 (tp_format->format.nr_fields - skip) * sizeof(*fmt)); in evsel__set_syscall_arg_fmt()
4866 static int evlist__set_syscall_tp_fields(struct evlist *evlist, bool *use_btf) in evlist__set_syscall_tp_fields()
4873 if (evsel->priv) in evlist__set_syscall_tp_fields()
4880 if (strcmp(tp_format->system, "syscalls")) { in evlist__set_syscall_tp_fields()
4886 return -1; in evlist__set_syscall_tp_fields()
4888 if (!strncmp(tp_format->name, "sys_enter_", 10)) { in evlist__set_syscall_tp_fields()
4891 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64))) in evlist__set_syscall_tp_fields()
4892 return -1; in evlist__set_syscall_tp_fields()
4895 tp_format->name + sizeof("sys_enter_") - 1); in evlist__set_syscall_tp_fields()
4896 } else if (!strncmp(tp_format->name, "sys_exit_", 9)) { in evlist__set_syscall_tp_fields()
4899 if (__tp_field__init_uint(&sc->ret, sizeof(u64), in evlist__set_syscall_tp_fields()
4900 sc->id.offset + sizeof(u64), in evlist__set_syscall_tp_fields()
4901 evsel->needs_swap)) in evlist__set_syscall_tp_fields()
4902 return -1; in evlist__set_syscall_tp_fields()
4905 tp_format->name + sizeof("sys_exit_") - 1); in evlist__set_syscall_tp_fields()
4913 * XXX: Hackish, just splitting the combined -e+--event (syscalls
4915 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4920 static int trace__parse_events_option(const struct option *opt, const char *str, in trace__parse_events_option()
4923 struct trace *trace = (struct trace *)opt->value; in trace__parse_events_option() local
4926 int len = strlen(str) + 1, err = -1, list, idx; in trace__parse_events_option()
4932 return -1; in trace__parse_events_option()
4936 trace->not_ev_qualifier = true; in trace__parse_events_option()
4944 if (syscalltbl__id(trace->sctbl, s) >= 0 || in trace__parse_events_option()
4945 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) { in trace__parse_events_option()
4953 s = fmt->name; in trace__parse_events_option()
4981 trace->ev_qualifier = strlist__new(lists[1], &slist_config); in trace__parse_events_option()
4982 if (trace->ev_qualifier == NULL) { in trace__parse_events_option()
4983 fputs("Not enough memory to parse event qualifier", trace->output); in trace__parse_events_option()
4987 if (trace__validate_ev_qualifier(trace)) in trace__parse_events_option()
4989 trace->trace_syscalls = true; in trace__parse_events_option()
4996 .evlistp = &trace->evlist, in trace__parse_events_option()
5013 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset) in trace__parse_cgroups()
5015 struct trace *trace = opt->value; in trace__parse_cgroups() local
5017 if (!list_empty(&trace->evlist->core.entries)) { in trace__parse_cgroups()
5019 .value = &trace->evlist, in trace__parse_cgroups()
5023 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); in trace__parse_cgroups()
5028 static int trace__config(const char *var, const char *value, void *arg) in trace__config()
5030 struct trace *trace = arg; in trace__config() local
5033 if (!strcmp(var, "trace.add_events")) { in trace__config()
5034 trace->perfconfig_events = strdup(value); in trace__config()
5035 if (trace->perfconfig_events == NULL) { in trace__config()
5036 pr_err("Not enough memory for %s\n", "trace.add_events"); in trace__config()
5037 return -1; in trace__config()
5039 } else if (!strcmp(var, "trace.show_timestamp")) { in trace__config()
5040 trace->show_tstamp = perf_config_bool(var, value); in trace__config()
5041 } else if (!strcmp(var, "trace.show_duration")) { in trace__config()
5042 trace->show_duration = perf_config_bool(var, value); in trace__config()
5043 } else if (!strcmp(var, "trace.show_arg_names")) { in trace__config()
5044 trace->show_arg_names = perf_config_bool(var, value); in trace__config()
5045 if (!trace->show_arg_names) in trace__config()
5046 trace->show_zeros = true; in trace__config()
5047 } else if (!strcmp(var, "trace.show_zeros")) { in trace__config()
5049 if (!trace->show_arg_names && !new_show_zeros) { in trace__config()
5050 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n"); in trace__config()
5053 trace->show_zeros = new_show_zeros; in trace__config()
5054 } else if (!strcmp(var, "trace.show_prefix")) { in trace__config()
5055 trace->show_string_prefix = perf_config_bool(var, value); in trace__config()
5056 } else if (!strcmp(var, "trace.no_inherit")) { in trace__config()
5057 trace->opts.no_inherit = perf_config_bool(var, value); in trace__config()
5058 } else if (!strcmp(var, "trace.args_alignment")) { in trace__config()
5061 trace->args_alignment = args_alignment; in trace__config()
5062 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) { in trace__config()
5064 trace->libtraceevent_print = true; in trace__config()
5066 trace->libtraceevent_print = false; in trace__config()
5072 static void trace__exit(struct trace *trace) in trace__exit() argument
5076 strlist__delete(trace->ev_qualifier); in trace__exit()
5077 zfree(&trace->ev_qualifier_ids.entries); in trace__exit()
5078 if (trace->syscalls.table) { in trace__exit()
5079 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++) in trace__exit()
5080 syscall__exit(&trace->syscalls.table[i]); in trace__exit()
5081 zfree(&trace->syscalls.table); in trace__exit()
5083 syscalltbl__delete(trace->sctbl); in trace__exit()
5084 zfree(&trace->perfconfig_events); in trace__exit()
5088 static int bpf__setup_bpf_output(struct evlist *evlist) in bpf__setup_bpf_output()
5090 int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/"); in bpf__setup_bpf_output()
5093 pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n"); in bpf__setup_bpf_output()
5102 "perf trace [<options>] [<command>]", in cmd_trace()
5103 "perf trace [<options>] -- <command> [<options>]", in cmd_trace()
5104 "perf trace record [<options>] [<command>]", in cmd_trace()
5105 "perf trace record [<options>] -- <command> [<options>]", in cmd_trace()
5108 struct trace trace = { in cmd_trace() local
5132 OPT_CALLBACK('e', "event", &trace, "event", in cmd_trace()
5135 OPT_CALLBACK(0, "filter", &trace.evlist, "filter", in cmd_trace()
5137 OPT_BOOLEAN(0, "comm", &trace.show_comm, in cmd_trace()
5138 "show the thread COMM next to its id"), in cmd_trace()
5139 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"), in cmd_trace()
5140 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace", in cmd_trace()
5144 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid", in cmd_trace()
5145 "trace events on existing process id"), in cmd_trace()
5146 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid", in cmd_trace()
5147 "trace events on existing thread id"), in cmd_trace()
5148 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids", in cmd_trace()
5150 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide, in cmd_trace()
5151 "system-wide collection from all CPUs"), in cmd_trace()
5152 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu", in cmd_trace()
5154 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, in cmd_trace()
5156 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages", in cmd_trace()
5158 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user", in cmd_trace()
5160 OPT_CALLBACK(0, "duration", &trace, "float", in cmd_trace()
5163 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), in cmd_trace()
5165 OPT_BOOLEAN('T', "time", &trace.full_time, in cmd_trace()
5167 OPT_BOOLEAN(0, "failure", &trace.failure_only, in cmd_trace()
5169 OPT_BOOLEAN('s', "summary", &trace.summary_only, in cmd_trace()
5171 OPT_BOOLEAN('S', "with-summary", &trace.summary, in cmd_trace()
5173 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary, in cmd_trace()
5174 "Show errno stats per syscall, use with -s or -S"), in cmd_trace()
5175 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min", in cmd_trace()
5176 "Trace pagefaults", parse_pagefaults, "maj"), in cmd_trace()
5177 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"), in cmd_trace()
5178 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"), in cmd_trace()
5179 OPT_CALLBACK(0, "call-graph", &trace.opts, in cmd_trace()
5182 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print, in cmd_trace()
5184 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains, in cmd_trace()
5186 OPT_ULONG(0, "max-events", &trace.max_events, in cmd_trace()
5188 OPT_UINTEGER(0, "min-stack", &trace.min_stack, in cmd_trace()
5191 OPT_UINTEGER(0, "max-stack", &trace.max_stack, in cmd_trace()
5195 OPT_BOOLEAN(0, "sort-events", &trace.sort_events, in cmd_trace()
5197 OPT_BOOLEAN(0, "print-sample", &trace.print_sample, in cmd_trace()
5199 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, in cmd_trace()
5201 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only", in cmd_trace()
5203 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay, in cmd_trace()
5206 OPT_BOOLEAN(0, "force-btf", &trace.force_btf, "Prefer btf_dump general pretty printer" in cmd_trace()
5208 OPTS_EVSWITCH(&trace.evswitch), in cmd_trace()
5215 int err = -1; in cmd_trace()
5228 trace.evlist = evlist__new(); in cmd_trace()
5229 trace.sctbl = syscalltbl__new(); in cmd_trace()
5231 if (trace.evlist == NULL || trace.sctbl == NULL) { in cmd_trace()
5233 err = -ENOMEM; in cmd_trace()
5241 * global setting. If it fails we'll get something in 'perf trace -v' in cmd_trace()
5246 err = perf_config(trace__config, &trace); in cmd_trace()
5255 * already figured out if -e syscall_name, if not but if --event in cmd_trace()
5257 * tracepoint events, not in the strace-like syscall-name-based mode. in cmd_trace()
5259 * This is important because we need to check if strace-like mode is in cmd_trace()
5262 * .perfconfig trace.add_events, and filter those out. in cmd_trace()
5264 if (!trace.trace_syscalls && !trace.trace_pgfaults && in cmd_trace()
5265 trace.evlist->core.nr_entries == 0 /* Was --events used? */) { in cmd_trace()
5266 trace.trace_syscalls = true; in cmd_trace()
5269 * Now that we have --verbose figured out, lets see if we need to parse in cmd_trace()
5271 * BPF program fails, then we'll be able to use --verbose to see what went in cmd_trace()
5274 if (trace.perfconfig_events != NULL) { in cmd_trace()
5278 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err); in cmd_trace()
5280 parse_events_error__print(&parse_err, trace.perfconfig_events); in cmd_trace()
5286 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) { in cmd_trace()
5288 "cgroup monitoring only available in system-wide mode"); in cmd_trace()
5292 if (!trace.trace_syscalls) in cmd_trace()
5300 trace.skel = augmented_raw_syscalls_bpf__open(); in cmd_trace()
5301 if (!trace.skel) { in cmd_trace()
5310 bpf_object__for_each_program(prog, trace.skel->obj) { in cmd_trace()
5311 if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit) in cmd_trace()
5315 err = augmented_raw_syscalls_bpf__load(trace.skel); in cmd_trace()
5321 augmented_raw_syscalls_bpf__attach(trace.skel); in cmd_trace()
5322 trace__add_syscall_newtp(&trace); in cmd_trace()
5326 err = bpf__setup_bpf_output(trace.evlist); in cmd_trace()
5332 trace.syscalls.events.bpf_output = evlist__last(trace.evlist); in cmd_trace()
5333 assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__")); in cmd_trace()
5336 err = -1; in cmd_trace()
5338 if (trace.trace_pgfaults) { in cmd_trace()
5339 trace.opts.sample_address = true; in cmd_trace()
5340 trace.opts.sample_time = true; in cmd_trace()
5343 if (trace.opts.mmap_pages == UINT_MAX) in cmd_trace()
5346 if (trace.max_stack == UINT_MAX) { in cmd_trace()
5347 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack(); in cmd_trace()
5352 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) { in cmd_trace()
5353 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false); in cmd_trace()
5359 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4; in cmd_trace()
5364 if (trace.evlist->core.nr_entries > 0) { in cmd_trace()
5367 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler); in cmd_trace()
5368 if (evlist__set_syscall_tp_fields(trace.evlist, &use_btf)) { in cmd_trace()
5374 trace__load_vmlinux_btf(&trace); in cmd_trace()
5377 if (trace.sort_events) { in cmd_trace()
5378 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace); in cmd_trace()
5379 ordered_events__set_copy_on_queue(&trace.oe.data, true); in cmd_trace()
5393 if (trace.syscalls.events.bpf_output) { in cmd_trace()
5394 evlist__for_each_entry(trace.evlist, evsel) { in cmd_trace()
5398 trace.raw_augmented_syscalls = true; in cmd_trace()
5402 if (trace.syscalls.events.bpf_output->priv == NULL && in cmd_trace()
5404 struct evsel *augmented = trace.syscalls.events.bpf_output; in cmd_trace()
5413 augmented->handler = trace__sys_enter; in cmd_trace()
5423 evsel->handler = trace__sys_enter; in cmd_trace()
5438 * don't look after the sc->args_size but in cmd_trace()
5443 * s->args_size to the BPF augmenter (now in cmd_trace()
5451 if (trace.raw_augmented_syscalls) in cmd_trace()
5452 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset; in cmd_trace()
5454 evsel->handler = trace__sys_exit; in cmd_trace()
5460 return trace__record(&trace, argc-1, &argv[1]); in cmd_trace()
5462 /* Using just --errno-summary will trigger --summary */ in cmd_trace()
5463 if (trace.errno_summary && !trace.summary && !trace.summary_only) in cmd_trace()
5464 trace.summary_only = true; in cmd_trace()
5467 if (trace.summary_only) in cmd_trace()
5468 trace.summary = trace.summary_only; in cmd_trace()
5471 if (trace.summary) in cmd_trace()
5475 err = trace__open_output(&trace, output_name); in cmd_trace()
5482 err = evswitch__init(&trace.evswitch, trace.evlist, stderr); in cmd_trace()
5486 err = target__validate(&trace.opts.target); in cmd_trace()
5488 target__strerror(&trace.opts.target, err, bf, sizeof(bf)); in cmd_trace()
5489 fprintf(trace.output, "%s", bf); in cmd_trace()
5493 err = target__parse_uid(&trace.opts.target); in cmd_trace()
5495 target__strerror(&trace.opts.target, err, bf, sizeof(bf)); in cmd_trace()
5496 fprintf(trace.output, "%s", bf); in cmd_trace()
5500 if (!argc && target__none(&trace.opts.target)) in cmd_trace()
5501 trace.opts.target.system_wide = true; in cmd_trace()
5504 err = trace__replay(&trace); in cmd_trace()
5506 err = trace__run(&trace, argc, argv); in cmd_trace()
5510 fclose(trace.output); in cmd_trace()
5512 trace__exit(&trace); in cmd_trace()
5514 augmented_raw_syscalls_bpf__destroy(trace.skel); in cmd_trace()