Lines Matching +full:oe +full:- +full:extra +full:- +full:delay

2  * builtin-trace.c
8 * event may be specified using --event.
39 #include "util/synthetic-events.h"
44 #include <subcmd/exec-cmd.h>
51 #include <subcmd/parse-options.h>
59 #include "trace-event.h"
60 #include "util/parse-events.h"
91 #include <event-parse.h>
221 } oe; member
227 if (trace->btf != NULL) in trace__load_vmlinux_btf()
230 trace->btf = btf__load_vmlinux_btf(); in trace__load_vmlinux_btf()
232 fprintf(trace->output, trace->btf ? "vmlinux BTF loaded\n" : in trace__load_vmlinux_btf()
250 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
263 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
273 field->offset = offset; in __tp_field__init_uint()
277 field->integer = tp_field__u8; in __tp_field__init_uint()
280 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16; in __tp_field__init_uint()
283 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32; in __tp_field__init_uint()
286 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64; in __tp_field__init_uint()
289 return -1; in __tp_field__init_uint()
297 return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap); in tp_field__init_uint()
302 return sample->raw_data + field->offset; in tp_field__ptr()
307 field->offset = offset; in __tp_field__init_ptr()
308 field->pointer = tp_field__ptr; in __tp_field__init_ptr()
314 return __tp_field__init_ptr(field, format_field->offset); in tp_field__init_ptr()
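Note: the accessor helpers above read tracepoint fields out of sample->raw_data with memcpy rather than pointer casts because the payload is not guaranteed to be naturally aligned, and the swapped_* variants additionally byte-swap for cross-endian traces. A minimal standalone sketch of the same unaligned-read pattern (names are hypothetical, not taken from builtin-trace.c):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Read a u32 that may sit at any byte offset in a raw tracepoint payload;
 * memcpy avoids undefined behaviour on misaligned loads. */
static uint32_t read_u32_unaligned(const void *raw, size_t offset)
{
	uint32_t value;

	memcpy(&value, (const char *)raw + offset, sizeof(value));
	return value;
}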
325 * The evsel->priv as used by 'perf trace'
344 zfree(&et->fmt); in evsel_trace__delete()
354 struct evsel_trace *et = evsel->priv; in __evsel__syscall_tp()
356 return &et->sc; in __evsel__syscall_tp()
361 if (evsel->priv == NULL) { in evsel__syscall_tp()
362 evsel->priv = evsel_trace__new(); in evsel__syscall_tp()
363 if (evsel->priv == NULL) in evsel__syscall_tp()
375 struct evsel_trace *et = evsel->priv; in __evsel__syscall_arg_fmt()
377 return et->fmt; in __evsel__syscall_arg_fmt()
382 struct evsel_trace *et = evsel->priv; in evsel__syscall_arg_fmt()
384 if (evsel->priv == NULL) { in evsel__syscall_arg_fmt()
385 et = evsel->priv = evsel_trace__new(); in evsel__syscall_arg_fmt()
391 if (et->fmt == NULL) { in evsel__syscall_arg_fmt()
397 et->fmt = calloc(tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt)); in evsel__syscall_arg_fmt()
398 if (et->fmt == NULL) in evsel__syscall_arg_fmt()
405 evsel_trace__delete(evsel->priv); in evsel__syscall_arg_fmt()
406 evsel->priv = NULL; in evsel__syscall_arg_fmt()
415 return -1; in evsel__init_tp_uint_field()
417 return tp_field__init_uint(field, format_field, evsel->needs_swap); in evsel__init_tp_uint_field()
422 evsel__init_tp_uint_field(evsel, &sc->name, #name); })
429 return -1; in evsel__init_tp_ptr_field()
436 evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
440 zfree(&evsel->priv); in evsel__delete_priv()
449 if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") && in evsel__init_syscall_tp()
450 evsel__init_tp_uint_field(evsel, &sc->id, "nr")) in evsel__init_syscall_tp()
451 return -ENOENT; in evsel__init_syscall_tp()
456 return -ENOMEM; in evsel__init_syscall_tp()
468 __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap)) in evsel__init_augmented_syscall_tp()
469 return -EINVAL; in evsel__init_augmented_syscall_tp()
474 return -ENOMEM; in evsel__init_augmented_syscall_tp()
481 return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)); in evsel__init_augmented_syscall_tp_args()
488 …return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap… in evsel__init_augmented_syscall_tp_ret()
495 return -ENOENT; in evsel__init_raw_syscall_tp()
497 evsel->handler = handler; in evsel__init_raw_syscall_tp()
501 return -ENOMEM; in evsel__init_raw_syscall_tp()
527 fields->name.integer(&fields->name, sample); })
531 fields->name.pointer(&fields->name, sample); })
535 int idx = val - sa->offset; in strarray__scnprintf_suffix()
537 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) { in strarray__scnprintf_suffix()
540 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix); in strarray__scnprintf_suffix()
544 return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : ""); in strarray__scnprintf_suffix()
549 int idx = val - sa->offset; in strarray__scnprintf()
551 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) { in strarray__scnprintf()
554 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix); in strarray__scnprintf()
558 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]); in strarray__scnprintf()
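Note: strarray__scnprintf and its suffix variant map an integer onto a string table by subtracting the array's base offset and bounds-checking, falling back to the raw number plus a "??? " marker when the value is unknown. A sketch of just the lookup step, with a hypothetical struct name:

struct strarray_sketch {
	int offset;            /* value that maps to entries[0] */
	int nr_entries;
	const char **entries;  /* may contain NULL holes */
};

/* NULL means "not in the table": the caller prints the raw number instead. */
static const char *strarray_lookup(const struct strarray_sketch *sa, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL)
		return NULL;
	return sa->entries[idx];
}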
565 return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val); in __syscall_arg__scnprintf_strarray()
578 return strarray__strtoul(arg->parm, bf, size, ret); in syscall_arg__strtoul_strarray()
583 return strarray__strtoul_flags(arg->parm, bf, size, ret); in syscall_arg__strtoul_strarray_flags()
588 return strarrays__strtoul(arg->parm, bf, size, ret); in syscall_arg__strtoul_strarrays()
593 return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val); in syscall_arg__scnprintf_strarray_flags()
601 for (i = 0; i < sas->nr_entries; ++i) { in strarrays__scnprintf()
602 struct strarray *sa = sas->entries[i]; in strarrays__scnprintf()
603 int idx = val - sa->offset; in strarrays__scnprintf()
605 if (idx >= 0 && idx < sa->nr_entries) { in strarrays__scnprintf()
606 if (sa->entries[idx] == NULL) in strarrays__scnprintf()
608 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]); in strarrays__scnprintf()
614 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix); in strarrays__scnprintf()
622 for (i = 0; i < sa->nr_entries; ++i) { in strarray__strtoul()
623 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') { in strarray__strtoul()
624 *ret = sa->offset + i; in strarray__strtoul()
644 size -= sep - tok + 1; in strarray__strtoul_flags()
646 end = sep - 1; in strarray__strtoul_flags()
648 --end; in strarray__strtoul_flags()
650 toklen = end - tok + 1; in strarray__strtoul_flags()
662 *ret |= (1 << (val - 1)); in strarray__strtoul_flags()
676 for (i = 0; i < sas->nr_entries; ++i) { in strarrays__strtoul()
677 struct strarray *sa = sas->entries[i]; in strarrays__strtoul()
689 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val); in syscall_arg__scnprintf_strarrays()
693 #define AT_FDCWD -100
699 int fd = arg->val; in syscall_arg__scnprintf_fd_at()
703 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD"); in syscall_arg__scnprintf_fd_at()
717 return scnprintf(bf, size, "%#lx", arg->val); in syscall_arg__scnprintf_hex()
722 if (arg->val == 0) in syscall_arg__scnprintf_ptr()
729 return scnprintf(bf, size, "%d", arg->val); in syscall_arg__scnprintf_int()
734 return scnprintf(bf, size, "%ld", arg->val); in syscall_arg__scnprintf_long()
742 return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val); in syscall_arg__scnprintf_char_array()
838 bool show_prefix = arg->show_string_prefix; in syscall_arg__scnprintf_access_mode()
841 int mode = arg->val; in syscall_arg__scnprintf_access_mode()
847 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \ in syscall_arg__scnprintf_access_mode()
857 printed += scnprintf(bf + printed, size - printed, "|%#x", mode); in syscall_arg__scnprintf_access_mode()
881 bool show_prefix = arg->show_string_prefix; in syscall_arg__scnprintf_pipe_flags()
883 int printed = 0, flags = arg->val; in syscall_arg__scnprintf_pipe_flags()
887 …printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? pre… in syscall_arg__scnprintf_pipe_flags()
896 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); in syscall_arg__scnprintf_pipe_flags()
913 bool show_prefix = arg->show_string_prefix; in syscall_arg__scnprintf_getrandom_flags()
915 int printed = 0, flags = arg->val; in syscall_arg__scnprintf_getrandom_flags()
919 …printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? pre… in syscall_arg__scnprintf_getrandom_flags()
928 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); in syscall_arg__scnprintf_getrandom_flags()
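Note: the pipe2/getrandom printers above follow one flag-decoding pattern: each known bit is printed once, '|'-separated, cleared from the flags word, and whatever remains is dumped as raw hex. A self-contained sketch of that pattern (flag names are made up; plain snprintf stands in for perf's clamping scnprintf):

#include <stddef.h>
#include <stdio.h>

static int scnprintf_flags_sketch(char *bf, size_t size, unsigned int flags)
{
	static const struct { unsigned int bit; const char *name; } known[] = {
		{ 1u << 0, "FLAG_A" },
		{ 1u << 1, "FLAG_B" },
	};
	int printed = 0;

	for (size_t i = 0; i < sizeof(known) / sizeof(known[0]); i++) {
		if (!(flags & known[i].bit))
			continue;
		printed += snprintf(bf + printed, size - printed, "%s%s",
				    printed ? "|" : "", known[i].name);
		flags &= ~known[i].bit;
	}
	if (flags)	/* leftover, unknown bits */
		printed += snprintf(bf + printed, size - printed, "%s%#x",
				    printed ? "|" : "", flags);
	return printed;
}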
950 arg_fmt->type = btf__type_by_id(btf, id); in syscall_arg_fmt__cache_btf_enum()
955 const struct btf_type *bt = arg->fmt->type; in syscall_arg__strtoul_btf_enum()
956 struct btf *btf = arg->trace->btf; in syscall_arg__strtoul_btf_enum()
960 const char *name = btf__name_by_offset(btf, be->name_off); in syscall_arg__strtoul_btf_enum()
964 *val = be->val; in syscall_arg__strtoul_btf_enum()
975 char *type = arg->type_name; in syscall_arg__strtoul_btf_type()
978 trace__load_vmlinux_btf(arg->trace); in syscall_arg__strtoul_btf_type()
980 btf = arg->trace->btf; in syscall_arg__strtoul_btf_type()
984 if (arg->fmt->type == NULL) { in syscall_arg__strtoul_btf_type()
986 syscall_arg_fmt__cache_btf_enum(arg->fmt, btf, type); in syscall_arg__strtoul_btf_type()
990 bt = arg->fmt->type; in syscall_arg__strtoul_btf_type()
995 if (btf_is_enum(arg->fmt->type)) in syscall_arg__strtoul_btf_type()
1007 if (be->val == val) { in btf_enum_scnprintf()
1009 btf__name_by_offset(btf, be->name_off)); in btf_enum_scnprintf()
1025 ctx->printed += vscnprintf(ctx->bf + ctx->printed, ctx->size - ctx->printed, fmt, args); in trace__btf_dump_snprintf()
1034 struct augmented_arg *augmented_arg = arg->augmented.args; in btf_struct_scnprintf()
1035 int type_id = arg->fmt->type_id, consumed; in btf_struct_scnprintf()
1041 if (arg == NULL || arg->augmented.args == NULL) in btf_struct_scnprintf()
1045 dump_data_opts.skip_names = !arg->trace->show_arg_names; in btf_struct_scnprintf()
1052 …if (btf_dump__dump_type_data(btf_dump, type_id, arg->augmented.args->value, type->size, &dump_data… in btf_struct_scnprintf()
1055 consumed = sizeof(*augmented_arg) + augmented_arg->size; in btf_struct_scnprintf()
1056 arg->augmented.args = ((void *)arg->augmented.args) + consumed; in btf_struct_scnprintf()
1057 arg->augmented.size -= consumed; in btf_struct_scnprintf()
1067 struct syscall_arg_fmt *arg_fmt = arg->fmt; in trace__btf_scnprintf()
1069 if (trace->btf == NULL) in trace__btf_scnprintf()
1072 if (arg_fmt->type == NULL) { in trace__btf_scnprintf()
1074 syscall_arg_fmt__cache_btf_enum(arg_fmt, trace->btf, type); in trace__btf_scnprintf()
1078 if (arg_fmt->type == NULL) in trace__btf_scnprintf()
1081 if (btf_is_enum(arg_fmt->type)) in trace__btf_scnprintf()
1082 return btf_enum_scnprintf(arg_fmt->type, trace->btf, bf, size, val); in trace__btf_scnprintf()
1083 else if (btf_is_struct(arg_fmt->type) || btf_is_union(arg_fmt->type)) in trace__btf_scnprintf()
1084 return btf_struct_scnprintf(arg_fmt->type, trace->btf, bf, size, arg); in trace__btf_scnprintf()
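Note: trace__btf_scnprintf dispatches on the cached BTF type: enums are rendered by name, structs/unions are dumped via btf_dump. A sketch of the enum path using the libbpf BTF helpers that appear in the matched lines (the function name here is hypothetical):

#include <stdio.h>
#include <bpf/btf.h>

/* Walk the enumerators of a 32-bit enum type and print the matching name. */
static int btf_enum_name_sketch(const struct btf_type *bt, const struct btf *btf,
				char *bf, size_t size, int val)
{
	const struct btf_enum *be = btf_enum(bt);

	for (int i = 0; i < btf_vlen(bt); i++, be++) {
		if (be->val == val)
			return snprintf(bf, size, "%s",
					btf__name_by_offset(btf, be->name_off));
	}
	return 0;	/* unknown value: caller falls back to a numeric format */
}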
1413 return strcmp(name, fmt->name); in syscall_fmt__cmp()
1530 ttrace->files.max = -1; in thread_trace__new()
1531 ttrace->syscall_stats = intlist__new(NULL); in thread_trace__new()
1546 intlist__delete(ttrace->syscall_stats); in thread_trace__delete()
1547 ttrace->syscall_stats = NULL; in thread_trace__delete()
1549 zfree(&ttrace->entry_str); in thread_trace__delete()
1567 ++ttrace->nr_events; in thread__trace()
1580 struct thread_trace *ttrace = thread__priv(arg->thread); in syscall_arg__set_ret_scnprintf()
1582 ttrace->ret_scnprintf = ret_scnprintf; in syscall_arg__set_ret_scnprintf()
1592 for (int i = 0; i < ttrace->files.max; ++i) { in thread_trace__free_files()
1593 struct file *file = ttrace->files.table + i; in thread_trace__free_files()
1594 zfree(&file->pathname); in thread_trace__free_files()
1597 zfree(&ttrace->files.table); in thread_trace__free_files()
1598 ttrace->files.max = -1; in thread_trace__free_files()
1606 if (fd > ttrace->files.max) { in thread_trace__files_entry()
1607 struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file)); in thread_trace__files_entry()
1612 if (ttrace->files.max != -1) { in thread_trace__files_entry()
1613 memset(nfiles + ttrace->files.max + 1, 0, in thread_trace__files_entry()
1614 (fd - ttrace->files.max) * sizeof(struct file)); in thread_trace__files_entry()
1619 ttrace->files.table = nfiles; in thread_trace__files_entry()
1620 ttrace->files.max = fd; in thread_trace__files_entry()
1623 return ttrace->files.table + fd; in thread_trace__files_entry()
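Note: thread_trace__files_entry grows the per-thread fd -> file table with realloc and zeroes only the newly added slots; because files.max starts at -1, the same arithmetic also covers the first allocation. A standalone sketch of that grow-and-zero pattern with hypothetical types:

#include <stdlib.h>
#include <string.h>

struct file_sketch  { char *pathname; };
struct files_sketch { struct file_sketch *table; int max; /* starts at -1 */ };

static struct file_sketch *files_entry_sketch(struct files_sketch *files, int fd)
{
	if (fd > files->max) {
		struct file_sketch *nfiles =
			realloc(files->table, (fd + 1) * sizeof(*nfiles));

		if (nfiles == NULL)
			return NULL;
		/* zero only the slots that did not exist before */
		memset(nfiles + files->max + 1, 0,
		       (fd - files->max) * sizeof(*nfiles));
		files->table = nfiles;
		files->max = fd;
	}
	return files->table + fd;
}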
1639 file->dev_maj = major(st.st_rdev); in trace__set_fd_pathname()
1640 file->pathname = strdup(pathname); in trace__set_fd_pathname()
1641 if (file->pathname) in trace__set_fd_pathname()
1645 return -1; in trace__set_fd_pathname()
1664 return -1; in thread__read_fd_path()
1669 return -1; in thread__read_fd_path()
1680 if (ttrace == NULL || trace->fd_path_disabled) in thread__fd_path()
1686 if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) { in thread__fd_path()
1687 if (!trace->live) in thread__fd_path()
1689 ++trace->stats.proc_getname; in thread__fd_path()
1694 return ttrace->files.table[fd].pathname; in thread__fd_path()
1699 int fd = arg->val; in syscall_arg__scnprintf_fd()
1701 const char *path = thread__fd_path(arg->thread, fd, arg->trace); in syscall_arg__scnprintf_fd()
1704 printed += scnprintf(bf + printed, size - printed, "<%s>", path); in syscall_arg__scnprintf_fd()
1712 struct thread *thread = machine__find_thread(trace->host, pid, pid); in pid__scnprintf_fd()
1718 printed += scnprintf(bf + printed, size - printed, "<%s>", path); in pid__scnprintf_fd()
1729 int fd = arg->val; in syscall_arg__scnprintf_close_fd()
1731 struct thread_trace *ttrace = thread__priv(arg->thread); in syscall_arg__scnprintf_close_fd()
1733 if (ttrace && fd >= 0 && fd <= ttrace->files.max) in syscall_arg__scnprintf_close_fd()
1734 zfree(&ttrace->files.table[fd].pathname); in syscall_arg__scnprintf_close_fd()
1744 ttrace->filename.ptr = ptr; in thread__set_filename_pos()
1745 ttrace->filename.entry_str_pos = bf - ttrace->entry_str; in thread__set_filename_pos()
1750 struct augmented_arg *augmented_arg = arg->augmented.args; in syscall_arg__scnprintf_augmented_string()
1751 size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value); in syscall_arg__scnprintf_augmented_string()
1756 int consumed = sizeof(*augmented_arg) + augmented_arg->size; in syscall_arg__scnprintf_augmented_string()
1758 arg->augmented.args = ((void *)arg->augmented.args) + consumed; in syscall_arg__scnprintf_augmented_string()
1759 arg->augmented.size -= consumed; in syscall_arg__scnprintf_augmented_string()
1767 unsigned long ptr = arg->val; in syscall_arg__scnprintf_filename()
1769 if (arg->augmented.args) in syscall_arg__scnprintf_filename()
1772 if (!arg->trace->vfs_getname) in syscall_arg__scnprintf_filename()
1775 thread__set_filename_pos(arg->thread, bf, ptr); in syscall_arg__scnprintf_filename()
1784 struct augmented_arg *augmented_arg = arg->augmented.args; in syscall_arg__scnprintf_buf()
1785 unsigned char *orig = (unsigned char *)augmented_arg->value; in syscall_arg__scnprintf_buf()
1792 for (int j = 0; j < augmented_arg->size; ++j) { in syscall_arg__scnprintf_buf()
1794 /* print control characters (0~31 and 127), and non-ascii characters in \(digits) */ in syscall_arg__scnprintf_buf()
1795 printed += scnprintf(bf + printed, size - printed, control_char ? "\\%d" : "%c", (int)orig[j]); in syscall_arg__scnprintf_buf()
1798 consumed = sizeof(*augmented_arg) + augmented_arg->size; in syscall_arg__scnprintf_buf()
1799 arg->augmented.args = ((void *)arg->augmented.args) + consumed; in syscall_arg__scnprintf_buf()
1800 arg->augmented.size -= consumed; in syscall_arg__scnprintf_buf()
1807 return t < (trace->duration_filter * NSEC_PER_MSEC); in trace__filter_duration()
1812 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; in __trace__fprintf_tstamp()
1819 * using ttrace->entry_time for a thread that receives a sys_exit without
1831 static pid_t workload_pid = -1;
1843 if (info->si_pid == workload_pid) in sighandler_chld()
1851 if (trace->multiple_threads) { in trace__fprintf_comm_tid()
1852 if (trace->show_comm) in trace__fprintf_comm_tid()
1865 if (trace->show_tstamp) in trace__fprintf_entry_head()
1867 if (trace->show_duration) in trace__fprintf_entry_head()
1877 switch (event->header.type) { in trace__process_event()
1879 color_fprintf(trace->output, PERF_COLOR_RED, in trace__process_event()
1880 "LOST %" PRIu64 " events!\n", (u64)event->lost.lost); in trace__process_event()
1904 if (machine->kptr_restrict_warned) in trace__machine__resolve_kernel_addr()
1911 machine->kptr_restrict_warned = true; in trace__machine__resolve_kernel_addr()
1925 trace->host = machine__new_host(); in trace__symbols_init()
1926 if (trace->host == NULL) in trace__symbols_init()
1927 return -ENOMEM; in trace__symbols_init()
1931 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); in trace__symbols_init()
1935 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, in trace__symbols_init()
1936 evlist->core.threads, trace__tool_process, in trace__symbols_init()
1947 machine__exit(trace->host); in trace__symbols__exit()
1948 trace->host = NULL; in trace__symbols__exit()
1957 if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0) in syscall__alloc_arg_fmts()
1958 nr_args = sc->fmt->nr_args; in syscall__alloc_arg_fmts()
1960 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt)); in syscall__alloc_arg_fmts()
1961 if (sc->arg_fmt == NULL) in syscall__alloc_arg_fmts()
1962 return -1; in syscall__alloc_arg_fmts()
1965 if (sc->fmt) in syscall__alloc_arg_fmts()
1966 sc->arg_fmt[idx] = sc->fmt->arg[idx]; in syscall__alloc_arg_fmts()
1969 sc->nr_args = nr_args; in syscall__alloc_arg_fmts()
1981 return strcmp(name, fmt->name); in syscall_arg_fmt__cmp()
2004 for (; field; field = field->next, ++arg) { in syscall_arg_fmt__init_array()
2007 if (arg->scnprintf) in syscall_arg_fmt__init_array()
2010 len = strlen(field->name); in syscall_arg_fmt__init_array()
2013 if ((field->flags & TEP_FIELD_IS_POINTER) && strstarts(field->type, "const ")) in syscall_arg_fmt__init_array()
2014 arg->from_user = true; in syscall_arg_fmt__init_array()
2016 if (strcmp(field->type, "const char *") == 0 && in syscall_arg_fmt__init_array()
2017 ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) || in syscall_arg_fmt__init_array()
2018 strstr(field->name, "path") != NULL)) { in syscall_arg_fmt__init_array()
2019 arg->scnprintf = SCA_FILENAME; in syscall_arg_fmt__init_array()
2020 } else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr")) in syscall_arg_fmt__init_array()
2021 arg->scnprintf = SCA_PTR; in syscall_arg_fmt__init_array()
2022 else if (strcmp(field->type, "pid_t") == 0) in syscall_arg_fmt__init_array()
2023 arg->scnprintf = SCA_PID; in syscall_arg_fmt__init_array()
2024 else if (strcmp(field->type, "umode_t") == 0) in syscall_arg_fmt__init_array()
2025 arg->scnprintf = SCA_MODE_T; in syscall_arg_fmt__init_array()
2026 else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) { in syscall_arg_fmt__init_array()
2027 arg->scnprintf = SCA_CHAR_ARRAY; in syscall_arg_fmt__init_array()
2028 arg->nr_entries = field->arraylen; in syscall_arg_fmt__init_array()
2029 } else if ((strcmp(field->type, "int") == 0 || in syscall_arg_fmt__init_array()
2030 strcmp(field->type, "unsigned int") == 0 || in syscall_arg_fmt__init_array()
2031 strcmp(field->type, "long") == 0) && in syscall_arg_fmt__init_array()
2032 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) { in syscall_arg_fmt__init_array()
2035 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c in syscall_arg_fmt__init_array()
2040 arg->scnprintf = SCA_FD; in syscall_arg_fmt__init_array()
2041 } else if (strstr(field->type, "enum") && use_btf != NULL) { in syscall_arg_fmt__init_array()
2043 arg->strtoul = STUL_BTF_TYPE; in syscall_arg_fmt__init_array()
2046 syscall_arg_fmt__find_by_name(field->name); in syscall_arg_fmt__init_array()
2049 arg->scnprintf = fmt->scnprintf; in syscall_arg_fmt__init_array()
2050 arg->strtoul = fmt->strtoul; in syscall_arg_fmt__init_array()
2060 struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args, in syscall__set_arg_fmts()
2061 &sc->use_btf); in syscall__set_arg_fmts()
2064 sc->args_size = last_field->offset + last_field->size; in syscall__set_arg_fmts()
2073 const char *name = syscalltbl__name(trace->sctbl, id); in trace__read_syscall_info()
2076 if (trace->syscalls.table == NULL) { in trace__read_syscall_info()
2077 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc)); in trace__read_syscall_info()
2078 if (trace->syscalls.table == NULL) in trace__read_syscall_info()
2079 return -ENOMEM; in trace__read_syscall_info()
2081 sc = trace->syscalls.table + id; in trace__read_syscall_info()
2082 if (sc->nonexistent) in trace__read_syscall_info()
2083 return -EEXIST; in trace__read_syscall_info()
2086 sc->nonexistent = true; in trace__read_syscall_info()
2087 return -EEXIST; in trace__read_syscall_info()
2090 sc->name = name; in trace__read_syscall_info()
2091 sc->fmt = syscall_fmt__find(sc->name); in trace__read_syscall_info()
2093 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name); in trace__read_syscall_info()
2094 sc->tp_format = trace_event__tp_format("syscalls", tp_name); in trace__read_syscall_info()
2096 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) { in trace__read_syscall_info()
2097 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias); in trace__read_syscall_info()
2098 sc->tp_format = trace_event__tp_format("syscalls", tp_name); in trace__read_syscall_info()
2105 if (IS_ERR(sc->tp_format)) { in trace__read_syscall_info()
2106 sc->nonexistent = true; in trace__read_syscall_info()
2107 return PTR_ERR(sc->tp_format); in trace__read_syscall_info()
2114 if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? in trace__read_syscall_info()
2115 RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields - 1)) in trace__read_syscall_info()
2116 return -ENOMEM; in trace__read_syscall_info()
2118 sc->args = sc->tp_format->format.fields; in trace__read_syscall_info()
2124 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) { in trace__read_syscall_info()
2125 sc->args = sc->args->next; in trace__read_syscall_info()
2126 --sc->nr_args; in trace__read_syscall_info()
2129 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit"); in trace__read_syscall_info()
2130 sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat"); in trace__read_syscall_info()
2135 if (sc->use_btf) in trace__read_syscall_info()
2149 syscall_arg_fmt__init_array(fmt, tp_format->format.fields, use_btf); in evsel__init_tp_arg_scnprintf()
2154 return -ENOMEM; in evsel__init_tp_arg_scnprintf()
2161 return *one - *another; in intcmp()
2169 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier); in trace__validate_ev_qualifier()
2171 trace->ev_qualifier_ids.entries = malloc(nr_allocated * in trace__validate_ev_qualifier()
2172 sizeof(trace->ev_qualifier_ids.entries[0])); in trace__validate_ev_qualifier()
2174 if (trace->ev_qualifier_ids.entries == NULL) { in trace__validate_ev_qualifier()
2176 trace->output); in trace__validate_ev_qualifier()
2177 err = -EINVAL; in trace__validate_ev_qualifier()
2181 strlist__for_each_entry(pos, trace->ev_qualifier) { in trace__validate_ev_qualifier()
2182 const char *sc = pos->s; in trace__validate_ev_qualifier()
2183 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1; in trace__validate_ev_qualifier()
2186 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next); in trace__validate_ev_qualifier()
2201 trace->ev_qualifier_ids.entries[nr_used++] = id; in trace__validate_ev_qualifier()
2202 if (match_next == -1) in trace__validate_ev_qualifier()
2206 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next); in trace__validate_ev_qualifier()
2213 entries = realloc(trace->ev_qualifier_ids.entries, in trace__validate_ev_qualifier()
2214 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0])); in trace__validate_ev_qualifier()
2216 err = -ENOMEM; in trace__validate_ev_qualifier()
2217 fputs("\nError:\t Not enough memory for parsing\n", trace->output); in trace__validate_ev_qualifier()
2220 trace->ev_qualifier_ids.entries = entries; in trace__validate_ev_qualifier()
2222 trace->ev_qualifier_ids.entries[nr_used++] = id; in trace__validate_ev_qualifier()
2226 trace->ev_qualifier_ids.nr = nr_used; in trace__validate_ev_qualifier()
2227 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp); in trace__validate_ev_qualifier()
2233 zfree(&trace->ev_qualifier_ids.entries); in trace__validate_ev_qualifier()
2234 trace->ev_qualifier_ids.nr = 0; in trace__validate_ev_qualifier()
2242 if (trace->ev_qualifier_ids.nr == 0) in trace__syscall_enabled()
2245 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, in trace__syscall_enabled()
2246 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL; in trace__syscall_enabled()
2249 return !trace->not_ev_qualifier; in trace__syscall_enabled()
2251 return trace->not_ev_qualifier; in trace__syscall_enabled()
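Note: the event-qualifier ids collected above are qsort()ed once (see intcmp) and each syscall id is then tested for membership with bsearch(), with not_ev_qualifier inverting the result. A sketch of that check; parameter names are hypothetical, and the "trace everything when no qualifier is set" shortcut is an assumption based on the early return at the top of trace__syscall_enabled:

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

static int intcmp_sketch(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

/* 'ids' must already be sorted with the same comparator. */
static bool syscall_enabled_sketch(const int *ids, size_t nr, bool negated, int id)
{
	bool found;

	if (nr == 0)
		return true;	/* no qualifier configured (assumed behaviour) */

	found = bsearch(&id, ids, nr, sizeof(int), intcmp_sketch) != NULL;
	return negated ? !found : found;
}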
2256 * 8-byte unaligned accesses. args points to raw_data within the event
2257 * and raw_data is guaranteed to be 8-byte unaligned because it is
2265 unsigned char *p = arg->args + sizeof(unsigned long) * idx; in syscall_arg__val()
2274 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name) in syscall__scnprintf_name()
2275 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name); in syscall__scnprintf_name()
2277 return scnprintf(bf, size, "arg%d: ", arg->idx); in syscall__scnprintf_name()
2287 if (fmt && fmt->mask_val) in syscall_arg_fmt__mask_val()
2288 return fmt->mask_val(arg, val); in syscall_arg_fmt__mask_val()
2296 if (fmt && fmt->scnprintf) { in syscall_arg_fmt__scnprintf_val()
2297 arg->val = val; in syscall_arg_fmt__scnprintf_val()
2298 if (fmt->parm) in syscall_arg_fmt__scnprintf_val()
2299 arg->parm = fmt->parm; in syscall_arg_fmt__scnprintf_val()
2300 return fmt->scnprintf(bf, size, arg); in syscall_arg_fmt__scnprintf_val()
2322 .show_string_prefix = trace->show_string_prefix, in syscall__scnprintf_args()
2332 ttrace->ret_scnprintf = NULL; in syscall__scnprintf_args()
2334 if (sc->args != NULL) { in syscall__scnprintf_args()
2337 for (field = sc->args; field; in syscall__scnprintf_args()
2338 field = field->next, ++arg.idx, bit <<= 1) { in syscall__scnprintf_args()
2342 arg.fmt = &sc->arg_fmt[arg.idx]; in syscall__scnprintf_args()
2348 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val); in syscall__scnprintf_args()
2357 if (val == 0 && !trace->show_zeros && in syscall__scnprintf_args()
2358 !(sc->arg_fmt && sc->arg_fmt[arg.idx].show_zero) && in syscall__scnprintf_args()
2359 !(sc->arg_fmt && sc->arg_fmt[arg.idx].strtoul == STUL_BTF_TYPE)) in syscall__scnprintf_args()
2362 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); in syscall__scnprintf_args()
2364 if (trace->show_arg_names) in syscall__scnprintf_args()
2365 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); in syscall__scnprintf_args()
2367 default_scnprintf = sc->arg_fmt[arg.idx].scnprintf; in syscall__scnprintf_args()
2369 if (trace->force_btf || default_scnprintf == NULL || default_scnprintf == SCA_PTR) { in syscall__scnprintf_args()
2371 size - printed, val, field->type); in syscall__scnprintf_args()
2378 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], in syscall__scnprintf_args()
2379 bf + printed, size - printed, &arg, val); in syscall__scnprintf_args()
2381 } else if (IS_ERR(sc->tp_format)) { in syscall__scnprintf_args()
2387 while (arg.idx < sc->nr_args) { in syscall__scnprintf_args()
2392 printed += scnprintf(bf + printed, size - printed, ", "); in syscall__scnprintf_args()
2393 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg); in syscall__scnprintf_args()
2394 …printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &ar… in syscall__scnprintf_args()
2421 * grep "NR -1 " /t/trace_pipe in trace__syscall_info()
2427 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n", in trace__syscall_info()
2433 err = -EINVAL; in trace__syscall_info()
2435 if (id > trace->sctbl->syscalls.max_id) { in trace__syscall_info()
2439 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) && in trace__syscall_info()
2443 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent) in trace__syscall_info()
2446 return &trace->syscalls.table[id]; in trace__syscall_info()
2451 …fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, s… in trace__syscall_info()
2452 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL) in trace__syscall_info()
2453 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name); in trace__syscall_info()
2454 fputs(" information\n", trace->output); in trace__syscall_info()
2473 inode = intlist__findnew(ttrace->syscall_stats, id); in thread__update_stats()
2477 stats = inode->priv; in thread__update_stats()
2483 init_stats(&stats->stats); in thread__update_stats()
2484 inode->priv = stats; in thread__update_stats()
2487 if (ttrace->entry_time && sample->time > ttrace->entry_time) in thread__update_stats()
2488 duration = sample->time - ttrace->entry_time; in thread__update_stats()
2490 update_stats(&stats->stats, duration); in thread__update_stats()
2493 ++stats->nr_failures; in thread__update_stats()
2498 err = -err; in thread__update_stats()
2499 if (err > stats->max_errno) { in thread__update_stats()
2500 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32)); in thread__update_stats()
2503 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32)); in thread__update_stats()
2511 stats->errnos = new_errnos; in thread__update_stats()
2512 stats->max_errno = err; in thread__update_stats()
2515 ++stats->errnos[err - 1]; in thread__update_stats()
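Note: thread__update_stats keeps a per-syscall errno histogram that is grown on demand: the errnos array is realloc()ed up to the largest errno seen, the new tail is zeroed, and errnos[err - 1] is bumped. A standalone sketch with a hypothetical struct:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct errno_stats_sketch {
	uint32_t *errnos;	/* errnos[e - 1] counts returns of -e */
	int max_errno;
};

static void account_errno_sketch(struct errno_stats_sketch *stats, int err /* positive errno */)
{
	if (err > stats->max_errno) {
		uint32_t *new_errnos = realloc(stats->errnos, err * sizeof(uint32_t));

		if (new_errnos == NULL)
			return;
		/* zero only the counters that were just added */
		memset(new_errnos + stats->max_errno, 0,
		       (err - stats->max_errno) * sizeof(uint32_t));
		stats->errnos = new_errnos;
		stats->max_errno = err;
	}
	++stats->errnos[err - 1];
}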
2525 if (trace->failure_only || trace->current == NULL) in trace__printf_interrupted_entry()
2528 ttrace = thread__priv(trace->current); in trace__printf_interrupted_entry()
2530 if (!ttrace->entry_pending) in trace__printf_interrupted_entry()
2533 …printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->o… in trace__printf_interrupted_entry()
2534 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str); in trace__printf_interrupted_entry()
2536 if (len < trace->args_alignment - 4) in trace__printf_interrupted_entry()
2537 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " "); in trace__printf_interrupted_entry()
2539 printed += fprintf(trace->output, " ...\n"); in trace__printf_interrupted_entry()
2541 ttrace->entry_pending = false; in trace__printf_interrupted_entry()
2542 ++trace->nr_events_printed; in trace__printf_interrupted_entry()
2552 if (trace->print_sample) { in trace__fprintf_sample()
2553 double ts = (double)sample->time / NSEC_PER_MSEC; in trace__fprintf_sample()
2555 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n", in trace__fprintf_sample()
2558 sample->pid, sample->tid, sample->cpu); in trace__fprintf_sample()
2571 * sc->args_size but always after the full raw_syscalls:sys_enter payload, in syscall__augmented_args()
2574 * We'll revisit this later to pass s->args_size to the BPF augmenter in syscall__augmented_args()
2580 int args_size = raw_augmented_args_size ?: sc->args_size; in syscall__augmented_args()
2582 *augmented_args_size = sample->raw_size - args_size; in syscall__augmented_args()
2584 static uintptr_t argbuf[1024]; /* assuming single-threaded */ in syscall__augmented_args()
2590 * The perf ring-buffer is 8-byte aligned but sample->raw_data in syscall__augmented_args()
2594 * into a static buffer as it's single-threaded for now. in syscall__augmented_args()
2596 memcpy(argbuf, sample->raw_data + args_size, *augmented_args_size); in syscall__augmented_args()
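Note: syscall__augmented_args treats everything in sample->raw_data past the raw sys_enter argument block as the payload appended by the BPF augmenter, and copies it into a static 8-byte aligned scratch buffer because raw_data itself may only be 4-byte aligned. A sketch of that step with hypothetical names:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void *augmented_payload_sketch(const void *raw_data, int raw_size,
				      int args_size, int *payload_size)
{
	static uint64_t argbuf[1024];	/* 8-byte aligned, single-threaded scratch */

	*payload_size = raw_size - args_size;
	if (*payload_size <= 0 || (size_t)*payload_size > sizeof(argbuf))
		return NULL;

	/* copy before dereferencing to avoid unaligned 8-byte accesses */
	memcpy(argbuf, (const char *)raw_data + args_size, *payload_size);
	return argbuf;
}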
2608 zfree(&sc->arg_fmt); in syscall__exit()
2619 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; in trace__sys_enter()
2626 return -1; in trace__sys_enter()
2628 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__sys_enter()
2629 ttrace = thread__trace(thread, trace->output); in trace__sys_enter()
2637 if (ttrace->entry_str == NULL) { in trace__sys_enter()
2638 ttrace->entry_str = malloc(trace__entry_str_size); in trace__sys_enter()
2639 if (!ttrace->entry_str) in trace__sys_enter()
2643 if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) in trace__sys_enter()
2649 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file, in trace__sys_enter()
2652 * thinking that the extra 2 u64 args are the augmented filename, so just check in trace__sys_enter()
2655 if (evsel != trace->syscalls.events.sys_enter) in trace__sys_enter()
2656 …augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_sy… in trace__sys_enter()
2657 ttrace->entry_time = sample->time; in trace__sys_enter()
2658 msg = ttrace->entry_str; in trace__sys_enter()
2659 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name); in trace__sys_enter()
2661 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed, in trace__sys_enter()
2664 if (sc->is_exit) { in trace__sys_enter()
2665 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) { in trace__sys_enter()
2668 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output); in trace__sys_enter()
2669 printed = fprintf(trace->output, "%s)", ttrace->entry_str); in trace__sys_enter()
2670 if (trace->args_alignment > printed) in trace__sys_enter()
2671 alignment = trace->args_alignment - printed; in trace__sys_enter()
2672 fprintf(trace->output, "%*s= ?\n", alignment, " "); in trace__sys_enter()
2675 ttrace->entry_pending = true; in trace__sys_enter()
2677 ttrace->filename.pending_open = false; in trace__sys_enter()
2680 if (trace->current != thread) { in trace__sys_enter()
2681 thread__put(trace->current); in trace__sys_enter()
2682 trace->current = thread__get(thread); in trace__sys_enter()
2695 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; in trace__fprintf_sys_enter()
2703 return -1; in trace__fprintf_sys_enter()
2705 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__fprintf_sys_enter()
2706 ttrace = thread__trace(thread, trace->output); in trace__fprintf_sys_enter()
2715 …augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_sy… in trace__fprintf_sys_enter()
2717 fprintf(trace->output, "%.*s", (int)printed, msg); in trace__fprintf_sys_enter()
2729 int max_stack = evsel->core.attr.sample_max_stack ? in trace__resolve_callchain()
2730 evsel->core.attr.sample_max_stack : in trace__resolve_callchain()
2731 trace->max_stack; in trace__resolve_callchain()
2732 int err = -1; in trace__resolve_callchain()
2735 if (machine__resolve(trace->host, &al, sample) < 0) in trace__resolve_callchain()
2746 /* TODO: user-configurable print_opts */ in trace__fprintf_callchain()
2751 …chain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output); in trace__fprintf_callchain()
2769 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0; in trace__sys_exit()
2770 int alignment = trace->args_alignment; in trace__sys_exit()
2775 return -1; in trace__sys_exit()
2777 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__sys_exit()
2778 ttrace = thread__trace(thread, trace->output); in trace__sys_exit()
2786 if (trace->summary) in trace__sys_exit()
2787 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary); in trace__sys_exit()
2789 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) { in trace__sys_exit()
2790 trace__set_fd_pathname(thread, ret, ttrace->filename.name); in trace__sys_exit()
2791 ttrace->filename.pending_open = false; in trace__sys_exit()
2792 ++trace->stats.vfs_getname; in trace__sys_exit()
2795 if (ttrace->entry_time) { in trace__sys_exit()
2796 duration = sample->time - ttrace->entry_time; in trace__sys_exit()
2800 } else if (trace->duration_filter) in trace__sys_exit()
2803 if (sample->callchain) { in trace__sys_exit()
2808 if (cursor->nr < trace->min_stack) in trace__sys_exit()
2814 if (trace->summary_only || (ret >= 0 && trace->failure_only)) in trace__sys_exit()
2817 …trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace- in trace__sys_exit()
2819 if (ttrace->entry_pending) { in trace__sys_exit()
2820 printed = fprintf(trace->output, "%s", ttrace->entry_str); in trace__sys_exit()
2822 printed += fprintf(trace->output, " ... ["); in trace__sys_exit()
2823 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); in trace__sys_exit()
2825 printed += fprintf(trace->output, "]: %s()", sc->name); in trace__sys_exit()
2831 alignment -= printed; in trace__sys_exit()
2835 fprintf(trace->output, ")%*s= ", alignment, " "); in trace__sys_exit()
2837 if (sc->fmt == NULL) { in trace__sys_exit()
2841 fprintf(trace->output, "%ld", ret); in trace__sys_exit()
2845 const char *emsg = str_error_r(-ret, bf, sizeof(bf)), in trace__sys_exit()
2846 *e = errno_to_name(evsel, -ret); in trace__sys_exit()
2848 fprintf(trace->output, "-1 %s (%s)", e, emsg); in trace__sys_exit()
2850 } else if (ret == 0 && sc->fmt->timeout) in trace__sys_exit()
2851 fprintf(trace->output, "0 (Timeout)"); in trace__sys_exit()
2852 else if (ttrace->ret_scnprintf) { in trace__sys_exit()
2859 ttrace->ret_scnprintf(bf, sizeof(bf), &arg); in trace__sys_exit()
2860 ttrace->ret_scnprintf = NULL; in trace__sys_exit()
2861 fprintf(trace->output, "%s", bf); in trace__sys_exit()
2862 } else if (sc->fmt->hexret) in trace__sys_exit()
2863 fprintf(trace->output, "%#lx", ret); in trace__sys_exit()
2864 else if (sc->fmt->errpid) { in trace__sys_exit()
2865 struct thread *child = machine__find_thread(trace->host, ret, ret); in trace__sys_exit()
2868 fprintf(trace->output, "%ld", ret); in trace__sys_exit()
2870 fprintf(trace->output, " (%s)", thread__comm_str(child)); in trace__sys_exit()
2876 fputc('\n', trace->output); in trace__sys_exit()
2879 * We only consider an 'event' for the sake of --max-events a non-filtered in trace__sys_exit()
2882 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX) in trace__sys_exit()
2890 ttrace->entry_pending = false; in trace__sys_exit()
2901 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__vfs_getname()
2919 if (ttrace->filename.namelen < filename_len) { in trace__vfs_getname()
2920 char *f = realloc(ttrace->filename.name, filename_len + 1); in trace__vfs_getname()
2925 ttrace->filename.namelen = filename_len; in trace__vfs_getname()
2926 ttrace->filename.name = f; in trace__vfs_getname()
2929 strcpy(ttrace->filename.name, filename); in trace__vfs_getname()
2930 ttrace->filename.pending_open = true; in trace__vfs_getname()
2932 if (!ttrace->filename.ptr) in trace__vfs_getname()
2935 entry_str_len = strlen(ttrace->entry_str); in trace__vfs_getname()
2936 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */ in trace__vfs_getname()
2941 filename += filename_len - remaining_space; in trace__vfs_getname()
2945 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */ in trace__vfs_getname()
2946 pos = ttrace->entry_str + ttrace->filename.entry_str_pos; in trace__vfs_getname()
2950 ttrace->filename.ptr = 0; in trace__vfs_getname()
2951 ttrace->filename.entry_str_pos = 0; in trace__vfs_getname()
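Note: trace__vfs_getname patches the name resolved by probe:vfs_getname back into the pending entry string: it trims the filename if it will not fit, computes how many trailing bytes (including the NUL) must shift, and splices the string in at filename.entry_str_pos. The move itself is not among the matched lines, so this is only a generic sketch of splicing into a buffer once 'pos' and 'to_move' are known and capacity has been checked:

#include <string.h>

static void splice_string_sketch(char *pos, size_t to_move,
				 const char *insert, size_t insert_len)
{
	memmove(pos + insert_len, pos, to_move);	/* shift the tail right */
	memcpy(pos, insert, insert_len);		/* place the string in the gap */
}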
2964 struct thread *thread = machine__findnew_thread(trace->host, in trace__sched_stat_runtime()
2965 sample->pid, in trace__sched_stat_runtime()
2966 sample->tid); in trace__sched_stat_runtime()
2967 struct thread_trace *ttrace = thread__trace(thread, trace->output); in trace__sched_stat_runtime()
2972 ttrace->runtime_ms += runtime_ms; in trace__sched_stat_runtime()
2973 trace->runtime_ms += runtime_ms; in trace__sched_stat_runtime()
2979 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n", in trace__sched_stat_runtime()
2980 evsel->name, in trace__sched_stat_runtime()
2989 unsigned int val, void *extra __maybe_unused, FILE *fp) in bpf_output__printer()
3015 binary__fprintf(sample->raw_data, sample->raw_size, 8, in bpf_output__fprintf()
3016 bpf_output__printer, NULL, trace->output); in bpf_output__fprintf()
3017 ++trace->nr_events_printed; in bpf_output__fprintf()
3026 struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL; in trace__fprintf_tp_fields()
3040 .show_string_prefix = trace->show_string_prefix, in trace__fprintf_tp_fields()
3043 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) { in trace__fprintf_tp_fields()
3049 if (field->flags & TEP_FIELD_IS_ARRAY) { in trace__fprintf_tp_fields()
3050 int offset = field->offset; in trace__fprintf_tp_fields()
3052 if (field->flags & TEP_FIELD_IS_DYNAMIC) { in trace__fprintf_tp_fields()
3053 offset = format_field__intval(field, sample, evsel->needs_swap); in trace__fprintf_tp_fields()
3056 if (tep_field_is_relative(field->flags)) in trace__fprintf_tp_fields()
3057 offset += field->offset + field->size; in trace__fprintf_tp_fields()
3060 val = (uintptr_t)(sample->raw_data + offset); in trace__fprintf_tp_fields()
3062 val = format_field__intval(field, sample, evsel->needs_swap); in trace__fprintf_tp_fields()
3070 if (val == 0 && !trace->show_zeros && !arg->show_zero && arg->strtoul != STUL_BTF_TYPE) in trace__fprintf_tp_fields()
3073 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); in trace__fprintf_tp_fields()
3075 if (trace->show_arg_names) in trace__fprintf_tp_fields()
3076 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); in trace__fprintf_tp_fields()
3078 …btf_printed = trace__btf_scnprintf(trace, &syscall_arg, bf + printed, size - printed, val, field->… in trace__fprintf_tp_fields()
3084 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val); in trace__fprintf_tp_fields()
3087 return printed + fprintf(trace->output, "%.*s", (int)printed, bf); in trace__fprintf_tp_fields()
3097 if (evsel->nr_events_printed >= evsel->max_events) in trace__event_handler()
3100 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__event_handler()
3102 if (sample->callchain) { in trace__event_handler()
3107 if (cursor->nr < trace->min_stack) in trace__event_handler()
3114 trace__fprintf_tstamp(trace, sample->time, trace->output); in trace__event_handler()
3116 if (trace->trace_syscalls && trace->show_duration) in trace__event_handler()
3117 fprintf(trace->output, "( ): "); in trace__event_handler()
3120 trace__fprintf_comm_tid(trace, thread, trace->output); in trace__event_handler()
3122 if (evsel == trace->syscalls.events.bpf_output) { in trace__event_handler()
3127 fprintf(trace->output, "%s(", sc->name); in trace__event_handler()
3129 fputc(')', trace->output); in trace__event_handler()
3140 fprintf(trace->output, "%s(", evsel->name); in trace__event_handler()
3147 if (tp_format && (strncmp(tp_format->name, "sys_enter_", 10) || in trace__event_handler()
3149 if (trace->libtraceevent_print) { in trace__event_handler()
3150 event_format__fprintf(tp_format, sample->cpu, in trace__event_handler()
3151 sample->raw_data, sample->raw_size, in trace__event_handler()
3152 trace->output); in trace__event_handler()
3160 fprintf(trace->output, ")\n"); in trace__event_handler()
3167 ++trace->nr_events_printed; in trace__event_handler()
3169 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) { in trace__event_handler()
3183 if ((verbose > 0 || print_dso) && al->map) in print_location()
3184 fprintf(f, "%s@", dso__long_name(map__dso(al->map))); in print_location()
3186 if ((verbose > 0 || print_sym) && al->sym) in print_location()
3187 fprintf(f, "%s+0x%" PRIx64, al->sym->name, in print_location()
3188 al->addr - al->sym->start); in print_location()
3189 else if (al->map) in print_location()
3190 fprintf(f, "0x%" PRIx64, al->addr); in print_location()
3192 fprintf(f, "0x%" PRIx64, sample->addr); in print_location()
3204 int err = -1; in trace__pgfault()
3208 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__pgfault()
3210 if (sample->callchain) { in trace__pgfault()
3215 if (cursor->nr < trace->min_stack) in trace__pgfault()
3221 ttrace = thread__trace(thread, trace->output); in trace__pgfault()
3225 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ) in trace__pgfault()
3226 ttrace->pfmaj++; in trace__pgfault()
3228 ttrace->pfmin++; in trace__pgfault()
3230 if (trace->summary_only) in trace__pgfault()
3233 thread__find_symbol(thread, sample->cpumode, sample->ip, &al); in trace__pgfault()
3235 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); in trace__pgfault()
3237 fprintf(trace->output, "%sfault [", in trace__pgfault()
3238 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ? in trace__pgfault()
3241 print_location(trace->output, sample, &al, false, true); in trace__pgfault()
3243 fprintf(trace->output, "] => "); in trace__pgfault()
3245 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); in trace__pgfault()
3248 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); in trace__pgfault()
3256 print_location(trace->output, sample, &al, true, false); in trace__pgfault()
3258 fprintf(trace->output, " (%c%c)\n", map_type, al.level); in trace__pgfault()
3265 ++trace->nr_events_printed; in trace__pgfault()
3280 * and don't use sample->time unconditionally, we may end up having in trace__set_base_time()
3286 if (trace->base_time == 0 && !trace->full_time && in trace__set_base_time()
3287 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) in trace__set_base_time()
3288 trace->base_time = sample->time; in trace__set_base_time()
3301 tracepoint_handler handler = evsel->handler; in trace__process_sample()
3303 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); in trace__process_sample()
3310 ++trace->nr_events; in trace__process_sample()
3324 "-R", in trace__record()
3325 "-m", "1024", in trace__record()
3326 "-c", "1", in trace__record()
3330 const char * const sc_args[] = { "-e", }; in trace__record()
3332 const char * const majpf_args[] = { "-e", "major-faults" }; in trace__record()
3334 const char * const minpf_args[] = { "-e", "minor-faults" }; in trace__record()
3336 int err = -1; in trace__record()
3350 if (trace->trace_syscalls) { in trace__record()
3354 /* event string may be different for older kernels - e.g., RHEL6 */ in trace__record()
3365 rec_argv[j++] = "--filter"; in trace__record()
3368 if (trace->trace_pgfaults & TRACE_PFMAJ) in trace__record()
3372 if (trace->trace_pgfaults & TRACE_PFMIN) in trace__record()
3406 evsel->handler = trace__vfs_getname; in evlist__add_vfs_getname()
3411 list_del_init(&evsel->core.node); in evlist__add_vfs_getname()
3412 evsel->evlist = NULL; in evlist__add_vfs_getname()
3434 evsel->handler = trace__pgfault; in evsel__new_pgfault()
3444 evsel_trace__delete(evsel->priv); in evlist__free_syscall_tp_fields()
3445 evsel->priv = NULL; in evlist__free_syscall_tp_fields()
3451 const u32 type = event->header.type; in trace__handle_event()
3455 trace__process_event(trace, trace->host, event, sample); in trace__handle_event()
3459 evsel = evlist__id2evsel(trace->evlist, sample->id); in trace__handle_event()
3461 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id); in trace__handle_event()
3465 if (evswitch__discard(&trace->evswitch, evsel)) in trace__handle_event()
3470 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && in trace__handle_event()
3471 sample->raw_data == NULL) { in trace__handle_event()
3472 …fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", in trace__handle_event()
3473 evsel__name(evsel), sample->tid, in trace__handle_event()
3474 sample->cpu, sample->raw_size); in trace__handle_event()
3476 tracepoint_handler handler = evsel->handler; in trace__handle_event()
3480 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX) in trace__handle_event()
3486 int ret = -1; in trace__add_syscall_newtp()
3487 struct evlist *evlist = trace->evlist; in trace__add_syscall_newtp()
3504 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param); in trace__add_syscall_newtp()
3505 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param); in trace__add_syscall_newtp()
3510 if (callchain_param.enabled && !trace->kernel_syscallchains) { in trace__add_syscall_newtp()
3514 * debugging reasons using --kernel_syscall_callchains in trace__add_syscall_newtp()
3516 sys_exit->core.attr.exclude_callchain_kernel = 1; in trace__add_syscall_newtp()
3519 trace->syscalls.events.sys_enter = sys_enter; in trace__add_syscall_newtp()
3520 trace->syscalls.events.sys_exit = sys_exit; in trace__add_syscall_newtp()
3535 int err = -1; in trace__set_ev_qualifier_tp_filter()
3537 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier, in trace__set_ev_qualifier_tp_filter()
3538 trace->ev_qualifier_ids.nr, in trace__set_ev_qualifier_tp_filter()
3539 trace->ev_qualifier_ids.entries); in trace__set_ev_qualifier_tp_filter()
3544 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) { in trace__set_ev_qualifier_tp_filter()
3545 sys_exit = trace->syscalls.events.sys_exit; in trace__set_ev_qualifier_tp_filter()
3562 if (arg_fmt->type != NULL) in syscall_arg_fmt__cache_btf_struct()
3563 return -1; in syscall_arg_fmt__cache_btf_struct()
3567 return -1; in syscall_arg_fmt__cache_btf_struct()
3569 arg_fmt->type = btf__type_by_id(btf, id); in syscall_arg_fmt__cache_btf_struct()
3570 arg_fmt->type_id = id; in syscall_arg_fmt__cache_btf_struct()
3580 if (trace->skel->obj == NULL) in trace__find_bpf_program_by_title()
3583 bpf_object__for_each_program(pos, trace->skel->obj) { in trace__find_bpf_program_by_title()
3601 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name); in trace__find_syscall_bpf_prog()
3605 if (sc->fmt && sc->fmt->alias) { in trace__find_syscall_bpf_prog()
3606 …scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->al… in trace__find_syscall_bpf_prog()
3622 prog_name, type, sc->name); in trace__find_syscall_bpf_prog()
3624 return trace->skel->progs.syscall_unaugmented; in trace__find_syscall_bpf_prog()
3634 …sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.… in trace__init_syscall_bpf_progs()
3635 …sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.… in trace__init_syscall_bpf_progs()
3641 …return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_u… in trace__bpf_prog_sys_enter_fd()
3647 …return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_un… in trace__bpf_prog_sys_exit_fd()
3660 return -1; in trace__bpf_sys_enter_beauty_map()
3663 if (trace->btf == NULL) in trace__bpf_sys_enter_beauty_map()
3664 return -1; in trace__bpf_sys_enter_beauty_map()
3666 for (i = 0, field = sc->args; field; ++i, field = field->next) { in trace__bpf_sys_enter_beauty_map()
3668 if (!sc->arg_fmt[i].from_user) in trace__bpf_sys_enter_beauty_map()
3671 struct_offset = strstr(field->type, "struct "); in trace__bpf_sys_enter_beauty_map()
3673 struct_offset = strstr(field->type, "union "); in trace__bpf_sys_enter_beauty_map()
3677 …if (field->flags & TEP_FIELD_IS_POINTER && struct_offset) { /* struct or union (think BPF's attr a… in trace__bpf_sys_enter_beauty_map()
3688 if (syscall_arg_fmt__cache_btf_struct(&sc->arg_fmt[i], trace->btf, name)) in trace__bpf_sys_enter_beauty_map()
3691 bt = sc->arg_fmt[i].type; in trace__bpf_sys_enter_beauty_map()
3692 beauty_array[i] = bt->size; in trace__bpf_sys_enter_beauty_map()
3694 } else if (field->flags & TEP_FIELD_IS_POINTER && /* string */ in trace__bpf_sys_enter_beauty_map()
3695 strcmp(field->type, "const char *") == 0 && in trace__bpf_sys_enter_beauty_map()
3696 (strstr(field->name, "name") || in trace__bpf_sys_enter_beauty_map()
3697 strstr(field->name, "path") || in trace__bpf_sys_enter_beauty_map()
3698 strstr(field->name, "file") || in trace__bpf_sys_enter_beauty_map()
3699 strstr(field->name, "root") || in trace__bpf_sys_enter_beauty_map()
3700 strstr(field->name, "key") || in trace__bpf_sys_enter_beauty_map()
3701 strstr(field->name, "special") || in trace__bpf_sys_enter_beauty_map()
3702 strstr(field->name, "type") || in trace__bpf_sys_enter_beauty_map()
3703 strstr(field->name, "description"))) { in trace__bpf_sys_enter_beauty_map()
3706 } else if (field->flags & TEP_FIELD_IS_POINTER && /* buffer */ in trace__bpf_sys_enter_beauty_map()
3707 strstr(field->type, "char *") && in trace__bpf_sys_enter_beauty_map()
3708 (strstr(field->name, "buf") || in trace__bpf_sys_enter_beauty_map()
3709 strstr(field->name, "val") || in trace__bpf_sys_enter_beauty_map()
3710 strstr(field->name, "msg"))) { in trace__bpf_sys_enter_beauty_map()
3715 for (j = 0, field_tmp = sc->args; field_tmp; ++j, field_tmp = field_tmp->next) { in trace__bpf_sys_enter_beauty_map()
3716 if (!(field_tmp->flags & TEP_FIELD_IS_POINTER) && /* only integers */ in trace__bpf_sys_enter_beauty_map()
3717 (strstr(field_tmp->name, "count") || in trace__bpf_sys_enter_beauty_map()
3718 strstr(field_tmp->name, "siz") || /* size, bufsiz */ in trace__bpf_sys_enter_beauty_map()
3719 (strstr(field_tmp->name, "len") && strcmp(field_tmp->name, "filename")))) { in trace__bpf_sys_enter_beauty_map()
3721 beauty_array[i] = -(j + 1); in trace__bpf_sys_enter_beauty_map()
3732 return -1; in trace__bpf_sys_enter_beauty_map()
3741 for (field = sc->args; field; field = field->next) { in trace__find_usable_bpf_prog_entry()
3742 if (field->flags & TEP_FIELD_IS_POINTER) in trace__find_usable_bpf_prog_entry()
3749 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { in trace__find_usable_bpf_prog_entry()
3750 int id = syscalltbl__id_at_idx(trace->sctbl, i); in trace__find_usable_bpf_prog_entry()
3756 pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented) in trace__find_usable_bpf_prog_entry()
3759 for (field = sc->args, candidate_field = pair->args; in trace__find_usable_bpf_prog_entry()
3760 field && candidate_field; field = field->next, candidate_field = candidate_field->next) { in trace__find_usable_bpf_prog_entry()
3761 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER, in trace__find_usable_bpf_prog_entry()
3762 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER; in trace__find_usable_bpf_prog_entry()
3777 if (strcmp(field->type, candidate_field->type)) in trace__find_usable_bpf_prog_entry()
3785 if (strcmp(field->type, "const char *") == 0 && in trace__find_usable_bpf_prog_entry()
3786 !(strstr(field->name, "name") || in trace__find_usable_bpf_prog_entry()
3787 strstr(field->name, "path") || in trace__find_usable_bpf_prog_entry()
3788 strstr(field->name, "file") || in trace__find_usable_bpf_prog_entry()
3789 strstr(field->name, "root") || in trace__find_usable_bpf_prog_entry()
3790 strstr(field->name, "description"))) in trace__find_usable_bpf_prog_entry()
3805 …for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->… in trace__find_usable_bpf_prog_entry()
3806 if (candidate_field->flags & TEP_FIELD_IS_POINTER) in trace__find_usable_bpf_prog_entry()
3810 pair_prog = pair->bpf_prog.sys_enter; in trace__find_usable_bpf_prog_entry()
3815 * program for a filtered syscall on a non-filtered one. in trace__find_usable_bpf_prog_entry()
3821 …pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_ent… in trace__find_usable_bpf_prog_entry()
3822 if (pair_prog == trace->skel->progs.syscall_unaugmented) in trace__find_usable_bpf_prog_entry()
3826 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name); in trace__find_usable_bpf_prog_entry()
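/*
 * Reuse heuristic above, summarized from what is visible here: a syscall
 * without its own augmenter can borrow another syscall's sys_enter BPF
 * program when, walking the two argument lists in parallel, pointer/integer
 * kinds and field types match, "const char *" arguments only qualify when
 * their name looks pathname-like (name/path/file/root/description), and the
 * candidate is presumably rejected if it has extra pointer arguments past
 * the common prefix, since it would then collect more than this syscall
 * supplies.
 */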
3837 int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter); in trace__init_syscalls_bpf_prog_array_maps()
3838 int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit); in trace__init_syscalls_bpf_prog_array_maps()
3839 int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter); in trace__init_syscalls_bpf_prog_array_maps()
3843 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { in trace__init_syscalls_bpf_prog_array_maps()
3844 int prog_fd, key = syscalltbl__id_at_idx(trace->sctbl, i); in trace__init_syscalls_bpf_prog_array_maps()
3874 * syscall with an augmenter so that we can auto-reuse it. in trace__init_syscalls_bpf_prog_array_maps()
3899 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { in trace__init_syscalls_bpf_prog_array_maps()
3900 int key = syscalltbl__id_at_idx(trace->sctbl, i); in trace__init_syscalls_bpf_prog_array_maps()
3905 if (sc == NULL || sc->bpf_prog.sys_enter == NULL) in trace__init_syscalls_bpf_prog_array_maps()
3912 if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented) in trace__init_syscalls_bpf_prog_array_maps()
3923 sc->bpf_prog.sys_enter = pair_prog; in trace__init_syscalls_bpf_prog_array_maps()
3929 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter); in trace__init_syscalls_bpf_prog_array_maps()
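/*
 * Map wiring note: map_enter_fd, map_exit_fd and beauty_map_fd are keyed by
 * syscall id (syscalltbl__id_at_idx()), and the prog_fd computed here is
 * presumably what gets written into the sys_enter/sys_exit program maps so
 * the skeleton's generic entry/exit BPF programs can dispatch (e.g. via tail
 * call) to the per-syscall augmenter, falling back to reusing another
 * syscall's augmenter when one with a compatible layout was found above.
 */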
3941 if (trace->syscalls.events.sys_enter) in trace__set_ev_qualifier_filter()
3970 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]); in trace__set_filter_loop_pids()
3973 struct thread *parent = machine__find_thread(trace->host, in trace__set_filter_loop_pids()
3981 strstarts(thread__comm_str(parent), "gnome-terminal")) { in trace__set_filter_loop_pids()
3988 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids); in trace__set_filter_loop_pids()
3989 if (!err && trace->filter_pids.map) in trace__set_filter_loop_pids()
3990 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids); in trace__set_filter_loop_pids()
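/*
 * The function above filters out pids that would cause a feedback loop:
 * pids[0] (presumably perf trace's own pid) plus, when found by walking up
 * the parent chain, a terminal-like ancestor (the gnome-terminal check
 * visible here) whose repainting of our own output would otherwise generate
 * yet more events to trace.
 */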
4004 if (trace->filter_pids.nr > 0) { in trace__set_filter_pids()
4005 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr, in trace__set_filter_pids()
4006 trace->filter_pids.entries); in trace__set_filter_pids()
4007 if (!err && trace->filter_pids.map) { in trace__set_filter_pids()
4008 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr, in trace__set_filter_pids()
4009 trace->filter_pids.entries); in trace__set_filter_pids()
4011 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) { in trace__set_filter_pids()
4020 struct evlist *evlist = trace->evlist; in __trace__deliver_event()
4025 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); in __trace__deliver_event()
4034 u64 first = ordered_events__first_time(&trace->oe.data); in __trace__flush_events()
4035 u64 flush = trace->oe.last - NSEC_PER_SEC; in __trace__flush_events()
4039 return ordered_events__flush_time(&trace->oe.data, flush); in __trace__flush_events()
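/*
 * Flush-window note: with --sort-events the samples are queued into
 * trace->oe.data, and only events at least NSEC_PER_SEC (one second) older
 * than the newest timestamp seen so far (trace->oe.last) get flushed,
 * leaving a ~1s reordering window for events that show up out of order
 * across the different mmap buffers.
 */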
4046 return !trace->sort_events ? 0 : __trace__flush_events(trace); in trace__flush_events()
4053 if (!trace->sort_events) in trace__deliver_event()
4056 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last); in trace__deliver_event()
4057 if (err && err != -1) in trace__deliver_event()
4060 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL); in trace__deliver_event()
4067 static int ordered_events__deliver_event(struct ordered_events *oe, in ordered_events__deliver_event() argument
4070 struct trace *trace = container_of(oe, struct trace, oe.data); in ordered_events__deliver_event()
4072 return __trace__deliver_event(trace, event->event); in ordered_events__deliver_event()
4088 for (const struct tep_format_field *field = tp_format->format.fields; field; in evsel__find_syscall_arg_fmt_by_name()
4089 field = field->next, ++fmt) { in evsel__find_syscall_arg_fmt_by_name()
4090 if (strcmp(field->name, arg) == 0) { in evsel__find_syscall_arg_fmt_by_name()
4091 *type = field->type; in evsel__find_syscall_arg_fmt_by_name()
4101 char *tok, *left = evsel->filter, *new_filter = evsel->filter; in trace__expand_filter()
4130 int left_size = tok - left, in trace__expand_filter()
4131 right_size = right_end - right; in trace__expand_filter()
4134 while (isspace(left[left_size - 1])) in trace__expand_filter()
4135 --left_size; in trace__expand_filter()
4142 arg, evsel->name, evsel->filter); in trace__expand_filter()
4143 return -1; in trace__expand_filter()
4146 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ", in trace__expand_filter()
4147 arg, (int)(right - tok), tok, right_size, right); in trace__expand_filter()
4149 if (fmt->strtoul) { in trace__expand_filter()
4155 .parm = fmt->parm, in trace__expand_filter()
4158 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) { in trace__expand_filter()
4161 int expansion_offset = right - new_filter; in trace__expand_filter()
4168 return -1; in trace__expand_filter()
4170 if (new_filter != evsel->filter) in trace__expand_filter()
4176 right_size, right, arg, evsel->name, evsel->filter); in trace__expand_filter()
4177 return -1; in trace__expand_filter()
4181 arg, evsel->name, evsel->filter); in trace__expand_filter()
4182 return -1; in trace__expand_filter()
4191 if (new_filter != evsel->filter) { in trace__expand_filter()
4192 pr_debug("New filter for %s: %s\n", evsel->name, new_filter); in trace__expand_filter()
4202 struct evlist *evlist = trace->evlist; in trace__expand_filters()
4206 if (evsel->filter == NULL) in trace__expand_filters()
4211 return -1; in trace__expand_filters()
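/*
 * In short, trace__expand_filter() above scans evsel->filter for terms whose
 * left-hand side names a syscall argument, looks that argument up via
 * evsel__find_syscall_arg_fmt_by_name(), and when the format has a
 * ->strtoul() beautifier it converts the symbolic right-hand side to its
 * numeric value and splices the number back into the filter string, so
 * filters can use the same symbolic names the beautifiers print.
 */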
4220 struct evlist *evlist = trace->evlist; in trace__run()
4222 int err = -1, i; in trace__run()
4227 trace->live = true; in trace__run()
4229 if (!trace->raw_augmented_syscalls) { in trace__run()
4230 if (trace->trace_syscalls && trace__add_syscall_newtp(trace)) in trace__run()
4233 if (trace->trace_syscalls) in trace__run()
4234 trace->vfs_getname = evlist__add_vfs_getname(evlist); in trace__run()
4237 if ((trace->trace_pgfaults & TRACE_PFMAJ)) { in trace__run()
4241 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param); in trace__run()
4245 if ((trace->trace_pgfaults & TRACE_PFMIN)) { in trace__run()
4249 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param); in trace__run()
4253 /* Enable ignoring missing threads when -u/-p option is defined. */ in trace__run()
4254 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid; in trace__run()
4256 if (trace->sched && in trace__run()
4263 * trace -G A -e sched:*switch in trace__run()
4268 * trace -e sched:*switch -G A in trace__run()
4276 * trace -G A -e sched:*switch -G B in trace__run()
4282 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL. in trace__run()
4284 if (trace->cgroup) in trace__run()
4285 evlist__set_default_cgroup(trace->evlist, trace->cgroup); in trace__run()
4287 err = evlist__create_maps(evlist, &trace->opts.target); in trace__run()
4289 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n"); in trace__run()
4295 fprintf(trace->output, "Problems initializing symbol libraries!\n"); in trace__run()
4299 evlist__config(evlist, &trace->opts, &callchain_param); in trace__run()
4302 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL); in trace__run()
4304 fprintf(trace->output, "Couldn't run the workload!\n"); in trace__run()
4307 workload_pid = evlist->workload.pid; in trace__run()
4314 if (trace->syscalls.events.bpf_output) { in trace__run()
4319 * CPU the bpf-output event's file descriptor. in trace__run()
4321 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) { in trace__run()
4322 bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__, in trace__run()
4324 xyarray__entry(trace->syscalls.events.bpf_output->core.fd, in trace__run()
4330 if (trace->skel) in trace__run()
4331 trace->filter_pids.map = trace->skel->maps.pids_filtered; in trace__run()
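/*
 * BPF output wiring note: the loop above stores, for each CPU, the
 * bpf-output event's per-CPU file descriptor into the skeleton's
 * __augmented_syscalls__ map, presumably so the augmenter programs emit
 * their expanded payloads into the very buffers perf already mmaps; the
 * pids_filtered map is then remembered so the pid filters set up later via
 * bpf_map__set_filter_pids() also apply on the BPF side.
 */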
4338 if (trace->skel && trace->skel->progs.sys_enter) in trace__run()
4342 if (trace->ev_qualifier_ids.nr > 0) { in trace__run()
4347 if (trace->syscalls.events.sys_exit) { in trace__run()
4349 trace->syscalls.events.sys_exit->filter); in trace__run()
4356 * fd->pathname table and were ending up showing the last value set by in trace__run()
4364 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close")); in trace__run()
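/*
 * As the comment fragment above explains, the fd->pathname table can only
 * stay accurate if 'close' is among the traced syscalls; when it is not,
 * fd_path_disabled is set so stale pathnames are not shown for reused fds.
 */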
4369 err = evlist__apply_filters(evlist, &evsel, &trace->opts.target); in trace__run()
4373 err = evlist__mmap(evlist, trace->opts.mmap_pages); in trace__run()
4377 if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay) in trace__run()
4383 if (trace->opts.target.initial_delay) { in trace__run()
4384 usleep(trace->opts.target.initial_delay * 1000); in trace__run()
4388 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 || in trace__run()
4389 perf_thread_map__nr(evlist->core.threads) > 1 || in trace__run()
4390 evlist__first(evlist)->core.attr.inherit; in trace__run()
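/*
 * Startup notes: the usleep() above converts the -D/--delay value from
 * milliseconds to microseconds before tracing proceeds, and multiple_threads
 * is set when more than one thread is mapped, the first thread-map entry is
 * -1 (whole-system or CPU-wide style target), or the first event inherits
 * across fork, presumably so the output knows to prefix samples with the
 * originating tid.
 */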
4393 * Now that we already used evsel->core.attr to ask the kernel to setup the in trace__run()
4394 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in in trace__run()
4395 * trace__resolve_callchain(), allowing per-event max-stack settings in trace__run()
4396 * to override an explicitly set --max-stack global setting. in trace__run()
4400 evsel->core.attr.sample_max_stack == 0) in trace__run()
4401 evsel->core.attr.sample_max_stack = trace->max_stack; in trace__run()
4404 before = trace->nr_events; in trace__run()
4406 for (i = 0; i < evlist->core.nr_mmaps; i++) { in trace__run()
4410 md = &evlist->mmap[i]; in trace__run()
4411 if (perf_mmap__read_init(&md->core) < 0) in trace__run()
4414 while ((event = perf_mmap__read_event(&md->core)) != NULL) { in trace__run()
4415 ++trace->nr_events; in trace__run()
4421 perf_mmap__consume(&md->core); in trace__run()
4431 perf_mmap__read_done(&md->core); in trace__run()
4434 if (trace->nr_events == before) { in trace__run()
4435 int timeout = done ? 100 : -1; in trace__run()
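/*
 * Poll timeout note: when this pass found no new events
 * (trace->nr_events == before), the subsequent wait presumably uses this
 * value: block indefinitely (-1) while still running, but only 100ms once
 * 'done' is set, so the remaining mmap buffers can be drained before
 * exiting.
 */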
4451 thread__zput(trace->current); in trace__run()
4455 if (trace->sort_events) in trace__run()
4456 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL); in trace__run()
4459 if (trace->summary) in trace__run()
4460 trace__fprintf_thread_summary(trace, trace->output); in trace__run()
4462 if (trace->show_tool_stats) { in trace__run()
4463 fprintf(trace->output, "Stats:\n " in trace__run()
4466 trace->stats.vfs_getname, in trace__run()
4467 trace->stats.proc_getname); in trace__run()
4475 cgroup__put(trace->cgroup); in trace__run()
4476 trace->evlist = NULL; in trace__run()
4477 trace->live = false; in trace__run()
4498 fprintf(trace->output, "%s\n", errbuf); in trace__run()
4502 fprintf(trace->output, in trace__run()
4504 evsel->filter, evsel__name(evsel), errno, in trace__run()
4509 fprintf(trace->output, "Not enough memory to run!\n"); in trace__run()
4513 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno)); in trace__run()
4525 .force = trace->force, in trace__replay()
4529 int err = -1; in trace__replay()
4531 trace->tool.sample = trace__process_sample; in trace__replay()
4532 trace->tool.mmap = perf_event__process_mmap; in trace__replay()
4533 trace->tool.mmap2 = perf_event__process_mmap2; in trace__replay()
4534 trace->tool.comm = perf_event__process_comm; in trace__replay()
4535 trace->tool.exit = perf_event__process_exit; in trace__replay()
4536 trace->tool.fork = perf_event__process_fork; in trace__replay()
4537 trace->tool.attr = perf_event__process_attr; in trace__replay()
4538 trace->tool.tracing_data = perf_event__process_tracing_data; in trace__replay()
4539 trace->tool.build_id = perf_event__process_build_id; in trace__replay()
4540 trace->tool.namespaces = perf_event__process_namespaces; in trace__replay()
4542 trace->tool.ordered_events = true; in trace__replay()
4543 trace->tool.ordering_requires_timestamps = true; in trace__replay()
4546 trace->multiple_threads = true; in trace__replay()
4548 session = perf_session__new(&data, &trace->tool); in trace__replay()
4552 if (trace->opts.target.pid) in trace__replay()
4553 symbol_conf.pid_list_str = strdup(trace->opts.target.pid); in trace__replay()
4555 if (trace->opts.target.tid) in trace__replay()
4556 symbol_conf.tid_list_str = strdup(trace->opts.target.tid); in trace__replay()
4558 if (symbol__init(&session->header.env) < 0) in trace__replay()
4561 trace->host = &session->machines.host; in trace__replay()
4567 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter"); in trace__replay()
4568 trace->syscalls.events.sys_enter = evsel; in trace__replay()
4571 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter"); in trace__replay()
4580 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit"); in trace__replay()
4581 trace->syscalls.events.sys_exit = evsel; in trace__replay()
4583 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit"); in trace__replay()
4591 evlist__for_each_entry(session->evlist, evsel) { in trace__replay()
4592 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && in trace__replay()
4593 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ || in trace__replay()
4594 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN || in trace__replay()
4595 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS)) in trace__replay()
4596 evsel->handler = trace__pgfault; in trace__replay()
4605 else if (trace->summary) in trace__replay()
4606 trace__fprintf_thread_summary(trace, trace->output); in trace__replay()
4623 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
4630 struct syscall_stats *stats = source->priv;
4632 entry->syscall = source->i;
4633 entry->stats = stats;
4634 entry->msecs = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
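/*
 * Sort-key note: entry->msecs approximates the total time spent in the
 * syscall, count (stats->stats.n) times the average duration in
 * milliseconds, and the DEFINE_RESORT_RB() comparison above
 * (a->msecs > b->msecs) orders the per-thread summary so the most
 * time-consuming syscalls are listed first.
 */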
4643 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats); in thread__dump_stats()
4652 printed += fprintf(fp, "   --------------- -------- ------ -------- --------- --------- ---------     --------\n"); in thread__dump_stats()
4655 struct syscall_stats *stats = syscall_stats_entry->stats; in thread__dump_stats()
4657 double min = (double)(stats->stats.min) / NSEC_PER_MSEC; in thread__dump_stats()
4658 double max = (double)(stats->stats.max) / NSEC_PER_MSEC; in thread__dump_stats()
4659 double avg = avg_stats(&stats->stats); in thread__dump_stats()
4661 u64 n = (u64)stats->stats.n; in thread__dump_stats()
4663 pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0; in thread__dump_stats()
4666 sc = &trace->syscalls.table[syscall_stats_entry->syscall]; in thread__dump_stats()
4667 printed += fprintf(fp, " %-15s", sc->name); in thread__dump_stats()
4669 n, stats->nr_failures, syscall_stats_entry->msecs, min, avg); in thread__dump_stats()
4672 if (trace->errno_summary && stats->nr_failures) { in thread__dump_stats()
4675 for (e = 0; e < stats->max_errno; ++e) { in thread__dump_stats()
4676 if (stats->errnos[e] != 0) in thread__dump_stats()
4677 fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]); in thread__dump_stats()
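/*
 * Errno-summary note: stats->errnos[] is indexed from zero but printed
 * against e + 1, i.e. slot 0 carries the count for errno 1 (EPERM); this
 * per-errno breakdown is only emitted when --errno-summary is in effect
 * (trace->errno_summary).
 */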
4698 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0; in trace__fprintf_thread()
4701 printed += fprintf(fp, "%lu events, ", ttrace->nr_events); in trace__fprintf_thread()
4703 if (ttrace->pfmaj) in trace__fprintf_thread()
4704 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj); in trace__fprintf_thread()
4705 if (ttrace->pfmin) in trace__fprintf_thread()
4706 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin); in trace__fprintf_thread()
4707 if (trace->sched) in trace__fprintf_thread()
4708 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms); in trace__fprintf_thread()
4719 return ttrace ? ttrace->nr_events : 0; in thread__nr_events()
4728 unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread)); in trace_nr_events_cmp()
4729 unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread)); in trace_nr_events_cmp()
4732 return a_nr_events < b_nr_events ? -1 : 1; in trace_nr_events_cmp()
4735 return thread__tid(a->thread) < thread__tid(b->thread) in trace_nr_events_cmp()
4736 ? -1 in trace_nr_events_cmp()
4737 : (thread__tid(a->thread) > thread__tid(b->thread) ? 1 : 0); in trace_nr_events_cmp()
4745 if (machine__thread_list(trace->host, &threads) == 0) { in trace__fprintf_thread_summary()
4751 printed += trace__fprintf_thread(fp, pos->thread, trace); in trace__fprintf_thread_summary()
4760 struct trace *trace = opt->value; in trace__set_duration()
4762 trace->duration_filter = atof(str); in trace__set_duration()
4769 int ret = -1; in trace__set_filter_pids_from_option()
4771 struct trace *trace = opt->value; in trace__set_filter_pids_from_option()
4779 return -1; in trace__set_filter_pids_from_option()
4781 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1; in trace__set_filter_pids_from_option()
4782 trace->filter_pids.entries = calloc(i, sizeof(pid_t)); in trace__set_filter_pids_from_option()
4784 if (trace->filter_pids.entries == NULL) in trace__set_filter_pids_from_option()
4787 trace->filter_pids.entries[0] = getpid(); in trace__set_filter_pids_from_option()
4789 for (i = 1; i < trace->filter_pids.nr; ++i) in trace__set_filter_pids_from_option()
4790 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i; in trace__set_filter_pids_from_option()
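/*
 * Note: the entries array is sized for one extra slot and entry[0] is set to
 * getpid(), so perf trace always filters out its own pid in addition to the
 * CSV list passed to --filter-pids; the remaining slots are copied from the
 * parsed intlist shifted by one.
 */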
4810 trace->output = fopen(filename, "w"); in trace__open_output()
4812 return trace->output == NULL ? -errno : 0; in trace__open_output()
4818 int *trace_pgfaults = opt->value; in parse_pagefaults()
4827 return -1; in parse_pagefaults()
4837 if (evsel->handler == NULL) in evlist__set_default_evsel_handler()
4838 evsel->handler = handler; in evlist__set_default_evsel_handler()
4855 if (strcmp(tp_format->format.fields->name, "__syscall_nr") == 0 || in evsel__set_syscall_arg_fmt()
4856 strcmp(tp_format->format.fields->name, "nr") == 0) in evsel__set_syscall_arg_fmt()
4859 memcpy(fmt + skip, scfmt->arg, in evsel__set_syscall_arg_fmt()
4860 (tp_format->format.nr_fields - skip) * sizeof(*fmt)); in evsel__set_syscall_arg_fmt()
4873 if (evsel->priv) in evlist__set_syscall_tp_fields()
4880 if (strcmp(tp_format->system, "syscalls")) { in evlist__set_syscall_tp_fields()
4886 return -1; in evlist__set_syscall_tp_fields()
4888 if (!strncmp(tp_format->name, "sys_enter_", 10)) { in evlist__set_syscall_tp_fields()
4891 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64))) in evlist__set_syscall_tp_fields()
4892 return -1; in evlist__set_syscall_tp_fields()
4895 tp_format->name + sizeof("sys_enter_") - 1); in evlist__set_syscall_tp_fields()
4896 } else if (!strncmp(tp_format->name, "sys_exit_", 9)) { in evlist__set_syscall_tp_fields()
4899 if (__tp_field__init_uint(&sc->ret, sizeof(u64), in evlist__set_syscall_tp_fields()
4900 sc->id.offset + sizeof(u64), in evlist__set_syscall_tp_fields()
4901 evsel->needs_swap)) in evlist__set_syscall_tp_fields()
4902 return -1; in evlist__set_syscall_tp_fields()
4905 tp_format->name + sizeof("sys_exit_") - 1); in evlist__set_syscall_tp_fields()
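/*
 * Layout used above: for syscalls:sys_enter_FOO the argument block is taken
 * to start sizeof(u64) bytes past the syscall id field (sc->id.offset +
 * sizeof(u64)), and for syscalls:sys_exit_FOO a u64 return value is read
 * from that same offset; the "sys_enter_"/"sys_exit_" prefix is stripped
 * with sizeof(...) - 1 to recover the syscall name for the arg-format setup.
 */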
4913 * XXX: Hackish, just splitting the combined -e+--event (syscalls
4915 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
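 *
 * For example (usage, not code from this file): "perf trace -e
 * open*,close,sched:sched_switch" sends "open*" and "close" to the
 * strace-like syscall qualifier while "sched:sched_switch" goes to the
 * regular event parser, which is what the syscalltbl__id() /
 * syscalltbl__strglobmatch_first() checks below decide per token.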
4923 struct trace *trace = (struct trace *)opt->value; in trace__parse_events_option()
4926 int len = strlen(str) + 1, err = -1, list, idx; in trace__parse_events_option()
4932 return -1; in trace__parse_events_option()
4936 trace->not_ev_qualifier = true; in trace__parse_events_option()
4944 if (syscalltbl__id(trace->sctbl, s) >= 0 || in trace__parse_events_option()
4945 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) { in trace__parse_events_option()
4953 s = fmt->name; in trace__parse_events_option()
4981 trace->ev_qualifier = strlist__new(lists[1], &slist_config); in trace__parse_events_option()
4982 if (trace->ev_qualifier == NULL) { in trace__parse_events_option()
4983 fputs("Not enough memory to parse event qualifier", trace->output); in trace__parse_events_option()
4989 trace->trace_syscalls = true; in trace__parse_events_option()
4996 .evlistp = &trace->evlist, in trace__parse_events_option()
5015 struct trace *trace = opt->value; in trace__parse_cgroups()
5017 if (!list_empty(&trace->evlist->core.entries)) { in trace__parse_cgroups()
5019 .value = &trace->evlist, in trace__parse_cgroups()
5023 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); in trace__parse_cgroups()
5034 trace->perfconfig_events = strdup(value); in trace__config()
5035 if (trace->perfconfig_events == NULL) { in trace__config()
5037 return -1; in trace__config()
5040 trace->show_tstamp = perf_config_bool(var, value); in trace__config()
5042 trace->show_duration = perf_config_bool(var, value); in trace__config()
5044 trace->show_arg_names = perf_config_bool(var, value); in trace__config()
5045 if (!trace->show_arg_names) in trace__config()
5046 trace->show_zeros = true; in trace__config()
5049 if (!trace->show_arg_names && !new_show_zeros) { in trace__config()
5053 trace->show_zeros = new_show_zeros; in trace__config()
5055 trace->show_string_prefix = perf_config_bool(var, value); in trace__config()
5057 trace->opts.no_inherit = perf_config_bool(var, value); in trace__config()
5061 trace->args_alignment = args_alignment; in trace__config()
5064 trace->libtraceevent_print = true; in trace__config()
5066 trace->libtraceevent_print = false; in trace__config()
5076 strlist__delete(trace->ev_qualifier); in trace__exit()
5077 zfree(&trace->ev_qualifier_ids.entries); in trace__exit()
5078 if (trace->syscalls.table) { in trace__exit()
5079 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++) in trace__exit()
5080 syscall__exit(&trace->syscalls.table[i]); in trace__exit()
5081 zfree(&trace->syscalls.table); in trace__exit()
5083 syscalltbl__delete(trace->sctbl); in trace__exit()
5084 zfree(&trace->perfconfig_events); in trace__exit()
5090 int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/"); in bpf__setup_bpf_output()
5093 pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n"); in bpf__setup_bpf_output()
5103 "perf trace [<options>] -- <command> [<options>]", in cmd_trace()
5105 "perf trace record [<options>] -- <command> [<options>]", in cmd_trace()
5148 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids", in cmd_trace()
5150 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide, in cmd_trace()
5151 "system-wide collection from all CPUs"), in cmd_trace()
5154 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, in cmd_trace()
5156 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages", in cmd_trace()
5171 OPT_BOOLEAN('S', "with-summary", &trace.summary, in cmd_trace()
5173 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary, in cmd_trace()
5174 "Show errno stats per syscall, use with -s or -S"), in cmd_trace()
5179 OPT_CALLBACK(0, "call-graph", &trace.opts, in cmd_trace()
5184 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains, in cmd_trace()
5186 OPT_ULONG(0, "max-events", &trace.max_events, in cmd_trace()
5188 OPT_UINTEGER(0, "min-stack", &trace.min_stack, in cmd_trace()
5191 OPT_UINTEGER(0, "max-stack", &trace.max_stack, in cmd_trace()
5195 OPT_BOOLEAN(0, "sort-events", &trace.sort_events, in cmd_trace()
5197 OPT_BOOLEAN(0, "print-sample", &trace.print_sample, in cmd_trace()
5199 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, in cmd_trace()
5203 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay, in cmd_trace()
5206 OPT_BOOLEAN(0, "force-btf", &trace.force_btf, "Prefer btf_dump general pretty printer" in cmd_trace()
5215 int err = -1; in cmd_trace()
5233 err = -ENOMEM; in cmd_trace()
5241 * global setting. If it fails we'll get something in 'perf trace -v' in cmd_trace()
5255 * already figured out if -e syscall_name, if not but if --event in cmd_trace()
5257 * tracepoint events, not in the strace-like syscall-name-based mode. in cmd_trace()
5259 * This is important because we need to check if strace-like mode is in cmd_trace()
5265 trace.evlist->core.nr_entries == 0 /* Was --events used? */) { in cmd_trace()
5269 * Now that we have --verbose figured out, lets see if we need to parse in cmd_trace()
5271 * BPF program fails, then we'll be able to use --verbose to see what went in cmd_trace()
5288 "cgroup monitoring only available in system-wide mode"); in cmd_trace()
5310 bpf_object__for_each_program(prog, trace.skel->obj) { in cmd_trace()
5311 if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit) in cmd_trace()
5336 err = -1; in cmd_trace()
5364 if (trace.evlist->core.nr_entries > 0) { in cmd_trace()
5378 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace); in cmd_trace()
5379 ordered_events__set_copy_on_queue(&trace.oe.data, true); in cmd_trace()
5402 if (trace.syscalls.events.bpf_output->priv == NULL && in cmd_trace()
5413 augmented->handler = trace__sys_enter; in cmd_trace()
5423 evsel->handler = trace__sys_enter; in cmd_trace()
5438 * don't look after the sc->args_size but in cmd_trace()
5443 * s->args_size to the BPF augmenter (now in cmd_trace()
5452 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset; in cmd_trace()
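/*
 * Size note: (6 + 1) * sizeof(long) + sc->id.offset spans the syscall id
 * plus the six argument registers, counted from the start of the sample
 * (sc->id.offset covers the tracepoint header before the id), i.e.
 * everything the raw_syscalls-based augmentation presumably needs to copy
 * when it grabs the sys_enter payload wholesale, as the comment fragments
 * above about sc->args_size describe.
 */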
5454 evsel->handler = trace__sys_exit; in cmd_trace()
5460 return trace__record(&trace, argc-1, &argv[1]); in cmd_trace()
5462 /* Using just --errno-summary will trigger --summary */ in cmd_trace()