Lines Matching +full:sig +full:-dir +full:-cmd (tools/perf/builtin-record.c)
1 // SPDX-License-Identifier: GPL-2.0
3 * builtin-record.c
6 * (or a CPU, or a PID) into the perf.data output file - for
11 #include "util/build-id.h"
12 #include <subcmd/parse-options.h>
14 #include "util/parse-events.h"
37 #include "util/parse-branch-options.h"
38 #include "util/parse-regs-options.h"
41 #include "util/perf-hooks.h"
42 #include "util/cpu-set-sched.h"
43 #include "util/synthetic-events.h"
44 #include "util/time-utils.h"
46 #include "util/bpf-event.h"
53 #include "util/bpf-filter.h"
215 return rec->opts.threads_spec; in record__threads_enabled()
220 return rec->switch_output.signal && in switch_output_signal()
226 return rec->switch_output.size && in switch_output_size()
228 (rec->bytes_written >= rec->switch_output.size); in switch_output_size()
233 return rec->switch_output.time && in switch_output_time()
239 return rec->bytes_written + rec->thread_bytes_written; in record__bytes_written()
244 return rec->output_max_size && in record__output_max_size_exceeded()
245 (record__bytes_written(rec) >= rec->output_max_size); in record__output_max_size_exceeded()
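
(The size cap above is consumed on the write path in record__write(), elided from this listing. A sketch of that gating; the wrapper name is invented for illustration, and "done" is the global stop flag polled by the main loop further down.)

#include <inttypes.h>
#include <stdio.h>

/* Illustrative wrapper: once the cap is crossed, stop the session by
 * setting the global "done" flag, as record__write() does. */
static void record__check_output_cap(struct record *rec)
{
	if (record__output_max_size_exceeded(rec) && !done) {
		fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
				" stopping session ]\n",
				record__bytes_written(rec) >> 10);
		done = 1;
	}
}
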
251 struct perf_data_file *file = &rec->session->data->file; in record__write()
253 if (map && map->file) in record__write()
254 file = map->file; in record__write()
258 return -1; in record__write()
261 if (map && map->file) { in record__write()
262 thread->bytes_written += size; in record__write()
263 rec->thread_bytes_written += size; in record__write()
265 rec->bytes_written += size; in record__write()
292 cblock->aio_fildes = trace_fd; in record__aio_write()
293 cblock->aio_buf = buf; in record__aio_write()
294 cblock->aio_nbytes = size; in record__aio_write()
295 cblock->aio_offset = off; in record__aio_write()
296 cblock->aio_sigevent.sigev_notify = SIGEV_NONE; in record__aio_write()
303 cblock->aio_fildes = -1; in record__aio_write()
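
(record__aio_write() above queues the write with SIGEV_NONE, so completion is polled rather than signalled, and a failed block is marked free by resetting aio_fildes to -1. The same POSIX AIO pattern in standalone form; names are illustrative, not perf's.)

#include <aio.h>
#include <string.h>
#include <sys/types.h>

/* Queue one asynchronous write with no completion notification;
 * completion is polled later via aio_error()/aio_return(). */
static int queue_aio_write(struct aiocb *cblock, int fd, void *buf,
			   size_t size, off_t off)
{
	memset(cblock, 0, sizeof(*cblock));
	cblock->aio_fildes = fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	return aio_write(cblock); /* 0 if queued, -1 with errno set */
}
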
331 rem_size = cblock->aio_nbytes - written; in record__aio_complete()
334 cblock->aio_fildes = -1; in record__aio_complete()
336 * md->refcount is incremented in record__aio_pushfn() for in record__aio_complete()
340 perf_mmap__put(&md->core); in record__aio_complete()
348 rem_off = cblock->aio_offset + written; in record__aio_complete()
349 rem_buf = (void *)(cblock->aio_buf + written); in record__aio_complete()
350 record__aio_write(cblock, cblock->aio_fildes, in record__aio_complete()
360 struct aiocb **aiocb = md->aio.aiocb; in record__aio_sync()
361 struct aiocb *cblocks = md->aio.cblocks; in record__aio_sync()
367 for (i = 0; i < md->aio.nr_cblocks; ++i) { in record__aio_sync()
368 if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) { in record__aio_sync()
384 return -1; in record__aio_sync()
386 while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) { in record__aio_sync()
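
(record__aio_sync() reaps finished control blocks and then parks in aio_suspend() until at least one more completes. A standalone sketch of the reaping step; the function name is illustrative.)

#include <aio.h>
#include <errno.h>
#include <sys/types.h>

/* EINPROGRESS means the write is still in flight; otherwise aio_return()
 * yields the final byte count (or -1). The code above additionally
 * requeues the remainder of short writes. */
static ssize_t reap_aio_write(struct aiocb *cblock)
{
	int err = aio_error(cblock);

	if (err == EINPROGRESS)
		return -1; /* still pending, poll again later */

	return aio_return(cblock); /* bytes written, or -1 on error */
}
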
404 * map->core.base data pointed by buf is copied into free map->aio.data[] buffer in record__aio_pushfn()
409 * the kernel buffer earlier than other per-cpu kernel buffers are handled. in record__aio_pushfn()
413 * part of data from map->start till the upper bound and then the remainder in record__aio_pushfn()
417 if (record__comp_enabled(aio->rec)) { in record__aio_pushfn()
418 ssize_t compressed = zstd_compress(aio->rec->session, NULL, aio->data + aio->size, in record__aio_pushfn()
419 mmap__mmap_len(map) - aio->size, in record__aio_pushfn()
426 memcpy(aio->data + aio->size, buf, size); in record__aio_pushfn()
429 if (!aio->size) { in record__aio_pushfn()
431 * Increment map->refcount to guard map->aio.data[] buffer in record__aio_pushfn()
434 * map->aio.data[] buffer is complete. in record__aio_pushfn()
440 perf_mmap__get(&map->core); in record__aio_pushfn()
443 aio->size += size; in record__aio_pushfn()
451 int trace_fd = rec->session->data->file.fd; in record__aio_push()
455 * Call record__aio_sync() to wait till map->aio.data[] buffer in record__aio_push()
460 aio.data = map->aio.data[idx]; in record__aio_push()
462 if (ret != 0) /* ret > 0 - no data, ret < 0 - error */ in record__aio_push()
465 rec->samples++; in record__aio_push()
466 ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off); in record__aio_push()
469 rec->bytes_written += aio.size; in record__aio_push()
474 * Decrement map->refcount incremented in record__aio_pushfn() in record__aio_push()
476 * map->refcount is decremented in record__aio_complete() after in record__aio_push()
479 perf_mmap__put(&map->core); in record__aio_push()
498 struct evlist *evlist = rec->evlist; in record__aio_mmap_read_sync()
499 struct mmap *maps = evlist->mmap; in record__aio_mmap_read_sync()
504 for (i = 0; i < evlist->core.nr_mmaps; i++) { in record__aio_mmap_read_sync()
507 if (map->core.base) in record__aio_mmap_read_sync()
519 struct record_opts *opts = (struct record_opts *)opt->value; in record__aio_parse()
522 opts->nr_cblocks = 0; in record__aio_parse()
525 opts->nr_cblocks = strtol(str, NULL, 0); in record__aio_parse()
526 if (!opts->nr_cblocks) in record__aio_parse()
527 opts->nr_cblocks = nr_cblocks_default; in record__aio_parse()
538 return -1; in record__aio_push()
543 return -1; in record__aio_get_pos()
557 return rec->opts.nr_cblocks > 0; in record__aio_enabled()
566 struct record_opts *opts = (struct record_opts *)opt->value; in record__mmap_flush_parse()
579 opts->mmap_flush = parse_tag_value(str, tags); in record__mmap_flush_parse()
580 if (opts->mmap_flush == (int)-1) in record__mmap_flush_parse()
581 opts->mmap_flush = strtol(str, NULL, 0); in record__mmap_flush_parse()
584 if (!opts->mmap_flush) in record__mmap_flush_parse()
585 opts->mmap_flush = MMAP_FLUSH_DEFAULT; in record__mmap_flush_parse()
587 flush_max = evlist__mmap_size(opts->mmap_pages); in record__mmap_flush_parse()
589 if (opts->mmap_flush > flush_max) in record__mmap_flush_parse()
590 opts->mmap_flush = flush_max; in record__mmap_flush_parse()
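
(parse_tag_value() above resolves suffixed sizes such as "64K" or "1M" before the plain strtol() fallback. A hypothetical stand-in for that suffix handling, not perf's implementation.)

#include <stdlib.h>

/* "64" -> 64, "64K" -> 65536, "1M" -> 1048576; anything else is left to
 * the caller's fallback, mirroring the flow above. */
static long parse_size_suffix(const char *str)
{
	char *end;
	long val = strtol(str, &end, 0);

	switch (*end) {
	case 'K': case 'k':
		val <<= 10;
		break;
	case 'M': case 'm':
		val <<= 20;
		break;
	default:
		break;
	}
	return val;
}
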
600 struct record_opts *opts = opt->value; in record__parse_comp_level()
603 opts->comp_level = 0; in record__parse_comp_level()
606 opts->comp_level = strtol(str, NULL, 0); in record__parse_comp_level()
607 if (!opts->comp_level) in record__parse_comp_level()
608 opts->comp_level = comp_level_default; in record__parse_comp_level()
618 return rec->opts.comp_level > 0; in record__comp_enabled()
627 return record__write(rec, NULL, event, event->header.size); in process_synthesized_event()
650 ssize_t compressed = zstd_compress(rec->session, map, map->data, in record__pushfn()
657 bf = map->data; in record__pushfn()
660 thread->samples++; in record__pushfn()
664 static volatile sig_atomic_t signr = -1;
667 static volatile sig_atomic_t done_fd = -1;
670 static void sig_handler(int sig) in sig_handler() argument
672 if (sig == SIGCHLD) in sig_handler()
675 signr = sig; in sig_handler()
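
(done_fd is an eventfd that is added to the event loop's poll set later in this listing, via evlist__add_wakeup_eventfd() at line 2437; writing to it from a signal handler wakes a poll() that might otherwise sleep past the done flag. A standalone sketch of that pattern; names are illustrative.)

#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

static int wakeup_fd = -1;

/* write() is async-signal-safe, so a handler can nudge the poll loop. */
static void wakeup_poll_loop(void)
{
	uint64_t one = 1;
	ssize_t n = write(wakeup_fd, &one, sizeof(one));

	(void)n; /* nothing useful to do on failure inside a handler */
}

static int wakeup_init(void)
{
	wakeup_fd = eventfd(0, EFD_NONBLOCK);
	return wakeup_fd < 0 ? -1 : 0;
}
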
700 static void sigsegv_handler(int sig) in sigsegv_handler() argument
703 sighandler_dump_stack(sig); in sigsegv_handler()
708 if (signr == -1) in record__sig_exit()
723 struct perf_data *data = &rec->data; in record__process_auxtrace()
733 if (file_offset == -1) in record__process_auxtrace()
734 return -1; in record__process_auxtrace()
735 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index, in record__process_auxtrace()
744 padding = 8 - padding; in record__process_auxtrace()
746 record__write(rec, map, event, event->header.size); in record__process_auxtrace()
760 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool, in record__auxtrace_mmap_read()
766 rec->samples++; in record__auxtrace_mmap_read()
776 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool, in record__auxtrace_mmap_read_snapshot()
778 rec->opts.auxtrace_snapshot_size); in record__auxtrace_mmap_read_snapshot()
783 rec->samples++; in record__auxtrace_mmap_read_snapshot()
793 for (i = 0; i < rec->evlist->core.nr_mmaps; i++) { in record__auxtrace_read_snapshot_all()
794 struct mmap *map = &rec->evlist->mmap[i]; in record__auxtrace_read_snapshot_all()
796 if (!map->auxtrace_mmap.base) in record__auxtrace_read_snapshot_all()
800 rc = -1; in record__auxtrace_read_snapshot_all()
814 if (auxtrace_record__snapshot_finish(rec->itr, on_exit)) in record__read_auxtrace_snapshot()
827 auxtrace_record__snapshot_start(rec->itr)) in record__auxtrace_snapshot_exit()
828 return -1; in record__auxtrace_snapshot_exit()
832 return -1; in record__auxtrace_snapshot_exit()
841 if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts) in record__auxtrace_init()
844 return -EINVAL; in record__auxtrace_init()
847 if (!rec->itr) { in record__auxtrace_init()
848 rec->itr = auxtrace_record__init(rec->evlist, &err); in record__auxtrace_init()
853 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts, in record__auxtrace_init()
854 rec->opts.auxtrace_snapshot_opts); in record__auxtrace_init()
858 err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts, in record__auxtrace_init()
859 rec->opts.auxtrace_sample_opts); in record__auxtrace_init()
863 err = auxtrace_parse_aux_action(rec->evlist); in record__auxtrace_init()
867 return auxtrace_parse_filters(rec->evlist); in record__auxtrace_init()
910 if (evsel->core.attr.text_poke) in record__config_text_poke()
916 return -ENOMEM; in record__config_text_poke()
918 evsel->core.attr.text_poke = 1; in record__config_text_poke()
919 evsel->core.attr.ksymbol = 1; in record__config_text_poke()
920 evsel->immediate = true; in record__config_text_poke()
928 return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts); in record__config_off_cpu()
933 struct evlist *evlist = rec->evlist; in record__tracking_system_wide()
937 * If a non-dummy evsel exists, system_wide sideband is needed to in record__tracking_system_wide()
952 struct record_opts *opts = &rec->opts; in record__config_tracking_events()
953 struct evlist *evlist = rec->evlist; in record__config_tracking_events()
962 if (opts->target.initial_delay || target__has_cpu(&opts->target) || in record__config_tracking_events()
969 if (!!opts->target.cpu_list && record__tracking_system_wide(rec)) in record__config_tracking_events()
974 return -ENOMEM; in record__config_tracking_events()
980 if (opts->target.initial_delay && !evsel->immediate && in record__config_tracking_events()
981 !target__has_cpu(&opts->target)) in record__config_tracking_events()
982 evsel->core.attr.enable_on_exec = 1; in record__config_tracking_events()
984 evsel->immediate = 1; in record__config_tracking_events()
995 scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir); in record__kcore_readable()
1012 snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir); in record__kcore_copy()
1023 thread_data->pipes.msg[0] = -1; in record__thread_data_init_pipes()
1024 thread_data->pipes.msg[1] = -1; in record__thread_data_init_pipes()
1025 thread_data->pipes.ack[0] = -1; in record__thread_data_init_pipes()
1026 thread_data->pipes.ack[1] = -1; in record__thread_data_init_pipes()
1031 if (pipe(thread_data->pipes.msg)) in record__thread_data_open_pipes()
1032 return -EINVAL; in record__thread_data_open_pipes()
1034 if (pipe(thread_data->pipes.ack)) { in record__thread_data_open_pipes()
1035 close(thread_data->pipes.msg[0]); in record__thread_data_open_pipes()
1036 thread_data->pipes.msg[0] = -1; in record__thread_data_open_pipes()
1037 close(thread_data->pipes.msg[1]); in record__thread_data_open_pipes()
1038 thread_data->pipes.msg[1] = -1; in record__thread_data_open_pipes()
1039 return -EINVAL; in record__thread_data_open_pipes()
1043 thread_data->pipes.msg[0], thread_data->pipes.msg[1], in record__thread_data_open_pipes()
1044 thread_data->pipes.ack[0], thread_data->pipes.ack[1]); in record__thread_data_open_pipes()
1051 if (thread_data->pipes.msg[0] != -1) { in record__thread_data_close_pipes()
1052 close(thread_data->pipes.msg[0]); in record__thread_data_close_pipes()
1053 thread_data->pipes.msg[0] = -1; in record__thread_data_close_pipes()
1055 if (thread_data->pipes.msg[1] != -1) { in record__thread_data_close_pipes()
1056 close(thread_data->pipes.msg[1]); in record__thread_data_close_pipes()
1057 thread_data->pipes.msg[1] = -1; in record__thread_data_close_pipes()
1059 if (thread_data->pipes.ack[0] != -1) { in record__thread_data_close_pipes()
1060 close(thread_data->pipes.ack[0]); in record__thread_data_close_pipes()
1061 thread_data->pipes.ack[0] = -1; in record__thread_data_close_pipes()
1063 if (thread_data->pipes.ack[1] != -1) { in record__thread_data_close_pipes()
1064 close(thread_data->pipes.ack[1]); in record__thread_data_close_pipes()
1065 thread_data->pipes.ack[1] = -1; in record__thread_data_close_pipes()
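
(Together with the worker side further down, the ack writes at lines 1690 and 1734, the POLLHUP handling at line 1722, and record__terminate_thread() at lines 2244-2246, these helpers implement a two-pipe handshake: the parent holds msg[1] and ack[0], each worker holds msg[0] and ack[1]. A schematic sketch of the parent's stop sequence; names are illustrative.)

#include <sys/types.h>
#include <unistd.h>

/* Closing the command end raises POLLHUP on the worker's msg[0]; the
 * worker drains its maps and answers on ack[1] before exiting. */
static void parent_stop_worker(int msg_wr, int ack_rd)
{
	char ack;
	ssize_t n;

	close(msg_wr);				/* worker sees POLLHUP */
	n = read(ack_rd, &ack, sizeof(ack));	/* wait for the final ack */
	(void)n;
}
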
1071 return cpu_map__is_dummy(evlist->core.user_requested_cpus); in evlist__per_thread()
1076 int m, tm, nr_mmaps = evlist->core.nr_mmaps; in record__thread_data_init_maps()
1077 struct mmap *mmap = evlist->mmap; in record__thread_data_init_maps()
1078 struct mmap *overwrite_mmap = evlist->overwrite_mmap; in record__thread_data_init_maps()
1079 struct perf_cpu_map *cpus = evlist->core.all_cpus; in record__thread_data_init_maps()
1083 thread_data->nr_mmaps = nr_mmaps; in record__thread_data_init_maps()
1085 thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits, in record__thread_data_init_maps()
1086 thread_data->mask->maps.nbits); in record__thread_data_init_maps()
1088 thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *)); in record__thread_data_init_maps()
1089 if (!thread_data->maps) in record__thread_data_init_maps()
1090 return -ENOMEM; in record__thread_data_init_maps()
1093 thread_data->overwrite_maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *)); in record__thread_data_init_maps()
1094 if (!thread_data->overwrite_maps) { in record__thread_data_init_maps()
1095 zfree(&thread_data->maps); in record__thread_data_init_maps()
1096 return -ENOMEM; in record__thread_data_init_maps()
1100 thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps); in record__thread_data_init_maps()
1102 for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) { in record__thread_data_init_maps()
1104 test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) { in record__thread_data_init_maps()
1105 if (thread_data->maps) { in record__thread_data_init_maps()
1106 thread_data->maps[tm] = &mmap[m]; in record__thread_data_init_maps()
1107 pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n", in record__thread_data_init_maps()
1110 if (thread_data->overwrite_maps) { in record__thread_data_init_maps()
1111 thread_data->overwrite_maps[tm] = &overwrite_mmap[m]; in record__thread_data_init_maps()
1112 pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n", in record__thread_data_init_maps()
1127 fdarray__init(&thread_data->pollfd, 64); in record__thread_data_init_pollfd()
1129 for (tm = 0; tm < thread_data->nr_mmaps; tm++) { in record__thread_data_init_pollfd()
1130 map = thread_data->maps ? thread_data->maps[tm] : NULL; in record__thread_data_init_pollfd()
1131 overwrite_map = thread_data->overwrite_maps ? in record__thread_data_init_pollfd()
1132 thread_data->overwrite_maps[tm] : NULL; in record__thread_data_init_pollfd()
1134 for (f = 0; f < evlist->core.pollfd.nr; f++) { in record__thread_data_init_pollfd()
1135 void *ptr = evlist->core.pollfd.priv[f].ptr; in record__thread_data_init_pollfd()
1138 pos = fdarray__dup_entry_from(&thread_data->pollfd, f, in record__thread_data_init_pollfd()
1139 &evlist->core.pollfd); in record__thread_data_init_pollfd()
1142 pr_debug2("thread_data[%p]: pollfd[%d] <- event_fd=%d\n", in record__thread_data_init_pollfd()
1143 thread_data, pos, evlist->core.pollfd.entries[f].fd); in record__thread_data_init_pollfd()
1154 struct record_thread *thread_data = rec->thread_data; in record__free_thread_data()
1159 for (t = 0; t < rec->nr_threads; t++) { in record__free_thread_data()
1166 zfree(&rec->thread_data); in record__free_thread_data()
1173 size_t x = rec->index_map_cnt; in record__map_thread_evlist_pollfd_indexes()
1175 if (realloc_array_as_needed(rec->index_map, rec->index_map_sz, x, NULL)) in record__map_thread_evlist_pollfd_indexes()
1176 return -ENOMEM; in record__map_thread_evlist_pollfd_indexes()
1177 rec->index_map[x].evlist_pollfd_index = evlist_pollfd_index; in record__map_thread_evlist_pollfd_indexes()
1178 rec->index_map[x].thread_pollfd_index = thread_pollfd_index; in record__map_thread_evlist_pollfd_indexes()
1179 rec->index_map_cnt += 1; in record__map_thread_evlist_pollfd_indexes()
1187 struct pollfd *e_entries = evlist->core.pollfd.entries; in record__update_evlist_pollfd_from_thread()
1188 struct pollfd *t_entries = thread_data->pollfd.entries; in record__update_evlist_pollfd_from_thread()
1192 for (i = 0; i < rec->index_map_cnt; i++) { in record__update_evlist_pollfd_from_thread()
1193 int e_pos = rec->index_map[i].evlist_pollfd_index; in record__update_evlist_pollfd_from_thread()
1194 int t_pos = rec->index_map[i].thread_pollfd_index; in record__update_evlist_pollfd_from_thread()
1199 err = -EINVAL; in record__update_evlist_pollfd_from_thread()
1211 struct fdarray *fda = &evlist->core.pollfd; in record__dup_non_perf_events()
1214 for (i = 0; i < fda->nr; i++) { in record__dup_non_perf_events()
1215 if (!(fda->priv[i].flags & fdarray_flag__non_perf_event)) in record__dup_non_perf_events()
1217 ret = fdarray__dup_entry_from(&thread_data->pollfd, i, fda); in record__dup_non_perf_events()
1222 pr_debug2("thread_data[%p]: pollfd[%d] <- non_perf_event fd=%d\n", in record__dup_non_perf_events()
1223 thread_data, ret, fda->entries[i].fd); in record__dup_non_perf_events()
1238 rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data))); in record__alloc_thread_data()
1239 if (!rec->thread_data) { in record__alloc_thread_data()
1241 return -ENOMEM; in record__alloc_thread_data()
1243 thread_data = rec->thread_data; in record__alloc_thread_data()
1245 for (t = 0; t < rec->nr_threads; t++) in record__alloc_thread_data()
1248 for (t = 0; t < rec->nr_threads; t++) { in record__alloc_thread_data()
1250 thread_data[t].mask = &rec->thread_masks[t]; in record__alloc_thread_data()
1262 thread_data[t].tid = -1; in record__alloc_thread_data()
1275 pr_debug2("thread_data[%p]: pollfd[%d] <- ctl_fd=%d\n", in record__alloc_thread_data()
1285 thread_data[t].ctlfd_pos = -1; /* Not used */ in record__alloc_thread_data()
1301 struct record_opts *opts = &rec->opts; in record__mmap_evlist()
1302 bool auxtrace_overwrite = opts->auxtrace_snapshot_mode || in record__mmap_evlist()
1303 opts->auxtrace_sample_mode; in record__mmap_evlist()
1306 if (opts->affinity != PERF_AFFINITY_SYS) in record__mmap_evlist()
1309 if (evlist__mmap_ex(evlist, opts->mmap_pages, in record__mmap_evlist()
1310 opts->auxtrace_mmap_pages, in record__mmap_evlist()
1312 opts->nr_cblocks, opts->affinity, in record__mmap_evlist()
1313 opts->mmap_flush, opts->comp_level) < 0) { in record__mmap_evlist()
1318 "or try again with a smaller value of -m/--mmap_pages.\n" in record__mmap_evlist()
1320 opts->mmap_pages, opts->auxtrace_mmap_pages); in record__mmap_evlist()
1321 return -errno; in record__mmap_evlist()
1326 return -errno; in record__mmap_evlist()
1328 return -EINVAL; in record__mmap_evlist()
1332 if (evlist__initialize_ctlfd(evlist, opts->ctl_fd, opts->ctl_fd_ack)) in record__mmap_evlist()
1333 return -1; in record__mmap_evlist()
1340 ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps); in record__mmap_evlist()
1342 pr_err("Failed to create data directory: %s\n", strerror(-ret)); in record__mmap_evlist()
1345 for (i = 0; i < evlist->core.nr_mmaps; i++) { in record__mmap_evlist()
1346 if (evlist->mmap) in record__mmap_evlist()
1347 evlist->mmap[i].file = &rec->data.dir.files[i]; in record__mmap_evlist()
1348 if (evlist->overwrite_mmap) in record__mmap_evlist()
1349 evlist->overwrite_mmap[i].file = &rec->data.dir.files[i]; in record__mmap_evlist()
1358 return record__mmap_evlist(rec, rec->evlist); in record__mmap()
1365 struct evlist *evlist = rec->evlist; in record__open()
1366 struct perf_session *session = rec->session; in record__open()
1367 struct record_opts *opts = &rec->opts; in record__open()
1372 if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) { in record__open()
1373 if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) { in record__open()
1379 pos->core.leader != &pos->core && in record__open()
1380 pos->weak_group) { in record__open()
1384 rc = -errno; in record__open()
1385 evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg)); in record__open()
1390 pos->supported = true; in record__open()
1404 if (evlist__apply_filters(evlist, &pos, &opts->target)) { in record__open()
1406 pos->filter ?: "BPF", evsel__name(pos), errno, in record__open()
1408 rc = -1; in record__open()
1416 session->evlist = evlist; in record__open()
1424 if (rec->evlist->first_sample_time == 0) in set_timestamp_boundary()
1425 rec->evlist->first_sample_time = sample_time; in set_timestamp_boundary()
1428 rec->evlist->last_sample_time = sample_time; in set_timestamp_boundary()
1439 set_timestamp_boundary(rec, sample->time); in process_sample_event()
1441 if (rec->buildid_all) in process_sample_event()
1444 rec->samples++; in process_sample_event()
1450 struct perf_session *session = rec->session; in process_buildids()
1452 if (perf_data__size(&rec->data) == 0) in process_buildids()
1457 * dso->long_name to a real pathname it found. In this case in process_buildids()
1461 * rather than build-id path (in debug directory). in process_buildids()
1462 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551 in process_buildids()
1467 * If --buildid-all is given, it marks all DSO regardless of hits, in process_buildids()
1472 if (rec->buildid_all && !rec->timestamp_boundary) in process_buildids()
1473 rec->tool.sample = process_event_sample_stub; in process_buildids()
1494 " relocation symbol.\n", machine->pid); in perf_event__synthesize_guest_os()
1504 " relocation symbol.\n", machine->pid); in perf_event__synthesize_guest_os()
1519 if (rec->opts.affinity != PERF_AFFINITY_SYS && in record__adjust_affinity()
1520 !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits, in record__adjust_affinity()
1521 thread->mask->affinity.nbits)) { in record__adjust_affinity()
1522 bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits); in record__adjust_affinity()
1523 bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits, in record__adjust_affinity()
1524 map->affinity_mask.bits, thread->mask->affinity.nbits); in record__adjust_affinity()
1525 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity), in record__adjust_affinity()
1526 (cpu_set_t *)thread->mask->affinity.bits); in record__adjust_affinity()
1528 pr_debug("threads[%d]: running on cpu%d: ", thread->tid, sched_getcpu()); in record__adjust_affinity()
1529 mmap_cpu_mask__scnprintf(&thread->mask->affinity, "affinity"); in record__adjust_affinity()
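
(record__adjust_affinity() passes a bitmap-backed mask of arbitrary width; this is the minimal fixed-width form of the same syscall, pinning the calling thread, pid 0, to one CPU.)

#define _GNU_SOURCE
#include <sched.h>

static int pin_self_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	return sched_setaffinity(0, sizeof(set), &set);
}
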
1540 event->header.size += increment; in process_comp_header()
1544 event->header.type = PERF_RECORD_COMPRESSED; in process_comp_header()
1545 event->header.size = size; in process_comp_header()
1554 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1; in zstd_compress()
1555 struct zstd_data *zstd_data = &session->zstd_data; in zstd_compress()
1557 if (map && map->file) in zstd_compress()
1558 zstd_data = &map->zstd_data; in zstd_compress()
1565 if (map && map->file) { in zstd_compress()
1566 thread->bytes_transferred += src_size; in zstd_compress()
1567 thread->bytes_compressed += compressed; in zstd_compress()
1569 session->bytes_transferred += src_size; in zstd_compress()
1570 session->bytes_compressed += compressed; in zstd_compress()
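
(perf's zstd_compress() wraps a streaming zstd context and splits oversized payloads into PERF_RECORD_COMPRESSED records via process_comp_header() above; the transferred/compressed counters it bumps feed the ratio printed at exit. For the size relationship alone, libzstd's one-shot API is enough. A sketch, assuming libzstd is available.)

#include <zstd.h>

/* Returns the compressed size, or 0 on error. */
static size_t compress_once(void *dst, size_t dst_cap,
			    const void *src, size_t src_size, int level)
{
	size_t n = ZSTD_compress(dst, dst_cap, src, src_size, level);

	return ZSTD_isError(n) ? 0 : n;
}
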
1579 u64 bytes_written = rec->bytes_written; in record__mmap_read_evlist()
1584 int trace_fd = rec->data.file.fd; in record__mmap_read_evlist()
1590 nr_mmaps = thread->nr_mmaps; in record__mmap_read_evlist()
1591 maps = overwrite ? thread->overwrite_maps : thread->maps; in record__mmap_read_evlist()
1596 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING) in record__mmap_read_evlist()
1606 if (map->core.base) { in record__mmap_read_evlist()
1609 flush = map->core.flush; in record__mmap_read_evlist()
1610 map->core.flush = 1; in record__mmap_read_evlist()
1615 map->core.flush = flush; in record__mmap_read_evlist()
1616 rc = -1; in record__mmap_read_evlist()
1623 map->core.flush = flush; in record__mmap_read_evlist()
1624 rc = -1; in record__mmap_read_evlist()
1629 map->core.flush = flush; in record__mmap_read_evlist()
1632 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode && in record__mmap_read_evlist()
1633 !rec->opts.auxtrace_sample_mode && in record__mmap_read_evlist()
1635 rc = -1; in record__mmap_read_evlist()
1648 * because per-cpu maps and files have data in record__mmap_read_evlist()
1651 if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written) in record__mmap_read_evlist()
1664 err = record__mmap_read_evlist(rec, rec->evlist, false, synch); in record__mmap_read_all()
1668 return record__mmap_read_evlist(rec, rec->evlist, true, synch); in record__mmap_read_all()
1674 struct perf_mmap *map = fda->priv[fd].ptr; in record__thread_munmap_filtered()
1688 thread->tid = gettid(); in record__thread()
1690 err = write(thread->pipes.ack[1], &msg, sizeof(msg)); in record__thread()
1691 if (err == -1) in record__thread()
1693 thread->tid, strerror(errno)); in record__thread()
1695 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu()); in record__thread()
1697 pollfd = &thread->pollfd; in record__thread()
1698 ctlfd_pos = thread->ctlfd_pos; in record__thread()
1701 unsigned long long hits = thread->samples; in record__thread()
1703 if (record__mmap_read_all(thread->rec, false) < 0 || terminate) in record__thread()
1706 if (hits == thread->samples) { in record__thread()
1708 err = fdarray__poll(pollfd, -1); in record__thread()
1715 thread->waking++; in record__thread()
1722 if (pollfd->entries[ctlfd_pos].revents & POLLHUP) { in record__thread()
1724 close(thread->pipes.msg[0]); in record__thread()
1725 thread->pipes.msg[0] = -1; in record__thread()
1726 pollfd->entries[ctlfd_pos].fd = -1; in record__thread()
1727 pollfd->entries[ctlfd_pos].events = 0; in record__thread()
1730 pollfd->entries[ctlfd_pos].revents = 0; in record__thread()
1732 record__mmap_read_all(thread->rec, true); in record__thread()
1734 err = write(thread->pipes.ack[1], &msg, sizeof(msg)); in record__thread()
1735 if (err == -1) in record__thread()
1737 thread->tid, strerror(errno)); in record__thread()
1744 struct perf_session *session = rec->session; in record__init_features()
1748 perf_header__set_feat(&session->header, feat); in record__init_features()
1750 if (rec->no_buildid) in record__init_features()
1751 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); in record__init_features()
1753 if (!have_tracepoints(&rec->evlist->core.entries)) in record__init_features()
1754 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); in record__init_features()
1756 if (!rec->opts.branch_stack) in record__init_features()
1757 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); in record__init_features()
1759 if (!rec->opts.full_auxtrace) in record__init_features()
1760 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); in record__init_features()
1762 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns)) in record__init_features()
1763 perf_header__clear_feat(&session->header, HEADER_CLOCKID); in record__init_features()
1765 if (!rec->opts.use_clockid) in record__init_features()
1766 perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA); in record__init_features()
1769 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); in record__init_features()
1772 perf_header__clear_feat(&session->header, HEADER_COMPRESSED); in record__init_features()
1774 perf_header__clear_feat(&session->header, HEADER_STAT); in record__init_features()
1781 struct perf_data *data = &rec->data; in record__finish_output()
1784 if (data->is_pipe) { in record__finish_output()
1786 data->file.size = rec->bytes_written; in record__finish_output()
1790 rec->session->header.data_size += rec->bytes_written; in record__finish_output()
1791 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR); in record__finish_output()
1793 for (i = 0; i < data->dir.nr; i++) in record__finish_output()
1794 data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR); in record__finish_output()
1797 if (!rec->no_buildid) { in record__finish_output()
1800 if (rec->buildid_all) in record__finish_output()
1801 perf_session__dsos_hit_all(rec->session); in record__finish_output()
1803 perf_session__write_header(rec->session, rec->evlist, fd, true); in record__finish_output()
1812 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP; in record__synthesize_workload()
1814 if (rec->opts.tail_synthesize != tail) in record__synthesize_workload()
1817 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid); in record__synthesize_workload()
1819 return -1; in record__synthesize_workload()
1821 err = perf_event__synthesize_thread_map(&rec->tool, thread_map, in record__synthesize_workload()
1823 &rec->session->machines.host, in record__synthesize_workload()
1825 rec->opts.sample_address); in record__synthesize_workload()
1832 if (rec->opts.tail_synthesize != tail) in write_finished_init()
1843 struct perf_data *data = &rec->data; in record__switch_output()
1855 if (target__none(&rec->opts.target)) in record__switch_output()
1858 rec->samples = 0; in record__switch_output()
1863 return -EINVAL; in record__switch_output()
1867 rec->session->header.data_offset, in record__switch_output()
1870 rec->bytes_written = 0; in record__switch_output()
1871 rec->session->header.data_size = 0; in record__switch_output()
1876 data->path, timestamp); in record__switch_output()
1879 if (rec->switch_output.num_files) { in record__switch_output()
1880 int n = rec->switch_output.cur_file + 1; in record__switch_output()
1882 if (n >= rec->switch_output.num_files) in record__switch_output()
1884 rec->switch_output.cur_file = n; in record__switch_output()
1885 if (rec->switch_output.filenames[n]) { in record__switch_output()
1886 remove(rec->switch_output.filenames[n]); in record__switch_output()
1887 zfree(&rec->switch_output.filenames[n]); in record__switch_output()
1889 rec->switch_output.filenames[n] = new_filename; in record__switch_output()
1899 * In 'perf record --switch-output' without -a, in record__switch_output()
1907 if (target__none(&rec->opts.target)) in record__switch_output()
1923 lost->lost = lost_count; in __record__save_lost_samples()
1924 if (evsel->core.ids) { in __record__save_lost_samples()
1925 sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx); in __record__save_lost_samples()
1926 sample.id = sid->id; in __record__save_lost_samples()
1930 evsel->core.attr.sample_type, &sample); in __record__save_lost_samples()
1931 lost->header.size = sizeof(*lost) + id_hdr_size; in __record__save_lost_samples()
1932 lost->header.misc = misc_flag; in __record__save_lost_samples()
1933 record__write(rec, NULL, lost, lost->header.size); in __record__save_lost_samples()
1938 struct perf_session *session = rec->session; in record__read_lost_samples()
1943 if (session->evlist == NULL) in record__read_lost_samples()
1946 evlist__for_each_entry(session->evlist, evsel) { in record__read_lost_samples()
1947 struct xyarray *xy = evsel->core.sample_id; in record__read_lost_samples()
1950 if (xy == NULL || evsel->core.fd == NULL) in record__read_lost_samples()
1952 if (xyarray__max_x(evsel->core.fd) != xyarray__max_x(xy) || in record__read_lost_samples()
1953 xyarray__max_y(evsel->core.fd) != xyarray__max_y(xy)) { in record__read_lost_samples()
1962 if (perf_evsel__read(&evsel->core, x, y, &count) < 0) { in record__read_lost_samples()
1997 workload_exec_errno = info->si_value.sival_int; in workload_exec_failed_signal()
2002 static void snapshot_sig_handler(int sig);
2003 static void alarm_sig_handler(int sig);
2008 if (evlist->mmap && evlist->mmap[0].core.base) in evlist__pick_pc()
2009 return evlist->mmap[0].core.base; in evlist__pick_pc()
2010 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base) in evlist__pick_pc()
2011 return evlist->overwrite_mmap[0].core.base; in evlist__pick_pc()
2018 const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist); in record__pick_pc()
2026 struct perf_session *session = rec->session; in record__synthesize()
2027 struct machine *machine = &session->machines.host; in record__synthesize()
2028 struct perf_data *data = &rec->data; in record__synthesize()
2029 struct record_opts *opts = &rec->opts; in record__synthesize()
2030 struct perf_tool *tool = &rec->tool; in record__synthesize()
2034 if (rec->opts.tail_synthesize != tail) in record__synthesize()
2037 if (data->is_pipe) { in record__synthesize()
2043 rec->bytes_written += err; in record__synthesize()
2054 session->evlist, machine); in record__synthesize()
2058 if (rec->opts.full_auxtrace) { in record__synthesize()
2059 err = perf_event__synthesize_auxtrace_info(rec->itr, tool, in record__synthesize()
2065 if (!evlist__exclude_kernel(rec->evlist)) { in record__synthesize()
2080 machines__process_guests(&session->machines, in record__synthesize()
2084 err = perf_event__synthesize_extra_attr(&rec->tool, in record__synthesize()
2085 rec->evlist, in record__synthesize()
2087 data->is_pipe); in record__synthesize()
2091 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads, in record__synthesize()
2099 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus, in record__synthesize()
2113 if (rec->opts.synth & PERF_SYNTH_CGROUP) { in record__synthesize()
2122 if (rec->opts.nr_threads_synthesize > 1) { in record__synthesize()
2128 if (rec->opts.synth & PERF_SYNTH_TASK) { in record__synthesize()
2129 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP; in record__synthesize()
2131 err = __machine__synthesize_threads(machine, tool, &opts->target, in record__synthesize()
2132 rec->evlist->core.threads, in record__synthesize()
2133 f, needs_mmap, opts->sample_address, in record__synthesize()
2134 rec->opts.nr_threads_synthesize); in record__synthesize()
2137 if (rec->opts.nr_threads_synthesize > 1) { in record__synthesize()
2149 pthread_kill(rec->thread_id, SIGUSR2); in record__process_signal_event()
2155 struct record_opts *opts = &rec->opts; in record__setup_sb_evlist()
2157 if (rec->sb_evlist != NULL) { in record__setup_sb_evlist()
2159 * We get here if --switch-output-event populated the in record__setup_sb_evlist()
2163 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec); in record__setup_sb_evlist()
2164 rec->thread_id = pthread_self(); in record__setup_sb_evlist()
2167 if (!opts->no_bpf_event) { in record__setup_sb_evlist()
2168 if (rec->sb_evlist == NULL) { in record__setup_sb_evlist()
2169 rec->sb_evlist = evlist__new(); in record__setup_sb_evlist()
2171 if (rec->sb_evlist == NULL) { in record__setup_sb_evlist()
2173 return -1; in record__setup_sb_evlist()
2177 if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) { in record__setup_sb_evlist()
2179 return -1; in record__setup_sb_evlist()
2183 if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) { in record__setup_sb_evlist()
2185 opts->no_bpf_event = true; in record__setup_sb_evlist()
2193 struct perf_session *session = rec->session; in record__init_clock()
2198 if (!rec->opts.use_clockid) in record__init_clock()
2201 if (rec->opts.use_clockid && rec->opts.clockid_res_ns) in record__init_clock()
2202 session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns; in record__init_clock()
2204 session->header.env.clock.clockid = rec->opts.clockid; in record__init_clock()
2208 return -1; in record__init_clock()
2211 if (clock_gettime(rec->opts.clockid, &ref_clockid)) { in record__init_clock()
2213 return -1; in record__init_clock()
2219 session->header.env.clock.tod_ns = ref; in record__init_clock()
2224 session->header.env.clock.clockid_ns = ref; in record__init_clock()
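
(record__init_clock() stores two reference timestamps, tod_ns from the wall clock and clockid_ns from the user-chosen session clock, both folded to nanoseconds. A standalone sketch of that fold; the name is illustrative.)

#include <stdint.h>
#include <time.h>

static int read_clock_ns(clockid_t id, uint64_t *ns)
{
	struct timespec ts;

	if (clock_gettime(id, &ts))
		return -1;

	*ns = (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
	return 0;
}
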
2233 if (auxtrace_record__snapshot_start(rec->itr)) in hit_auxtrace_snapshot_trigger()
2242 pid_t tid = thread_data->tid; in record__terminate_thread()
2244 close(thread_data->pipes.msg[1]); in record__terminate_thread()
2245 thread_data->pipes.msg[1] = -1; in record__terminate_thread()
2246 err = read(thread_data->pipes.ack[0], &ack, sizeof(ack)); in record__terminate_thread()
2251 thread->tid, tid); in record__terminate_thread()
2258 int t, tt, err, ret = 0, nr_threads = rec->nr_threads; in record__start_threads()
2259 struct record_thread *thread_data = rec->thread_data; in record__start_threads()
2272 return -1; in record__start_threads()
2283 MMAP_CPU_MASK_BYTES(&(thread_data[t].mask->affinity)), in record__start_threads()
2284 (cpu_set_t *)(thread_data[t].mask->affinity.bits)); in record__start_threads()
2290 ret = -1; in record__start_threads()
2296 pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid, in record__start_threads()
2300 thread->tid, rec->thread_data[t].tid); in record__start_threads()
2303 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity), in record__start_threads()
2304 (cpu_set_t *)thread->mask->affinity.bits); in record__start_threads()
2306 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu()); in record__start_threads()
2313 ret = -1; in record__start_threads()
2322 struct record_thread *thread_data = rec->thread_data; in record__stop_threads()
2324 for (t = 1; t < rec->nr_threads; t++) in record__stop_threads()
2327 for (t = 0; t < rec->nr_threads; t++) { in record__stop_threads()
2328 rec->samples += thread_data[t].samples; in record__stop_threads()
2331 rec->session->bytes_transferred += thread_data[t].bytes_transferred; in record__stop_threads()
2332 rec->session->bytes_compressed += thread_data[t].bytes_compressed; in record__stop_threads()
2349 struct record_thread *thread_data = rec->thread_data; in record__waking()
2351 for (t = 0; t < rec->nr_threads; t++) in record__waking()
2362 struct perf_tool *tool = &rec->tool; in __cmd_record()
2363 struct record_opts *opts = &rec->opts; in __cmd_record()
2364 struct perf_data *data = &rec->data; in __cmd_record()
2369 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED; in __cmd_record() local
2377 if (rec->opts.record_cgroup) { in __cmd_record()
2380 return -1; in __cmd_record()
2384 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) { in __cmd_record()
2386 if (rec->opts.auxtrace_snapshot_mode) in __cmd_record()
2388 if (rec->switch_output.enabled) in __cmd_record()
2395 tool->sample = process_sample_event; in __cmd_record()
2396 tool->fork = perf_event__process_fork; in __cmd_record()
2397 tool->exit = perf_event__process_exit; in __cmd_record()
2398 tool->comm = perf_event__process_comm; in __cmd_record()
2399 tool->namespaces = perf_event__process_namespaces; in __cmd_record()
2400 tool->mmap = build_id__process_mmap; in __cmd_record()
2401 tool->mmap2 = build_id__process_mmap2; in __cmd_record()
2402 tool->itrace_start = process_timestamp_boundary; in __cmd_record()
2403 tool->aux = process_timestamp_boundary; in __cmd_record()
2404 tool->namespace_events = rec->opts.record_namespaces; in __cmd_record()
2405 tool->cgroup_events = rec->opts.record_cgroup; in __cmd_record()
2413 if (perf_data__is_pipe(&rec->data)) { in __cmd_record()
2415 return -1; in __cmd_record()
2417 if (rec->opts.full_auxtrace) { in __cmd_record()
2419 return -1; in __cmd_record()
2424 rec->session = session; in __cmd_record()
2426 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) { in __cmd_record()
2428 return -1; in __cmd_record()
2434 status = -1; in __cmd_record()
2437 err = evlist__add_wakeup_eventfd(rec->evlist, done_fd); in __cmd_record()
2445 session->header.env.comp_type = PERF_COMP_ZSTD; in __cmd_record()
2446 session->header.env.comp_level = rec->opts.comp_level; in __cmd_record()
2448 if (rec->opts.kcore && in __cmd_record()
2449 !record__kcore_readable(&session->machines.host)) { in __cmd_record()
2451 return -1; in __cmd_record()
2455 return -1; in __cmd_record()
2460 err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe, in __cmd_record()
2475 if (data->is_pipe && rec->evlist->core.nr_entries == 1) in __cmd_record()
2476 rec->opts.sample_id = true; in __cmd_record()
2478 if (rec->timestamp_filename && perf_data__is_pipe(data)) { in __cmd_record()
2479 rec->timestamp_filename = false; in __cmd_record()
2480 pr_warning("WARNING: --timestamp-filename option is not available in pipe mode.\n"); in __cmd_record()
2483 evlist__uniquify_name(rec->evlist); in __cmd_record()
2485 evlist__config(rec->evlist, opts, &callchain_param); in __cmd_record()
2490 err = -1; in __cmd_record()
2495 session->header.env.comp_mmap_len = session->evlist->core.mmap_len; in __cmd_record()
2497 if (rec->opts.kcore) { in __cmd_record()
2498 err = record__kcore_copy(&session->machines.host, data); in __cmd_record()
2509 if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) { in __cmd_record()
2511 rec->tool.ordered_events = false; in __cmd_record()
2514 if (evlist__nr_groups(rec->evlist) == 0) in __cmd_record()
2515 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); in __cmd_record()
2517 if (data->is_pipe) { in __cmd_record()
2522 err = perf_session__write_header(session, rec->evlist, fd, false); in __cmd_record()
2527 err = -1; in __cmd_record()
2528 if (!rec->no_buildid in __cmd_record()
2529 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) { in __cmd_record()
2531 "Use --no-buildid to profile anyway.\n"); in __cmd_record()
2543 if (rec->realtime_prio) { in __cmd_record()
2546 param.sched_priority = rec->realtime_prio; in __cmd_record()
2549 err = -1; in __cmd_record()
2562 if (!target__none(&opts->target) && !opts->target.initial_delay) in __cmd_record()
2563 evlist__enable(rec->evlist); in __cmd_record()
2569 struct machine *machine = &session->machines.host; in __cmd_record()
2573 event = malloc(sizeof(event->comm) + machine->id_hdr_size); in __cmd_record()
2575 err = -ENOMEM; in __cmd_record()
2586 rec->evlist->workload.pid, in __cmd_record()
2591 if (tgid == -1) in __cmd_record()
2594 event = malloc(sizeof(event->namespaces) + in __cmd_record()
2596 machine->id_hdr_size); in __cmd_record()
2598 err = -ENOMEM; in __cmd_record()
2606 rec->evlist->workload.pid, in __cmd_record()
2611 evlist__start_workload(rec->evlist); in __cmd_record()
2614 if (opts->target.initial_delay) { in __cmd_record()
2616 if (opts->target.initial_delay > 0) { in __cmd_record()
2617 usleep(opts->target.initial_delay * USEC_PER_MSEC); in __cmd_record()
2618 evlist__enable(rec->evlist); in __cmd_record()
2623 err = event_enable_timer__start(rec->evlist->eet); in __cmd_record()
2644 unsigned long long hits = thread->samples; in __cmd_record()
2647 * rec->evlist->bkw_mmap_state may be in __cmd_record()
2649 * hits != rec->samples in previous round. in __cmd_record()
2655 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING); in __cmd_record()
2660 err = -1; in __cmd_record()
2670 err = -1; in __cmd_record()
2685 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING) in __cmd_record()
2694 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING); in __cmd_record()
2699 thread->waking = 0; in __cmd_record()
2708 /* re-arm the alarm */ in __cmd_record()
2709 if (rec->switch_output.time) in __cmd_record()
2710 alarm(rec->switch_output.time); in __cmd_record()
2713 if (hits == thread->samples) { in __cmd_record()
2716 err = fdarray__poll(&thread->pollfd, -1); in __cmd_record()
2723 thread->waking++; in __cmd_record()
2725 if (fdarray__filter(&thread->pollfd, POLLERR | POLLHUP, in __cmd_record()
2729 err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread); in __cmd_record()
2734 if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) { in __cmd_record()
2735 switch (cmd) { in __cmd_record()
2738 evlist__ctlfd_ack(rec->evlist); in __cmd_record()
2754 err = event_enable_timer__process(rec->evlist->eet); in __cmd_record()
2767 if (done && !disabled && !target__none(&opts->target)) { in __cmd_record()
2769 evlist__disable(rec->evlist); in __cmd_record()
2777 if (opts->auxtrace_snapshot_on_exit) in __cmd_record()
2784 evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels); in __cmd_record()
2788 err = -1; in __cmd_record()
2798 if (target__none(&rec->opts.target)) in __cmd_record()
2806 evlist__finalize_ctlfd(rec->evlist); in __cmd_record()
2809 if (rec->session->bytes_transferred && rec->session->bytes_compressed) { in __cmd_record()
2810 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed; in __cmd_record()
2811 session->header.env.comp_ratio = ratio + 0.5; in __cmd_record()
2818 kill(rec->evlist->workload.pid, SIGTERM); in __cmd_record()
2831 if (rec->off_cpu) in __cmd_record()
2832 rec->bytes_written += off_cpu_write(rec->session); in __cmd_record()
2837 rec->samples = 0; in __cmd_record()
2840 if (!rec->timestamp_filename) { in __cmd_record()
2855 const char *postfix = rec->timestamp_filename ? in __cmd_record()
2858 if (rec->samples && !rec->opts.full_auxtrace) in __cmd_record()
2860 " (%" PRIu64 " samples)", rec->samples); in __cmd_record()
2866 data->path, postfix, samples); in __cmd_record()
2869 rec->session->bytes_transferred / 1024.0 / 1024.0, in __cmd_record()
2879 done_fd = -1; in __cmd_record()
2884 zstd_fini(&session->zstd_data); in __cmd_record()
2885 if (!opts->no_bpf_event) in __cmd_record()
2886 evlist__stop_sb_thread(rec->sb_evlist); in __cmd_record()
2896 pr_debug("callchain: type %s\n", str[callchain->record_mode]); in callchain_debug()
2898 if (callchain->record_mode == CALLCHAIN_DWARF) in callchain_debug()
2900 callchain->dump_size); in callchain_debug()
2908 callchain->enabled = !unset; in record_opts__parse_callchain()
2910 /* --no-call-graph */ in record_opts__parse_callchain()
2912 callchain->record_mode = CALLCHAIN_NONE; in record_opts__parse_callchain()
2920 if (callchain->record_mode == CALLCHAIN_DWARF) in record_opts__parse_callchain()
2921 record->sample_address = true; in record_opts__parse_callchain()
2932 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset); in record_parse_callchain_opt()
2939 struct callchain_param *callchain = opt->value; in record_callchain_opt()
2941 callchain->enabled = true; in record_callchain_opt()
2943 if (callchain->record_mode == CALLCHAIN_NONE) in record_callchain_opt()
2944 callchain->record_mode = CALLCHAIN_FP; in record_callchain_opt()
2954 if (!strcmp(var, "record.build-id")) { in perf_record_config()
2956 rec->no_buildid_cache = false; in perf_record_config()
2957 else if (!strcmp(value, "no-cache")) in perf_record_config()
2958 rec->no_buildid_cache = true; in perf_record_config()
2960 rec->no_buildid = true; in perf_record_config()
2962 rec->buildid_mmap = true; in perf_record_config()
2964 return -1; in perf_record_config()
2967 if (!strcmp(var, "record.call-graph")) { in perf_record_config()
2968 var = "call-graph.record-mode"; in perf_record_config()
2973 rec->opts.nr_cblocks = strtol(value, NULL, 0); in perf_record_config()
2974 if (!rec->opts.nr_cblocks) in perf_record_config()
2975 rec->opts.nr_cblocks = nr_cblocks_default; in perf_record_config()
2979 rec->debuginfod.urls = strdup(value); in perf_record_config()
2980 if (!rec->debuginfod.urls) in perf_record_config()
2981 return -ENOMEM; in perf_record_config()
2982 rec->debuginfod.set = true; in perf_record_config()
2990 struct record *rec = (struct record *)opt->value; in record__parse_event_enable_time()
2992 return evlist__parse_event_enable_time(rec->evlist, &rec->opts, str, unset); in record__parse_event_enable_time()
2997 struct record_opts *opts = (struct record_opts *)opt->value; in record__parse_affinity()
3003 opts->affinity = PERF_AFFINITY_NODE; in record__parse_affinity()
3005 opts->affinity = PERF_AFFINITY_CPU; in record__parse_affinity()
3012 mask->nbits = nr_bits; in record__mmap_cpu_mask_alloc()
3013 mask->bits = bitmap_zalloc(mask->nbits); in record__mmap_cpu_mask_alloc()
3014 if (!mask->bits) in record__mmap_cpu_mask_alloc()
3015 return -ENOMEM; in record__mmap_cpu_mask_alloc()
3022 bitmap_free(mask->bits); in record__mmap_cpu_mask_free()
3023 mask->nbits = 0; in record__mmap_cpu_mask_free()
3030 ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits); in record__thread_mask_alloc()
3032 mask->affinity.bits = NULL; in record__thread_mask_alloc()
3036 ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits); in record__thread_mask_alloc()
3038 record__mmap_cpu_mask_free(&mask->maps); in record__thread_mask_alloc()
3039 mask->maps.bits = NULL; in record__thread_mask_alloc()
3047 record__mmap_cpu_mask_free(&mask->maps); in record__thread_mask_free()
3048 record__mmap_cpu_mask_free(&mask->affinity); in record__thread_mask_free()
3054 struct record_opts *opts = opt->value; in record__parse_threads()
3057 opts->threads_spec = THREAD_SPEC__CPU; in record__parse_threads()
3061 opts->threads_user_spec = strdup(str); in record__parse_threads()
3062 if (!opts->threads_user_spec) in record__parse_threads()
3063 return -ENOMEM; in record__parse_threads()
3064 opts->threads_spec = THREAD_SPEC__USER; in record__parse_threads()
3068 opts->threads_spec = s; in record__parse_threads()
3074 if (opts->threads_spec == THREAD_SPEC__USER) in record__parse_threads()
3075 pr_debug("threads_spec: %s\n", opts->threads_user_spec); in record__parse_threads()
3077 pr_debug("threads_spec: %s\n", thread_spec_tags[opts->threads_spec]); in record__parse_threads()
3085 unsigned long *s = (unsigned long *)opt->value; in parse_output_max_size()
3101 if (val != (unsigned long) -1) { in parse_output_max_size()
3106 return -1; in parse_output_max_size()
3113 struct record_opts *opts = opt->value; in record__parse_mmap_pages()
3119 return -EINVAL; in record__parse_mmap_pages()
3123 return -ENOMEM; in record__parse_mmap_pages()
3133 opts->mmap_pages = mmap_pages; in record__parse_mmap_pages()
3145 opts->auxtrace_mmap_pages = mmap_pages; in record__parse_mmap_pages()
3160 struct record_opts *opts = opt->value; in parse_control_option()
3162 return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close); in parse_control_option()
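
(evlist__parse_control() accepts the fd:ctl-fd[,ack-fd] and fifo:ctl-fifo[,ack-fifo] forms documented in the --control help text below. A hypothetical reduction handling only the fd: form, not perf's implementation.)

#include <stdio.h>

static int parse_ctl_fds(const char *str, int *ctl_fd, int *ack_fd)
{
	*ack_fd = -1; /* ack descriptor is optional */

	if (sscanf(str, "fd:%d,%d", ctl_fd, ack_fd) >= 1)
		return 0;

	return -1;
}
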
3167 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages); in switch_output_size_warn()
3168 struct switch_output *s = &rec->switch_output; in switch_output_size_warn()
3172 if (s->size < wakeup_size) { in switch_output_size_warn()
3176 pr_warning("WARNING: switch-output data size lower than " in switch_output_size_warn()
3184 struct switch_output *s = &rec->switch_output; in switch_output_setup()
3202 * If we're using --switch-output-events, then we imply in switch_output_setup()
3203 * --switch-output=signal, as we'll send a SIGUSR2 from the side band in switch_output_setup()
3206 if (rec->switch_output_event_set) { in switch_output_setup()
3208 pr_warning("WARNING: --switch-output-event option is not available in parallel streaming mode.\n"); in switch_output_setup()
3214 if (!s->set) in switch_output_setup()
3218 pr_warning("WARNING: --switch-output option is not available in parallel streaming mode.\n"); in switch_output_setup()
3222 if (!strcmp(s->str, "signal")) { in switch_output_setup()
3224 s->signal = true; in switch_output_setup()
3225 pr_debug("switch-output with SIGUSR2 signal\n"); in switch_output_setup()
3229 val = parse_tag_value(s->str, tags_size); in switch_output_setup()
3230 if (val != (unsigned long) -1) { in switch_output_setup()
3231 s->size = val; in switch_output_setup()
3232 pr_debug("switch-output with %s size threshold\n", s->str); in switch_output_setup()
3236 val = parse_tag_value(s->str, tags_time); in switch_output_setup()
3237 if (val != (unsigned long) -1) { in switch_output_setup()
3238 s->time = val; in switch_output_setup()
3239 pr_debug("switch-output with %s time threshold (%lu seconds)\n", in switch_output_setup()
3240 s->str, s->time); in switch_output_setup()
3244 return -1; in switch_output_setup()
3247 rec->timestamp_filename = true; in switch_output_setup()
3248 s->enabled = true; in switch_output_setup()
3250 if (s->size && !rec->opts.no_buffering) in switch_output_setup()
3258 "perf record [<options>] -- <command> [<options>]",
3270 if (!(event->header.misc & PERF_RECORD_MISC_USER)) in build_id__process_mmap()
3282 if (!(event->header.misc & PERF_RECORD_MISC_USER)) in build_id__process_mmap2()
3295 set_timestamp_boundary(rec, sample->time); in process_timestamp_boundary()
3303 struct record_opts *opts = opt->value; in parse_record_synth_option()
3307 return -1; in parse_record_synth_option()
3309 opts->synth = parse_synth_opt(p); in parse_record_synth_option()
3312 if (opts->synth < 0) { in parse_record_synth_option()
3314 return -1; in parse_record_synth_option()
3323 * builtin-script, leave it here.
3342 .ctl_fd = -1,
3343 .ctl_fd_ack = -1,
3362 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
3364 * from builtin-record.c, i.e. use record_opts,
3374 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
3383 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
3385 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
3387 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
3388 "system-wide collection from all CPUs"),
3394 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
3397 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
3398 "synthesize non-sample events at the end of output"),
3400 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
3401 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
3406 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
3409 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
3413 NULL, "enables call-graph recording",
3415 OPT_CALLBACK(0, "call-graph", &record.opts,
3424 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
3426 OPT_BOOLEAN(0, "data-page-size", &record.opts.sample_data_page_size,
3428 OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
3430 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
3431 OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
3438 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
3440 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
3443 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
3450 … "ms to wait before starting measurement after program start (-1: start with events disabled), "
3451 "or ranges of time to enable events e.g. '-D 10-20,30-40'",
3457 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
3461 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
3468 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
3469 "use per-thread mmaps"),
3470 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
3472 " use '-I?' to list register names", parse_intr_regs),
3473 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
3475 " use '--user-regs=?' to list register names", parse_user_regs),
3476 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
3483 OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
3485 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
3489 OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
3491 OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
3494 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
3497 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
3500 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
3502 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
3506 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
3507 "Record build-id of all DSOs regardless of hits"),
3508 OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
3509 "Record build-id in map events"),
3510 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
3512 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
3514 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
3518 OPT_CALLBACK_SET(0, "switch-output-event", &switch_output_parse_events_option_args,
3522 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
3524 OPT_BOOLEAN(0, "dry-run", &dry_run,
3535 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default, "n",
3536 …"Compress records using specified level (default: 1 - fastest compression, 22 - greatest compressi…
3539 OPT_CALLBACK(0, "max-size", &record.output_max_size,
3541 OPT_UINTEGER(0, "num-thread-synthesize",
3545 OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
3549 OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
3550 …"Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable…
3552 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
3553 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
3556 "Fine-tune event synthesis: default=all", parse_record_synth_option),
3564 OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
3565 OPT_STRING(0, "setup-filter", &record.filter_action, "pin|unpin",
/* record__mmap_cpu_mask_init() */
	if ((unsigned long)cpu.cpu > mask->nbits)
		return -ENODEV;
	__set_bit(cpu.cpu, mask->bits);
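/*
 * A minimal sketch (not from this file) of the same guard-then-set
 * pattern, using the tools/include bitmap helpers; mask_add_cpu() is a
 * hypothetical name, and "bits"/"nbits" stand in for the mask geometry:
 */
static inline int mask_add_cpu(unsigned long *bits, size_t nbits, int cpu)
{
	if ((unsigned long)cpu >= nbits)
		return -ENODEV;	/* CPU cannot be represented in this bitmap */
	__set_bit(cpu, bits);	/* from linux/bitmap.h in tools/include */
	return 0;
}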
/* record__mmap_cpu_mask_init_spec() */
	if (!cpus)
		return -ENOMEM;

	bitmap_zero(mask->bits, mask->nbits);
	if (record__mmap_cpu_mask_init(mask, cpus))
		return -ENODEV;
/* record__free_thread_masks() */
	if (rec->thread_masks)
		for (t = 0; t < nr_threads; t++)
			record__thread_mask_free(&rec->thread_masks[t]);

	zfree(&rec->thread_masks);
/* record__alloc_thread_masks() */
	rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
	if (!rec->thread_masks) {
		pr_err("Failed to allocate thread masks\n");
		return -ENOMEM;
	}

	for (t = 0; t < nr_threads; t++) {
		ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
		/* ... error handling elided ... */
	}
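/*
 * Sketch of the intended pairing (not verbatim from this file): masks
 * allocated here are released in one place, so any later setup failure
 * can unwind with a single call:
 *
 *	if (record__alloc_thread_masks(rec, nr_threads, nr_bits))
 *		goto out;
 *	...
 *	record__free_thread_masks(rec, rec->nr_threads);
 */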
/* record__init_thread_cpu_masks() */
	rec->nr_threads = nr_cpus;
	pr_debug("nr_threads: %d\n", rec->nr_threads);

	for (t = 0; t < rec->nr_threads; t++) {
		__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
		__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
		if (verbose > 0) {
			pr_debug("thread_masks[%d]: ", t);
			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
			pr_debug("thread_masks[%d]: ", t);
			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
		}
	}
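/*
 * In this mode every mapped CPU gets its own recording thread, with
 * identical maps and affinity masks: e.g. (illustrative) with
 * --threads=cpu on CPUs 0-3, thread 0 reads and is pinned to CPU 0,
 * thread 1 to CPU 1, and so on.
 */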
/*
 * record__init_thread_masks_spec() -- the spec parsing/validation
 * branches are elided here; each failure path sets ret = -EINVAL.
 */
	/* ... */
	thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask));
	if (!thread_masks) {
		pr_err("Failed to reallocate thread masks\n");
		ret = -ENOMEM;
		goto out_free;
	}
	rec->thread_masks = thread_masks;
	rec->thread_masks[t] = thread_mask;
	if (verbose > 0) {
		pr_debug("thread_masks[%d]: ", t);
		mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
		pr_debug("thread_masks[%d]: ", t);
		mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
	}
	/* ... */
	rec->nr_threads = t;
	pr_debug("nr_threads: %d\n", rec->nr_threads);
	if (!rec->nr_threads)
		ret = -EINVAL;
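/*
 * Each spec entry pairs a "maps" CPU list (which mmaps the thread
 * services) with an "affinity" CPU list (where the thread may run).
 * Illustrative only, syntax assumed from the perf-record docs:
 * --threads=0-3/0:4-7/4 would create two threads, the first reading
 * CPUs 0-3 while pinned to CPU 0, the second reading CPUs 4-7 while
 * pinned to CPU 4.
 */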
/* record__init_thread_core_masks() */
	topo = cpu_topology__new();
	if (!topo) {
		pr_err("Failed to allocate CPU topology\n");
		return -ENOMEM;
	}

	ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list,
					     topo->core_cpus_list, topo->core_cpus_lists);
/* record__init_thread_package_masks() */
	topo = cpu_topology__new();
	if (!topo) {
		pr_err("Failed to allocate CPU topology\n");
		return -ENOMEM;
	}

	ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list,
					     topo->package_cpus_list, topo->package_cpus_lists);
/* record__init_thread_numa_masks() */
	topo = numa_topology__new();
	if (!topo) {
		pr_err("Failed to allocate NUMA topology\n");
		return -ENOMEM;
	}

	spec = zalloc(topo->nr * sizeof(char *));
	if (!spec) {
		ret = -ENOMEM;
		goto out_delete_topo;
	}
	for (s = 0; s < topo->nr; s++)
		spec[s] = topo->nodes[s].cpus;

	ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr);
/*
 * record__init_thread_user_masks() -- tokenizing of the user-provided
 * "maps/affinity" pairs is elided; allocation failures in the loop set
 * ret = -ENOMEM, malformed specs set ret = -EINVAL.
 */
	for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) {
		/* ... */
	}
/* record__init_thread_default_masks() */
	if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
		return -ENODEV;

	rec->nr_threads = 1;
/* record__init_thread_masks() */
	struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;

	if (evlist__per_thread(rec->evlist)) {
		pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
		return -EINVAL;
	}

	switch (rec->opts.threads_spec) {
	/* ... per-spec initialization, sketched below ... */
	}
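/*
 * A sketch of the dispatch this switch performs (case labels assumed
 * from the THREAD_SPEC__* naming, not copied verbatim):
 *
 *	case THREAD_SPEC__CPU:     ret = record__init_thread_cpu_masks(rec, cpus);     break;
 *	case THREAD_SPEC__CORE:    ret = record__init_thread_core_masks(rec, cpus);    break;
 *	case THREAD_SPEC__PACKAGE: ret = record__init_thread_package_masks(rec, cpus); break;
 *	case THREAD_SPEC__NUMA:    ret = record__init_thread_numa_masks(rec, cpus);    break;
 *	case THREAD_SPEC__USER:    ret = record__init_thread_user_masks(rec, cpus);    break;
 */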
3983 set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true); in cmd_record()
3989 rec->opts.affinity = PERF_AFFINITY_SYS; in cmd_record()
3991 rec->evlist = evlist__new(); in cmd_record()
3992 if (rec->evlist == NULL) in cmd_record()
3993 return -ENOMEM; in cmd_record()
	/* Make system wide (-a) the default target. */
	if (!argc && target__none(&rec->opts.target))
		rec->opts.target.system_wide = true;

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");
	}

	if (rec->buildid_mmap) {
		if (!perf_can_record_build_id()) {
			pr_err("Failed: no support to record build id in mmap events, update your kernel.\n");
			err = -EINVAL;
			goto out_opts;
		}
		/* Enable perf_event_attr::build_id bit. */
		rec->opts.build_id = true;
		/* Disable build id cache. */
		rec->no_buildid = true;
	}

	if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
		pr_err("Failed: no support to record cgroup events, update your kernel.\n");
		err = -EINVAL;
		goto out_opts;
	}

	if (rec->opts.kcore)
		rec->opts.text_poke = true;

	if (rec->opts.kcore || record__threads_enabled(rec))
		rec->data.is_dir = true;
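/*
 * Note: kcore copying and parallel (--threads) streaming both switch
 * the output from a single perf.data file to a perf.data directory;
 * the directory handling lives in util/data.c.
 */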
	if (record__threads_enabled(rec)) {
		if (rec->opts.affinity != PERF_AFFINITY_SYS) {
			pr_err("--affinity option is mutually exclusive to parallel streaming mode.\n");
			goto out_opts;
		}
		if (record__aio_enabled(rec)) {
			pr_err("Asynchronous streaming mode (--aio) is mutually exclusive to parallel streaming mode.\n");
			goto out_opts;
		}
	}

	if (rec->opts.comp_level != 0) {
		/* Compression disables build id collection at the end of the session. */
		rec->no_buildid = true;
	}

	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		err = -EINVAL;
		goto out_opts;
	}

	if (switch_output_setup(rec)) {
		parse_options_usage(record_usage, record_options, "switch-output", 0);
		err = -EINVAL;
		goto out_opts;
	}

	if (rec->switch_output.time) {
		signal(SIGALRM, alarm_sig_handler);
		alarm(rec->switch_output.time);
	}

	if (rec->switch_output.num_files) {
		rec->switch_output.filenames = calloc(rec->switch_output.num_files,
						      sizeof(char *));
		if (!rec->switch_output.filenames) {
			err = -EINVAL;
			goto out_opts;
		}
	}

	if (rec->timestamp_filename && record__threads_enabled(rec)) {
		rec->timestamp_filename = false;
		pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n");
	}
	if (rec->filter_action) {
		if (!strcmp(rec->filter_action, "pin"))
			err = perf_bpf_filter__pin();
		else if (!strcmp(rec->filter_action, "unpin"))
			err = perf_bpf_filter__unpin();
		else {
			pr_warning("Unknown BPF filter action: %s\n", rec->filter_action);
			err = -EINVAL;
		}
		goto out_opts;
	}

	/* ... */
	err = -ENOMEM;

	if (rec->no_buildid_cache || rec->no_buildid) {
		disable_buildid_cache();
	} else if (rec->switch_output.enabled) {
		/*
		 * In 'perf record --switch-output', disable buildid
		 * generation by default to reduce data file switching
		 * overhead. Still generate buildid if they are required
		 * explicitly using
		 *
		 *  perf record --switch-output --no-no-buildid \
		 *              --no-no-buildid-cache
		 *
		 * Following code equals to:
		 *
		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
		 *         disable_buildid_cache();
		 */
		bool disable = true;

		if (rec->no_buildid_set && !rec->no_buildid)
			disable = false;
		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
			disable = false;
		if (disable) {
			rec->no_buildid = true;
			rec->no_buildid_cache = true;
			disable_buildid_cache();
		}
	}
	if (rec->evlist->core.nr_entries == 0) {
		err = parse_event(rec->evlist, "cycles:P");
		if (err)
			goto out;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out;
	}

	/* Enable ignoring missing threads when -u/-p option is defined. */
	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;

	evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);

	if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP)
		arch__add_leaf_frame_record_opts(&rec->opts);

	err = -ENOMEM;
	if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) {
		if (rec->opts.target.pid != NULL) {
			/* ... report the missing process and goto out ... */
		}
		usage_with_options(record_usage, record_options);
	}

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out;

	/* With AUX area tracing data, take all buildids: the trace is not decoded here. */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;
	if (rec->opts.text_poke) {
		err = record__config_text_poke(rec->evlist);
		if (err)
			goto out;
	}

	if (rec->off_cpu) {
		err = record__config_off_cpu(rec);
		if (err)
			goto out;
	}

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out;
	}

	/* ... */
	if (rec->opts.nr_cblocks > nr_cblocks_max)
		rec->opts.nr_cblocks = nr_cblocks_max;
	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);

	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);

	if (rec->opts.comp_level > comp_level_max)
		rec->opts.comp_level = comp_level_max;
	pr_debug("comp level: %d\n", rec->opts.comp_level);
out:
	record__free_thread_masks(rec, rec->nr_threads);
	rec->nr_threads = 0;
	/* ... */
	auxtrace_record__free(rec->itr);
out_opts:
	evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
	evlist__delete(rec->evlist);
	return err;
static void snapshot_sig_handler(int sig __maybe_unused)
{
	/* ... AUX area snapshot and/or signal-driven switch-output ... */
}

static void alarm_sig_handler(int sig __maybe_unused)
{
	/* ... time-driven switch-output ... */
}
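/*
 * Signal flow, for orientation: SIGUSR2 lands in snapshot_sig_handler()
 * (AUX area snapshot and/or --switch-output=signal rotation), while the
 * SIGALRM armed by alarm() in cmd_record() lands in alarm_sig_handler()
 * for time-based --switch-output rotation.
 */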