Lines matching full:work — identifier search hits from tools/perf/builtin-kwork.c (the perf kwork tool), each annotated with its containing function:
315 struct kwork_work *work; in work_search() local
319 work = container_of(node, struct kwork_work, node); in work_search()
320 cmp = work_cmp(sort_list, key, work); in work_search()
326 if (work->name == NULL) in work_search()
327 work->name = key->name; in work_search()
328 return work; in work_search()
362 struct kwork_work *work = zalloc(sizeof(*work)); in work_new() local
364 if (work == NULL) { in work_new()
365 pr_err("Failed to zalloc kwork work\n"); in work_new()
370 INIT_LIST_HEAD(&work->atom_list[i]); in work_new()
372 work->id = key->id; in work_new()
373 work->cpu = key->cpu; in work_new()
374 work->name = key->name; in work_new()
375 work->class = key->class; in work_new()
376 return work; in work_new()
383 struct kwork_work *work = work_search(root, key, sort_list); in work_findnew() local
385 if (work != NULL) in work_findnew()
386 return work; in work_findnew()
388 work = work_new(key); in work_findnew()
389 if (work) in work_findnew()
390 work_insert(root, work, sort_list); in work_findnew()
392 return work; in work_findnew()
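work_findnew() above depends on work_insert(), which is not among the hits. A sketch of the usual cached-rbtree insert it implies, with the leftmost hint maintained for rb_first_cached(); the body is an assumption consistent with the search sketch above:

static void work_insert(struct rb_root_cached *root,
                        struct kwork_work *key, struct list_head *sort_list)
{
        int cmp;
        bool leftmost = true;
        struct kwork_work *cur;
        struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;

        while (*new) {
                cur = container_of(*new, struct kwork_work, node);
                parent = *new;
                cmp = work_cmp(sort_list, key, cur);
                if (cmp > 0)
                        new = &((*new)->rb_left);
                else {
                        new = &((*new)->rb_right);
                        leftmost = false;
                }
        }

        rb_link_node(&key->node, parent, new);
        rb_insert_color_cached(&key->node, root, leftmost);
}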
409 struct kwork_work *work) in profile_name_match() argument
411 if (kwork->profile_name && work->name && in profile_name_match()
412 (strcmp(work->name, kwork->profile_name) != 0)) { in profile_name_match()
420 struct kwork_work *work, in profile_event_match() argument
423 int cpu = work->cpu; in profile_event_match()
439 !profile_name_match(kwork, work)) { in profile_event_match()
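Only the cpu capture and the name check appear in the hits. A sketch of the whole filter, assuming kwork carries a cpu_list/cpu_bitmap pair and a perf_time_interval ptime as other perf tools do:

static int profile_event_match(struct perf_kwork *kwork,
                               struct kwork_work *work,
                               struct perf_sample *sample)
{
        int cpu = work->cpu;
        u64 time = sample->time;
        struct perf_time_interval *ptime = &kwork->ptime;

        /* --cpu filter */
        if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
                return 0;

        /* --time window filter */
        if (((ptime->start != 0) && (ptime->start > time)) ||
            ((ptime->end != 0) && (ptime->end < time)))
                return 0;

        /* --name filter; top mode keeps every task so per-CPU load
         * can still be computed. */
        if ((kwork->report != KWORK_REPORT_TOP) &&
            !profile_name_match(kwork, work))
                return 0;

        return 1;
}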
458 struct kwork_work *work, key; in work_push_atom() local
467 work = work_findnew(&class->work_root, &key, &kwork->cmp_id); in work_push_atom()
468 if (work == NULL) { in work_push_atom()
473 if (!profile_event_match(kwork, work, sample)) { in work_push_atom()
479 dst_atom = list_last_entry_or_null(&work->atom_list[dst_type], in work_push_atom()
488 *ret_work = work; in work_push_atom()
491 last_atom = list_last_entry_or_null(&work->atom_list[src_type], in work_push_atom()
501 list_add_tail(&atom->list, &work->atom_list[src_type]); in work_push_atom()
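Putting the push-side hits together: each event allocates an atom, pairs off any pending atom of the destination type, and queues itself on the source-type list. A sketch with the signature and error paths compressed (work_init dispatch and atom helpers are assumed from context):

static int work_push_atom(struct perf_kwork *kwork, struct kwork_class *class,
                          enum kwork_trace_type src_type,
                          enum kwork_trace_type dst_type,
                          struct evsel *evsel, struct perf_sample *sample,
                          struct machine *machine,
                          struct kwork_work **ret_work, bool overwrite)
{
        struct kwork_atom *atom, *dst_atom, *last_atom;
        struct kwork_work *work, key;

        /* Build a lookup key from the tracepoint fields. */
        class->work_init(kwork, class, &key, src_type, evsel, sample, machine);

        atom = atom_new(kwork, sample);
        if (atom == NULL)
                return -1;

        work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
        if (work == NULL) {
                atom_free(atom);
                return -1;
        }

        if (!profile_event_match(kwork, work, sample)) {
                atom_free(atom);
                return 0;
        }

        /* Pair with a pending atom of the destination type, if any. */
        if (dst_type < KWORK_TRACE_MAX) {
                dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
                                                   struct kwork_atom, list);
                if (dst_atom != NULL) {
                        atom->prev = dst_atom;
                        list_del(&dst_atom->list);
                }
        }

        if (ret_work != NULL)
                *ret_work = work;

        /* Optionally replace an unpaired atom of the same type. */
        if (overwrite) {
                last_atom = list_last_entry_or_null(&work->atom_list[src_type],
                                                    struct kwork_atom, list);
                if (last_atom)
                        atom_del(last_atom);
        }

        list_add_tail(&atom->list, &work->atom_list[src_type]);
        return 0;
}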
516 struct kwork_work *work, key; in work_pop_atom() local
521 work = work_findnew(&class->work_root, &key, &kwork->cmp_id); in work_pop_atom()
523 *ret_work = work; in work_pop_atom()
525 if (work == NULL) in work_pop_atom()
528 if (!profile_event_match(kwork, work, sample)) in work_pop_atom()
531 atom = list_last_entry_or_null(&work->atom_list[dst_type], in work_pop_atom()
538 list_add_tail(&src_atom->list, &work->atom_list[src_type]); in work_pop_atom()
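The pop side is the mirror image: return the pending destination atom if one exists, otherwise record a new source atom so a later event can pair with it. A sketch under the same assumptions as the push sketch:

static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
                                        struct kwork_class *class,
                                        enum kwork_trace_type src_type,
                                        enum kwork_trace_type dst_type,
                                        struct evsel *evsel,
                                        struct perf_sample *sample,
                                        struct machine *machine,
                                        struct kwork_work **ret_work)
{
        struct kwork_atom *atom, *src_atom;
        struct kwork_work *work, key;

        class->work_init(kwork, class, &key, src_type, evsel, sample, machine);

        work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
        if (ret_work != NULL)
                *ret_work = work;
        if (work == NULL)
                return NULL;

        if (!profile_event_match(kwork, work, sample))
                return NULL;

        /* A pending destination atom pairs with this event. */
        atom = list_last_entry_or_null(&work->atom_list[dst_type],
                                       struct kwork_atom, list);
        if (atom != NULL)
                return atom;

        /* Nothing pending: remember this event for a later pairing. */
        src_atom = atom_new(kwork, sample);
        if (src_atom != NULL)
                list_add_tail(&src_atom->list, &work->atom_list[src_type]);
        else if (ret_work != NULL)
                *ret_work = NULL;

        return NULL;
}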
551 struct kwork_work *work; in find_work_by_id() local
555 work = rb_entry(next, struct kwork_work, node); in find_work_by_id()
556 if ((cpu != -1 && work->id == id && work->cpu == cpu) || in find_work_by_id()
557 (cpu == -1 && work->id == id)) in find_work_by_id()
558 return work; in find_work_by_id()
579 static void report_update_exit_event(struct kwork_work *work, in report_update_exit_event() argument
589 if ((delta > work->max_runtime) || in report_update_exit_event()
590 (work->max_runtime == 0)) { in report_update_exit_event()
591 work->max_runtime = delta; in report_update_exit_event()
592 work->max_runtime_start = entry_time; in report_update_exit_event()
593 work->max_runtime_end = exit_time; in report_update_exit_event()
595 work->total_runtime += delta; in report_update_exit_event()
596 work->nr_atoms++; in report_update_exit_event()
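The hits omit the guard that computes delta; the update only fires when the entry timestamp is set and exit does not precede it. A sketch of the full accumulator:

static void report_update_exit_event(struct kwork_work *work,
                                     struct kwork_atom *atom,
                                     struct perf_sample *sample)
{
        u64 delta;
        u64 exit_time = sample->time;
        u64 entry_time = atom->time;

        if ((entry_time != 0) && (exit_time >= entry_time)) {
                delta = exit_time - entry_time;
                /* Track the single longest run and its time span. */
                if ((delta > work->max_runtime) ||
                    (work->max_runtime == 0)) {
                        work->max_runtime = delta;
                        work->max_runtime_start = entry_time;
                        work->max_runtime_end = exit_time;
                }
                work->total_runtime += delta;
                work->nr_atoms++;
        }
}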
618 struct kwork_work *work = NULL; in report_exit_event() local
622 machine, &work); in report_exit_event()
623 if (work == NULL) in report_exit_event()
627 report_update_exit_event(work, atom, sample); in report_exit_event()
634 static void latency_update_entry_event(struct kwork_work *work, in latency_update_entry_event() argument
644 if ((delta > work->max_latency) || in latency_update_entry_event()
645 (work->max_latency == 0)) { in latency_update_entry_event()
646 work->max_latency = delta; in latency_update_entry_event()
647 work->max_latency_start = raise_time; in latency_update_entry_event()
648 work->max_latency_end = entry_time; in latency_update_entry_event()
650 work->total_latency += delta; in latency_update_entry_event()
651 work->nr_atoms++; in latency_update_entry_event()
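latency_update_entry_event() is the same accumulator with the pair (raise_time, entry_time) in place of (entry_time, exit_time): delta = entry_time - raise_time feeds max_latency/total_latency and the shared nr_atoms counter.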
673 struct kwork_work *work = NULL; in latency_entry_event() local
677 machine, &work); in latency_entry_event()
678 if (work == NULL) in latency_entry_event()
682 latency_update_entry_event(work, atom, sample); in latency_entry_event()
739 struct kwork_work *work, in timehist_print_event() argument
764 printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu); in timehist_print_event()
769 if (work->class && work->class->work_name) { in timehist_print_event()
770 work->class->work_name(work, kwork_name, in timehist_print_event()
832 struct kwork_work *work = NULL; in timehist_entry_event() local
836 machine, &work, true); in timehist_entry_event()
840 if (work != NULL) in timehist_entry_event()
853 struct kwork_work *work = NULL; in timehist_exit_event() local
866 machine, &work); in timehist_exit_event()
867 if (work == NULL) { in timehist_exit_event()
873 work->nr_atoms++; in timehist_exit_event()
874 timehist_print_event(kwork, work, atom, sample, &al); in timehist_exit_event()
883 static void top_update_runtime(struct kwork_work *work, in top_update_runtime() argument
893 work->total_runtime += delta; in top_update_runtime()
914 struct kwork_work *work, *sched_work; in top_exit_event() local
920 machine, &work); in top_exit_event()
921 if (!work) in top_exit_event()
928 work->id, work->cpu); in top_exit_event()
930 top_update_runtime(work, atom, sample); in top_exit_event()
945 struct kwork_work *work; in top_sched_switch_event() local
949 machine, &work); in top_sched_switch_event()
950 if (!work) in top_sched_switch_event()
954 top_update_runtime(work, atom, sample); in top_sched_switch_event()
1007 struct kwork_work *work, in irq_work_init() argument
1013 work->class = class; in irq_work_init()
1014 work->cpu = sample->cpu; in irq_work_init()
1017 work->id = evsel__intval_common(evsel, sample, "common_pid"); in irq_work_init()
1018 work->name = NULL; in irq_work_init()
1020 work->id = evsel__intval(evsel, sample, "irq"); in irq_work_init()
1021 work->name = evsel__strval(evsel, sample, "name"); in irq_work_init()
1025 static void irq_work_name(struct kwork_work *work, char *buf, int len) in irq_work_name() argument
1027 snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id); in irq_work_name()
1135 struct kwork_work *work, in softirq_work_init() argument
1143 work->class = class; in softirq_work_init()
1144 work->cpu = sample->cpu; in softirq_work_init()
1147 work->id = evsel__intval_common(evsel, sample, "common_pid"); in softirq_work_init()
1148 work->name = NULL; in softirq_work_init()
1151 work->id = num; in softirq_work_init()
1152 work->name = evsel__softirq_name(evsel, num); in softirq_work_init()
1156 static void softirq_work_name(struct kwork_work *work, char *buf, int len) in softirq_work_name() argument
1158 snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id); in softirq_work_name()
1235 struct kwork_work *work, in workqueue_work_init() argument
1245 work->class = class; in workqueue_work_init()
1246 work->cpu = sample->cpu; in workqueue_work_init()
1247 work->id = evsel__intval(evsel, sample, "work"); in workqueue_work_init()
1248 work->name = function_addr == 0 ? NULL : in workqueue_work_init()
1252 static void workqueue_work_name(struct kwork_work *work, char *buf, int len) in workqueue_work_name() argument
1254 if (work->name != NULL) in workqueue_work_name()
1255 snprintf(buf, len, "(w)%s", work->name); in workqueue_work_name()
1257 snprintf(buf, len, "(w)0x%" PRIx64, work->id); in workqueue_work_name()
1303 struct kwork_work *work, in sched_work_init() argument
1309 work->class = class; in sched_work_init()
1310 work->cpu = sample->cpu; in sched_work_init()
1313 work->id = evsel__intval(evsel, sample, "prev_pid"); in sched_work_init()
1314 work->name = strdup(evsel__strval(evsel, sample, "prev_comm")); in sched_work_init()
1316 work->id = evsel__intval(evsel, sample, "next_pid"); in sched_work_init()
1317 work->name = strdup(evsel__strval(evsel, sample, "next_comm")); in sched_work_init()
1321 static void sched_work_name(struct kwork_work *work, char *buf, int len) in sched_work_name() argument
1323 snprintf(buf, len, "%s", work->name); in sched_work_name()
1348 static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work) in report_print_work() argument
1360 if (work->class && work->class->work_name) { in report_print_work()
1361 work->class->work_name(work, kwork_name, in report_print_work()
1371 ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu); in report_print_work()
1379 (double)work->total_runtime / NSEC_PER_MSEC); in report_print_work()
1383 (double)work->total_latency / in report_print_work()
1384 work->nr_atoms / NSEC_PER_MSEC); in report_print_work()
1390 ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms); in report_print_work()
1396 timestamp__scnprintf_usec(work->max_runtime_start, in report_print_work()
1399 timestamp__scnprintf_usec(work->max_runtime_end, in report_print_work()
1404 (double)work->max_runtime / NSEC_PER_MSEC, in report_print_work()
1412 timestamp__scnprintf_usec(work->max_latency_start, in report_print_work()
1415 timestamp__scnprintf_usec(work->max_latency_end, in report_print_work()
1420 (double)work->max_latency / NSEC_PER_MSEC, in report_print_work()
1639 static int top_print_work(struct perf_kwork *kwork __maybe_unused, struct kwork_work *work) in top_print_work() argument
1648 ret += printf(" %*" PRIu64 " ", PRINT_PID_WIDTH, work->id); in top_print_work()
1654 ret += printf(" %*d ", PRINT_PID_WIDTH, work->tgid); in top_print_work()
1661 (double)work->cpu_usage / 100); in top_print_work()
1668 (double)work->total_runtime / NSEC_PER_MSEC); in top_print_work()
1675 work->is_kthread ? "[" : "", in top_print_work()
1676 work->name, in top_print_work()
1677 work->is_kthread ? "]" : ""); in top_print_work()
1679 ret += printf(" %-*s", PRINT_TASK_NAME_WIDTH, work->name); in top_print_work()
1839 struct kwork_work *work) in process_skipped_events() argument
1845 count = nr_list_entry(&work->atom_list[i]); in process_skipped_events()
1855 struct kwork_work *work = NULL; in perf_kwork_add_work() local
1857 work = work_new(key); in perf_kwork_add_work()
1858 if (work == NULL) in perf_kwork_add_work()
1861 work_insert(&class->work_root, work, &kwork->cmp_id); in perf_kwork_add_work()
1862 return work; in perf_kwork_add_work()
1907 struct kwork_work *work; in perf_kwork__report() local
1924 work = rb_entry(next, struct kwork_work, node); in perf_kwork__report()
1925 process_skipped_events(kwork, work); in perf_kwork__report()
1927 if (work->nr_atoms != 0) { in perf_kwork__report()
1928 report_print_work(kwork, work); in perf_kwork__report()
1930 kwork->all_runtime += work->total_runtime; in perf_kwork__report()
1931 kwork->all_count += work->nr_atoms; in perf_kwork__report()
2000 struct kwork_work *work; in top_calc_total_runtime() local
2010 work = rb_entry(next, struct kwork_work, node); in top_calc_total_runtime()
2011 BUG_ON(work->cpu >= MAX_NR_CPUS); in top_calc_total_runtime()
2012 stat->cpus_runtime[work->cpu].total += work->total_runtime; in top_calc_total_runtime()
2013 stat->cpus_runtime[MAX_NR_CPUS].total += work->total_runtime; in top_calc_total_runtime()
2019 struct kwork_work *work) in top_calc_idle_time() argument
2023 if (work->id == 0) { in top_calc_idle_time()
2024 stat->cpus_runtime[work->cpu].idle += work->total_runtime; in top_calc_idle_time()
2025 stat->cpus_runtime[MAX_NR_CPUS].idle += work->total_runtime; in top_calc_idle_time()
2031 struct kwork_work *work) in top_calc_irq_runtime() argument
2036 stat->cpus_runtime[work->cpu].irq += work->total_runtime; in top_calc_irq_runtime()
2037 stat->cpus_runtime[MAX_NR_CPUS].irq += work->total_runtime; in top_calc_irq_runtime()
2039 stat->cpus_runtime[work->cpu].softirq += work->total_runtime; in top_calc_irq_runtime()
2040 stat->cpus_runtime[MAX_NR_CPUS].softirq += work->total_runtime; in top_calc_irq_runtime()
2045 struct kwork_work *work) in top_subtract_irq_runtime() argument
2058 work->id, work->cpu); in top_subtract_irq_runtime()
2062 if (work->total_runtime > data->total_runtime) { in top_subtract_irq_runtime()
2063 work->total_runtime -= data->total_runtime; in top_subtract_irq_runtime()
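Here work is a sched entry keyed by pid; find_work_by_id() (line 551 above) locates irq/softirq work recorded under the same id and cpu, and that runtime is subtracted so a task is not charged for interrupts handled in its context. The work->total_runtime > data->total_runtime guard keeps the result non-negative.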
2072 struct kwork_work *work; in top_calc_cpu_usage() local
2082 work = rb_entry(next, struct kwork_work, node); in top_calc_cpu_usage()
2084 if (work->total_runtime == 0) in top_calc_cpu_usage()
2087 __set_bit(work->cpu, stat->all_cpus_bitmap); in top_calc_cpu_usage()
2089 top_subtract_irq_runtime(kwork, work); in top_calc_cpu_usage()
2091 work->cpu_usage = work->total_runtime * 10000 / in top_calc_cpu_usage()
2092 stat->cpus_runtime[work->cpu].total; in top_calc_cpu_usage()
2094 top_calc_idle_time(kwork, work); in top_calc_cpu_usage()
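cpu_usage is thus stored in hundredths of a percent: total_runtime * 10000 / cpus_runtime[cpu].total, printed later as cpu_usage / 100 (line 1661 above). For example, 250 ms of runtime in a 1000 ms window on one CPU yields cpu_usage = 2500, printed as 25.00.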
2101 struct kwork_work *work) in top_calc_load_runtime() argument
2105 if (work->id != 0) { in top_calc_load_runtime()
2106 stat->cpus_runtime[work->cpu].load += work->total_runtime; in top_calc_load_runtime()
2107 stat->cpus_runtime[MAX_NR_CPUS].load += work->total_runtime; in top_calc_load_runtime()
2152 struct kwork_work *work; in perf_kwork__top_report() local
2161 work = rb_entry(next, struct kwork_work, node); in perf_kwork__top_report()
2162 process_skipped_events(kwork, work); in perf_kwork__top_report()
2164 if (work->total_runtime == 0) in perf_kwork__top_report()
2167 top_print_work(kwork, work); in perf_kwork__top_report()