Lines Matching +full:min +full:- +full:sample +full:- +full:time +full:- +full:nsecs

1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
12 * Copyright (C) 2004-2006 Ingo Molnar
61 * A selftest will lurk into the ring-buffer to count the
63 * insertions into the ring-buffer such as trace_printk could have occurred
64 * at the same time, giving false positive or negative results.
69 * If boot-time tracing including tracers/events via kernel cmdline
121 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
224 strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
248 int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
254 return -1;
280 int left = sizeof(boot_instance_info) - boot_instance_index;
284 return -1;
347 if (export->flags & flag) {
350 export->write(export, entry, size);
364 if (export->flags & TRACE_EXPORT_FUNCTION)
367 if (export->flags & TRACE_EXPORT_EVENT)
370 if (export->flags & TRACE_EXPORT_MARKER)
376 if (export->flags & TRACE_EXPORT_FUNCTION)
379 if (export->flags & TRACE_EXPORT_EVENT)
382 if (export->flags & TRACE_EXPORT_MARKER)
395 export = rcu_dereference_raw_check(export->next);
404 rcu_assign_pointer(export->next, *list);
408 * the export->next pointer is valid before another CPU sees
419 for (p = list; *p != NULL; p = &(*p)->next)
424 return -1;
426 rcu_assign_pointer(*p, (*p)->next);
452 if (WARN_ON_ONCE(!export->write))
453 return -1;
497 * The global_trace is the descriptor that holds the top-level tracing
514 return !(tr->flags & TRACE_ARRAY_FL_BOOT);
522 printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK;
524 tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
531 tr->ring_buffer_expanded = true;
543 tr->ref++;
548 return -ENODEV;
553 WARN_ON(!this_tr->ref);
554 this_tr->ref--;
558 * trace_array_put - Decrement the reference counter for this trace array.
586 return -ENODEV;
589 return -ENODEV;
595 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
608 * trace_ignore_this_task - should a task be ignored for tracing
631 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
633 trace_find_filtered_pid(filtered_no_pids, task->pid));
637 * trace_filter_add_remove_task - Add or remove a task from a pid_list
657 if (!trace_find_filtered_pid(pid_list, self->pid))
663 trace_pid_list_set(pid_list, task->pid);
665 trace_pid_list_clear(pid_list, task->pid);
669 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
698 * trace_pid_start - Used for seq_file to start reading pid lists
727 * trace_pid_show - show the current pid in seq_file processing
736 unsigned long pid = (unsigned long)v - 1;
759 return -ENOMEM;
770 return -ENOMEM;
794 cnt -= ret;
799 ret = -EINVAL;
806 ret = -1;
837 if (!buf->buffer)
840 ts = ring_buffer_time_stamp(buf->buffer);
841 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
852 * tracing_is_enabled - Show if global_trace has been enabled
879 * boot time and run time configurable.
911 * These primitives don't distinguish read-only and read-consume access.
912 * Multi read-only access are also serialized.
1027 if (tr->array_buffer.buffer)
1028 ring_buffer_record_on(tr->array_buffer.buffer);
1037 tr->buffer_disabled = 0;
1043 * tracing_on - enable tracing buffers
1062 /* Length is in event->array[0] */
1063 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1081 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1093 buffer = tr->array_buffer.buffer;
1103 entry->ip = ip;
1105 memcpy(&entry->buf, str, size);
1108 if (entry->buf[size - 1] != '\n') {
1109 entry->buf[size] = '\n';
1110 entry->buf[size + 1] = '\0';
1112 entry->buf[size] = '\0';
1123 * __trace_puts - write a constant string into the trace buffer.
1135 * __trace_bputs - write the pointer to a constant string into trace buffer
1152 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1159 buffer = tr->array_buffer.buffer;
1168 entry->ip = ip;
1169 entry->str = str;
1185 struct tracer *tracer = tr->current_trace;
1194 if (!tr->allocated_snapshot) {
1202 if (tracer->use_max_tr) {
1208 if (tr->mapped) {
1225 * tracing_snapshot - take a snapshot of the current buffer.
1247 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1252 * conditional - the snapshot will only happen if the
1266 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1270 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1274 * the tr->max_lock lock, which the code calling
1284 arch_spin_lock(&tr->max_lock);
1286 if (tr->cond_snapshot)
1287 cond_data = tr->cond_snapshot->cond_data;
1289 arch_spin_unlock(&tr->max_lock);
1305 if (!tr->allocated_snapshot) {
1308 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
1309 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
1314 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1315 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1319 tr->allocated_snapshot = true;
1329 * The max_tr ring buffer has some state (e.g. ring->clock) and
1332 ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
1333 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1334 set_buffer_entries(&tr->max_buffer, 1);
1335 tracing_reset_online_cpus(&tr->max_buffer);
1336 tr->allocated_snapshot = false;
1345 spin_lock(&tr->snapshot_trigger_lock);
1346 if (tr->snapshot == UINT_MAX || tr->mapped) {
1347 spin_unlock(&tr->snapshot_trigger_lock);
1348 return -EBUSY;
1351 tr->snapshot++;
1352 spin_unlock(&tr->snapshot_trigger_lock);
1356 spin_lock(&tr->snapshot_trigger_lock);
1357 tr->snapshot--;
1358 spin_unlock(&tr->snapshot_trigger_lock);
1377 spin_lock(&tr->snapshot_trigger_lock);
1378 if (!WARN_ON(!tr->snapshot))
1379 tr->snapshot--;
1380 spin_unlock(&tr->snapshot_trigger_lock);
1384 * tracing_alloc_snapshot - allocate snapshot buffer.
1387 * allocated - it doesn't also take a snapshot.
1406 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1429 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1436 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1449 return -ENOMEM;
1451 cond_snapshot->cond_data = cond_data;
1452 cond_snapshot->update = update;
1456 if (tr->current_trace->use_max_tr)
1457 return -EBUSY;
1467 if (tr->cond_snapshot)
1468 return -EBUSY;
1475 arch_spin_lock(&tr->max_lock);
1476 tr->cond_snapshot = no_free_ptr(cond_snapshot);
1477 arch_spin_unlock(&tr->max_lock);
1485 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1490 * otherwise return -EINVAL.
1499 arch_spin_lock(&tr->max_lock);
1501 if (!tr->cond_snapshot)
1502 ret = -EINVAL;
1504 kfree(tr->cond_snapshot);
1505 tr->cond_snapshot = NULL;
1508 arch_spin_unlock(&tr->max_lock);
1530 return -ENODEV;
1546 return -ENODEV;
1555 #define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
1560 if (tr->array_buffer.buffer)
1561 ring_buffer_record_off(tr->array_buffer.buffer);
1570 tr->buffer_disabled = 1;
1576 * tracing_off - turn off tracing buffers
1599 * tracer_tracing_is_on - show real state of ring buffer enabled
1606 if (tr->array_buffer.buffer)
1607 return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
1608 return !tr->buffer_disabled;
1612 * tracing_is_on - show state of ring buffers enabled
1652 unsigned long nsecs_to_usecs(unsigned long nsecs)
1654 return nsecs / 1000;
1691 if (trace_clocks[tr->clock_id].in_ns)
1698 * trace_parser_get_init - gets the buffer for trace parser
1704 parser->buffer = kmalloc(size, GFP_KERNEL);
1705 if (!parser->buffer)
1708 parser->size = size;
1713 * trace_parser_put - frees the buffer for trace parser
1717 kfree(parser->buffer);
1718 parser->buffer = NULL;
1722 * trace_get_user - reads the user input string separated by space
1747 cnt--;
1753 if (!parser->cont) {
1760 cnt--;
1763 parser->idx = 0;
1773 /* read the non-space input */
1775 if (parser->idx < parser->size - 1)
1776 parser->buffer[parser->idx++] = ch;
1778 ret = -EINVAL;
1785 cnt--;
1790 parser->buffer[parser->idx] = 0;
1791 parser->cont = false;
1792 } else if (parser->idx < parser->size - 1) {
1793 parser->cont = true;
1794 parser->buffer[parser->idx++] = ch;
1796 parser->buffer[parser->idx] = 0;
1798 ret = -EINVAL;
1814 if (trace_seq_used(s) <= s->readpos)
1815 return -EBUSY;
1817 len = trace_seq_used(s) - s->readpos;
1820 memcpy(buf, s->buffer + s->readpos, cnt);
1822 s->readpos += cnt;
1839 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1846 queue_work(fsnotify_wq, &tr->fsnotify_work);
1852 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1853 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1854 tr->d_max_latency = trace_create_file("tracing_max_latency",
1866 return -ENOMEM;
1878 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1882 irq_work_queue(&tr->fsnotify_irqwork);
1894 * Copy the new maximum trace into the separate maximum-trace
1901 struct array_buffer *trace_buf = &tr->array_buffer;
1902 struct array_buffer *max_buf = &tr->max_buffer;
1903 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1904 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1906 max_buf->cpu = cpu;
1907 max_buf->time_start = data->preempt_timestamp;
1909 max_data->saved_latency = tr->max_latency;
1910 max_data->critical_start = data->critical_start;
1911 max_data->critical_end = data->critical_end;
1913 strscpy(max_data->comm, tsk->comm);
1914 max_data->pid = tsk->pid;
1920 max_data->uid = current_uid();
1922 max_data->uid = task_uid(tsk);
1924 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1925 max_data->policy = tsk->policy;
1926 max_data->rt_priority = tsk->rt_priority;
1934 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1947 if (tr->stop_count)
1952 if (!tr->allocated_snapshot) {
1954 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1958 arch_spin_lock(&tr->max_lock);
1961 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1962 ring_buffer_record_on(tr->max_buffer.buffer);
1964 ring_buffer_record_off(tr->max_buffer.buffer);
1967 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1968 arch_spin_unlock(&tr->max_lock);
1972 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1976 arch_spin_unlock(&tr->max_lock);
1979 ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1983 * update_max_tr_single - only copy one trace over, and reset the rest
1995 if (tr->stop_count)
1999 if (!tr->allocated_snapshot) {
2001 WARN_ON_ONCE(tr->current_trace != &nop_trace);
2005 arch_spin_lock(&tr->max_lock);
2007 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
2009 if (ret == -EBUSY) {
2017 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
2021 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
2024 arch_spin_unlock(&tr->max_lock);
2037 struct trace_iterator *iter = pwait->iter;
2039 if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
2042 return iter->closed;
2051 if (trace_buffer_iter(iter, iter->cpu_file))
2054 pwait.wait_index = atomic_read_acquire(&iter->wait_index);
2057 ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
2065 if (iter->snapshot)
2066 iter->array_buffer = &iter->tr->max_buffer;
2087 return -ENOMEM;
2089 selftest->type = type;
2090 list_add(&selftest->list, &postponed_selftests);
2097 struct tracer *saved_tracer = tr->current_trace;
2100 if (!type->selftest || tracing_selftest_disabled)
2113 type->name);
2124 tracing_reset_online_cpus(&tr->array_buffer);
2126 tr->current_trace = type;
2129 if (type->use_max_tr) {
2131 if (tr->ring_buffer_expanded)
2132 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2134 tr->allocated_snapshot = true;
2139 pr_info("Testing tracer %s: ", type->name);
2140 ret = type->selftest(type, tr);
2142 tr->current_trace = saved_tracer;
2147 return -1;
2150 tracing_reset_online_cpus(&tr->array_buffer);
2153 if (type->use_max_tr) {
2154 tr->allocated_snapshot = false;
2157 if (tr->ring_buffer_expanded)
2158 ring_buffer_resize(tr->max_buffer.buffer, 1,
2172 * Tests can take a long time, especially if they are run one after the
2206 ret = run_tracer_selftest(p->type);
2210 p->type->name);
2212 for (t = trace_types; t; t = t->next) {
2213 if (t == p->type) {
2214 *last = t->next;
2217 last = &t->next;
2220 list_del(&p->list);
2240 * register_tracer - register a tracer with the ftrace system.
2250 if (!type->name) {
2252 return -1;
2255 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2257 return -1;
2262 type->name);
2263 return -EPERM;
2268 for (t = trace_types; t; t = t->next) {
2269 if (strcmp(type->name, t->name) == 0) {
2272 type->name);
2273 ret = -1;
2278 if (!type->set_flag)
2279 type->set_flag = &dummy_set_flag;
2280 if (!type->flags) {
2282 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2283 if (!type->flags) {
2284 ret = -ENOMEM;
2287 type->flags->val = 0;
2288 type->flags->opts = dummy_tracer_opt;
2290 if (!type->flags->opts)
2291 type->flags->opts = dummy_tracer_opt;
2294 type->flags->trace = type;
2300 type->next = trace_types;
2310 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2313 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2315 tracing_set_tracer(&global_trace, type->name);
2329 struct trace_buffer *buffer = buf->buffer;
2345 struct trace_buffer *buffer = buf->buffer;
2355 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2364 struct trace_buffer *buffer = buf->buffer;
2374 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2389 if (!tr->clear_trace)
2391 tr->clear_trace = false;
2392 tracing_reset_online_cpus(&tr->array_buffer);
2394 tracing_reset_online_cpus(&tr->max_buffer);
2419 raw_spin_lock_irqsave(&tr->start_lock, flags);
2420 if (--tr->stop_count) {
2421 if (WARN_ON_ONCE(tr->stop_count < 0)) {
2423 tr->stop_count = 0;
2429 arch_spin_lock(&tr->max_lock);
2431 buffer = tr->array_buffer.buffer;
2436 buffer = tr->max_buffer.buffer;
2441 arch_spin_unlock(&tr->max_lock);
2444 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2448 * tracing_start - quick start of the tracer
2464 raw_spin_lock_irqsave(&tr->start_lock, flags);
2465 if (tr->stop_count++)
2469 arch_spin_lock(&tr->max_lock);
2471 buffer = tr->array_buffer.buffer;
2476 buffer = tr->max_buffer.buffer;
2481 arch_spin_unlock(&tr->max_lock);
2484 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2488 * tracing_stop - quick stop of the tracer
2513 return current->migration_disabled;
2559 * trace_buffered_event_enable - enable buffering events
2619 * trace_buffered_event_disable - disable buffering events
2635 if (--trace_buffered_event_ref)
2655 * could wrongly decide to use the pointed-to buffer which is now freed.
2673 struct trace_array *tr = trace_file->tr;
2676 *current_rb = tr->array_buffer.buffer;
2678 if (!tr->no_filter_buffering_ref &&
2679 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2686 * (see include/linux/ring-buffer.h for details on
2699 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2722 entry->array[0] = len;
2740 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2764 event_call = fbuffer->trace_file->event_call;
2765 if (!event_call || !event_call->event.funcs ||
2766 !event_call->event.funcs->trace)
2769 file = fbuffer->trace_file;
2770 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2771 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2772 !filter_match_preds(file->filter, fbuffer->entry)))
2775 event = &fbuffer->trace_file->event_call->event;
2778 trace_seq_init(&iter->seq);
2779 iter->ent = fbuffer->entry;
2780 event_call->event.funcs->trace(iter, 0, event);
2781 trace_seq_putc(&iter->seq, 0);
2782 printk("%s", iter->seq.buffer);
2820 struct trace_event_file *file = fbuffer->trace_file;
2822 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2823 fbuffer->entry, &tt))
2830 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2832 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2833 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2883 struct trace_buffer *buffer = tr->array_buffer.buffer;
2892 entry->ip = ip;
2893 entry->parent_ip = parent_ip;
2941 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2957 size = ARRAY_SIZE(fstack->calls);
2960 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2963 nr_entries = stack_trace_save(fstack->calls, size, skip);
2968 if (tr->ops && tr->ops->trampoline) {
2969 unsigned long tramp_start = tr->ops->trampoline;
2970 unsigned long tramp_end = tramp_start + tr->ops->trampoline_size;
2971 unsigned long *calls = fstack->calls;
2987 entry->size = nr_entries;
2988 memcpy(&entry->caller, fstack->calls,
3006 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3015 struct trace_buffer *buffer = tr->array_buffer.buffer;
3040 * trace_dump_stack - record a stack back trace in the trace buffer
3052 __ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer,
3067 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3093 entry->tgid = current->tgid;
3094 memset(&entry->caller, 0, sizeof(entry->caller));
3096 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3118 entry->bottom_delta_ts = delta & U32_MAX;
3119 entry->top_delta_ts = (delta >> 32);
3126 struct trace_buffer *buffer = tr->array_buffer.buffer;
3136 delta = ring_buffer_event_time_stamp(buffer, event) -
3137 last_info->ts_last_call;
3140 entry->ip = last_info->ip;
3141 entry->parent_ip = last_info->parent_ip;
3142 entry->count = last_info->count;
3164 if (!trace_percpu_buffer || buffer->nesting >= 4)
3167 buffer->nesting++;
3171 return &buffer->buffer[buffer->nesting - 1][0];
3178 this_cpu_dec(trace_percpu_buffer->nesting);
3190 return -ENOMEM;
3259 * trace_vbprintk - write binary msg to tracing buffer
3298 buffer = tr->array_buffer.buffer;
3305 entry->ip = ip;
3306 entry->fmt = fmt;
3308 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3361 entry->ip = ip;
3363 memcpy(&entry->buf, tbuffer, len + 1);
3385 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3389 * trace_array_printk - Print a message to a specific instance
3416 return -ENOENT;
3422 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3433 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3443 return -ENOENT;
3447 return -EINVAL;
3460 if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK))
3478 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3480 iter->idx++;
3496 (unsigned long)-1 : 0;
3498 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3503 iter->ent_size = ring_buffer_event_length(event);
3506 iter->ent_size = 0;
3514 struct trace_buffer *buffer = iter->array_buffer->buffer;
3517 int cpu_file = iter->cpu_file;
3519 int next_cpu = -1;
3552 next_size = iter->ent_size;
3556 iter->ent_size = next_size;
3578 * iter->tr is NULL when used with tp_printk, which makes
3581 if (!iter->tr || iter->fmt == static_fmt_buf)
3584 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3587 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3588 iter->fmt = tmp;
3602 if ((addr >= (unsigned long)iter->ent) &&
3603 (addr < (unsigned long)iter->ent + iter->ent_size))
3607 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3608 (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
3622 if (!iter->ent)
3625 trace_event = ftrace_find_event(iter->ent->type);
3630 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3634 if (within_module_core(addr, event->module))
3641 * ignore_event - Check dereferenced fields while writing to the seq buffer
3649 * by the time the user reads the trace. This would cause a bad memory
3655 * If it is found that a field is unsafe, it will write into the @iter->seq
3670 trace_event = ftrace_find_event(iter->ent->type);
3672 seq = &iter->seq;
3675 trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type);
3680 if (!(event->flags & TRACE_EVENT_FL_TEST_STR))
3690 /* Offsets are from the iter->ent that points to the raw event */
3691 ptr = iter->ent;
3697 if (!field->needs_test)
3700 str = *(const char **)(ptr + field->offset);
3707 * was saved at the time of the event, but may not be
3710 * instead. See samples/trace_events/trace-events-sample.h
3714 trace_event_name(event), field->name)) {
3716 trace_event_name(event), field->name);
3731 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3735 new_fmt = q = iter->fmt;
3737 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3741 q += iter->fmt - new_fmt;
3742 new_fmt = iter->fmt;
3748 if (p[-1] == '%') {
3770 int ent_size = iter->ent_size;
3774 * If called from ftrace_dump(), then the iter->temp buffer
3778 * used to add markers when two consecutive events' time
3781 if (iter->temp == static_temp_buf &&
3787 * call ring_buffer_peek() that may make the contents of iter->ent
3788 * undefined. Need to copy iter->ent now.
3790 if (iter->ent && iter->ent != iter->temp) {
3791 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3792 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3794 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3797 kfree(iter->temp);
3798 iter->temp = temp;
3799 iter->temp_size = iter->ent_size;
3801 memcpy(iter->temp, iter->ent, iter->ent_size);
3802 iter->ent = iter->temp;
3806 iter->ent_size = ent_size;
3814 iter->ent = __find_next_entry(iter, &iter->cpu,
3815 &iter->lost_events, &iter->ts);
3817 if (iter->ent)
3820 return iter->ent ? iter : NULL;
3825 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3826 &iter->lost_events);
3831 struct trace_iterator *iter = m->private;
3835 WARN_ON_ONCE(iter->leftover);
3840 if (iter->idx > i)
3843 if (iter->idx < 0)
3848 while (ent && iter->idx < i)
3851 iter->pos = *pos;
3862 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3876 if (ts >= iter->array_buffer->time_start)
3884 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3893 struct trace_iterator *iter = m->private;
3894 struct trace_array *tr = iter->tr;
3895 int cpu_file = iter->cpu_file;
3901 if (unlikely(tr->current_trace != iter->trace)) {
3902 /* Close iter->trace before switching to the new current tracer */
3903 if (iter->trace->close)
3904 iter->trace->close(iter);
3905 iter->trace = tr->current_trace;
3907 if (iter->trace->open)
3908 iter->trace->open(iter);
3913 if (iter->snapshot && iter->trace->use_max_tr)
3914 return ERR_PTR(-EBUSY);
3917 if (*pos != iter->pos) {
3918 iter->ent = NULL;
3919 iter->cpu = 0;
3920 iter->idx = -1;
3928 iter->leftover = 0;
3937 if (iter->leftover)
3940 l = *pos - 1;
3952 struct trace_iterator *iter = m->private;
3955 if (iter->snapshot && iter->trace->use_max_tr)
3959 trace_access_unlock(iter->cpu_file);
3969 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3973 * ones before the time stamp.
3975 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3976 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3981 ring_buffer_overrun_cpu(buf->buffer, cpu);
4009 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4021 get_total_entries(&tr->array_buffer, &total, &entries);
4028 seq_puts(m, "# _------=> CPU# \n"
4029 "# / _-----=> irqs-off/BH-disabled\n"
4030 "# | / _----=> need-resched \n"
4031 "# || / _---=> hardirq/softirq \n"
4032 "# ||| / _--=> preempt-depth \n"
4033 "# |||| / _-=> migrate-disable \n"
4035 "# cmd pid |||||| time | caller \n"
4045 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4057 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4070 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4071 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4072 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4073 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4074 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4076 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4084 struct array_buffer *buf = iter->array_buffer;
4085 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4086 struct tracer *type = iter->trace;
4089 const char *name = type->name;
4094 name, init_utsname()->release);
4095 seq_puts(m, "# -----------------------------------"
4096 "---------------------------------\n");
4099 nsecs_to_usecs(data->saved_latency),
4102 buf->cpu,
4116 seq_puts(m, "# -----------------\n");
4117 seq_printf(m, "# | task: %.16s-%d "
4119 data->comm, data->pid,
4120 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4121 data->policy, data->rt_priority);
4122 seq_puts(m, "# -----------------\n");
4124 if (data->critical_start) {
4126 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4127 trace_print_seq(m, &iter->seq);
4129 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4130 trace_print_seq(m, &iter->seq);
4139 struct trace_seq *s = &iter->seq;
4140 struct trace_array *tr = iter->tr;
4142 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4145 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4148 if (cpumask_available(iter->started) &&
4149 cpumask_test_cpu(iter->cpu, iter->started))
4152 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4155 if (cpumask_available(iter->started))
4156 cpumask_set_cpu(iter->cpu, iter->started);
4159 if (iter->idx > 1)
4161 iter->cpu);
4166 struct trace_array *tr = iter->tr;
4167 struct trace_seq *s = &iter->seq;
4168 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4172 entry = iter->ent;
4176 event = ftrace_find_event(entry->type);
4178 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4179 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4189 if (tr->trace_flags & TRACE_ITER_FIELDS)
4196 if ((tr->text_delta || tr->data_delta) &&
4197 event->type > __TRACE_LAST_TYPE)
4200 return event->funcs->trace(iter, sym_flags, event);
4203 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4210 struct trace_array *tr = iter->tr;
4211 struct trace_seq *s = &iter->seq;
4215 entry = iter->ent;
4217 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4219 entry->pid, iter->cpu, iter->ts);
4224 event = ftrace_find_event(entry->type);
4226 return event->funcs->raw(iter, 0, event);
4228 trace_seq_printf(s, "%d ?\n", entry->type);
4235 struct trace_array *tr = iter->tr;
4236 struct trace_seq *s = &iter->seq;
4241 entry = iter->ent;
4243 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4244 SEQ_PUT_HEX_FIELD(s, entry->pid);
4245 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4246 SEQ_PUT_HEX_FIELD(s, iter->ts);
4251 event = ftrace_find_event(entry->type);
4253 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4265 struct trace_array *tr = iter->tr;
4266 struct trace_seq *s = &iter->seq;
4270 entry = iter->ent;
4272 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4273 SEQ_PUT_FIELD(s, entry->pid);
4274 SEQ_PUT_FIELD(s, iter->cpu);
4275 SEQ_PUT_FIELD(s, iter->ts);
4280 event = ftrace_find_event(entry->type);
4281 return event ? event->funcs->binary(iter, 0, event) :
4291 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4292 cpu = iter->cpu_file;
4298 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4310 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4321 struct trace_array *tr = iter->tr;
4322 unsigned long trace_flags = tr->trace_flags;
4325 if (iter->lost_events) {
4326 if (iter->lost_events == (unsigned long)-1)
4327 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4328 iter->cpu);
4330 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4331 iter->cpu, iter->lost_events);
4332 if (trace_seq_has_overflowed(&iter->seq))
4336 if (iter->trace && iter->trace->print_line) {
4337 ret = iter->trace->print_line(iter);
4342 if (iter->ent->type == TRACE_BPUTS &&
4347 if (iter->ent->type == TRACE_BPRINT &&
4352 if (iter->ent->type == TRACE_PRINT &&
4371 struct trace_iterator *iter = m->private;
4372 struct trace_array *tr = iter->tr;
4378 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4381 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4387 struct trace_iterator *iter = m->private;
4388 struct trace_array *tr = iter->tr;
4389 unsigned long trace_flags = tr->trace_flags;
4394 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4404 print_func_help_header_irq(iter->array_buffer,
4407 print_func_help_header(iter->array_buffer, m,
4449 if (iter->tr->allocated_snapshot)
4455 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4470 if (iter->ent == NULL) {
4471 if (iter->tr) {
4472 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4476 if (iter->snapshot && trace_empty(iter))
4478 else if (iter->trace && iter->trace->print_header)
4479 iter->trace->print_header(m);
4483 } else if (iter->leftover) {
4488 ret = trace_print_seq(m, &iter->seq);
4490 /* ret should this time be zero, but you never know */
4491 iter->leftover = ret;
4496 iter->seq.full = 0;
4497 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4499 ret = trace_print_seq(m, &iter->seq);
4505 * -1 otherwise.
4507 iter->leftover = ret;
4519 if (inode->i_cdev) /* See trace_create_cpu_file() */
4520 return (long)inode->i_cdev - 1;
4541 if (iter->fmt != static_fmt_buf)
4542 kfree(iter->fmt);
4544 kfree(iter->temp);
4545 kfree(iter->buffer_iter);
4546 mutex_destroy(&iter->mutex);
4547 free_cpumask_var(iter->started);
4553 struct trace_array *tr = inode->i_private;
4558 return ERR_PTR(-ENODEV);
4562 return ERR_PTR(-ENOMEM);
4564 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4566 if (!iter->buffer_iter)
4570 * trace_find_next_entry() may need to save off iter->ent.
4571 * It will place it into the iter->temp buffer. As most
4574 * allocate a new buffer to adjust for the bigger iter->ent.
4577 iter->temp = kmalloc(128, GFP_KERNEL);
4578 if (iter->temp)
4579 iter->temp_size = 128;
4588 iter->fmt = NULL;
4589 iter->fmt_size = 0;
4592 iter->trace = tr->current_trace;
4594 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4597 iter->tr = tr;
4601 if (tr->current_trace->print_max || snapshot)
4602 iter->array_buffer = &tr->max_buffer;
4605 iter->array_buffer = &tr->array_buffer;
4606 iter->snapshot = snapshot;
4607 iter->pos = -1;
4608 iter->cpu_file = tracing_get_cpu(inode);
4609 mutex_init(&iter->mutex);
4612 if (iter->trace->open)
4613 iter->trace->open(iter);
4616 if (ring_buffer_overruns(iter->array_buffer->buffer))
4617 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4620 if (trace_clocks[tr->clock_id].in_ns)
4621 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4624 * If pause-on-trace is enabled, then stop the trace while
4627 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4630 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4632 iter->buffer_iter[cpu] =
4633 ring_buffer_read_prepare(iter->array_buffer->buffer,
4638 ring_buffer_read_start(iter->buffer_iter[cpu]);
4642 cpu = iter->cpu_file;
4643 iter->buffer_iter[cpu] =
4644 ring_buffer_read_prepare(iter->array_buffer->buffer,
4647 ring_buffer_read_start(iter->buffer_iter[cpu]);
4660 return ERR_PTR(-ENOMEM);
4671 filp->private_data = inode->i_private;
4686 struct trace_array *tr = inode->i_private;
4693 filp->private_data = inode->i_private;
4704 struct trace_event_file *file = inode->i_private;
4707 ret = tracing_check_open_get_tr(file->tr);
4714 if (file->flags & EVENT_FILE_FL_FREED) {
4715 trace_array_put(file->tr);
4716 ret = -ENODEV;
4725 filp->private_data = inode->i_private;
4732 struct trace_event_file *file = inode->i_private;
4734 trace_array_put(file->tr);
4754 struct trace_array *tr = inode->i_private;
4755 struct seq_file *m = file->private_data;
4759 if (!(file->f_mode & FMODE_READ)) {
4765 iter = m->private;
4769 if (iter->buffer_iter[cpu])
4770 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4773 if (iter->trace && iter->trace->close)
4774 iter->trace->close(iter);
4776 if (!iter->snapshot && tr->stop_count)
4792 struct trace_array *tr = inode->i_private;
4800 struct trace_array *tr = inode->i_private;
4809 struct trace_array *tr = inode->i_private;
4818 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4820 struct array_buffer *trace_buf = &tr->array_buffer;
4823 if (tr->current_trace->print_max)
4824 trace_buf = &tr->max_buffer;
4833 if (file->f_mode & FMODE_READ) {
4837 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4838 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4857 if (tr->range_addr_start && t->use_max_tr)
4860 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4868 t = t->next;
4876 struct trace_array *tr = m->private;
4882 t = get_tracer_for_array(tr, t->next);
4889 struct trace_array *tr = m->private;
4914 seq_puts(m, t->name);
4915 if (t->next)
4932 struct trace_array *tr = inode->i_private;
4946 m = file->private_data;
4947 m->private = tr;
4954 struct trace_array *tr = inode->i_private;
4971 if (file->f_mode & FMODE_READ)
4974 file->f_pos = ret = 0;
5000 struct trace_array *tr = file_inode(filp)->i_private;
5005 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5008 return -ENOMEM;
5011 cpumask_pr_args(tr->tracing_cpumask));
5013 count = -EINVAL;
5030 return -EINVAL;
5033 arch_spin_lock(&tr->max_lock);
5039 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5041 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5042 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5044 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5047 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5049 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5050 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5052 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5056 arch_spin_unlock(&tr->max_lock);
5059 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5068 struct trace_array *tr = file_inode(filp)->i_private;
5073 return -EINVAL;
5076 return -ENOMEM;
5107 struct trace_array *tr = m->private;
5113 tracer_flags = tr->current_trace->flags->val;
5114 trace_opts = tr->current_trace->flags->opts;
5117 if (tr->trace_flags & (1 << i))
5137 struct tracer *trace = tracer_flags->trace;
5140 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5145 tracer_flags->val &= ~opts->bit;
5147 tracer_flags->val |= opts->bit;
5154 struct tracer *trace = tr->current_trace;
5155 struct tracer_flags *tracer_flags = trace->flags;
5159 for (i = 0; tracer_flags->opts[i].name; i++) {
5160 opts = &tracer_flags->opts[i];
5162 if (strcmp(cmp, opts->name) == 0)
5163 return __set_tracer_option(tr, trace->flags, opts, neg);
5166 return -EINVAL;
5172 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5173 return -1;
5186 if (!!(tr->trace_flags & mask) == !!enabled)
5190 if (tr->current_trace->flag_changed)
5191 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5192 return -EINVAL;
5203 return -EINVAL;
5214 tr->trace_flags |= mask;
5216 tr->trace_flags &= ~mask;
5224 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5225 return -ENOMEM;
5238 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5240 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5271 ret = match_string(trace_options, -1, cmp);
5307 *(buf - 1) = ',';
5315 struct seq_file *m = filp->private_data;
5316 struct trace_array *tr = m->private;
5321 return -EINVAL;
5324 return -EFAULT;
5339 struct trace_array *tr = inode->i_private;
5346 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5362 "tracing mini-HOWTO:\n\n"
5366 "\t e.g. mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n"
5368 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5370 " trace\t\t\t- The static contents of the buffer\n"
5372 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5373 " current_tracer\t- function and latency tracers\n"
5374 " available_tracers\t- list of configured tracers for current_tracer\n"
5375 " error_log\t- error log for failed commands (that support it)\n"
5376 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5377 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5378 " trace_clock\t\t- change the clock used to order events\n"
5382 " uptime: Jiffy counter from time of boot\n"
5385 " x86-tsc: TSC cycle counter\n"
5387 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5388 " delta: Delta difference against a buffer-wide timestamp\n"
5390 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5391 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5392 " tracing_cpumask\t- Limit which CPUs to trace\n"
5393 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5394 "\t\t\t Remove sub-buffer with rmdir\n"
5395 " trace_options\t\t- Set format or modify how tracing happens\n"
5398 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5400 "\n available_filter_functions - list of functions that can be filtered on\n"
5401 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5403 "\t accepts: func_full_name or glob-matching-pattern\n"
5405 "\t Format: :mod:<module-name>\n"
5422 "\t The first one will disable tracing every time do_fault is hit\n"
5424 "\t The first time do trap is hit and it disables tracing, the\n"
5432 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5438 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5440 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5444 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5445 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5446 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5449 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5454 " stack_trace\t\t- Shows the max stack trace when active\n"
5455 " stack_max_size\t- Shows current max stack size that was traced\n"
5459 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5464 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5468 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5472 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5477 "\t accepts: event-definitions (one definition per line)\n"
5483 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5489 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5490 "\t -:[<group>/][<event>]\n"
5499 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5503 "\t <argname>[->field[->field|.field...]],\n"
5508 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5511 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5512 "\t symstr, %pd/%pD, <type>\\[<array-size>\\]\n"
5519 "\t of the <attached-group>/<attached-event>.\n"
5521 " set_event\t\t- Enables events by name written into it\n"
5523 " events/\t\t- Directory containing all trace event subsystems:\n"
5524 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5525 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5526 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5528 " filter\t\t- If set, only events passing filter are traced\n"
5529 " events/<system>/<event>/\t- Directory containing control files for\n"
5531 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5532 " filter\t\t- If set, only events passing filter are traced\n"
5533 " trigger\t\t- If set, a command to perform when event is hit\n"
5555 "\t The first disables tracing every time block_unplug is hit.\n"
5567 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5579 "\t common_timestamp - to record current timestamp\n"
5580 "\t common_cpu - to record the CPU the event happened on\n"
5583 "\t - a reference to a field e.g. x=current_timestamp,\n"
5584 "\t - a reference to another variable e.g. y=$x,\n"
5585 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5586 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5588 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5617 "\t .sym-offset display an address as a symbol and offset\n"
5624 "\t .graph display a bar-graph of a value\n\n"
5636 "\t already-attached hist trigger. The syntax is analogous to\n"
5642 "\t onmatch(matching.event) - invoke on addition or update\n"
5643 "\t onmax(var) - invoke if var exceeds current max\n"
5644 "\t onchange(var) - invoke action if var changes\n\n"
5646 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5647 "\t save(field,...) - save current event fields\n"
5649 "\t snapshot() - snapshot the trace buffer\n\n"
5652 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5677 if (!ptr->map.eval_string) {
5678 if (ptr->tail.next) {
5679 ptr = ptr->tail.next;
5735 ptr->map.eval_string, ptr->map.eval_value,
5736 ptr->map.system);
5770 return ptr + ptr->head.length + 1;
5803 if (!ptr->tail.next)
5805 ptr = ptr->tail.next;
5808 ptr->tail.next = map_array;
5810 map_array->head.mod = mod;
5811 map_array->head.length = len;
5815 map_array->map = **map;
5852 struct trace_array *tr = filp->private_data;
5857 r = sprintf(buf, "%s\n", tr->current_trace->name);
5865 tracing_reset_online_cpus(&tr->array_buffer);
5866 return t->init(tr);
5874 per_cpu_ptr(buf->data, cpu)->entries = val;
5880 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
5882 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
5895 ret = ring_buffer_resize(trace_buf->buffer,
5896 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5899 per_cpu_ptr(trace_buf->data, cpu)->entries =
5900 per_cpu_ptr(size_buf->data, cpu)->entries;
5903 ret = ring_buffer_resize(trace_buf->buffer,
5904 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5906 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5907 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5927 if (!tr->array_buffer.buffer)
5933 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5938 if (!tr->allocated_snapshot)
5941 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5943 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5944 &tr->array_buffer, cpu);
5966 update_buffer_entries(&tr->max_buffer, cpu);
5971 update_buffer_entries(&tr->array_buffer, cpu);
5985 return -EINVAL;
5993 if (!tr->text_delta && !tr->data_delta)
6002 tracing_reset_all_cpus(&tr->array_buffer);
6005 tr->text_delta = 0;
6006 tr->data_delta = 0;
6010 * tracing_update_buffers - used by tracing facility to expand ring buffers
6028 if (!tr->ring_buffer_expanded)
6047 if (tr->current_trace == &nop_trace)
6050 tr->current_trace->enabled--;
6052 if (tr->current_trace->reset)
6053 tr->current_trace->reset(tr);
6055 tr->current_trace = &nop_trace;
6063 if (!tr->dir)
6085 if (!tr->ring_buffer_expanded) {
6093 for (t = trace_types; t; t = t->next) {
6094 if (strcmp(t->name, buf) == 0)
6098 return -EINVAL;
6100 if (t == tr->current_trace)
6104 if (t->use_max_tr) {
6106 arch_spin_lock(&tr->max_lock);
6107 ret = tr->cond_snapshot ? -EBUSY : 0;
6108 arch_spin_unlock(&tr->max_lock);
6115 if (system_state < SYSTEM_RUNNING && t->noboot) {
6117 t->name);
6118 return -EINVAL;
6123 return -EINVAL;
6126 if (tr->trace_ref)
6127 return -EBUSY;
6131 tr->current_trace->enabled--;
6133 if (tr->current_trace->reset)
6134 tr->current_trace->reset(tr);
6137 had_max_tr = tr->current_trace->use_max_tr;
6140 tr->current_trace = &nop_trace;
6142 if (had_max_tr && !t->use_max_tr) {
6155 if (!had_max_tr && t->use_max_tr) {
6161 tr->current_trace = &nop_trace;
6164 if (t->init) {
6168 if (t->use_max_tr)
6175 tr->current_trace = t;
6176 tr->current_trace->enabled++;
6186 struct trace_array *tr = filp->private_data;
6198 return -EFAULT;
6221 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6254 struct trace_array *tr = filp->private_data;
6262 if (tr->current_trace->update_thresh) {
6263 ret = tr->current_trace->update_thresh(tr);
6277 struct trace_array *tr = filp->private_data;
6279 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6286 struct trace_array *tr = filp->private_data;
6288 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6296 if (cpumask_empty(tr->pipe_cpumask)) {
6297 cpumask_setall(tr->pipe_cpumask);
6300 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6301 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6304 return -EBUSY;
6310 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6311 cpumask_clear(tr->pipe_cpumask);
6313 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6314 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6320 struct trace_array *tr = inode->i_private;
6338 ret = -ENOMEM;
6342 trace_seq_init(&iter->seq);
6343 iter->trace = tr->current_trace;
6345 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6346 ret = -ENOMEM;
6351 cpumask_setall(iter->started);
6353 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6354 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6357 if (trace_clocks[tr->clock_id].in_ns)
6358 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6360 iter->tr = tr;
6361 iter->array_buffer = &tr->array_buffer;
6362 iter->cpu_file = cpu;
6363 mutex_init(&iter->mutex);
6364 filp->private_data = iter;
6366 if (iter->trace->pipe_open)
6367 iter->trace->pipe_open(iter);
6371 tr->trace_ref++;
6388 struct trace_iterator *iter = file->private_data;
6389 struct trace_array *tr = inode->i_private;
6393 tr->trace_ref--;
6395 if (iter->trace->pipe_close)
6396 iter->trace->pipe_close(iter);
6397 close_pipe_on_cpu(tr, iter->cpu_file);
6411 struct trace_array *tr = iter->tr;
6414 if (trace_buffer_iter(iter, iter->cpu_file))
6417 if (tr->trace_flags & TRACE_ITER_BLOCK)
6423 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6424 filp, poll_table, iter->tr->buffer_percent);
6430 struct trace_iterator *iter = filp->private_data;
6435 /* Must be called with iter->mutex held. */
6438 struct trace_iterator *iter = filp->private_data;
6443 if ((filp->f_flags & O_NONBLOCK)) {
6444 return -EAGAIN;
6454 * iter->pos will be 0 if we haven't read anything.
6456 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6459 mutex_unlock(&iter->mutex);
6463 mutex_lock(&iter->mutex);
6479 struct trace_iterator *iter = filp->private_data;
6487 guard(mutex)(&iter->mutex);
6490 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6491 if (sret != -EBUSY)
6494 trace_seq_init(&iter->seq);
6496 if (iter->trace->read) {
6497 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6512 cnt = TRACE_SEQ_BUFFER_SIZE - 1;
6516 cpumask_clear(iter->started);
6517 trace_seq_init(&iter->seq);
6520 trace_access_lock(iter->cpu_file);
6523 int save_len = iter->seq.seq.len;
6529 * trace_seq_to_user() will return -EBUSY because save_len == 0,
6531 * this event next time, resulting in an infinite loop.
6534 iter->seq.full = 0;
6535 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6541 iter->seq.seq.len = save_len;
6547 if (trace_seq_used(&iter->seq) >= cnt)
6555 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6556 iter->ent->type);
6558 trace_access_unlock(iter->cpu_file);
6562 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6563 if (iter->seq.readpos >= trace_seq_used(&iter->seq))
6564 trace_seq_init(&iter->seq);
6570 if (sret == -EBUSY)
6579 __free_page(spd->pages[idx]);
6589 /* Seq buffer is page-sized, exactly what we need. */
6591 save_len = iter->seq.seq.len;
6594 if (trace_seq_has_overflowed(&iter->seq)) {
6595 iter->seq.seq.len = save_len;
6601 * be set if the iter->seq overflowed. But check it
6605 iter->seq.seq.len = save_len;
6609 count = trace_seq_used(&iter->seq) - save_len;
6612 iter->seq.seq.len = save_len;
6618 rem -= count;
6621 iter->ent = NULL;
6637 struct trace_iterator *iter = filp->private_data;
6651 return -ENOMEM;
6653 mutex_lock(&iter->mutex);
6655 if (iter->trace->splice_read) {
6656 ret = iter->trace->splice_read(iter, filp,
6666 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6667 ret = -EFAULT;
6672 trace_access_lock(iter->cpu_file);
6683 ret = trace_seq_to_buffer(&iter->seq,
6685 trace_seq_used(&iter->seq));
6691 spd.partial[i].len = trace_seq_used(&iter->seq);
6693 trace_seq_init(&iter->seq);
6696 trace_access_unlock(iter->cpu_file);
6698 mutex_unlock(&iter->mutex);
6711 mutex_unlock(&iter->mutex);
6720 struct trace_array *tr = inode->i_private;
6738 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6739 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6746 if (!tr->ring_buffer_expanded)
6755 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6768 struct trace_array *tr = inode->i_private;
6778 return -EINVAL;
6795 struct trace_array *tr = filp->private_data;
6802 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6803 if (!tr->ring_buffer_expanded)
6806 if (tr->ring_buffer_expanded)
6818 struct trace_array *tr = filp->private_data;
6824 seq_buf_printf(&seq, "text delta:\t%ld\n", tr->text_delta);
6825 seq_buf_printf(&seq, "data delta:\t%ld\n", tr->data_delta);
6832 struct trace_array *tr = inode->i_private;
6840 ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu);
6863 struct trace_array *tr = inode->i_private;
6866 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6882 struct trace_array *tr = filp->private_data;
6894 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6897 return -EINVAL;
6899 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6900 return -EINVAL;
6903 return -EINVAL;
6914 size += FAULTED_SIZE - cnt;
6916 buffer = tr->array_buffer.buffer;
6927 return -EBADF;
6928 cnt = ring_buffer_max_event_size(buffer) - meta_size;
6931 return -EBADF;
6936 return -EBADF;
6940 entry->ip = _THIS_IP_;
6942 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6944 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6946 written = -EFAULT;
6950 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6952 entry->buf[cnt] = '\0';
6953 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
6956 if (entry->buf[cnt - 1] != '\n') {
6957 entry->buf[cnt] = '\n';
6958 entry->buf[cnt + 1] = '\0';
6960 entry->buf[cnt] = '\0';
6967 event_triggers_post_call(tr->trace_marker_file, tt);
6976 struct trace_array *tr = filp->private_data;
6987 return -EINVAL;
6989 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6990 return -EINVAL;
6994 return -EINVAL;
6998 size += FAULT_SIZE_ID - cnt;
7000 buffer = tr->array_buffer.buffer;
7003 return -EINVAL;
7009 return -EBADF;
7013 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7015 entry->id = -1;
7016 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7017 written = -EFAULT;
7028 struct trace_array *tr = m->private;
7034 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7035 i == tr->clock_id ? "]" : "");
7050 return -EINVAL;
7054 tr->clock_id = i;
7056 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7062 tracing_reset_online_cpus(&tr->array_buffer);
7065 if (tr->max_buffer.buffer)
7066 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7067 tracing_reset_online_cpus(&tr->max_buffer);
7078 struct seq_file *m = filp->private_data;
7079 struct trace_array *tr = m->private;
7085 return -EINVAL;
7088 return -EFAULT;
7105 struct trace_array *tr = inode->i_private;
7112 ret = single_open(file, tracing_clock_show, inode->i_private);
7121 struct trace_array *tr = m->private;
7125 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7137 struct trace_array *tr = inode->i_private;
7144 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7166 if (set && tr->no_filter_buffering_ref++)
7170 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
7171 return -EINVAL;
7173 --tr->no_filter_buffering_ref;
7190 struct trace_array *tr = inode->i_private;
7199 if (file->f_mode & FMODE_READ) {
7205 ret = -ENOMEM;
7216 iter->tr = tr;
7217 iter->array_buffer = &tr->max_buffer;
7218 iter->cpu_file = tracing_get_cpu(inode);
7219 m->private = iter;
7220 file->private_data = m;
7238 struct seq_file *m = filp->private_data;
7239 struct trace_iterator *iter = m->private;
7240 struct trace_array *tr = iter->tr;
7254 if (tr->current_trace->use_max_tr)
7255 return -EBUSY;
7258 arch_spin_lock(&tr->max_lock);
7259 if (tr->cond_snapshot)
7260 ret = -EBUSY;
7261 arch_spin_unlock(&tr->max_lock);
7268 if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
7269 return -EINVAL;
7270 if (tr->allocated_snapshot)
7274 /* Only allow per-cpu swap if the ring buffer supports it */
7276 if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
7277 return -EINVAL;
7279 if (tr->allocated_snapshot)
7280 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7281 &tr->array_buffer, iter->cpu_file);
7288 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7293 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7299 if (tr->allocated_snapshot) {
7300 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7301 tracing_reset_online_cpus(&tr->max_buffer);
7303 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7318 struct seq_file *m = file->private_data;
7323 if (file->f_mode & FMODE_READ)
7328 kfree(m->private);
7351 info = filp->private_data;
7353 if (info->iter.trace->use_max_tr) {
7355 return -EBUSY;
7358 info->iter.snapshot = true;
7359 info->iter.array_buffer = &info->iter.tr->max_buffer;
7481 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7488 * The filp->private_data must point to a trace_min_max_param structure that
7489 * defines where to write the value, the min and the max acceptable values,
7495 struct trace_min_max_param *param = filp->private_data;
7500 return -EFAULT;
7506 if (param->lock)
7507 mutex_lock(param->lock);
7509 if (param->min && val < *param->min)
7510 err = -EINVAL;
7512 if (param->max && val > *param->max)
7513 err = -EINVAL;
7516 *param->val = val;
7518 if (param->lock)
7519 mutex_unlock(param->lock);
7528 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7535 * The filp->private_data must point to a trace_min_max_param struct with valid
7541 struct trace_min_max_param *param = filp->private_data;
7547 return -EFAULT;
7549 val = *param->val;
7571 const char **errs; /* ptr to loc-specific array of err strings */
7572 u8 type; /* index into errs -> specific err string */
7592 return ERR_PTR(-ENOMEM);
7594 err->cmd = kzalloc(len, GFP_KERNEL);
7595 if (!err->cmd) {
7597 return ERR_PTR(-ENOMEM);
7605 kfree(err->cmd);
7615 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7617 if (PTR_ERR(err) != -ENOMEM)
7618 tr->n_err_log_entries++;
7624 return ERR_PTR(-ENOMEM);
7625 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7626 kfree(err->cmd);
7627 err->cmd = cmd;
7628 list_del(&err->list);
7634 * err_pos - find the position of a string within a command for error careting
7654 return found - cmd;
7660 * tracing_log_err - write an error to the tracing error log
7664 * @errs: The array of loc-specific static error strings
7681 * produce a static error string - this string is not copied and saved
7682 * when the error is logged - only a pointer to it is saved. See
7701 if (PTR_ERR(err) == -ENOMEM)
7704 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7705 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7707 err->info.errs = errs;
7708 err->info.type = type;
7709 err->info.pos = pos;
7710 err->info.ts = local_clock();
7712 list_add_tail(&err->list, &tr->err_log);
7720 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7721 list_del(&err->list);
7725 tr->n_err_log_entries = 0;
7731 struct trace_array *tr = m->private;
7735 return seq_list_start(&tr->err_log, *pos);
7740 struct trace_array *tr = m->private;
7742 return seq_list_next(v, &tr->err_log, pos);
7754 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7766 const char *err_text = err->info.errs[err->info.type];
7767 u64 sec = err->info.ts;
7772 err->loc, err_text);
7773 seq_printf(m, "%s", err->cmd);
7774 tracing_err_log_show_pos(m, err->info.pos);
7789 struct trace_array *tr = inode->i_private;
7797 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7800 if (file->f_mode & FMODE_READ) {
7803 struct seq_file *m = file->private_data;
7804 m->private = tr;
7821 struct trace_array *tr = inode->i_private;
7825 if (file->f_mode & FMODE_READ)
7841 struct trace_array *tr = inode->i_private;
7852 return -ENOMEM;
7857 info->iter.tr = tr;
7858 info->iter.cpu_file = tracing_get_cpu(inode);
7859 info->iter.trace = tr->current_trace;
7860 info->iter.array_buffer = &tr->array_buffer;
7861 info->spare = NULL;
7863 info->read = (unsigned int)-1;
7865 filp->private_data = info;
7867 tr->trace_ref++;
7881 struct ftrace_buffer_info *info = filp->private_data;
7882 struct trace_iterator *iter = &info->iter;
7891 struct ftrace_buffer_info *info = filp->private_data;
7892 struct trace_iterator *iter = &info->iter;
7902 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7903 return -EBUSY;
7906 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
7909 if (info->spare) {
7910 if (page_size != info->spare_size) {
7911 ring_buffer_free_read_page(iter->array_buffer->buffer,
7912 info->spare_cpu, info->spare);
7913 info->spare = NULL;
7917 if (!info->spare) {
7918 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7919 iter->cpu_file);
7920 if (IS_ERR(info->spare)) {
7921 ret = PTR_ERR(info->spare);
7922 info->spare = NULL;
7924 info->spare_cpu = iter->cpu_file;
7925 info->spare_size = page_size;
7928 if (!info->spare)
7932 if (info->read < page_size)
7936 trace_access_lock(iter->cpu_file);
7937 ret = ring_buffer_read_page(iter->array_buffer->buffer,
7938 info->spare,
7940 iter->cpu_file, 0);
7941 trace_access_unlock(iter->cpu_file);
7944 if (trace_empty(iter) && !iter->closed) {
7945 if ((filp->f_flags & O_NONBLOCK))
7946 return -EAGAIN;
7957 info->read = 0;
7959 size = page_size - info->read;
7962 trace_data = ring_buffer_read_page_data(info->spare);
7963 ret = copy_to_user(ubuf, trace_data + info->read, size);
7965 return -EFAULT;
7967 size -= ret;
7970 info->read += size;
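This read path backs the per_cpu/cpuN/trace_pipe_raw files, where each successful read() hands back one whole sub-buffer image. A hedged user-space sketch, assuming the usual tracefs mount point and a buffer at least as large as the current sub-buffer size:

	/* Sketch: consuming raw sub-buffers from one CPU; error handling omitted. */
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		char page[65536];	/* must be >= the current sub-buffer size */
		int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
		ssize_t n;

		while ((n = read(fd, page, sizeof(page))) > 0)
			;	/* hand the binary sub-buffer to a parser, e.g. libtraceevent */
		close(fd);
		return 0;
	}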
7977 struct ftrace_buffer_info *info = file->private_data;
7978 struct trace_iterator *iter = &info->iter;
7980 iter->closed = true;
7982 (void)atomic_fetch_inc_release(&iter->wait_index);
7984 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
7991 struct ftrace_buffer_info *info = file->private_data;
7992 struct trace_iterator *iter = &info->iter;
7996 iter->tr->trace_ref--;
7998 __trace_array_put(iter->tr);
8000 if (info->spare)
8001 ring_buffer_free_read_page(iter->array_buffer->buffer,
8002 info->spare_cpu, info->spare);
8019 if (!refcount_dec_and_test(&ref->refcount))
8021 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8028 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8031 buf->private = 0;
8037 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8039 if (refcount_read(&ref->refcount) > INT_MAX/2)
8042 refcount_inc(&ref->refcount);
8059 (struct buffer_ref *)spd->partial[i].private;
8062 spd->partial[i].private = 0;
8070 struct ftrace_buffer_info *info = file->private_data;
8071 struct trace_iterator *iter = &info->iter;
8088 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8089 return -EBUSY;
8092 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8093 if (*ppos & (page_size - 1))
8094 return -EINVAL;
8096 if (len & (page_size - 1)) {
8098 return -EINVAL;
8099 len &= (~(page_size - 1));
8103 return -ENOMEM;
8106 trace_access_lock(iter->cpu_file);
8107 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8109 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
8115 ret = -ENOMEM;
8119 refcount_set(&ref->refcount, 1);
8120 ref->buffer = iter->array_buffer->buffer;
8121 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8122 if (IS_ERR(ref->page)) {
8123 ret = PTR_ERR(ref->page);
8124 ref->page = NULL;
8128 ref->cpu = iter->cpu_file;
8130 r = ring_buffer_read_page(ref->buffer, ref->page,
8131 len, iter->cpu_file, 1);
8133 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8134 ref->page);
8139 page = virt_to_page(ring_buffer_read_page_data(ref->page));
8148 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8151 trace_access_unlock(iter->cpu_file);
8163 ret = -EAGAIN;
8164 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8167 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8172 if (!tracer_tracing_is_on(iter->tr))
8175 /* Iterate one more time to collect any new data then exit */
8190 struct ftrace_buffer_info *info = file->private_data;
8191 struct trace_iterator *iter = &info->iter;
8195 if (!(file->f_flags & O_NONBLOCK)) {
8196 err = ring_buffer_wait(iter->array_buffer->buffer,
8197 iter->cpu_file,
8198 iter->tr->buffer_percent,
8204 return ring_buffer_map_get_reader(iter->array_buffer->buffer,
8205 iter->cpu_file);
8207 return -ENOTTY;
8217 (void)atomic_fetch_inc_release(&iter->wait_index);
8219 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8235 spin_lock(&tr->snapshot_trigger_lock);
8237 if (tr->snapshot || tr->mapped == UINT_MAX)
8238 err = -EBUSY;
8240 tr->mapped++;
8242 spin_unlock(&tr->snapshot_trigger_lock);
8244 /* Wait for update_max_tr() to observe iter->tr->mapped */
8245 if (tr->mapped == 1)
8253 spin_lock(&tr->snapshot_trigger_lock);
8254 if (!WARN_ON(!tr->mapped))
8255 tr->mapped--;
8256 spin_unlock(&tr->snapshot_trigger_lock);
8265 struct ftrace_buffer_info *info = vma->vm_file->private_data;
8266 struct trace_iterator *iter = &info->iter;
8268 WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
8269 put_snapshot_map(iter->tr);
8278 struct ftrace_buffer_info *info = filp->private_data;
8279 struct trace_iterator *iter = &info->iter;
8283 if (iter->tr->flags & TRACE_ARRAY_FL_BOOT)
8284 return -ENODEV;
8286 ret = get_snapshot_map(iter->tr);
8290 ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
8292 put_snapshot_map(iter->tr);
8294 vma->vm_ops = &tracing_buffers_vmops;
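Together with the TRACE_MMAP_IOCTL_GET_READER ioctl handled a little earlier, this mmap hook lets user space map the ring buffer instead of copying it. A rough user-space sketch against the <linux/trace_mmap.h> UAPI, assuming the usual tracefs path; only the meta page is shown and mapping of the data sub-buffers is omitted:

	/* Sketch: mapping a per-CPU buffer's meta page and advancing the reader. */
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/trace_mmap.h>

	int example_map_cpu0(void)
	{
		int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
		struct trace_buffer_meta *meta;

		meta = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
		if (meta == MAP_FAILED)
			return -1;
		/* ... map meta->nr_subbufs data sub-buffers at the following offsets ... */
		ioctl(fd, TRACE_MMAP_IOCTL_GET_READER);	/* swap in a fresh reader sub-buffer */
		return 0;
	}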
8315 struct trace_array *tr = inode->i_private;
8316 struct array_buffer *trace_buf = &tr->array_buffer;
8325 return -ENOMEM;
8329 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8332 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8335 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8338 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8341 if (trace_clocks[tr->clock_id].in_ns) {
8343 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8348 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8354 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8357 ring_buffer_time_stamp(trace_buf->buffer));
8360 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8363 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8367 s->buffer, trace_seq_used(s));
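The resulting per_cpu/cpuN/stats file is a small key/value report; an illustrative (made-up) example of its shape:

	entries: 1024
	overrun: 0
	commit overrun: 0
	bytes: 46732
	oldest event ts:  5348.887237
	now ts:  5349.287237
	dropped events: 0
	read events: 1024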
8396 return -ENOMEM;
8400 "ftrace boot update time = %llu (ns)\n"
8401 "ftrace module total update time = %llu (ns)\n",
8445 (*count)--;
8482 return -ENOMEM;
8522 void *count = (void *)-1;
8527 return -ENODEV;
8531 return -EINVAL;
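The count handling above belongs to the function-probe form of snapshot: when no count is supplied, the (void *)-1 default keeps the probe armed indefinitely, while a numeric count (for instance writing 'schedule:snapshot:1' to set_ftrace_filter) is decremented on each hit and the probe stops snapshotting once it reaches zero.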
8586 if (WARN_ON(!tr->dir))
8587 return ERR_PTR(-ENODEV);
8590 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8594 return tr->dir;
8601 if (tr->percpu_dir)
8602 return tr->percpu_dir;
8608 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8610 MEM_FAIL(!tr->percpu_dir,
8613 return tr->percpu_dir;
8623 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8661 if (tr->range_addr_start)
8665 if (!tr->range_addr_start) {
8684 struct trace_option_dentry *topt = filp->private_data;
8687 if (topt->flags->val & topt->opt->bit)
8699 struct trace_option_dentry *topt = filp->private_data;
8708 return -EINVAL;
8710 if (!!(topt->flags->val & topt->opt->bit) != val) {
8712 ret = __set_tracer_option(topt->tr, topt->flags,
8713 topt->opt, !val);
8726 struct trace_option_dentry *topt = inode->i_private;
8729 ret = tracing_check_open_get_tr(topt->tr);
8733 filp->private_data = inode->i_private;
8739 struct trace_option_dentry *topt = file->private_data;
8741 trace_array_put(topt->tr);
8772 * ptr - idx == &index[0]
8782 *ptr = container_of(data - *pindex, struct trace_array,
8790 void *tr_index = filp->private_data;
8797 if (tr->trace_flags & (1 << index))
8809 void *tr_index = filp->private_data;
8822 return -EINVAL;
8865 if (tr->options)
8866 return tr->options;
8872 tr->options = tracefs_create_dir("options", d_tracer);
8873 if (!tr->options) {
8878 return tr->options;
8893 topt->flags = flags;
8894 topt->opt = opt;
8895 topt->tr = tr;
8897 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8915 flags = tracer->flags;
8917 if (!flags || !flags->opts)
8927 for (i = 0; i < tr->nr_topts; i++) {
8929 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8933 opts = flags->opts;
8942 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8949 tr->topts = tr_topts;
8950 tr->topts[tr->nr_topts].tracer = tracer;
8951 tr->topts[tr->nr_topts].topts = topts;
8952 tr->nr_topts++;
8974 (void *)&tr->trace_flags_index[index],
8999 struct trace_array *tr = filp->private_data;
9013 struct trace_array *tr = filp->private_data;
9014 struct trace_buffer *buffer = tr->array_buffer.buffer;
9028 if (tr->current_trace->start)
9029 tr->current_trace->start(tr);
9032 if (tr->current_trace->stop)
9033 tr->current_trace->stop(tr);
9057 struct trace_array *tr = filp->private_data;
9061 r = tr->buffer_percent;
9071 struct trace_array *tr = filp->private_data;
9080 return -EINVAL;
9082 tr->buffer_percent = val;
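buffer_percent is the watermark used when waking blocked readers: writing 0 wakes them as soon as any data is present, 100 only once the buffer is full, and anything in between waits for that fill percentage; new instances default to 50, as set further down in this listing.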
9100 struct trace_array *tr = filp->private_data;
9106 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9118 struct trace_array *tr = filp->private_data;
9132 order = fls(pages - 1);
9136 return -EINVAL;
9141 old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9145 ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
9151 if (!tr->allocated_snapshot)
9154 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
9157 cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
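The order = fls(pages - 1) step rounds the requested size up to a power-of-two number of system pages: asking for two pages gives fls(1) = 1, i.e. 2^1 pages (8 KB with 4 KB pages). Values outside the supported order range are rejected with -EINVAL, and if resizing the snapshot buffer fails, the recovery path above puts the primary buffer back to old_order.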
9204 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9206 buf->tr = tr;
9208 if (tr->range_addr_start && tr->range_addr_size) {
9209 buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
9210 tr->range_addr_start,
9211 tr->range_addr_size);
9213 ring_buffer_last_boot_delta(buf->buffer,
9214 &tr->text_delta, &tr->data_delta);
9219 tr->mapped++;
9221 buf->buffer = ring_buffer_alloc(size, rb_flags);
9223 if (!buf->buffer)
9224 return -ENOMEM;
9226 buf->data = alloc_percpu(struct trace_array_cpu);
9227 if (!buf->data) {
9228 ring_buffer_free(buf->buffer);
9229 buf->buffer = NULL;
9230 return -ENOMEM;
9234 set_buffer_entries(&tr->array_buffer,
9235 ring_buffer_size(tr->array_buffer.buffer, 0));
9242 if (buf->buffer) {
9243 ring_buffer_free(buf->buffer);
9244 buf->buffer = NULL;
9245 free_percpu(buf->data);
9246 buf->data = NULL;
9254 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9260 if (tr->range_addr_start)
9263 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9266 free_trace_buffer(&tr->array_buffer);
9267 return -ENOMEM;
9269 tr->allocated_snapshot = allocate_snapshot;
9282 free_trace_buffer(&tr->array_buffer);
9285 free_trace_buffer(&tr->max_buffer);
9295 tr->trace_flags_index[i] = i;
9302 for (t = trace_types; t; t = t->next)
9320 if (tr->name && strcmp(tr->name, instance) == 0) {
9336 tr->ref++;
9346 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9347 if (!tr->dir)
9348 return -EINVAL;
9350 ret = event_trace_add_tracer(tr->dir, tr);
9352 tracefs_remove(tr->dir);
9356 init_tracer_tracefs(tr, tr->dir);
9370 ret = -ENOMEM;
9375 tr->name = kstrdup(name, GFP_KERNEL);
9376 if (!tr->name)
9379 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9382 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9386 tr->system_names = kstrdup_const(systems, GFP_KERNEL);
9387 if (!tr->system_names)
9392 tr->range_addr_start = range_addr_start;
9393 tr->range_addr_size = range_addr_size;
9395 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9397 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9399 raw_spin_lock_init(&tr->start_lock);
9401 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9403 spin_lock_init(&tr->snapshot_trigger_lock);
9405 tr->current_trace = &nop_trace;
9407 INIT_LIST_HEAD(&tr->systems);
9408 INIT_LIST_HEAD(&tr->events);
9409 INIT_LIST_HEAD(&tr->hist_vars);
9410 INIT_LIST_HEAD(&tr->err_log);
9413 INIT_LIST_HEAD(&tr->mod_events);
9436 list_add(&tr->list, &ftrace_trace_arrays);
9438 tr->ref++;
9445 free_cpumask_var(tr->pipe_cpumask);
9446 free_cpumask_var(tr->tracing_cpumask);
9447 kfree_const(tr->system_names);
9448 kfree(tr->name);
9467 ret = -EEXIST;
9469 return -EEXIST;
9504 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9528 if (tr->name && strcmp(tr->name, name) == 0) {
9529 tr->ref++;
9539 tr->ref++;
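This lookup (with fallback to instance creation) is also the entry point kernel modules use to get a private trace instance; a hedged sketch along the lines of samples/ftrace/sample-trace-array.c, with illustrative instance and event names:

	/* Sketch: a module creating or reusing its own trace instance. */
	static struct trace_array *tr;

	static int example_init(void)
	{
		tr = trace_array_get_by_name("example-instance", "sched");
		if (!tr)
			return -ENOMEM;

		trace_array_init_printk(tr);	/* required before trace_array_printk() */
		trace_array_set_clr_event(tr, "sched", "sched_switch", true);
		trace_array_printk(tr, _THIS_IP_, "instance is live\n");
		return 0;
	}

	static void example_exit(void)
	{
		trace_array_set_clr_event(tr, "sched", "sched_switch", false);
		trace_array_put(tr);		/* drop the reference taken by the lookup */
		trace_array_destroy(tr);	/* remove the instance if nothing else uses it */
	}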
9550 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9551 return -EBUSY;
9553 list_del(&tr->list);
9569 tracefs_remove(tr->dir);
9570 free_percpu(tr->last_func_repeats);
9574 for (i = 0; i < tr->nr_topts; i++) {
9575 kfree(tr->topts[i].topts);
9577 kfree(tr->topts);
9579 free_cpumask_var(tr->pipe_cpumask);
9580 free_cpumask_var(tr->tracing_cpumask);
9581 kfree_const(tr->system_names);
9582 kfree(tr->name);
9593 return -EINVAL;
9605 return -ENODEV;
9618 return -ENODEV;
9637 if (!tr->name)
9680 tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
9694 tr->buffer_percent = 50;
9711 if (tr->range_addr_start) {
9753 * tracing_init_dentry - initialize top level trace array
9765 return -EPERM;
9769 if (tr->dir)
9773 return -ENODEV;
9781 tr->dir = debugfs_create_automount("tracing", NULL,
9798 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9811 return -ENOMEM;
9843 if (n > sizeof(modname) - 1)
9852 if (!mod->num_trace_evals)
9862 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9871 if (!mod->num_trace_evals)
9879 if (map->head.mod == mod)
9882 last = &map->tail.next;
9883 map = map->tail.next;
9888 *last = trace_eval_jmp_to_tail(map)->tail.next;
9986 .priority = INT_MAX - 1,
9991 .priority = INT_MAX - 1,
10032 if (s->seq.len >= TRACE_MAX_PRINT)
10033 s->seq.len = TRACE_MAX_PRINT;
10040 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
10041 s->seq.len = s->seq.size - 1;
10044 s->buffer[s->seq.len] = 0;
10046 printk(KERN_TRACE "%s", s->buffer);
10053 iter->tr = tr;
10054 iter->trace = iter->tr->current_trace;
10055 iter->cpu_file = RING_BUFFER_ALL_CPUS;
10056 iter->array_buffer = &tr->array_buffer;
10058 if (iter->trace && iter->trace->open)
10059 iter->trace->open(iter);
10062 if (ring_buffer_overruns(iter->array_buffer->buffer))
10063 iter->iter_flags |= TRACE_FILE_ANNOTATE;
10066 if (trace_clocks[iter->tr->clock_id].in_ns)
10067 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
10070 iter->temp = static_temp_buf;
10071 iter->temp_size = STATIC_TEMP_BUF_SIZE;
10072 iter->fmt = static_fmt_buf;
10073 iter->fmt_size = STATIC_FMT_BUF_SIZE;
10094 * If the user does a sysrq-z, then they can re-enable
10105 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10108 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
10111 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
10121 printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);
10139 printk(KERN_TRACE "---------------------------------\n");
10161 printk(KERN_TRACE "---------------------------------\n");
10163 tr->trace_flags |= old_userobj;
10166 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10216 /* Only allow one dump user at a time. */
10256 return -ENOMEM;
10259 size = count - done;
10262 size = WRITE_BUFSIZE - 1;
10265 ret = -EFAULT;
10274 size = tmp - buf + 1;
10280 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
10282 WRITE_BUFSIZE - 2);
10283 ret = -EINVAL;
10360 boot_instance_info[boot_instance_index - 1] = '\0';
10462 tr->flags |= TRACE_ARRAY_FL_BOOT;
10463 tr->ref++;
10475 int ret = -ENOMEM;
10480 return -EPERM;
10523 ret = -ENOMEM;
10628 if (!tr->allocated_snapshot)