1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kprobes-based tracing events
4  *
5  * Created by Masami Hiramatsu <[email protected]>
6  *
7  */
8 #define pr_fmt(fmt)	"trace_kprobe: " fmt
9 
10 #include <linux/bpf-cgroup.h>
11 #include <linux/cleanup.h>
12 #include <linux/security.h>
13 #include <linux/module.h>
14 #include <linux/uaccess.h>
15 #include <linux/rculist.h>
16 #include <linux/error-injection.h>
17 
18 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
19 
20 #include "trace_dynevent.h"
21 #include "trace_kprobe_selftest.h"
22 #include "trace_probe.h"
23 #include "trace_probe_tmpl.h"
24 #include "trace_probe_kernel.h"
25 
26 #define KPROBE_EVENT_SYSTEM "kprobes"
27 #define KRETPROBE_MAXACTIVE_MAX 4096
28 
29 /* Kprobe early definition from command line */
30 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
31 
32 static int __init set_kprobe_boot_events(char *str)
33 {
34 	strscpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
35 	disable_tracing_selftest("running kprobe events");
36 
37 	return 1;
38 }
39 __setup("kprobe_event=", set_kprobe_boot_events);
40 
41 static int trace_kprobe_create(const char *raw_command);
42 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
43 static int trace_kprobe_release(struct dyn_event *ev);
44 static bool trace_kprobe_is_busy(struct dyn_event *ev);
45 static bool trace_kprobe_match(const char *system, const char *event,
46 			int argc, const char **argv, struct dyn_event *ev);
47 
48 static struct dyn_event_operations trace_kprobe_ops = {
49 	.create = trace_kprobe_create,
50 	.show = trace_kprobe_show,
51 	.is_busy = trace_kprobe_is_busy,
52 	.free = trace_kprobe_release,
53 	.match = trace_kprobe_match,
54 };
55 
56 /*
57  * Kprobe event core functions
58  */
59 struct trace_kprobe {
60 	struct dyn_event	devent;
61 	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
62 	unsigned long __percpu *nhit;
63 	const char		*symbol;	/* symbol name */
64 	struct trace_probe	tp;
65 };
66 
67 static bool is_trace_kprobe(struct dyn_event *ev)
68 {
69 	return ev->ops == &trace_kprobe_ops;
70 }
71 
72 static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
73 {
74 	return container_of(ev, struct trace_kprobe, devent);
75 }
76 
77 /**
78  * for_each_trace_kprobe - iterate over the trace_kprobe list
79  * @pos:	the struct trace_kprobe * for each entry
80  * @dpos:	the struct dyn_event * to use as a loop cursor
81  */
82 #define for_each_trace_kprobe(pos, dpos)	\
83 	for_each_dyn_event(dpos)		\
84 		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
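
/*
 * A minimal usage sketch of the iterator above (assumes the caller already
 * holds event_mutex, as register_trace_kprobe() does below):
 *
 *	struct dyn_event *pos;
 *	struct trace_kprobe *tk;
 *
 *	for_each_trace_kprobe(tk, pos)
 *		pr_info("kprobe event: %s\n", trace_probe_name(&tk->tp));
 */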
85 
86 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
87 {
88 	return tk->rp.handler != NULL;
89 }
90 
91 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
92 {
93 	return tk->symbol ? tk->symbol : "unknown";
94 }
95 
96 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
97 {
98 	return tk->rp.kp.offset;
99 }
100 
101 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
102 {
103 	return kprobe_gone(&tk->rp.kp);
104 }
105 
106 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
107 						 struct module *mod)
108 {
109 	int len = strlen(module_name(mod));
110 	const char *name = trace_kprobe_symbol(tk);
111 
112 	return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
113 }
114 
115 #ifdef CONFIG_MODULES
116 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
117 {
118 	char *p;
119 	bool ret;
120 
121 	if (!tk->symbol)
122 		return false;
123 	p = strchr(tk->symbol, ':');
124 	if (!p)
125 		return true;
126 	*p = '\0';
127 	rcu_read_lock_sched();
128 	ret = !!find_module(tk->symbol);
129 	rcu_read_unlock_sched();
130 	*p = ':';
131 
132 	return ret;
133 }
134 #else
135 static inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
136 {
137 	return false;
138 }
139 #endif
140 
141 static bool trace_kprobe_is_busy(struct dyn_event *ev)
142 {
143 	struct trace_kprobe *tk = to_trace_kprobe(ev);
144 
145 	return trace_probe_is_enabled(&tk->tp);
146 }
147 
148 static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
149 					    int argc, const char **argv)
150 {
151 	char buf[MAX_ARGSTR_LEN + 1];
152 
153 	if (!argc)
154 		return true;
155 
156 	if (!tk->symbol)
157 		snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
158 	else if (tk->rp.kp.offset)
159 		snprintf(buf, sizeof(buf), "%s+%u",
160 			 trace_kprobe_symbol(tk), tk->rp.kp.offset);
161 	else
162 		snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
163 	if (strcmp(buf, argv[0]))
164 		return false;
165 	argc--; argv++;
166 
167 	return trace_probe_match_command_args(&tk->tp, argc, argv);
168 }
169 
170 static bool trace_kprobe_match(const char *system, const char *event,
171 			int argc, const char **argv, struct dyn_event *ev)
172 {
173 	struct trace_kprobe *tk = to_trace_kprobe(ev);
174 
175 	return (event[0] == '\0' ||
176 		strcmp(trace_probe_name(&tk->tp), event) == 0) &&
177 	    (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
178 	    trace_kprobe_match_command_head(tk, argc, argv);
179 }
180 
181 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
182 {
183 	unsigned long nhit = 0;
184 	int cpu;
185 
186 	for_each_possible_cpu(cpu)
187 		nhit += *per_cpu_ptr(tk->nhit, cpu);
188 
189 	return nhit;
190 }
191 
192 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
193 {
194 	return !(list_empty(&tk->rp.kp.list) &&
195 		 hlist_unhashed(&tk->rp.kp.hlist));
196 }
197 
198 /* Return 0 if it fails to find the symbol address */
199 static nokprobe_inline
200 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
201 {
202 	unsigned long addr;
203 
204 	if (tk->symbol) {
205 		addr = (unsigned long)
206 			kallsyms_lookup_name(trace_kprobe_symbol(tk));
207 		if (addr)
208 			addr += tk->rp.kp.offset;
209 	} else {
210 		addr = (unsigned long)tk->rp.kp.addr;
211 	}
212 	return addr;
213 }
214 
215 static nokprobe_inline struct trace_kprobe *
216 trace_kprobe_primary_from_call(struct trace_event_call *call)
217 {
218 	struct trace_probe *tp;
219 
220 	tp = trace_probe_primary_from_call(call);
221 	if (WARN_ON_ONCE(!tp))
222 		return NULL;
223 
224 	return container_of(tp, struct trace_kprobe, tp);
225 }
226 
227 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
228 {
229 	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
230 
231 	return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
232 			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
233 			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
234 }
235 
236 bool trace_kprobe_error_injectable(struct trace_event_call *call)
237 {
238 	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
239 
240 	return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
241 	       false;
242 }
243 
244 static int register_kprobe_event(struct trace_kprobe *tk);
245 static int unregister_kprobe_event(struct trace_kprobe *tk);
246 
247 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
248 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
249 				struct pt_regs *regs);
250 
251 static void free_trace_kprobe(struct trace_kprobe *tk)
252 {
253 	if (tk) {
254 		trace_probe_cleanup(&tk->tp);
255 		kfree(tk->symbol);
256 		free_percpu(tk->nhit);
257 		kfree(tk);
258 	}
259 }
260 
261 DEFINE_FREE(free_trace_kprobe, struct trace_kprobe *,
262 	if (!IS_ERR_OR_NULL(_T)) free_trace_kprobe(_T))
263 
264 /*
265  * Allocate new trace_probe and initialize it (including kprobes).
266  */
267 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
268 					     const char *event,
269 					     void *addr,
270 					     const char *symbol,
271 					     unsigned long offs,
272 					     int maxactive,
273 					     int nargs, bool is_return)
274 {
275 	struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
276 	int ret = -ENOMEM;
277 
278 	tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
279 	if (!tk)
280 		return ERR_PTR(ret);
281 
282 	tk->nhit = alloc_percpu(unsigned long);
283 	if (!tk->nhit)
284 		return ERR_PTR(ret);
285 
286 	if (symbol) {
287 		tk->symbol = kstrdup(symbol, GFP_KERNEL);
288 		if (!tk->symbol)
289 			return ERR_PTR(ret);
290 		tk->rp.kp.symbol_name = tk->symbol;
291 		tk->rp.kp.offset = offs;
292 	} else
293 		tk->rp.kp.addr = addr;
294 
295 	if (is_return)
296 		tk->rp.handler = kretprobe_dispatcher;
297 	else
298 		tk->rp.kp.pre_handler = kprobe_dispatcher;
299 
300 	tk->rp.maxactive = maxactive;
301 	INIT_HLIST_NODE(&tk->rp.kp.hlist);
302 	INIT_LIST_HEAD(&tk->rp.kp.list);
303 
304 	ret = trace_probe_init(&tk->tp, event, group, false, nargs);
305 	if (ret < 0)
306 		return ERR_PTR(ret);
307 
308 	dyn_event_init(&tk->devent, &trace_kprobe_ops);
309 	return_ptr(tk);
310 }
311 
312 static struct trace_kprobe *find_trace_kprobe(const char *event,
313 					      const char *group)
314 {
315 	struct dyn_event *pos;
316 	struct trace_kprobe *tk;
317 
318 	for_each_trace_kprobe(tk, pos)
319 		if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
320 		    strcmp(trace_probe_group_name(&tk->tp), group) == 0)
321 			return tk;
322 	return NULL;
323 }
324 
325 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
326 {
327 	int ret = 0;
328 
329 	if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
330 		if (trace_kprobe_is_return(tk))
331 			ret = enable_kretprobe(&tk->rp);
332 		else
333 			ret = enable_kprobe(&tk->rp.kp);
334 	}
335 
336 	return ret;
337 }
338 
339 static void __disable_trace_kprobe(struct trace_probe *tp)
340 {
341 	struct trace_kprobe *tk;
342 
343 	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
344 		if (!trace_kprobe_is_registered(tk))
345 			continue;
346 		if (trace_kprobe_is_return(tk))
347 			disable_kretprobe(&tk->rp);
348 		else
349 			disable_kprobe(&tk->rp.kp);
350 	}
351 }
352 
353 /*
354  * Enable trace_probe.
355  * If @file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
356  */
357 static int enable_trace_kprobe(struct trace_event_call *call,
358 				struct trace_event_file *file)
359 {
360 	struct trace_probe *tp;
361 	struct trace_kprobe *tk;
362 	bool enabled;
363 	int ret = 0;
364 
365 	tp = trace_probe_primary_from_call(call);
366 	if (WARN_ON_ONCE(!tp))
367 		return -ENODEV;
368 	enabled = trace_probe_is_enabled(tp);
369 
370 	/* This also changes "enabled" state */
371 	if (file) {
372 		ret = trace_probe_add_file(tp, file);
373 		if (ret)
374 			return ret;
375 	} else
376 		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
377 
378 	if (enabled)
379 		return 0;
380 
381 	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
382 		if (trace_kprobe_has_gone(tk))
383 			continue;
384 		ret = __enable_trace_kprobe(tk);
385 		if (ret)
386 			break;
387 		enabled = true;
388 	}
389 
390 	if (ret) {
391 		/* Failed to enable one of them. Roll back all */
392 		if (enabled)
393 			__disable_trace_kprobe(tp);
394 		if (file)
395 			trace_probe_remove_file(tp, file);
396 		else
397 			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
398 	}
399 
400 	return ret;
401 }
402 
403 /*
404  * Disable trace_probe.
405  * If @file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
406  */
407 static int disable_trace_kprobe(struct trace_event_call *call,
408 				struct trace_event_file *file)
409 {
410 	struct trace_probe *tp;
411 
412 	tp = trace_probe_primary_from_call(call);
413 	if (WARN_ON_ONCE(!tp))
414 		return -ENODEV;
415 
416 	if (file) {
417 		if (!trace_probe_get_file_link(tp, file))
418 			return -ENOENT;
419 		if (!trace_probe_has_single_file(tp))
420 			goto out;
421 		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
422 	} else
423 		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
424 
425 	if (!trace_probe_is_enabled(tp))
426 		__disable_trace_kprobe(tp);
427 
428  out:
429 	if (file)
430 		/*
431 		 * Synchronization is done in the function below. For a perf
432 		 * event, file == NULL and perf_trace_event_unreg() calls
433 		 * tracepoint_synchronize_unregister() to synchronize the
434 		 * event, so we don't need to care about it here.
435 		 */
436 		trace_probe_remove_file(tp, file);
437 
438 	return 0;
439 }
440 
441 #if defined(CONFIG_DYNAMIC_FTRACE) && \
442 	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
443 static bool __within_notrace_func(unsigned long addr)
444 {
445 	unsigned long offset, size;
446 
447 	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
448 		return false;
449 
450 	/* Get the entry address of the target function */
451 	addr -= offset;
452 
453 	/*
454 	 * Since ftrace_location_range() does inclusive range check, we need
455 	 * to subtract 1 byte from the end address.
456 	 */
457 	return !ftrace_location_range(addr, addr + size - 1);
458 }
459 
460 static bool within_notrace_func(struct trace_kprobe *tk)
461 {
462 	unsigned long addr = trace_kprobe_address(tk);
463 	char symname[KSYM_NAME_LEN], *p;
464 
465 	if (!__within_notrace_func(addr))
466 		return false;
467 
468 	/* Check if the address is on a suffixed-symbol */
469 	if (!lookup_symbol_name(addr, symname)) {
470 		p = strchr(symname, '.');
471 		if (!p)
472 			return true;
473 		*p = '\0';
474 		addr = (unsigned long)kprobe_lookup_name(symname, 0);
475 		if (addr)
476 			return __within_notrace_func(addr);
477 	}
478 
479 	return true;
480 }
481 #else
482 #define within_notrace_func(tk)	(false)
483 #endif
484 
485 /* Internal register function - just handle k*probes and flags */
486 static int __register_trace_kprobe(struct trace_kprobe *tk)
487 {
488 	int i, ret;
489 
490 	ret = security_locked_down(LOCKDOWN_KPROBES);
491 	if (ret)
492 		return ret;
493 
494 	if (trace_kprobe_is_registered(tk))
495 		return -EINVAL;
496 
497 	if (within_notrace_func(tk)) {
498 		pr_warn("Could not probe notrace function %ps\n",
499 			(void *)trace_kprobe_address(tk));
500 		return -EINVAL;
501 	}
502 
503 	for (i = 0; i < tk->tp.nr_args; i++) {
504 		ret = traceprobe_update_arg(&tk->tp.args[i]);
505 		if (ret)
506 			return ret;
507 	}
508 
509 	/* Set/clear disabled flag according to tp->flag */
510 	if (trace_probe_is_enabled(&tk->tp))
511 		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
512 	else
513 		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
514 
515 	if (trace_kprobe_is_return(tk))
516 		ret = register_kretprobe(&tk->rp);
517 	else
518 		ret = register_kprobe(&tk->rp.kp);
519 
520 	return ret;
521 }
522 
523 /* Internal unregister function - just handle k*probes and flags */
524 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
525 {
526 	if (trace_kprobe_is_registered(tk)) {
527 		if (trace_kprobe_is_return(tk))
528 			unregister_kretprobe(&tk->rp);
529 		else
530 			unregister_kprobe(&tk->rp.kp);
531 		/* Cleanup kprobe for reuse and mark it unregistered */
532 		INIT_HLIST_NODE(&tk->rp.kp.hlist);
533 		INIT_LIST_HEAD(&tk->rp.kp.list);
534 		if (tk->rp.kp.symbol_name)
535 			tk->rp.kp.addr = NULL;
536 	}
537 }
538 
539 /* Unregister a trace_probe and probe_event */
540 static int unregister_trace_kprobe(struct trace_kprobe *tk)
541 {
542 	/* If other probes are on the event, just unregister kprobe */
543 	if (trace_probe_has_sibling(&tk->tp))
544 		goto unreg;
545 
546 	/* Enabled event can not be unregistered */
547 	if (trace_probe_is_enabled(&tk->tp))
548 		return -EBUSY;
549 
550 	/* If there's a reference to the dynamic event */
551 	if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
552 		return -EBUSY;
553 
554 	/* Will fail if probe is being used by ftrace or perf */
555 	if (unregister_kprobe_event(tk))
556 		return -EBUSY;
557 
558 unreg:
559 	__unregister_trace_kprobe(tk);
560 	dyn_event_remove(&tk->devent);
561 	trace_probe_unlink(&tk->tp);
562 
563 	return 0;
564 }
565 
566 static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
567 					 struct trace_kprobe *comp)
568 {
569 	struct trace_probe_event *tpe = orig->tp.event;
570 	int i;
571 
572 	list_for_each_entry(orig, &tpe->probes, tp.list) {
573 		if (strcmp(trace_kprobe_symbol(orig),
574 			   trace_kprobe_symbol(comp)) ||
575 		    trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
576 			continue;
577 
578 		/*
579 		 * trace_probe_compare_arg_type() ensured that nr_args and
580 		 * each argument's name and type are the same. Let's compare comm.
581 		 */
582 		for (i = 0; i < orig->tp.nr_args; i++) {
583 			if (strcmp(orig->tp.args[i].comm,
584 				   comp->tp.args[i].comm))
585 				break;
586 		}
587 
588 		if (i == orig->tp.nr_args)
589 			return true;
590 	}
591 
592 	return false;
593 }
594 
595 static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
596 {
597 	int ret;
598 
599 	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
600 	if (ret) {
601 		/* Note that arguments start at index 2 */
602 		trace_probe_log_set_index(ret + 1);
603 		trace_probe_log_err(0, DIFF_ARG_TYPE);
604 		return -EEXIST;
605 	}
606 	if (trace_kprobe_has_same_kprobe(to, tk)) {
607 		trace_probe_log_set_index(0);
608 		trace_probe_log_err(0, SAME_PROBE);
609 		return -EEXIST;
610 	}
611 
612 	/* Append to existing event */
613 	ret = trace_probe_append(&tk->tp, &to->tp);
614 	if (ret)
615 		return ret;
616 
617 	/* Register k*probe */
618 	ret = __register_trace_kprobe(tk);
619 	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
620 		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
621 		ret = 0;
622 	}
623 
624 	if (ret)
625 		trace_probe_unlink(&tk->tp);
626 	else
627 		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
628 
629 	return ret;
630 }
631 
632 /* Register a trace_probe and probe_event */
633 static int register_trace_kprobe(struct trace_kprobe *tk)
634 {
635 	struct trace_kprobe *old_tk;
636 	int ret;
637 
638 	guard(mutex)(&event_mutex);
639 
640 	old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
641 				   trace_probe_group_name(&tk->tp));
642 	if (old_tk) {
643 		if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
644 			trace_probe_log_set_index(0);
645 			trace_probe_log_err(0, DIFF_PROBE_TYPE);
646 			return -EEXIST;
647 		}
648 		return append_trace_kprobe(tk, old_tk);
649 	}
650 
651 	/* Register new event */
652 	ret = register_kprobe_event(tk);
653 	if (ret) {
654 		if (ret == -EEXIST) {
655 			trace_probe_log_set_index(0);
656 			trace_probe_log_err(0, EVENT_EXIST);
657 		} else
658 			pr_warn("Failed to register probe event(%d)\n", ret);
659 		return ret;
660 	}
661 
662 	/* Register k*probe */
663 	ret = __register_trace_kprobe(tk);
664 	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
665 		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
666 		ret = 0;
667 	}
668 
669 	if (ret < 0)
670 		unregister_kprobe_event(tk);
671 	else
672 		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
673 
674 	return ret;
675 }
676 
677 #ifdef CONFIG_MODULES
678 static int validate_module_probe_symbol(const char *modname, const char *symbol);
679 
680 static int register_module_trace_kprobe(struct module *mod, struct trace_kprobe *tk)
681 {
682 	const char *p;
683 	int ret = 0;
684 
685 	p = strchr(trace_kprobe_symbol(tk), ':');
686 	if (p)
687 		ret = validate_module_probe_symbol(module_name(mod), p + 1);
688 	if (!ret)
689 		ret = __register_trace_kprobe(tk);
690 	return ret;
691 }
692 
693 /* Module notifier call back, checking event on the module */
694 static int trace_kprobe_module_callback(struct notifier_block *nb,
695 				       unsigned long val, void *data)
696 {
697 	struct module *mod = data;
698 	struct dyn_event *pos;
699 	struct trace_kprobe *tk;
700 	int ret;
701 
702 	if (val != MODULE_STATE_COMING)
703 		return NOTIFY_DONE;
704 
705 	/* Update probes on coming module */
706 	guard(mutex)(&event_mutex);
707 	for_each_trace_kprobe(tk, pos) {
708 		if (trace_kprobe_within_module(tk, mod)) {
709 			/* Don't need to check busy - this should have gone. */
710 			__unregister_trace_kprobe(tk);
711 			ret = register_module_trace_kprobe(mod, tk);
712 			if (ret)
713 				pr_warn("Failed to re-register probe %s on %s: %d\n",
714 					trace_probe_name(&tk->tp),
715 					module_name(mod), ret);
716 		}
717 	}
718 
719 	return NOTIFY_DONE;
720 }
721 
722 static struct notifier_block trace_kprobe_module_nb = {
723 	.notifier_call = trace_kprobe_module_callback,
724 	.priority = 2	/* Invoked after kprobe and jump_label module callback */
725 };
726 static int trace_kprobe_register_module_notifier(void)
727 {
728 	return register_module_notifier(&trace_kprobe_module_nb);
729 }
730 #else
731 static int trace_kprobe_register_module_notifier(void)
732 {
733 	return 0;
734 }
735 #endif /* CONFIG_MODULES */
736 
737 static int count_symbols(void *data, unsigned long unused)
738 {
739 	unsigned int *count = data;
740 
741 	(*count)++;
742 
743 	return 0;
744 }
745 
746 struct sym_count_ctx {
747 	unsigned int count;
748 	const char *name;
749 };
750 
751 static int count_mod_symbols(void *data, const char *name, unsigned long unused)
752 {
753 	struct sym_count_ctx *ctx = data;
754 
755 	if (strcmp(name, ctx->name) == 0)
756 		ctx->count++;
757 
758 	return 0;
759 }
760 
761 static unsigned int number_of_same_symbols(const char *mod, const char *func_name)
762 {
763 	struct sym_count_ctx ctx = { .count = 0, .name = func_name };
764 
765 	if (!mod)
766 		kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);
767 
768 	module_kallsyms_on_each_symbol(mod, count_mod_symbols, &ctx);
769 
770 	return ctx.count;
771 }
772 
773 static int validate_module_probe_symbol(const char *modname, const char *symbol)
774 {
775 	unsigned int count = number_of_same_symbols(modname, symbol);
776 
777 	if (count > 1) {
778 		/*
779 		 * Users should use ADDR to remove the ambiguity of
780 		 * using KSYM only.
781 		 */
782 		return -EADDRNOTAVAIL;
783 	} else if (count == 0) {
784 		/*
785 		 * We can return -ENOENT earlier here than when we actually
786 		 * register the kprobe.
787 		 */
788 		return -ENOENT;
789 	}
790 	return 0;
791 }
792 
793 #ifdef CONFIG_MODULES
794 /* Return NULL if the module is not loaded or under unloading. */
795 static struct module *try_module_get_by_name(const char *name)
796 {
797 	struct module *mod;
798 
799 	rcu_read_lock_sched();
800 	mod = find_module(name);
801 	if (mod && !try_module_get(mod))
802 		mod = NULL;
803 	rcu_read_unlock_sched();
804 
805 	return mod;
806 }
807 #else
808 #define try_module_get_by_name(name)	(NULL)
809 #endif
810 
811 static int validate_probe_symbol(char *symbol)
812 {
813 	struct module *mod = NULL;
814 	char *modname = NULL, *p;
815 	int ret = 0;
816 
817 	p = strchr(symbol, ':');
818 	if (p) {
819 		modname = symbol;
820 		symbol = p + 1;
821 		*p = '\0';
822 		mod = try_module_get_by_name(modname);
823 		if (!mod)
824 			goto out;
825 	}
826 
827 	ret = validate_module_probe_symbol(modname, symbol);
828 out:
829 	if (p)
830 		*p = ':';
831 	if (mod)
832 		module_put(mod);
833 	return ret;
834 }
835 
836 static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
837 				      struct pt_regs *regs);
838 
839 static int trace_kprobe_create_internal(int argc, const char *argv[],
840 					struct traceprobe_parse_context *ctx)
841 {
842 	/*
843 	 * Argument syntax:
844 	 *  - Add kprobe:
845 	 *      p[:[GRP/][EVENT]] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
846 	 *  - Add kretprobe:
847 	 *      r[MAXACTIVE][:[GRP/][EVENT]] [MOD:]KSYM[+0] [FETCHARGS]
848 	 *    Or
849 	 *      p[:[GRP/][EVENT]] [MOD:]KSYM[+0]%return [FETCHARGS]
850 	 *
851 	 * Fetch args:
852 	 *  $retval	: fetch return value
853 	 *  $stack	: fetch stack address
854 	 *  $stackN	: fetch Nth of stack (N:0-)
855 	 *  $comm       : fetch current task comm
856 	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
857 	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
858 	 *  %REG	: fetch register REG
859 	 * Dereferencing memory fetch:
860 	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
861 	 * Alias name of args:
862 	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
863 	 * Type of args:
864 	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
865 	 */
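	/*
	 * For example (illustrative commands only; the register names are
	 * architecture specific):
	 *   p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)
	 *   r:myretprobe do_sys_open $retval
	 */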
866 	struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
867 	int i, len, new_argc = 0, ret = 0;
868 	bool is_return = false;
869 	char *symbol __free(kfree) = NULL;
870 	char *tmp = NULL;
871 	const char **new_argv __free(kfree) = NULL;
872 	const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
873 	enum probe_print_type ptype;
874 	int maxactive = 0;
875 	long offset = 0;
876 	void *addr = NULL;
877 	char buf[MAX_EVENT_NAME_LEN];
878 	char gbuf[MAX_EVENT_NAME_LEN];
879 	char abuf[MAX_BTF_ARGS_LEN];
880 	char *dbuf __free(kfree) = NULL;
881 
882 	switch (argv[0][0]) {
883 	case 'r':
884 		is_return = true;
885 		break;
886 	case 'p':
887 		break;
888 	default:
889 		return -ECANCELED;
890 	}
891 	if (argc < 2)
892 		return -ECANCELED;
893 
894 	event = strchr(&argv[0][1], ':');
895 	if (event)
896 		event++;
897 
898 	if (isdigit(argv[0][1])) {
899 		if (!is_return) {
900 			trace_probe_log_err(1, BAD_MAXACT_TYPE);
901 			return -EINVAL;
902 		}
903 		if (event)
904 			len = event - &argv[0][1] - 1;
905 		else
906 			len = strlen(&argv[0][1]);
907 		if (len > MAX_EVENT_NAME_LEN - 1) {
908 			trace_probe_log_err(1, BAD_MAXACT);
909 			return -EINVAL;
910 		}
911 		memcpy(buf, &argv[0][1], len);
912 		buf[len] = '\0';
913 		ret = kstrtouint(buf, 0, &maxactive);
914 		if (ret || !maxactive) {
915 			trace_probe_log_err(1, BAD_MAXACT);
916 			return -EINVAL;
917 		}
918 		/* kretprobe instances are iterated over via a list. The
919 		 * maximum should stay reasonable.
920 		 */
921 		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
922 			trace_probe_log_err(1, MAXACT_TOO_BIG);
923 			return -EINVAL;
924 		}
925 	}
926 
927 	/* Try to parse an address. If that fails, try to read the
928 	 * input as a symbol. */
929 	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
930 		trace_probe_log_set_index(1);
931 		/* Check whether uprobe event specified */
932 		if (strchr(argv[1], '/') && strchr(argv[1], ':'))
933 			return -ECANCELED;
934 
935 		/* a symbol specified */
936 		symbol = kstrdup(argv[1], GFP_KERNEL);
937 		if (!symbol)
938 			return -ENOMEM;
939 
940 		tmp = strchr(symbol, '%');
941 		if (tmp) {
942 			if (!strcmp(tmp, "%return")) {
943 				*tmp = '\0';
944 				is_return = true;
945 			} else {
946 				trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
947 				return -EINVAL;
948 			}
949 		}
950 
951 		/* TODO: support .init module functions */
952 		ret = traceprobe_split_symbol_offset(symbol, &offset);
953 		if (ret || offset < 0 || offset > UINT_MAX) {
954 			trace_probe_log_err(0, BAD_PROBE_ADDR);
955 			return -EINVAL;
956 		}
957 		ret = validate_probe_symbol(symbol);
958 		if (ret) {
959 			if (ret == -EADDRNOTAVAIL)
960 				trace_probe_log_err(0, NON_UNIQ_SYMBOL);
961 			else
962 				trace_probe_log_err(0, BAD_PROBE_ADDR);
963 			return -EINVAL;
964 		}
965 		if (is_return)
966 			ctx->flags |= TPARG_FL_RETURN;
967 		ret = kprobe_on_func_entry(NULL, symbol, offset);
968 		if (ret == 0 && !is_return)
969 			ctx->flags |= TPARG_FL_FENTRY;
970 		/* Defer the ENOENT case until register kprobe */
971 		if (ret == -EINVAL && is_return) {
972 			trace_probe_log_err(0, BAD_RETPROBE);
973 			return -EINVAL;
974 		}
975 	}
976 
977 	trace_probe_log_set_index(0);
978 	if (event) {
979 		ret = traceprobe_parse_event_name(&event, &group, gbuf,
980 						  event - argv[0]);
981 		if (ret)
982 			return ret;
983 	}
984 
985 	if (!event) {
986 		/* Make a new event name */
987 		if (symbol)
988 			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
989 				 is_return ? 'r' : 'p', symbol, offset);
990 		else
991 			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
992 				 is_return ? 'r' : 'p', addr);
993 		sanitize_event_name(buf);
994 		event = buf;
995 	}
996 
997 	argc -= 2; argv += 2;
998 	ctx->funcname = symbol;
999 	new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
1000 					       abuf, MAX_BTF_ARGS_LEN, ctx);
1001 	if (IS_ERR(new_argv)) {
1002 		ret = PTR_ERR(new_argv);
1003 		new_argv = NULL;
1004 		return ret;
1005 	}
1006 	if (new_argv) {
1007 		argc = new_argc;
1008 		argv = new_argv;
1009 	}
1010 	if (argc > MAX_TRACE_ARGS) {
1011 		trace_probe_log_set_index(2);
1012 		trace_probe_log_err(0, TOO_MANY_ARGS);
1013 		return -E2BIG;
1014 	}
1015 
1016 	ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
1017 	if (ret)
1018 		return ret;
1019 
1020 	/* setup a probe */
1021 	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
1022 				argc, is_return);
1023 	if (IS_ERR(tk)) {
1024 		ret = PTR_ERR(tk);
1025 		/* This must return -ENOMEM, else there is a bug */
1026 		WARN_ON_ONCE(ret != -ENOMEM);
1027 		return ret;	/* We know tk is not allocated */
1028 	}
1029 
1030 	/* parse arguments */
1031 	for (i = 0; i < argc; i++) {
1032 		trace_probe_log_set_index(i + 2);
1033 		ctx->offset = 0;
1034 		ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], ctx);
1035 		if (ret)
1036 			return ret;	/* This can be -ENOMEM */
1037 	}
1038 	/* entry handler for kretprobe */
1039 	if (is_return && tk->tp.entry_arg) {
1040 		tk->rp.entry_handler = trace_kprobe_entry_handler;
1041 		tk->rp.data_size = traceprobe_get_entry_data_size(&tk->tp);
1042 	}
1043 
1044 	ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1045 	ret = traceprobe_set_print_fmt(&tk->tp, ptype);
1046 	if (ret < 0)
1047 		return ret;
1048 
1049 	ret = register_trace_kprobe(tk);
1050 	if (ret) {
1051 		trace_probe_log_set_index(1);
1052 		if (ret == -EILSEQ)
1053 			trace_probe_log_err(0, BAD_INSN_BNDRY);
1054 		else if (ret == -ENOENT)
1055 			trace_probe_log_err(0, BAD_PROBE_ADDR);
1056 		else if (ret != -ENOMEM && ret != -EEXIST)
1057 			trace_probe_log_err(0, FAIL_REG_PROBE);
1058 		return ret;
1059 	}
1060 	/*
1061 	 * Here, 'tk' has been registered to the list successfully,
1062 	 * so we don't need to free it.
1063 	 */
1064 	tk = NULL;
1065 
1066 	return 0;
1067 }
1068 
1069 static int trace_kprobe_create_cb(int argc, const char *argv[])
1070 {
1071 	struct traceprobe_parse_context ctx = { .flags = TPARG_FL_KERNEL };
1072 	int ret;
1073 
1074 	trace_probe_log_init("trace_kprobe", argc, argv);
1075 
1076 	ret = trace_kprobe_create_internal(argc, argv, &ctx);
1077 
1078 	traceprobe_finish_parse(&ctx);
1079 	trace_probe_log_clear();
1080 	return ret;
1081 }
1082 
1083 static int trace_kprobe_create(const char *raw_command)
1084 {
1085 	return trace_probe_create(raw_command, trace_kprobe_create_cb);
1086 }
1087 
1088 static int create_or_delete_trace_kprobe(const char *raw_command)
1089 {
1090 	int ret;
1091 
1092 	if (raw_command[0] == '-')
1093 		return dyn_event_release(raw_command, &trace_kprobe_ops);
1094 
1095 	ret = trace_kprobe_create(raw_command);
1096 	return ret == -ECANCELED ? -EINVAL : ret;
1097 }
1098 
1099 static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
1100 {
1101 	return create_or_delete_trace_kprobe(cmd->seq.buffer);
1102 }
1103 
1104 /**
1105  * kprobe_event_cmd_init - Initialize a kprobe event command object
1106  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1107  * @buf: A pointer to the buffer used to build the command
1108  * @maxlen: The length of the buffer passed in @buf
1109  *
1110  * Initialize a kprobe event command object.  Use this before
1111  * calling any of the other kprobe_event functions.
1112  */
1113 void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1114 {
1115 	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
1116 			  trace_kprobe_run_command);
1117 }
1118 EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
1119 
1120 /**
1121  * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
1122  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1123  * @kretprobe: Is this a return probe?
1124  * @name: The name of the kprobe event
1125  * @loc: The location of the kprobe event
1126  * @...: Variable number of arg (pairs), one pair for each field
1127  *
1128  * NOTE: Users normally won't want to call this function directly, but
1129  * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
1130  * adds a NULL to the end of the arg list.  If this function is used
1131  * directly, make sure the last arg in the variable arg list is NULL.
1132  *
1133  * Generate a kprobe event command to be executed by
1134  * kprobe_event_gen_cmd_end().  This function can be used to generate the
1135  * complete command or only the first part of it; in the latter case,
1136  * kprobe_event_add_fields() can be used to add more fields following this.
1137  *
1138  * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
1139  * returns -EINVAL if @loc == NULL.
1140  *
1141  * Return: 0 if successful, error otherwise.
1142  */
1143 int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
1144 				 const char *name, const char *loc, ...)
1145 {
1146 	char buf[MAX_EVENT_NAME_LEN];
1147 	struct dynevent_arg arg;
1148 	va_list args;
1149 	int ret;
1150 
1151 	if (cmd->type != DYNEVENT_TYPE_KPROBE)
1152 		return -EINVAL;
1153 
1154 	if (!loc)
1155 		return -EINVAL;
1156 
1157 	if (kretprobe)
1158 		snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
1159 	else
1160 		snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
1161 
1162 	ret = dynevent_str_add(cmd, buf);
1163 	if (ret)
1164 		return ret;
1165 
1166 	dynevent_arg_init(&arg, 0);
1167 	arg.str = loc;
1168 	ret = dynevent_arg_add(cmd, &arg, NULL);
1169 	if (ret)
1170 		return ret;
1171 
1172 	va_start(args, loc);
1173 	for (;;) {
1174 		const char *field;
1175 
1176 		field = va_arg(args, const char *);
1177 		if (!field)
1178 			break;
1179 
1180 		if (++cmd->n_fields > MAX_TRACE_ARGS) {
1181 			ret = -EINVAL;
1182 			break;
1183 		}
1184 
1185 		arg.str = field;
1186 		ret = dynevent_arg_add(cmd, &arg, NULL);
1187 		if (ret)
1188 			break;
1189 	}
1190 	va_end(args);
1191 
1192 	return ret;
1193 }
1194 EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
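
/*
 * A minimal in-kernel usage sketch of the API above, assuming the
 * kprobe_event_gen_cmd_start()/kprobe_event_gen_cmd_end() wrappers from
 * <linux/trace_events.h>. The event name, probed symbol and field specs
 * are illustrative only, and error handling is trimmed:
 *
 *	struct dynevent_cmd cmd;
 *	char buf[256];
 *	int ret;
 *
 *	kprobe_event_cmd_init(&cmd, buf, sizeof(buf));
 *	ret = kprobe_event_gen_cmd_start(&cmd, "myprobe", "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 */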
1195 
1196 /**
1197  * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
1198  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1199  * @...: Variable number of arg (pairs), one pair for each field
1200  *
1201  * NOTE: Users normally won't want to call this function directly, but
1202  * rather use the kprobe_event_add_fields() wrapper, which
1203  * automatically adds a NULL to the end of the arg list.  If this
1204  * function is used directly, make sure the last arg in the variable
1205  * arg list is NULL.
1206  *
1207  * Add probe fields to an existing kprobe command using a variable
1208  * list of args.  Fields are added in the same order they're listed.
1209  *
1210  * Return: 0 if successful, error otherwise.
1211  */
1212 int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
1213 {
1214 	struct dynevent_arg arg;
1215 	va_list args;
1216 	int ret = 0;
1217 
1218 	if (cmd->type != DYNEVENT_TYPE_KPROBE)
1219 		return -EINVAL;
1220 
1221 	dynevent_arg_init(&arg, 0);
1222 
1223 	va_start(args, cmd);
1224 	for (;;) {
1225 		const char *field;
1226 
1227 		field = va_arg(args, const char *);
1228 		if (!field)
1229 			break;
1230 
1231 		if (++cmd->n_fields > MAX_TRACE_ARGS) {
1232 			ret = -EINVAL;
1233 			break;
1234 		}
1235 
1236 		arg.str = field;
1237 		ret = dynevent_arg_add(cmd, &arg, NULL);
1238 		if (ret)
1239 			break;
1240 	}
1241 	va_end(args);
1242 
1243 	return ret;
1244 }
1245 EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
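
/*
 * Continuing the sketch after __kprobe_event_gen_cmd_start() above: extra
 * fields can also be appended in a separate stage through the
 * kprobe_event_add_fields() wrapper before the command is executed
 * (field specs illustrative only):
 *
 *	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 */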
1246 
1247 /**
1248  * kprobe_event_delete - Delete a kprobe event
1249  * @name: The name of the kprobe event to delete
1250  *
1251  * Delete a kprobe event with the given @name from kernel code rather
1252  * than directly from the command line.
1253  *
1254  * Return: 0 if successful, error otherwise.
1255  */
1256 int kprobe_event_delete(const char *name)
1257 {
1258 	char buf[MAX_EVENT_NAME_LEN];
1259 
1260 	snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
1261 
1262 	return create_or_delete_trace_kprobe(buf);
1263 }
1264 EXPORT_SYMBOL_GPL(kprobe_event_delete);
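
/*
 * For instance, an event created by the sketches above could later be
 * removed from kernel code with (event name illustrative):
 *
 *	kprobe_event_delete("myprobe");
 */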
1265 
1266 static int trace_kprobe_release(struct dyn_event *ev)
1267 {
1268 	struct trace_kprobe *tk = to_trace_kprobe(ev);
1269 	int ret = unregister_trace_kprobe(tk);
1270 
1271 	if (!ret)
1272 		free_trace_kprobe(tk);
1273 	return ret;
1274 }
1275 
1276 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
1277 {
1278 	struct trace_kprobe *tk = to_trace_kprobe(ev);
1279 	int i;
1280 
1281 	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
1282 	if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
1283 		seq_printf(m, "%d", tk->rp.maxactive);
1284 	seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
1285 				trace_probe_name(&tk->tp));
1286 
1287 	if (!tk->symbol)
1288 		seq_printf(m, " 0x%p", tk->rp.kp.addr);
1289 	else if (tk->rp.kp.offset)
1290 		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
1291 			   tk->rp.kp.offset);
1292 	else
1293 		seq_printf(m, " %s", trace_kprobe_symbol(tk));
1294 
1295 	for (i = 0; i < tk->tp.nr_args; i++)
1296 		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
1297 	seq_putc(m, '\n');
1298 
1299 	return 0;
1300 }
1301 
1302 static int probes_seq_show(struct seq_file *m, void *v)
1303 {
1304 	struct dyn_event *ev = v;
1305 
1306 	if (!is_trace_kprobe(ev))
1307 		return 0;
1308 
1309 	return trace_kprobe_show(m, ev);
1310 }
1311 
1312 static const struct seq_operations probes_seq_op = {
1313 	.start  = dyn_event_seq_start,
1314 	.next   = dyn_event_seq_next,
1315 	.stop   = dyn_event_seq_stop,
1316 	.show   = probes_seq_show
1317 };
1318 
1319 static int probes_open(struct inode *inode, struct file *file)
1320 {
1321 	int ret;
1322 
1323 	ret = security_locked_down(LOCKDOWN_TRACEFS);
1324 	if (ret)
1325 		return ret;
1326 
1327 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1328 		ret = dyn_events_release_all(&trace_kprobe_ops);
1329 		if (ret < 0)
1330 			return ret;
1331 	}
1332 
1333 	return seq_open(file, &probes_seq_op);
1334 }
1335 
1336 static ssize_t probes_write(struct file *file, const char __user *buffer,
1337 			    size_t count, loff_t *ppos)
1338 {
1339 	return trace_parse_run_command(file, buffer, count, ppos,
1340 				       create_or_delete_trace_kprobe);
1341 }
1342 
1343 static const struct file_operations kprobe_events_ops = {
1344 	.owner          = THIS_MODULE,
1345 	.open           = probes_open,
1346 	.read           = seq_read,
1347 	.llseek         = seq_lseek,
1348 	.release        = seq_release,
1349 	.write		= probes_write,
1350 };
1351 
1352 static unsigned long trace_kprobe_missed(struct trace_kprobe *tk)
1353 {
1354 	return trace_kprobe_is_return(tk) ?
1355 		tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
1356 }
1357 
1358 /* Probes profiling interfaces */
1359 static int probes_profile_seq_show(struct seq_file *m, void *v)
1360 {
1361 	struct dyn_event *ev = v;
1362 	struct trace_kprobe *tk;
1363 	unsigned long nmissed;
1364 
1365 	if (!is_trace_kprobe(ev))
1366 		return 0;
1367 
1368 	tk = to_trace_kprobe(ev);
1369 	nmissed = trace_kprobe_missed(tk);
1370 	seq_printf(m, "  %-44s %15lu %15lu\n",
1371 		   trace_probe_name(&tk->tp),
1372 		   trace_kprobe_nhit(tk),
1373 		   nmissed);
1374 
1375 	return 0;
1376 }
1377 
1378 static const struct seq_operations profile_seq_op = {
1379 	.start  = dyn_event_seq_start,
1380 	.next   = dyn_event_seq_next,
1381 	.stop   = dyn_event_seq_stop,
1382 	.show   = probes_profile_seq_show
1383 };
1384 
1385 static int profile_open(struct inode *inode, struct file *file)
1386 {
1387 	int ret;
1388 
1389 	ret = security_locked_down(LOCKDOWN_TRACEFS);
1390 	if (ret)
1391 		return ret;
1392 
1393 	return seq_open(file, &profile_seq_op);
1394 }
1395 
1396 static const struct file_operations kprobe_profile_ops = {
1397 	.owner          = THIS_MODULE,
1398 	.open           = profile_open,
1399 	.read           = seq_read,
1400 	.llseek         = seq_lseek,
1401 	.release        = seq_release,
1402 };
1403 
1404 /* Note that we don't verify it, since the code does not come from user space */
1405 static int
1406 process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
1407 		   void *dest, void *base)
1408 {
1409 	struct pt_regs *regs = rec;
1410 	unsigned long val;
1411 	int ret;
1412 
1413 retry:
1414 	/* 1st stage: get value from context */
1415 	switch (code->op) {
1416 	case FETCH_OP_REG:
1417 		val = regs_get_register(regs, code->param);
1418 		break;
1419 	case FETCH_OP_STACK:
1420 		val = regs_get_kernel_stack_nth(regs, code->param);
1421 		break;
1422 	case FETCH_OP_STACKP:
1423 		val = kernel_stack_pointer(regs);
1424 		break;
1425 	case FETCH_OP_RETVAL:
1426 		val = regs_return_value(regs);
1427 		break;
1428 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1429 	case FETCH_OP_ARG:
1430 		val = regs_get_kernel_argument(regs, code->param);
1431 		break;
1432 	case FETCH_OP_EDATA:
1433 		val = *(unsigned long *)((unsigned long)edata + code->offset);
1434 		break;
1435 #endif
1436 	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
1437 		code++;
1438 		goto retry;
1439 	default:
1440 		ret = process_common_fetch_insn(code, &val);
1441 		if (ret < 0)
1442 			return ret;
1443 	}
1444 	code++;
1445 
1446 	return process_fetch_insn_bottom(code, val, dest, base);
1447 }
1448 NOKPROBE_SYMBOL(process_fetch_insn)
1449 
1450 /* Kprobe handler */
1451 static nokprobe_inline void
1452 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1453 		    struct trace_event_file *trace_file)
1454 {
1455 	struct kprobe_trace_entry_head *entry;
1456 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1457 	struct trace_event_buffer fbuffer;
1458 	int dsize;
1459 
1460 	WARN_ON(call != trace_file->event_call);
1461 
1462 	if (trace_trigger_soft_disabled(trace_file))
1463 		return;
1464 
1465 	dsize = __get_data_size(&tk->tp, regs, NULL);
1466 
1467 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1468 					   sizeof(*entry) + tk->tp.size + dsize);
1469 	if (!entry)
1470 		return;
1471 
1472 	fbuffer.regs = regs;
1473 	entry->ip = (unsigned long)tk->rp.kp.addr;
1474 	store_trace_args(&entry[1], &tk->tp, regs, NULL, sizeof(*entry), dsize);
1475 
1476 	trace_event_buffer_commit(&fbuffer);
1477 }
1478 
1479 static void
1480 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1481 {
1482 	struct event_file_link *link;
1483 
1484 	trace_probe_for_each_link_rcu(link, &tk->tp)
1485 		__kprobe_trace_func(tk, regs, link->file);
1486 }
1487 NOKPROBE_SYMBOL(kprobe_trace_func);
1488 
1489 /* Kretprobe handler */
1490 
1491 static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
1492 				      struct pt_regs *regs)
1493 {
1494 	struct kretprobe *rp = get_kretprobe(ri);
1495 	struct trace_kprobe *tk;
1496 
1497 	/*
1498 	 * There is a small chance that get_kretprobe(ri) returns NULL when
1499 	 * the kretprobe is unregistered on another CPU between kretprobe's
1500 	 * trampoline_handler and this function.
1501 	 */
1502 	if (unlikely(!rp))
1503 		return -ENOENT;
1504 
1505 	tk = container_of(rp, struct trace_kprobe, rp);
1506 
1507 	/* store argument values into ri->data as entry data */
1508 	if (tk->tp.entry_arg)
1509 		store_trace_entry_data(ri->data, &tk->tp, regs);
1510 
1511 	return 0;
1512 }
1513 
1514 
1515 static nokprobe_inline void
1516 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1517 		       struct pt_regs *regs,
1518 		       struct trace_event_file *trace_file)
1519 {
1520 	struct kretprobe_trace_entry_head *entry;
1521 	struct trace_event_buffer fbuffer;
1522 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1523 	int dsize;
1524 
1525 	WARN_ON(call != trace_file->event_call);
1526 
1527 	if (trace_trigger_soft_disabled(trace_file))
1528 		return;
1529 
1530 	dsize = __get_data_size(&tk->tp, regs, ri->data);
1531 
1532 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1533 					   sizeof(*entry) + tk->tp.size + dsize);
1534 	if (!entry)
1535 		return;
1536 
1537 	fbuffer.regs = regs;
1538 	entry->func = (unsigned long)tk->rp.kp.addr;
1539 	entry->ret_ip = get_kretprobe_retaddr(ri);
1540 	store_trace_args(&entry[1], &tk->tp, regs, ri->data, sizeof(*entry), dsize);
1541 
1542 	trace_event_buffer_commit(&fbuffer);
1543 }
1544 
1545 static void
1546 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1547 		     struct pt_regs *regs)
1548 {
1549 	struct event_file_link *link;
1550 
1551 	trace_probe_for_each_link_rcu(link, &tk->tp)
1552 		__kretprobe_trace_func(tk, ri, regs, link->file);
1553 }
1554 NOKPROBE_SYMBOL(kretprobe_trace_func);
1555 
1556 /* Event entry printers */
1557 static enum print_line_t
1558 print_kprobe_event(struct trace_iterator *iter, int flags,
1559 		   struct trace_event *event)
1560 {
1561 	struct kprobe_trace_entry_head *field;
1562 	struct trace_seq *s = &iter->seq;
1563 	struct trace_probe *tp;
1564 
1565 	field = (struct kprobe_trace_entry_head *)iter->ent;
1566 	tp = trace_probe_primary_from_call(
1567 		container_of(event, struct trace_event_call, event));
1568 	if (WARN_ON_ONCE(!tp))
1569 		goto out;
1570 
1571 	trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1572 
1573 	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1574 		goto out;
1575 
1576 	trace_seq_putc(s, ')');
1577 
1578 	if (trace_probe_print_args(s, tp->args, tp->nr_args,
1579 			     (u8 *)&field[1], field) < 0)
1580 		goto out;
1581 
1582 	trace_seq_putc(s, '\n');
1583  out:
1584 	return trace_handle_return(s);
1585 }
1586 
1587 static enum print_line_t
1588 print_kretprobe_event(struct trace_iterator *iter, int flags,
1589 		      struct trace_event *event)
1590 {
1591 	struct kretprobe_trace_entry_head *field;
1592 	struct trace_seq *s = &iter->seq;
1593 	struct trace_probe *tp;
1594 
1595 	field = (struct kretprobe_trace_entry_head *)iter->ent;
1596 	tp = trace_probe_primary_from_call(
1597 		container_of(event, struct trace_event_call, event));
1598 	if (WARN_ON_ONCE(!tp))
1599 		goto out;
1600 
1601 	trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1602 
1603 	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1604 		goto out;
1605 
1606 	trace_seq_puts(s, " <- ");
1607 
1608 	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1609 		goto out;
1610 
1611 	trace_seq_putc(s, ')');
1612 
1613 	if (trace_probe_print_args(s, tp->args, tp->nr_args,
1614 			     (u8 *)&field[1], field) < 0)
1615 		goto out;
1616 
1617 	trace_seq_putc(s, '\n');
1618 
1619  out:
1620 	return trace_handle_return(s);
1621 }
1622 
1623 
1624 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1625 {
1626 	int ret;
1627 	struct kprobe_trace_entry_head field;
1628 	struct trace_probe *tp;
1629 
1630 	tp = trace_probe_primary_from_call(event_call);
1631 	if (WARN_ON_ONCE(!tp))
1632 		return -ENOENT;
1633 
1634 	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1635 
1636 	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1637 }
1638 
1639 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1640 {
1641 	int ret;
1642 	struct kretprobe_trace_entry_head field;
1643 	struct trace_probe *tp;
1644 
1645 	tp = trace_probe_primary_from_call(event_call);
1646 	if (WARN_ON_ONCE(!tp))
1647 		return -ENOENT;
1648 
1649 	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1650 	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1651 
1652 	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1653 }
1654 
1655 #ifdef CONFIG_PERF_EVENTS
1656 
1657 /* Kprobe profile handler */
1658 static int
1659 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1660 {
1661 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1662 	struct kprobe_trace_entry_head *entry;
1663 	struct hlist_head *head;
1664 	int size, __size, dsize;
1665 	int rctx;
1666 
1667 	if (bpf_prog_array_valid(call)) {
1668 		unsigned long orig_ip = instruction_pointer(regs);
1669 		int ret;
1670 
1671 		ret = trace_call_bpf(call, regs);
1672 
1673 		/*
1674 		 * We need to check and see if we modified the pc of the
1675 		 * pt_regs, and if so return 1 so that we don't do the
1676 		 * single stepping.
1677 		 */
1678 		if (orig_ip != instruction_pointer(regs))
1679 			return 1;
1680 		if (!ret)
1681 			return 0;
1682 	}
1683 
1684 	head = this_cpu_ptr(call->perf_events);
1685 	if (hlist_empty(head))
1686 		return 0;
1687 
1688 	dsize = __get_data_size(&tk->tp, regs, NULL);
1689 	__size = sizeof(*entry) + tk->tp.size + dsize;
1690 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1691 	size -= sizeof(u32);
1692 
1693 	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1694 	if (!entry)
1695 		return 0;
1696 
1697 	entry->ip = (unsigned long)tk->rp.kp.addr;
1698 	memset(&entry[1], 0, dsize);
1699 	store_trace_args(&entry[1], &tk->tp, regs, NULL, sizeof(*entry), dsize);
1700 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1701 			      head, NULL);
1702 	return 0;
1703 }
1704 NOKPROBE_SYMBOL(kprobe_perf_func);
1705 
1706 /* Kretprobe profile handler */
1707 static void
1708 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1709 		    struct pt_regs *regs)
1710 {
1711 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1712 	struct kretprobe_trace_entry_head *entry;
1713 	struct hlist_head *head;
1714 	int size, __size, dsize;
1715 	int rctx;
1716 
1717 	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1718 		return;
1719 
1720 	head = this_cpu_ptr(call->perf_events);
1721 	if (hlist_empty(head))
1722 		return;
1723 
1724 	dsize = __get_data_size(&tk->tp, regs, ri->data);
1725 	__size = sizeof(*entry) + tk->tp.size + dsize;
1726 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1727 	size -= sizeof(u32);
1728 
1729 	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1730 	if (!entry)
1731 		return;
1732 
1733 	entry->func = (unsigned long)tk->rp.kp.addr;
1734 	entry->ret_ip = get_kretprobe_retaddr(ri);
1735 	store_trace_args(&entry[1], &tk->tp, regs, ri->data, sizeof(*entry), dsize);
1736 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1737 			      head, NULL);
1738 }
1739 NOKPROBE_SYMBOL(kretprobe_perf_func);
1740 
1741 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1742 			const char **symbol, u64 *probe_offset,
1743 			u64 *probe_addr, unsigned long *missed,
1744 			bool perf_type_tracepoint)
1745 {
1746 	const char *pevent = trace_event_name(event->tp_event);
1747 	const char *group = event->tp_event->class->system;
1748 	struct trace_kprobe *tk;
1749 
1750 	if (perf_type_tracepoint)
1751 		tk = find_trace_kprobe(pevent, group);
1752 	else
1753 		tk = trace_kprobe_primary_from_call(event->tp_event);
1754 	if (!tk)
1755 		return -EINVAL;
1756 
1757 	*fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1758 					      : BPF_FD_TYPE_KPROBE;
1759 	*probe_offset = tk->rp.kp.offset;
1760 	*probe_addr = kallsyms_show_value(current_cred()) ?
1761 		      (unsigned long)tk->rp.kp.addr : 0;
1762 	*symbol = tk->symbol;
1763 	if (missed)
1764 		*missed = trace_kprobe_missed(tk);
1765 	return 0;
1766 }
1767 #endif	/* CONFIG_PERF_EVENTS */
1768 
1769 /*
1770  * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1771  *
1772  * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1773  * lockless, but we can't race with this __init function.
1774  */
1775 static int kprobe_register(struct trace_event_call *event,
1776 			   enum trace_reg type, void *data)
1777 {
1778 	struct trace_event_file *file = data;
1779 
1780 	switch (type) {
1781 	case TRACE_REG_REGISTER:
1782 		return enable_trace_kprobe(event, file);
1783 	case TRACE_REG_UNREGISTER:
1784 		return disable_trace_kprobe(event, file);
1785 
1786 #ifdef CONFIG_PERF_EVENTS
1787 	case TRACE_REG_PERF_REGISTER:
1788 		return enable_trace_kprobe(event, NULL);
1789 	case TRACE_REG_PERF_UNREGISTER:
1790 		return disable_trace_kprobe(event, NULL);
1791 	case TRACE_REG_PERF_OPEN:
1792 	case TRACE_REG_PERF_CLOSE:
1793 	case TRACE_REG_PERF_ADD:
1794 	case TRACE_REG_PERF_DEL:
1795 		return 0;
1796 #endif
1797 	}
1798 	return 0;
1799 }
1800 
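/*
 * Kprobe pre-handler shared by all trace kprobes: count the hit and fan
 * out to the ftrace and/or perf handlers according to the probe's flags.
 * A non-zero return value (only possible from the perf/BPF path) signals
 * that the handler changed the instruction pointer, e.g. for error
 * injection, so the kprobe core must skip single-stepping.
 */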
1801 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1802 {
1803 	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1804 	int ret = 0;
1805 
1806 	raw_cpu_inc(*tk->nhit);
1807 
1808 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1809 		kprobe_trace_func(tk, regs);
1810 #ifdef CONFIG_PERF_EVENTS
1811 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1812 		ret = kprobe_perf_func(tk, regs);
1813 #endif
1814 	return ret;
1815 }
1816 NOKPROBE_SYMBOL(kprobe_dispatcher);
1817 
1818 static int
1819 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1820 {
1821 	struct kretprobe *rp = get_kretprobe(ri);
1822 	struct trace_kprobe *tk;
1823 
1824 	/*
1825 	 * There is a small chance that get_kretprobe(ri) returns NULL when
1826 	 * the kretprobe is unregistered on another CPU between kretprobe's
1827 	 * trampoline_handler and this function.
1828 	 */
1829 	if (unlikely(!rp))
1830 		return 0;
1831 
1832 	tk = container_of(rp, struct trace_kprobe, rp);
1833 	raw_cpu_inc(*tk->nhit);
1834 
1835 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1836 		kretprobe_trace_func(tk, ri, regs);
1837 #ifdef CONFIG_PERF_EVENTS
1838 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1839 		kretprobe_perf_func(tk, ri, regs);
1840 #endif
1841 	return 0;	/* We don't tweak the kernel, so just return 0 */
1842 }
1843 NOKPROBE_SYMBOL(kretprobe_dispatcher);
1844 
1845 static struct trace_event_functions kretprobe_funcs = {
1846 	.trace		= print_kretprobe_event
1847 };
1848 
1849 static struct trace_event_functions kprobe_funcs = {
1850 	.trace		= print_kprobe_event
1851 };
1852 
1853 static struct trace_event_fields kretprobe_fields_array[] = {
1854 	{ .type = TRACE_FUNCTION_TYPE,
1855 	  .define_fields = kretprobe_event_define_fields },
1856 	{}
1857 };
1858 
1859 static struct trace_event_fields kprobe_fields_array[] = {
1860 	{ .type = TRACE_FUNCTION_TYPE,
1861 	  .define_fields = kprobe_event_define_fields },
1862 	{}
1863 };
1864 
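/*
 * Hook up the kprobe- or kretprobe-specific output and field callbacks
 * and mark the call as a kprobe event (TRACE_EVENT_FL_KPROBE) so the
 * tracing core treats it as a dynamically created probe event.
 */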
1865 static inline void init_trace_event_call(struct trace_kprobe *tk)
1866 {
1867 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1868 
1869 	if (trace_kprobe_is_return(tk)) {
1870 		call->event.funcs = &kretprobe_funcs;
1871 		call->class->fields_array = kretprobe_fields_array;
1872 	} else {
1873 		call->event.funcs = &kprobe_funcs;
1874 		call->class->fields_array = kprobe_fields_array;
1875 	}
1876 
1877 	call->flags = TRACE_EVENT_FL_KPROBE;
1878 	call->class->reg = kprobe_register;
1879 }
1880 
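/*
 * Register/unregister the trace_event_call of this probe with the tracing
 * core, so the corresponding tracefs event files can be created or removed.
 */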
1881 static int register_kprobe_event(struct trace_kprobe *tk)
1882 {
1883 	init_trace_event_call(tk);
1884 
1885 	return trace_probe_register_event_call(&tk->tp);
1886 }
1887 
1888 static int unregister_kprobe_event(struct trace_kprobe *tk)
1889 {
1890 	return trace_probe_unregister_event_call(&tk->tp);
1891 }
1892 
1893 #ifdef CONFIG_PERF_EVENTS
1894 
1895 /* create a trace_kprobe, but don't add it to global lists */
1896 struct trace_event_call *
1897 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1898 			  bool is_return)
1899 {
1900 	enum probe_print_type ptype;
1901 	struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
1902 	int ret;
1903 	char *event;
1904 
1905 	if (func) {
1906 		ret = validate_probe_symbol(func);
1907 		if (ret)
1908 			return ERR_PTR(ret);
1909 	}
1910 
1911 	/*
1912 	 * Local trace_kprobes are not added to dyn_event, so they are never
1913 	 * found by find_trace_kprobe(). Therefore, duplicated event names are
1914 	 * not a concern here.
1915 	 */
1916 	event = func ? func : "DUMMY_EVENT";
1917 
1918 	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1919 				offs, 0 /* maxactive */, 0 /* nargs */,
1920 				is_return);
1921 
1922 	if (IS_ERR(tk)) {
1923 		pr_info("Failed to allocate trace_probe.(%d)\n",
1924 			(int)PTR_ERR(tk));
1925 		return ERR_CAST(tk);
1926 	}
1927 
1928 	init_trace_event_call(tk);
1929 
1930 	ptype = trace_kprobe_is_return(tk) ?
1931 		PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1932 	if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0)
1933 		return ERR_PTR(-ENOMEM);
1934 
1935 	ret = __register_trace_kprobe(tk);
1936 	if (ret < 0)
1937 		return ERR_PTR(ret);
1938 
1939 	return trace_probe_event_call(&(no_free_ptr(tk)->tp));
1940 }
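
/*
 * These "local" events back kprobes created directly through the perf
 * kprobe PMU (perf_event_open() without going through tracefs, presumably
 * via perf_kprobe_init()).  They never show up in kprobe_events and are
 * torn down again with destroy_local_trace_kprobe() below.
 */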
1941 
1942 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1943 {
1944 	struct trace_kprobe *tk;
1945 
1946 	tk = trace_kprobe_primary_from_call(event_call);
1947 	if (unlikely(!tk))
1948 		return;
1949 
1950 	if (trace_probe_is_enabled(&tk->tp)) {
1951 		WARN_ON(1);
1952 		return;
1953 	}
1954 
1955 	__unregister_trace_kprobe(tk);
1956 
1957 	free_trace_kprobe(tk);
1958 }
1959 #endif /* CONFIG_PERF_EVENTS */
1960 
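/*
 * Enable, on the top-level trace instance, every event created from the
 * kprobe_event= boot parameter once its trace_event_file exists.
 */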
1961 static __init void enable_boot_kprobe_events(void)
1962 {
1963 	struct trace_array *tr = top_trace_array();
1964 	struct trace_event_file *file;
1965 	struct trace_kprobe *tk;
1966 	struct dyn_event *pos;
1967 
1968 	guard(mutex)(&event_mutex);
1969 	for_each_trace_kprobe(tk, pos) {
1970 		list_for_each_entry(file, &tr->events, list)
1971 			if (file->event_call == trace_probe_event_call(&tk->tp))
1972 				trace_event_enable_disable(file, 1, 0);
1973 	}
1974 }
1975 
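/*
 * Parse the kprobe_event= boot parameter.  Probe definitions are separated
 * by ';' and use ',' instead of spaces, e.g. (illustrative only):
 *
 *	kprobe_event=p,vfs_read,$arg1;r,vfs_read,$retval
 *
 * which creates an entry probe and a return probe on vfs_read.
 */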
1976 static __init void setup_boot_kprobe_events(void)
1977 {
1978 	char *p, *cmd = kprobe_boot_events_buf;
1979 	int ret;
1980 
1981 	strreplace(kprobe_boot_events_buf, ',', ' ');
1982 
1983 	while (cmd && *cmd != '\0') {
1984 		p = strchr(cmd, ';');
1985 		if (p)
1986 			*p++ = '\0';
1987 
1988 		ret = create_or_delete_trace_kprobe(cmd);
1989 		if (ret)
1990 			pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1991 
1992 		cmd = p;
1993 	}
1994 
1995 	enable_boot_kprobe_events();
1996 }
1997 
1998 /*
1999  * Register dynevent at core_initcall. This allows the kernel to set up
2000  * kprobe events at postcore_initcall time without tracefs.
2001  */
2002 static __init int init_kprobe_trace_early(void)
2003 {
2004 	int ret;
2005 
2006 	ret = dyn_event_register(&trace_kprobe_ops);
2007 	if (ret)
2008 		return ret;
2009 
2010 	if (trace_kprobe_register_module_notifier())
2011 		return -EINVAL;
2012 
2013 	return 0;
2014 }
2015 core_initcall(init_kprobe_trace_early);
2016 
2017 /* Make a tracefs interface for controlling probe points */
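/*
 * With tracefs mounted (typically /sys/kernel/tracing), probes can then be
 * added from user space, e.g. (illustrative only):
 *
 *	echo 'p:myprobe vfs_read count=$arg3' >> /sys/kernel/tracing/kprobe_events
 *
 * and per-probe hit/miss counters read back from kprobe_profile.
 */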
2018 static __init int init_kprobe_trace(void)
2019 {
2020 	int ret;
2021 
2022 	ret = tracing_init_dentry();
2023 	if (ret)
2024 		return 0;
2025 
2026 	/* Event list interface */
2027 	trace_create_file("kprobe_events", TRACE_MODE_WRITE,
2028 			  NULL, NULL, &kprobe_events_ops);
2029 
2030 	/* Profile interface */
2031 	trace_create_file("kprobe_profile", TRACE_MODE_READ,
2032 			  NULL, NULL, &kprobe_profile_ops);
2033 
2034 	setup_boot_kprobe_events();
2035 
2036 	return 0;
2037 }
2038 fs_initcall(init_kprobe_trace);
2039 
2040 
2041 #ifdef CONFIG_FTRACE_STARTUP_TEST
2042 static __init struct trace_event_file *
2043 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
2044 {
2045 	struct trace_event_file *file;
2046 
2047 	list_for_each_entry(file, &tr->events, list)
2048 		if (file->event_call == trace_probe_event_call(&tk->tp))
2049 			return file;
2050 
2051 	return NULL;
2052 }
2053 
2054 /*
2055  * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
2056  * stage, so we can do this locklessly.
2057  */
2058 static __init int kprobe_trace_self_tests_init(void)
2059 {
2060 	int ret, warn = 0;
2061 	int (*target)(int, int, int, int, int, int);
2062 	struct trace_kprobe *tk;
2063 	struct trace_event_file *file;
2064 
2065 	if (tracing_is_disabled())
2066 		return -ENODEV;
2067 
2068 	if (tracing_selftest_disabled)
2069 		return 0;
2070 
2071 	target = kprobe_trace_selftest_target;
2072 
2073 	pr_info("Testing kprobe tracing: ");
2074 
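	/*
	 * Define an entry probe on the selftest target, recording the stack
	 * address ($stack), entry 0 of the stack ($stack0) and a dereference
	 * of the stack address (+0($stack)).
	 */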
2075 	ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
2076 	if (WARN_ONCE(ret, "error on probing function entry.")) {
2077 		warn++;
2078 	} else {
2079 		/* Enable trace point */
2080 		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2081 		if (WARN_ONCE(tk == NULL, "error on probing function entry.")) {
2082 			warn++;
2083 		} else {
2084 			file = find_trace_probe_file(tk, top_trace_array());
2085 			if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
2086 				warn++;
2087 			} else
2088 				enable_trace_kprobe(
2089 					trace_probe_event_call(&tk->tp), file);
2090 		}
2091 	}
2092 
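	/* Define a return probe on the same target, recording its return value ($retval). */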
2093 	ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
2094 	if (WARN_ONCE(ret, "error on probing function return.")) {
2095 		warn++;
2096 	} else {
2097 		/* Enable trace point */
2098 		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2099 		if (WARN_ONCE(tk == NULL, "error on getting 2nd new probe.")) {
2100 			warn++;
2101 		} else {
2102 			file = find_trace_probe_file(tk, top_trace_array());
2103 			if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
2104 				warn++;
2105 			} else
2106 				enable_trace_kprobe(
2107 					trace_probe_event_call(&tk->tp), file);
2108 		}
2109 	}
2110 
2111 	if (warn)
2112 		goto end;
2113 
2114 	ret = target(1, 2, 3, 4, 5, 6);
2115 
2116 	/*
2117 	 * No error is expected here; the check only prevents the optimizer
2118 	 * from removing the call to target(), which otherwise has no
2119 	 * side effects and would never be performed.
2120 	 */
2121 	if (ret != 21)
2122 		warn++;
2123 
2124 	/* Disable the trace points before removing them */
2125 	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2126 	if (WARN_ONCE(tk == NULL, "error on getting test probe.")) {
2127 		warn++;
2128 	} else {
2129 		if (WARN_ONCE(trace_kprobe_nhit(tk) != 1,
2130 				 "incorrect number of testprobe hits."))
2131 			warn++;
2132 
2133 		file = find_trace_probe_file(tk, top_trace_array());
2134 		if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
2135 			warn++;
2136 		} else
2137 			disable_trace_kprobe(
2138 				trace_probe_event_call(&tk->tp), file);
2139 	}
2140 
2141 	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2142 	if (WARN_ONCE(tk == NULL, "error on getting 2nd test probe.")) {
2143 		warn++;
2144 	} else {
2145 		if (WARN_ONCE(trace_kprobe_nhit(tk) != 1,
2146 				 "incorrect number of testprobe2 hits."))
2147 			warn++;
2148 
2149 		file = find_trace_probe_file(tk, top_trace_array());
2150 		if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
2151 			warn++;
2152 		} else
2153 			disable_trace_kprobe(
2154 				trace_probe_event_call(&tk->tp), file);
2155 	}
2156 
2157 	ret = create_or_delete_trace_kprobe("-:testprobe");
2158 	if (WARN_ONCE(ret, "error on deleting a probe."))
2159 		warn++;
2160 
2161 	ret = create_or_delete_trace_kprobe("-:testprobe2");
2162 	if (WARN_ONCE(ret, "error on deleting a probe."))
2163 		warn++;
2164 
2165 
2166 end:
2167 	/*
2168 	 * Wait for the optimizer work to finish. Otherwise it might fiddle
2169 	 * with probes in already freed __init text.
2170 	 */
2171 	wait_for_kprobe_optimizer();
2172 	if (warn)
2173 		pr_cont("NG: Some tests are failed. Please check them.\n");
2174 	else
2175 		pr_cont("OK\n");
2176 	return 0;
2177 }
2178 
2179 late_initcall(kprobe_trace_self_tests_init);
2180 
2181 #endif
2182