1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Kernel Probes (KProbes)
4  *
5  * Copyright (C) IBM Corporation, 2002, 2004
6  *
7  * 2002-Oct	Created by Vamsi Krishna S <[email protected]> Kernel
8  *		Probes initial implementation (includes suggestions from
9  *		Rusty Russell).
10  * 2004-Aug	Updated by Prasanna S Panchamukhi <[email protected]> with
11  *		hlists and exceptions notifier as suggested by Andi Kleen.
12  * 2004-July	Suparna Bhattacharya <[email protected]> added jumper probes
13  *		interface to access function arguments.
14  * 2004-Sep	Prasanna S Panchamukhi <[email protected]> Changed Kprobes
15  *		exceptions notifier to be first on the priority list.
16  * 2005-May	Hien Nguyen <[email protected]>, Jim Keniston
17  *		<[email protected]> and Prasanna S Panchamukhi
18  *		<[email protected]> added function-return probes.
19  */
20 
21 #define pr_fmt(fmt) "kprobes: " fmt
22 
23 #include <linux/kprobes.h>
24 #include <linux/hash.h>
25 #include <linux/init.h>
26 #include <linux/slab.h>
27 #include <linux/stddef.h>
28 #include <linux/export.h>
29 #include <linux/kallsyms.h>
30 #include <linux/freezer.h>
31 #include <linux/seq_file.h>
32 #include <linux/debugfs.h>
33 #include <linux/sysctl.h>
34 #include <linux/kdebug.h>
35 #include <linux/memory.h>
36 #include <linux/ftrace.h>
37 #include <linux/cpu.h>
38 #include <linux/jump_label.h>
39 #include <linux/static_call.h>
40 #include <linux/perf_event.h>
41 #include <linux/execmem.h>
42 #include <linux/cleanup.h>
43 
44 #include <asm/sections.h>
45 #include <asm/cacheflush.h>
46 #include <asm/errno.h>
47 #include <linux/uaccess.h>
48 
49 #define KPROBE_HASH_BITS 6
50 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
51 
52 #if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL)
53 #define kprobe_sysctls_init() do { } while (0)
54 #endif
55 
56 static int kprobes_initialized;
/*
 * The 'kprobe_table' can be accessed by:
 * - normal hlist traversal and RCU add/del while 'kprobe_mutex' is held, or
 * - RCU hlist traversal with preemption disabled (breakpoint handlers).
 */
62 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
63 
64 /* NOTE: change this value only with 'kprobe_mutex' held */
65 static bool kprobes_all_disarmed;
66 
67 /* This protects 'kprobe_table' and 'optimizing_list' */
68 static DEFINE_MUTEX(kprobe_mutex);
69 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);
70 
kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					unsigned int __unused)
73 {
74 	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
75 }
76 
/*
 * Blacklist -- list of 'struct kprobe_blacklist_entry' describing where
 * kprobes cannot probe.
 */
80  */
81 static LIST_HEAD(kprobe_blacklist);
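/*
 * Illustrative note (not part of this file's logic): entries typically land
 * on this list via the NOKPROBE_SYMBOL() annotation, e.g.
 *
 *	static int fragile_func(void) { ... }	(hypothetical example)
 *	NOKPROBE_SYMBOL(fragile_func);
 *
 * while functions marked '__kprobes' go into the '.kprobes.text' section and
 * are rejected by arch_within_kprobe_blacklist() instead.
 */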
82 
83 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
84 /*
85  * 'kprobe::ainsn.insn' points to the copy of the instruction to be
86  * single-stepped. x86_64, POWER4 and above have no-exec support and
87  * stepping on the instruction on a vmalloced/kmalloced/data page
88  * is a recipe for disaster
89  */
90 struct kprobe_insn_page {
91 	struct list_head list;
92 	kprobe_opcode_t *insns;		/* Page of instruction slots */
93 	struct kprobe_insn_cache *cache;
94 	int nused;
95 	int ngarbage;
96 	char slot_used[];
97 };
98 
static int slots_per_page(struct kprobe_insn_cache *c)
100 {
101 	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
102 }
103 
104 enum kprobe_slot_state {
105 	SLOT_CLEAN = 0,
106 	SLOT_DIRTY = 1,
107 	SLOT_USED = 2,
108 };
109 
void __weak *alloc_insn_page(void)
111 {
112 	/*
113 	 * Use execmem_alloc() so this page is within +/- 2GB of where the
114 	 * kernel image and loaded module images reside. This is required
115 	 * for most of the architectures.
116 	 * (e.g. x86-64 needs this to handle the %rip-relative fixups.)
117 	 */
118 	return execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
119 }
120 
static void free_insn_page(void *page)
122 {
123 	execmem_free(page);
124 }
125 
126 struct kprobe_insn_cache kprobe_insn_slots = {
127 	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
128 	.alloc = alloc_insn_page,
129 	.free = free_insn_page,
130 	.sym = KPROBE_INSN_PAGE_SYM,
131 	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
132 	.insn_size = MAX_INSN_SIZE,
133 	.nr_garbage = 0,
134 };
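/*
 * Note: architecture code normally reaches this cache through the
 * get_insn_slot()/free_insn_slot() wrappers declared in <linux/kprobes.h>
 * rather than calling __get_insn_slot()/__free_insn_slot() directly.
 */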
135 static int collect_garbage_slots(struct kprobe_insn_cache *c);
136 
137 /**
138  * __get_insn_slot() - Find a slot on an executable page for an instruction.
139  * We allocate an executable page if there's no room on existing ones.
140  */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
142 {
143 	struct kprobe_insn_page *kip;
144 
145 	/* Since the slot array is not protected by rcu, we need a mutex */
146 	guard(mutex)(&c->mutex);
147 	do {
148 		guard(rcu)();
149 		list_for_each_entry_rcu(kip, &c->pages, list) {
150 			if (kip->nused < slots_per_page(c)) {
151 				int i;
152 
153 				for (i = 0; i < slots_per_page(c); i++) {
154 					if (kip->slot_used[i] == SLOT_CLEAN) {
155 						kip->slot_used[i] = SLOT_USED;
156 						kip->nused++;
157 						return kip->insns + (i * c->insn_size);
158 					}
159 				}
160 				/* kip->nused is broken. Fix it. */
161 				kip->nused = slots_per_page(c);
162 				WARN_ON(1);
163 			}
164 		}
	/* If there are any garbage slots, collect them and try again. */
166 	} while (c->nr_garbage && collect_garbage_slots(c) == 0);
167 
168 	/* All out of space.  Need to allocate a new page. */
169 	kip = kmalloc(struct_size(kip, slot_used, slots_per_page(c)), GFP_KERNEL);
170 	if (!kip)
171 		return NULL;
172 
173 	kip->insns = c->alloc();
174 	if (!kip->insns) {
175 		kfree(kip);
176 		return NULL;
177 	}
178 	INIT_LIST_HEAD(&kip->list);
179 	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
180 	kip->slot_used[0] = SLOT_USED;
181 	kip->nused = 1;
182 	kip->ngarbage = 0;
183 	kip->cache = c;
184 	list_add_rcu(&kip->list, &c->pages);
185 
186 	/* Record the perf ksymbol register event after adding the page */
187 	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
188 			   PAGE_SIZE, false, c->sym);
189 
190 	return kip->insns;
191 }
192 
/* Return true if this slot's page has become empty (all garbage collected), otherwise false. */
static bool collect_one_slot(struct kprobe_insn_page *kip, int idx)
195 {
196 	kip->slot_used[idx] = SLOT_CLEAN;
197 	kip->nused--;
198 	if (kip->nused != 0)
199 		return false;
200 
201 	/*
202 	 * Page is no longer in use.  Free it unless
203 	 * it's the last one.  We keep the last one
204 	 * so as not to have to set it up again the
205 	 * next time somebody inserts a probe.
206 	 */
207 	if (!list_is_singular(&kip->list)) {
208 		/*
209 		 * Record perf ksymbol unregister event before removing
210 		 * the page.
211 		 */
212 		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
213 				   (unsigned long)kip->insns, PAGE_SIZE, true,
214 				   kip->cache->sym);
215 		list_del_rcu(&kip->list);
216 		synchronize_rcu();
217 		kip->cache->free(kip->insns);
218 		kfree(kip);
219 	}
220 	return true;
221 }
222 
static int collect_garbage_slots(struct kprobe_insn_cache *c)
224 {
225 	struct kprobe_insn_page *kip, *next;
226 
	/* Ensure no one is still running on the garbage slots */
228 	synchronize_rcu();
229 
230 	list_for_each_entry_safe(kip, next, &c->pages, list) {
231 		int i;
232 
233 		if (kip->ngarbage == 0)
234 			continue;
235 		kip->ngarbage = 0;	/* we will collect all garbages */
236 		for (i = 0; i < slots_per_page(c); i++) {
237 			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
238 				break;
239 		}
240 	}
241 	c->nr_garbage = 0;
242 	return 0;
243 }
244 
static long __find_insn_page(struct kprobe_insn_cache *c,
	kprobe_opcode_t *slot, struct kprobe_insn_page **pkip)
247 {
248 	struct kprobe_insn_page *kip = NULL;
249 	long idx;
250 
251 	guard(rcu)();
252 	list_for_each_entry_rcu(kip, &c->pages, list) {
253 		idx = ((long)slot - (long)kip->insns) /
254 			(c->insn_size * sizeof(kprobe_opcode_t));
255 		if (idx >= 0 && idx < slots_per_page(c)) {
256 			*pkip = kip;
257 			return idx;
258 		}
259 	}
260 	/* Could not find this slot. */
261 	WARN_ON(1);
262 	*pkip = NULL;
263 	return -1;
264 }
265 
void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
268 {
269 	struct kprobe_insn_page *kip = NULL;
270 	long idx;
271 
272 	guard(mutex)(&c->mutex);
273 	idx = __find_insn_page(c, slot, &kip);
274 	/* Mark and sweep: this may sleep */
275 	if (kip) {
276 		/* Check double free */
277 		WARN_ON(kip->slot_used[idx] != SLOT_USED);
278 		if (dirty) {
279 			kip->slot_used[idx] = SLOT_DIRTY;
280 			kip->ngarbage++;
281 			if (++c->nr_garbage > slots_per_page(c))
282 				collect_garbage_slots(c);
283 		} else {
284 			collect_one_slot(kip, idx);
285 		}
286 	}
287 }
288 
/*
 * Check whether the given address is on a page of kprobe instruction slots.
 * This is used for checking whether an address found on a stack
 * is in a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
295 {
296 	struct kprobe_insn_page *kip;
297 	bool ret = false;
298 
299 	rcu_read_lock();
300 	list_for_each_entry_rcu(kip, &c->pages, list) {
301 		if (addr >= (unsigned long)kip->insns &&
302 		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
303 			ret = true;
304 			break;
305 		}
306 	}
307 	rcu_read_unlock();
308 
309 	return ret;
310 }
311 
int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
			     unsigned long *value, char *type, char *sym)
314 {
315 	struct kprobe_insn_page *kip;
316 	int ret = -ERANGE;
317 
318 	rcu_read_lock();
319 	list_for_each_entry_rcu(kip, &c->pages, list) {
320 		if ((*symnum)--)
321 			continue;
322 		strscpy(sym, c->sym, KSYM_NAME_LEN);
323 		*type = 't';
324 		*value = (unsigned long)kip->insns;
325 		ret = 0;
326 		break;
327 	}
328 	rcu_read_unlock();
329 
330 	return ret;
331 }
332 
333 #ifdef CONFIG_OPTPROBES
void __weak *alloc_optinsn_page(void)
335 {
336 	return alloc_insn_page();
337 }
338 
void __weak free_optinsn_page(void *page)
340 {
341 	free_insn_page(page);
342 }
343 
344 /* For optimized_kprobe buffer */
345 struct kprobe_insn_cache kprobe_optinsn_slots = {
346 	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
347 	.alloc = alloc_optinsn_page,
348 	.free = free_optinsn_page,
349 	.sym = KPROBE_OPTINSN_PAGE_SYM,
350 	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
351 	/* .insn_size is initialized later */
352 	.nr_garbage = 0,
353 };
354 #endif /* CONFIG_OPTPROBES */
355 #endif /* __ARCH_WANT_KPROBES_INSN_SLOT */
356 
/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
359 {
360 	__this_cpu_write(kprobe_instance, kp);
361 }
362 
static inline void reset_kprobe_instance(void)
364 {
365 	__this_cpu_write(kprobe_instance, NULL);
366 }
367 
368 /*
369  * This routine is called either:
370  *	- under the 'kprobe_mutex' - during kprobe_[un]register().
371  *				OR
372  *	- with preemption disabled - from architecture specific code.
373  */
struct kprobe *get_kprobe(void *addr)
375 {
376 	struct hlist_head *head;
377 	struct kprobe *p;
378 
379 	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
380 	hlist_for_each_entry_rcu(p, head, hlist,
381 				 lockdep_is_held(&kprobe_mutex)) {
382 		if (p->addr == addr)
383 			return p;
384 	}
385 
386 	return NULL;
387 }
388 NOKPROBE_SYMBOL(get_kprobe);
389 
390 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
391 
392 /* Return true if 'p' is an aggregator */
static inline bool kprobe_aggrprobe(struct kprobe *p)
394 {
395 	return p->pre_handler == aggr_pre_handler;
396 }
397 
398 /* Return true if 'p' is unused */
static inline bool kprobe_unused(struct kprobe *p)
400 {
401 	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
402 	       list_empty(&p->list);
403 }
404 
405 /* Keep all fields in the kprobe consistent. */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
407 {
408 	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
409 	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
410 }
411 
412 #ifdef CONFIG_OPTPROBES
413 /* NOTE: This is protected by 'kprobe_mutex'. */
414 static bool kprobes_allow_optimization;
415 
/*
 * Call all 'kprobe::pre_handler' handlers on the list, but ignore their
 * return values. This must be called from the arch-dependent optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
421 {
422 	struct kprobe *kp;
423 
424 	list_for_each_entry_rcu(kp, &p->list, list) {
425 		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
426 			set_kprobe_instance(kp);
427 			kp->pre_handler(kp, regs);
428 		}
429 		reset_kprobe_instance();
430 	}
431 }
432 NOKPROBE_SYMBOL(opt_pre_handler);
433 
434 /* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
436 {
437 	struct optimized_kprobe *op;
438 
439 	op = container_of(p, struct optimized_kprobe, kp);
440 	arch_remove_optimized_kprobe(op);
441 	arch_remove_kprobe(p);
442 	kfree(op);
443 }
444 
445 /* Return true if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
447 {
448 	struct optimized_kprobe *op;
449 
450 	if (kprobe_aggrprobe(p)) {
451 		op = container_of(p, struct optimized_kprobe, kp);
452 		return arch_prepared_optinsn(&op->optinsn);
453 	}
454 
455 	return 0;
456 }
457 
458 /* Return true if the kprobe is disarmed. Note: p must be on hash list */
bool kprobe_disarmed(struct kprobe *p)
460 {
461 	struct optimized_kprobe *op;
462 
463 	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
464 	if (!kprobe_aggrprobe(p))
465 		return kprobe_disabled(p);
466 
467 	op = container_of(p, struct optimized_kprobe, kp);
468 
469 	return kprobe_disabled(p) && list_empty(&op->list);
470 }
471 
472 /* Return true if the probe is queued on (un)optimizing lists */
static bool kprobe_queued(struct kprobe *p)
474 {
475 	struct optimized_kprobe *op;
476 
477 	if (kprobe_aggrprobe(p)) {
478 		op = container_of(p, struct optimized_kprobe, kp);
479 		if (!list_empty(&op->list))
480 			return true;
481 	}
482 	return false;
483 }
484 
485 /*
486  * Return an optimized kprobe whose optimizing code replaces
487  * instructions including 'addr' (exclude breakpoint).
488  */
static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
490 {
491 	int i;
492 	struct kprobe *p = NULL;
493 	struct optimized_kprobe *op;
494 
495 	/* Don't check i == 0, since that is a breakpoint case. */
496 	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++)
497 		p = get_kprobe(addr - i);
498 
499 	if (p && kprobe_optready(p)) {
500 		op = container_of(p, struct optimized_kprobe, kp);
501 		if (arch_within_optimized_kprobe(op, addr))
502 			return p;
503 	}
504 
505 	return NULL;
506 }
507 
508 /* Optimization staging list, protected by 'kprobe_mutex' */
509 static LIST_HEAD(optimizing_list);
510 static LIST_HEAD(unoptimizing_list);
511 static LIST_HEAD(freeing_list);
512 
513 static void kprobe_optimizer(struct work_struct *work);
514 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
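/* Delay, in jiffies, before the kicked optimizer work actually runs. */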
515 #define OPTIMIZE_DELAY 5
516 
517 /*
518  * Optimize (replace a breakpoint with a jump) kprobes listed on
519  * 'optimizing_list'.
520  */
static void do_optimize_kprobes(void)
522 {
523 	lockdep_assert_held(&text_mutex);
	/*
	 * The optimization/unoptimization code refers to 'online_cpus' via
	 * stop_machine(), while cpu-hotplug modifies 'online_cpus'. At the
	 * same time, 'text_mutex' is held both here and during cpu-hotplug.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * 'text_mutex' while stop_machine() cannot proceed because
	 * 'online_cpus' has changed).
	 * To avoid this deadlock, the caller must hold the cpu-hotplug lock
	 * so that cpu-hotplug cannot run outside of the 'text_mutex' section.
	 */
534 	lockdep_assert_cpus_held();
535 
	/* Optimization is never done while kprobes are all disarmed */
537 	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
538 	    list_empty(&optimizing_list))
539 		return;
540 
541 	arch_optimize_kprobes(&optimizing_list);
542 }
543 
/*
 * Unoptimize (replace a jump with a breakpoint, and remove the breakpoint
 * if needed) kprobes listed on 'unoptimizing_list'.
 */
static void do_unoptimize_kprobes(void)
549 {
550 	struct optimized_kprobe *op, *tmp;
551 
552 	lockdep_assert_held(&text_mutex);
553 	/* See comment in do_optimize_kprobes() */
554 	lockdep_assert_cpus_held();
555 
556 	if (!list_empty(&unoptimizing_list))
557 		arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
558 
559 	/* Loop on 'freeing_list' for disarming and removing from kprobe hash list */
560 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
561 		/* Switching from detour code to origin */
562 		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
563 		/* Disarm probes if marked disabled and not gone */
564 		if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp))
565 			arch_disarm_kprobe(&op->kp);
566 		if (kprobe_unused(&op->kp)) {
567 			/*
568 			 * Remove unused probes from hash list. After waiting
569 			 * for synchronization, these probes are reclaimed.
570 			 * (reclaiming is done by do_free_cleaned_kprobes().)
571 			 */
572 			hlist_del_rcu(&op->kp.hlist);
573 		} else
574 			list_del_init(&op->list);
575 	}
576 }
577 
578 /* Reclaim all kprobes on the 'freeing_list' */
static void do_free_cleaned_kprobes(void)
580 {
581 	struct optimized_kprobe *op, *tmp;
582 
583 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
584 		list_del_init(&op->list);
585 		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
586 			/*
587 			 * This must not happen, but if there is a kprobe
588 			 * still in use, keep it on kprobes hash list.
589 			 */
590 			continue;
591 		}
592 		free_aggr_kprobe(&op->kp);
593 	}
594 }
595 
596 /* Start optimizer after OPTIMIZE_DELAY passed */
static void kick_kprobe_optimizer(void)
598 {
599 	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
600 }
601 
602 /* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
604 {
605 	guard(mutex)(&kprobe_mutex);
606 
607 	scoped_guard(cpus_read_lock) {
608 		guard(mutex)(&text_mutex);
609 
		/*
		 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
		 * kprobes before waiting for the quiescence period.
		 */
614 		do_unoptimize_kprobes();
615 
		/*
		 * Step 2: Wait for the quiescence period to ensure that all potentially
		 * preempted tasks have been scheduled normally. Because an optprobe
		 * may modify multiple instructions, there is a chance that the Nth
		 * instruction is preempted. In that case, such tasks could return
		 * into the 2nd-Nth byte of the jump instruction. This wait avoids that.
		 * Note that on a non-preemptive kernel, this is transparently converted
		 * to synchronize_sched() to wait for all interrupts to have completed.
		 */
625 		synchronize_rcu_tasks();
626 
		/* Step 3: Optimize kprobes after the quiescence period */
628 		do_optimize_kprobes();
629 
		/* Step 4: Free cleaned kprobes after the quiescence period */
631 		do_free_cleaned_kprobes();
632 	}
633 
634 	/* Step 5: Kick optimizer again if needed */
635 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
636 		kick_kprobe_optimizer();
637 }
638 
static void wait_for_kprobe_optimizer_locked(void)
640 {
641 	lockdep_assert_held(&kprobe_mutex);
642 
643 	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
644 		mutex_unlock(&kprobe_mutex);
645 
		/* This will also make 'optimizing_work' execute immediately */
647 		flush_delayed_work(&optimizing_work);
648 		/* 'optimizing_work' might not have been queued yet, relax */
649 		cpu_relax();
650 
651 		mutex_lock(&kprobe_mutex);
652 	}
653 }
654 
655 /* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
657 {
658 	guard(mutex)(&kprobe_mutex);
659 
660 	wait_for_kprobe_optimizer_locked();
661 }
662 
bool optprobe_queued_unopt(struct optimized_kprobe *op)
664 {
665 	struct optimized_kprobe *_op;
666 
667 	list_for_each_entry(_op, &unoptimizing_list, list) {
668 		if (op == _op)
669 			return true;
670 	}
671 
672 	return false;
673 }
674 
675 /* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
677 {
678 	struct optimized_kprobe *op;
679 
680 	/* Check if the kprobe is disabled or not ready for optimization. */
681 	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
682 	    (kprobe_disabled(p) || kprobes_all_disarmed))
683 		return;
684 
685 	/* kprobes with 'post_handler' can not be optimized */
686 	if (p->post_handler)
687 		return;
688 
689 	op = container_of(p, struct optimized_kprobe, kp);
690 
	/* Check that there are no other kprobes within the optimized instruction range */
692 	if (arch_check_optimized_kprobe(op) < 0)
693 		return;
694 
695 	/* Check if it is already optimized. */
696 	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
697 		if (optprobe_queued_unopt(op)) {
698 			/* This is under unoptimizing. Just dequeue the probe */
699 			list_del_init(&op->list);
700 		}
701 		return;
702 	}
703 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
704 
705 	/*
706 	 * On the 'unoptimizing_list' and 'optimizing_list',
707 	 * 'op' must have OPTIMIZED flag
708 	 */
709 	if (WARN_ON_ONCE(!list_empty(&op->list)))
710 		return;
711 
712 	list_add(&op->list, &optimizing_list);
713 	kick_kprobe_optimizer();
714 }
715 
716 /* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
718 {
719 	lockdep_assert_cpus_held();
720 	arch_unoptimize_kprobe(op);
721 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
722 }
723 
724 /* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
726 {
727 	struct optimized_kprobe *op;
728 
729 	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
730 		return; /* This is not an optprobe nor optimized */
731 
732 	op = container_of(p, struct optimized_kprobe, kp);
733 	if (!kprobe_optimized(p))
734 		return;
735 
736 	if (!list_empty(&op->list)) {
737 		if (optprobe_queued_unopt(op)) {
738 			/* Queued in unoptimizing queue */
739 			if (force) {
740 				/*
741 				 * Forcibly unoptimize the kprobe here, and queue it
742 				 * in the freeing list for release afterwards.
743 				 */
744 				force_unoptimize_kprobe(op);
745 				list_move(&op->list, &freeing_list);
746 			}
747 		} else {
748 			/* Dequeue from the optimizing queue */
749 			list_del_init(&op->list);
750 			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
751 		}
752 		return;
753 	}
754 
755 	/* Optimized kprobe case */
756 	if (force) {
757 		/* Forcibly update the code: this is a special case */
758 		force_unoptimize_kprobe(op);
759 	} else {
760 		list_add(&op->list, &unoptimizing_list);
761 		kick_kprobe_optimizer();
762 	}
763 }
764 
765 /* Cancel unoptimizing for reusing */
static int reuse_unused_kprobe(struct kprobe *ap)
767 {
768 	struct optimized_kprobe *op;
769 
	/*
	 * An unused kprobe MUST be in the middle of delayed unoptimization
	 * (meaning there is still a relative jump in place) and disabled.
	 */
774 	op = container_of(ap, struct optimized_kprobe, kp);
775 	WARN_ON_ONCE(list_empty(&op->list));
776 	/* Enable the probe again */
777 	ap->flags &= ~KPROBE_FLAG_DISABLED;
778 	/* Optimize it again. (remove from 'op->list') */
779 	if (!kprobe_optready(ap))
780 		return -EINVAL;
781 
782 	optimize_kprobe(ap);
783 	return 0;
784 }
785 
786 /* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
788 {
789 	struct optimized_kprobe *op;
790 
791 	op = container_of(p, struct optimized_kprobe, kp);
792 	if (!list_empty(&op->list))
793 		/* Dequeue from the (un)optimization queue */
794 		list_del_init(&op->list);
795 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
796 
797 	if (kprobe_unused(p)) {
798 		/*
799 		 * Unused kprobe is on unoptimizing or freeing list. We move it
800 		 * to freeing_list and let the kprobe_optimizer() remove it from
801 		 * the kprobe hash list and free it.
802 		 */
803 		if (optprobe_queued_unopt(op))
804 			list_move(&op->list, &freeing_list);
805 	}
806 
807 	/* Don't touch the code, because it is already freed. */
808 	arch_remove_optimized_kprobe(op);
809 }
810 
static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
813 {
814 	if (!kprobe_ftrace(p))
815 		arch_prepare_optimized_kprobe(op, p);
816 }
817 
818 /* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
820 {
821 	struct optimized_kprobe *op;
822 
823 	op = container_of(p, struct optimized_kprobe, kp);
824 	__prepare_optimized_kprobe(op, p);
825 }
826 
827 /* Allocate new optimized_kprobe and try to prepare optimized instructions. */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
829 {
830 	struct optimized_kprobe *op;
831 
832 	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
833 	if (!op)
834 		return NULL;
835 
836 	INIT_LIST_HEAD(&op->list);
837 	op->kp.addr = p->addr;
838 	__prepare_optimized_kprobe(op, p);
839 
840 	return &op->kp;
841 }
842 
843 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
844 
845 /*
846  * Prepare an optimized_kprobe and optimize it.
847  * NOTE: 'p' must be a normal registered kprobe.
848  */
static void try_to_optimize_kprobe(struct kprobe *p)
850 {
851 	struct kprobe *ap;
852 	struct optimized_kprobe *op;
853 
854 	/* Impossible to optimize ftrace-based kprobe. */
855 	if (kprobe_ftrace(p))
856 		return;
857 
858 	/* For preparing optimization, jump_label_text_reserved() is called. */
859 	guard(cpus_read_lock)();
860 	guard(jump_label_lock)();
861 	guard(mutex)(&text_mutex);
862 
863 	ap = alloc_aggr_kprobe(p);
864 	if (!ap)
865 		return;
866 
867 	op = container_of(ap, struct optimized_kprobe, kp);
868 	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If preparing the optimization failed, fall back to a regular kprobe. */
870 		arch_remove_optimized_kprobe(op);
871 		kfree(op);
872 		return;
873 	}
874 
875 	init_aggr_kprobe(ap, p);
876 	optimize_kprobe(ap);	/* This just kicks optimizer thread. */
877 }
878 
static void optimize_all_kprobes(void)
880 {
881 	struct hlist_head *head;
882 	struct kprobe *p;
883 	unsigned int i;
884 
885 	guard(mutex)(&kprobe_mutex);
886 	/* If optimization is already allowed, just return. */
887 	if (kprobes_allow_optimization)
888 		return;
889 
890 	cpus_read_lock();
891 	kprobes_allow_optimization = true;
892 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
893 		head = &kprobe_table[i];
894 		hlist_for_each_entry(p, head, hlist)
895 			if (!kprobe_disabled(p))
896 				optimize_kprobe(p);
897 	}
898 	cpus_read_unlock();
899 	pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
900 }
901 
902 #ifdef CONFIG_SYSCTL
static void unoptimize_all_kprobes(void)
904 {
905 	struct hlist_head *head;
906 	struct kprobe *p;
907 	unsigned int i;
908 
909 	guard(mutex)(&kprobe_mutex);
910 	/* If optimization is already prohibited, just return. */
911 	if (!kprobes_allow_optimization)
912 		return;
913 
914 	cpus_read_lock();
915 	kprobes_allow_optimization = false;
916 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
917 		head = &kprobe_table[i];
918 		hlist_for_each_entry(p, head, hlist) {
919 			if (!kprobe_disabled(p))
920 				unoptimize_kprobe(p, false);
921 		}
922 	}
923 	cpus_read_unlock();
924 	/* Wait for unoptimizing completion. */
925 	wait_for_kprobe_optimizer_locked();
926 	pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
927 }
928 
929 static DEFINE_MUTEX(kprobe_sysctl_mutex);
930 static int sysctl_kprobes_optimization;
static int proc_kprobes_optimization_handler(const struct ctl_table *table,
					     int write, void *buffer,
					     size_t *length, loff_t *ppos)
934 {
935 	int ret;
936 
937 	guard(mutex)(&kprobe_sysctl_mutex);
938 	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
939 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
940 
941 	if (sysctl_kprobes_optimization)
942 		optimize_all_kprobes();
943 	else
944 		unoptimize_all_kprobes();
945 
946 	return ret;
947 }
948 
949 static const struct ctl_table kprobe_sysctls[] = {
950 	{
951 		.procname	= "kprobes-optimization",
952 		.data		= &sysctl_kprobes_optimization,
953 		.maxlen		= sizeof(int),
954 		.mode		= 0644,
955 		.proc_handler	= proc_kprobes_optimization_handler,
956 		.extra1		= SYSCTL_ZERO,
957 		.extra2		= SYSCTL_ONE,
958 	},
959 };
960 
static void __init kprobe_sysctls_init(void)
962 {
963 	register_sysctl_init("debug", kprobe_sysctls);
964 }
965 #endif /* CONFIG_SYSCTL */
966 
967 /* Put a breakpoint for a probe. */
static void __arm_kprobe(struct kprobe *p)
969 {
970 	struct kprobe *_p;
971 
972 	lockdep_assert_held(&text_mutex);
973 
974 	/* Find the overlapping optimized kprobes. */
975 	_p = get_optimized_kprobe(p->addr);
976 	if (unlikely(_p))
977 		/* Fallback to unoptimized kprobe */
978 		unoptimize_kprobe(_p, true);
979 
980 	arch_arm_kprobe(p);
981 	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
982 }
983 
984 /* Remove the breakpoint of a probe. */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
986 {
987 	struct kprobe *_p;
988 
989 	lockdep_assert_held(&text_mutex);
990 
991 	/* Try to unoptimize */
992 	unoptimize_kprobe(p, kprobes_all_disarmed);
993 
994 	if (!kprobe_queued(p)) {
995 		arch_disarm_kprobe(p);
996 		/* If another kprobe was blocked, re-optimize it. */
997 		_p = get_optimized_kprobe(p->addr);
998 		if (unlikely(_p) && reopt)
999 			optimize_kprobe(_p);
1000 	}
	/*
	 * TODO: Since unoptimization and real disarming will be done by
	 * the worker thread, we cannot check here whether other probes were
	 * unoptimized because of this probe. They should be re-optimized
	 * by the worker thread.
	 */
1007 }
1008 
1009 #else /* !CONFIG_OPTPROBES */
1010 
1011 #define optimize_kprobe(p)			do {} while (0)
1012 #define unoptimize_kprobe(p, f)			do {} while (0)
1013 #define kill_optimized_kprobe(p)		do {} while (0)
1014 #define prepare_optimized_kprobe(p)		do {} while (0)
1015 #define try_to_optimize_kprobe(p)		do {} while (0)
1016 #define __arm_kprobe(p)				arch_arm_kprobe(p)
1017 #define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
1018 #define kprobe_disarmed(p)			kprobe_disabled(p)
1019 #define wait_for_kprobe_optimizer_locked()			\
1020 	lockdep_assert_held(&kprobe_mutex)
1021 
static int reuse_unused_kprobe(struct kprobe *ap)
1023 {
	/*
	 * If optimized kprobes are NOT supported, the aggr kprobe is
	 * released at the same time that the last aggregated kprobe is
	 * unregistered.
	 * Thus there should be no chance to reuse an unused kprobe.
	 */
1030 	WARN_ON_ONCE(1);
1031 	return -EINVAL;
1032 }
1033 
static void free_aggr_kprobe(struct kprobe *p)
1035 {
1036 	arch_remove_kprobe(p);
1037 	kfree(p);
1038 }
1039 
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
1041 {
1042 	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
1043 }
1044 #endif /* CONFIG_OPTPROBES */
1045 
1046 #ifdef CONFIG_KPROBES_ON_FTRACE
1047 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
1048 	.func = kprobe_ftrace_handler,
1049 	.flags = FTRACE_OPS_FL_SAVE_REGS,
1050 };
1051 
1052 static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
1053 	.func = kprobe_ftrace_handler,
1054 	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
1055 };
1056 
1057 static int kprobe_ipmodify_enabled;
1058 static int kprobe_ftrace_enabled;
1059 bool kprobe_ftrace_disabled;
1060 
static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
			       int *cnt)
1063 {
1064 	int ret;
1065 
1066 	lockdep_assert_held(&kprobe_mutex);
1067 
1068 	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
1069 	if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
1070 		return ret;
1071 
1072 	if (*cnt == 0) {
1073 		ret = register_ftrace_function(ops);
1074 		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) {
			/*
			 * At this point, since ops is not registered, we should be safe
			 * from registering an empty filter.
			 */
1079 			ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1080 			return ret;
1081 		}
1082 	}
1083 
1084 	(*cnt)++;
1085 	return ret;
1086 }
1087 
static int arm_kprobe_ftrace(struct kprobe *p)
1089 {
1090 	bool ipmodify = (p->post_handler != NULL);
1091 
1092 	return __arm_kprobe_ftrace(p,
1093 		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1094 		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1095 }
1096 
static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
				  int *cnt)
1099 {
1100 	int ret;
1101 
1102 	lockdep_assert_held(&kprobe_mutex);
1103 
1104 	if (*cnt == 1) {
1105 		ret = unregister_ftrace_function(ops);
1106 		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret))
1107 			return ret;
1108 	}
1109 
1110 	(*cnt)--;
1111 
1112 	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1113 	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n",
1114 		  p->addr, ret);
1115 	return ret;
1116 }
1117 
static int disarm_kprobe_ftrace(struct kprobe *p)
1119 {
1120 	bool ipmodify = (p->post_handler != NULL);
1121 
1122 	return __disarm_kprobe_ftrace(p,
1123 		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1124 		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1125 }
1126 
void kprobe_ftrace_kill(void)
1128 {
1129 	kprobe_ftrace_disabled = true;
1130 }
1131 #else	/* !CONFIG_KPROBES_ON_FTRACE */
static inline int arm_kprobe_ftrace(struct kprobe *p)
1133 {
1134 	return -ENODEV;
1135 }
1136 
static inline int disarm_kprobe_ftrace(struct kprobe *p)
1138 {
1139 	return -ENODEV;
1140 }
1141 #endif
1142 
static int prepare_kprobe(struct kprobe *p)
1144 {
1145 	/* Must ensure p->addr is really on ftrace */
1146 	if (kprobe_ftrace(p))
1147 		return arch_prepare_kprobe_ftrace(p);
1148 
1149 	return arch_prepare_kprobe(p);
1150 }
1151 
static int arm_kprobe(struct kprobe *kp)
1153 {
1154 	if (unlikely(kprobe_ftrace(kp)))
1155 		return arm_kprobe_ftrace(kp);
1156 
1157 	guard(cpus_read_lock)();
1158 	guard(mutex)(&text_mutex);
1159 	__arm_kprobe(kp);
1160 	return 0;
1161 }
1162 
static int disarm_kprobe(struct kprobe *kp, bool reopt)
1164 {
1165 	if (unlikely(kprobe_ftrace(kp)))
1166 		return disarm_kprobe_ftrace(kp);
1167 
1168 	guard(cpus_read_lock)();
1169 	guard(mutex)(&text_mutex);
1170 	__disarm_kprobe(kp, reopt);
1171 	return 0;
1172 }
1173 
1174 /*
1175  * Aggregate handlers for multiple kprobes support - these handlers
1176  * take care of invoking the individual kprobe handlers on p->list
1177  */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1179 {
1180 	struct kprobe *kp;
1181 
1182 	list_for_each_entry_rcu(kp, &p->list, list) {
1183 		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1184 			set_kprobe_instance(kp);
1185 			if (kp->pre_handler(kp, regs))
1186 				return 1;
1187 		}
1188 		reset_kprobe_instance();
1189 	}
1190 	return 0;
1191 }
1192 NOKPROBE_SYMBOL(aggr_pre_handler);
1193 
static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
1196 {
1197 	struct kprobe *kp;
1198 
1199 	list_for_each_entry_rcu(kp, &p->list, list) {
1200 		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1201 			set_kprobe_instance(kp);
1202 			kp->post_handler(kp, regs, flags);
1203 			reset_kprobe_instance();
1204 		}
1205 	}
1206 }
1207 NOKPROBE_SYMBOL(aggr_post_handler);
1208 
1209 /* Walks the list and increments 'nmissed' if 'p' has child probes. */
void kprobes_inc_nmissed_count(struct kprobe *p)
1211 {
1212 	struct kprobe *kp;
1213 
1214 	if (!kprobe_aggrprobe(p)) {
1215 		p->nmissed++;
1216 	} else {
1217 		list_for_each_entry_rcu(kp, &p->list, list)
1218 			kp->nmissed++;
1219 	}
1220 }
1221 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1222 
1223 static struct kprobe kprobe_busy = {
1224 	.addr = (void *) get_kprobe,
1225 };
1226 
void kprobe_busy_begin(void)
1228 {
1229 	struct kprobe_ctlblk *kcb;
1230 
1231 	preempt_disable();
1232 	__this_cpu_write(current_kprobe, &kprobe_busy);
1233 	kcb = get_kprobe_ctlblk();
1234 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1235 }
1236 
void kprobe_busy_end(void)
1238 {
1239 	__this_cpu_write(current_kprobe, NULL);
1240 	preempt_enable();
1241 }
1242 
1243 /* Add the new probe to 'ap->list'. */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1245 {
1246 	if (p->post_handler)
1247 		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */
1248 
1249 	list_add_rcu(&p->list, &ap->list);
1250 	if (p->post_handler && !ap->post_handler)
1251 		ap->post_handler = aggr_post_handler;
1252 
1253 	return 0;
1254 }
1255 
1256 /*
1257  * Fill in the required fields of the aggregator kprobe. Replace the
1258  * earlier kprobe in the hlist with the aggregator kprobe.
1259  */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1261 {
1262 	/* Copy the insn slot of 'p' to 'ap'. */
1263 	copy_kprobe(p, ap);
1264 	flush_insn_slot(ap);
1265 	ap->addr = p->addr;
1266 	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1267 	ap->pre_handler = aggr_pre_handler;
1268 	/* We don't care the kprobe which has gone. */
1269 	if (p->post_handler && !kprobe_gone(p))
1270 		ap->post_handler = aggr_post_handler;
1271 
1272 	INIT_LIST_HEAD(&ap->list);
1273 	INIT_HLIST_NODE(&ap->hlist);
1274 
1275 	list_add_rcu(&p->list, &ap->list);
1276 	hlist_replace_rcu(&p->hlist, &ap->hlist);
1277 }
1278 
1279 /*
1280  * This registers the second or subsequent kprobe at the same address.
1281  */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1283 {
1284 	int ret = 0;
1285 	struct kprobe *ap = orig_p;
1286 
1287 	scoped_guard(cpus_read_lock) {
1288 		/* For preparing optimization, jump_label_text_reserved() is called */
1289 		guard(jump_label_lock)();
1290 		guard(mutex)(&text_mutex);
1291 
1292 		if (!kprobe_aggrprobe(orig_p)) {
1293 			/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
1294 			ap = alloc_aggr_kprobe(orig_p);
1295 			if (!ap)
1296 				return -ENOMEM;
1297 			init_aggr_kprobe(ap, orig_p);
1298 		} else if (kprobe_unused(ap)) {
1299 			/* This probe is going to die. Rescue it */
1300 			ret = reuse_unused_kprobe(ap);
1301 			if (ret)
1302 				return ret;
1303 		}
1304 
1305 		if (kprobe_gone(ap)) {
			/*
			 * We are attempting to insert a new probe at the same
			 * location as a probe in a module vaddr area which has
			 * already been freed. So the instruction slot has already
			 * been released and we need a new slot for the new probe.
			 */
1312 			ret = arch_prepare_kprobe(ap);
1313 			if (ret)
1314 				/*
1315 				 * Even if fail to allocate new slot, don't need to
1316 				 * free the 'ap'. It will be used next time, or
1317 				 * freed by unregister_kprobe().
1318 				 */
1319 				return ret;
1320 
1321 			/* Prepare optimized instructions if possible. */
1322 			prepare_optimized_kprobe(ap);
1323 
1324 			/*
1325 			 * Clear gone flag to prevent allocating new slot again, and
1326 			 * set disabled flag because it is not armed yet.
1327 			 */
1328 			ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1329 					| KPROBE_FLAG_DISABLED;
1330 		}
1331 
1332 		/* Copy the insn slot of 'p' to 'ap'. */
1333 		copy_kprobe(ap, p);
1334 		ret = add_new_kprobe(ap, p);
1335 	}
1336 
1337 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1338 		ap->flags &= ~KPROBE_FLAG_DISABLED;
1339 		if (!kprobes_all_disarmed) {
1340 			/* Arm the breakpoint again. */
1341 			ret = arm_kprobe(ap);
1342 			if (ret) {
1343 				ap->flags |= KPROBE_FLAG_DISABLED;
1344 				list_del_rcu(&p->list);
1345 				synchronize_rcu();
1346 			}
1347 		}
1348 	}
1349 	return ret;
1350 }
1351 
bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1353 {
1354 	/* The '__kprobes' functions and entry code must not be probed. */
1355 	return addr >= (unsigned long)__kprobes_text_start &&
1356 	       addr < (unsigned long)__kprobes_text_end;
1357 }
1358 
static bool __within_kprobe_blacklist(unsigned long addr)
1360 {
1361 	struct kprobe_blacklist_entry *ent;
1362 
1363 	if (arch_within_kprobe_blacklist(addr))
1364 		return true;
1365 	/*
1366 	 * If 'kprobe_blacklist' is defined, check the address and
1367 	 * reject any probe registration in the prohibited area.
1368 	 */
1369 	list_for_each_entry(ent, &kprobe_blacklist, list) {
1370 		if (addr >= ent->start_addr && addr < ent->end_addr)
1371 			return true;
1372 	}
1373 	return false;
1374 }
1375 
bool within_kprobe_blacklist(unsigned long addr)
1377 {
1378 	char symname[KSYM_NAME_LEN], *p;
1379 
1380 	if (__within_kprobe_blacklist(addr))
1381 		return true;
1382 
1383 	/* Check if the address is on a suffixed-symbol */
1384 	if (!lookup_symbol_name(addr, symname)) {
1385 		p = strchr(symname, '.');
1386 		if (!p)
1387 			return false;
1388 		*p = '\0';
1389 		addr = (unsigned long)kprobe_lookup_name(symname, 0);
1390 		if (addr)
1391 			return __within_kprobe_blacklist(addr);
1392 	}
1393 	return false;
1394 }
1395 
1396 /*
1397  * arch_adjust_kprobe_addr - adjust the address
1398  * @addr: symbol base address
1399  * @offset: offset within the symbol
 * @on_func_entry: set to whether @addr + @offset was on the function entry
1401  *
1402  * Typically returns @addr + @offset, except for special cases where the
1403  * function might be prefixed by a CFI landing pad, in that case any offset
1404  * inside the landing pad is mapped to the first 'real' instruction of the
1405  * symbol.
1406  *
1407  * Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C
1408  * instruction at +0.
1409  */
kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr,
						unsigned long offset,
						bool *on_func_entry)
1413 {
1414 	*on_func_entry = !offset;
1415 	return (kprobe_opcode_t *)(addr + offset);
1416 }
1417 
/*
 * If 'symbol_name' is specified, look it up and add the 'offset'
 * to it. This way, we can specify a relative address to a symbol.
 * This returns an encoded error if it fails to look up the symbol or
 * if the combination of parameters is invalid.
 */
static kprobe_opcode_t *
_kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
	     unsigned long offset, bool *on_func_entry)
1427 {
1428 	if ((symbol_name && addr) || (!symbol_name && !addr))
1429 		return ERR_PTR(-EINVAL);
1430 
1431 	if (symbol_name) {
1432 		/*
1433 		 * Input: @sym + @offset
1434 		 * Output: @addr + @offset
1435 		 *
		 * NOTE: kprobe_lookup_name() does *NOT* fold the offset
		 *       argument into its output!
1438 		 */
1439 		addr = kprobe_lookup_name(symbol_name, offset);
1440 		if (!addr)
1441 			return ERR_PTR(-ENOENT);
1442 	}
1443 
1444 	/*
1445 	 * So here we have @addr + @offset, displace it into a new
1446 	 * @addr' + @offset' where @addr' is the symbol start address.
1447 	 */
1448 	addr = (void *)addr + offset;
1449 	if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset))
1450 		return ERR_PTR(-ENOENT);
1451 	addr = (void *)addr - offset;
1452 
1453 	/*
1454 	 * Then ask the architecture to re-combine them, taking care of
1455 	 * magical function entry details while telling us if this was indeed
1456 	 * at the start of the function.
1457 	 */
1458 	addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
1459 	if (!addr)
1460 		return ERR_PTR(-EINVAL);
1461 
1462 	return addr;
1463 }
1464 
static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1466 {
1467 	bool on_func_entry;
1468 
1469 	return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
1470 }
1471 
1472 /*
1473  * Check the 'p' is valid and return the aggregator kprobe
1474  * at the same address.
1475  */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1477 {
1478 	struct kprobe *ap, *list_p;
1479 
1480 	lockdep_assert_held(&kprobe_mutex);
1481 
1482 	ap = get_kprobe(p->addr);
1483 	if (unlikely(!ap))
1484 		return NULL;
1485 
1486 	if (p == ap)
1487 		return ap;
1488 
1489 	list_for_each_entry(list_p, &ap->list, list)
1490 		if (list_p == p)
1491 		/* kprobe p is a valid probe */
1492 			return ap;
1493 
1494 	return NULL;
1495 }
1496 
/*
 * Warn and return an error if the kprobe is being re-registered, since
 * that indicates a software bug.
 */
static inline int warn_kprobe_rereg(struct kprobe *p)
1502 {
1503 	guard(mutex)(&kprobe_mutex);
1504 
1505 	if (WARN_ON_ONCE(__get_valid_kprobe(p)))
1506 		return -EINVAL;
1507 
1508 	return 0;
1509 }
1510 
static int check_ftrace_location(struct kprobe *p)
1512 {
1513 	unsigned long addr = (unsigned long)p->addr;
1514 
1515 	if (ftrace_location(addr) == addr) {
1516 #ifdef CONFIG_KPROBES_ON_FTRACE
1517 		p->flags |= KPROBE_FLAG_FTRACE;
1518 #else
1519 		return -EINVAL;
1520 #endif
1521 	}
1522 	return 0;
1523 }
1524 
static bool is_cfi_preamble_symbol(unsigned long addr)
1526 {
1527 	char symbuf[KSYM_NAME_LEN];
1528 
1529 	if (lookup_symbol_name(addr, symbuf))
1530 		return false;
1531 
1532 	return str_has_prefix(symbuf, "__cfi_") ||
1533 		str_has_prefix(symbuf, "__pfx_");
1534 }
1535 
static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
1538 {
1539 	int ret;
1540 
1541 	ret = check_ftrace_location(p);
1542 	if (ret)
1543 		return ret;
1544 
1545 	guard(jump_label_lock)();
1546 
1547 	/* Ensure the address is in a text area, and find a module if exists. */
1548 	*probed_mod = NULL;
1549 	if (!core_kernel_text((unsigned long) p->addr)) {
1550 		guard(preempt)();
1551 		*probed_mod = __module_text_address((unsigned long) p->addr);
1552 		if (!(*probed_mod))
1553 			return -EINVAL;
1554 
1555 		/*
1556 		 * We must hold a refcount of the probed module while updating
1557 		 * its code to prohibit unexpected unloading.
1558 		 */
1559 		if (unlikely(!try_module_get(*probed_mod)))
1560 			return -ENOENT;
1561 	}
1562 	/* Ensure it is not in reserved area. */
1563 	if (in_gate_area_no_mm((unsigned long) p->addr) ||
1564 	    within_kprobe_blacklist((unsigned long) p->addr) ||
1565 	    jump_label_text_reserved(p->addr, p->addr) ||
1566 	    static_call_text_reserved(p->addr, p->addr) ||
1567 	    find_bug((unsigned long)p->addr) ||
1568 	    is_cfi_preamble_symbol((unsigned long)p->addr)) {
1569 		module_put(*probed_mod);
1570 		return -EINVAL;
1571 	}
1572 
1573 	/* Get module refcount and reject __init functions for loaded modules. */
1574 	if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
1575 		/*
1576 		 * If the module freed '.init.text', we couldn't insert
1577 		 * kprobes in there.
1578 		 */
1579 		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1580 		    !module_is_coming(*probed_mod)) {
1581 			module_put(*probed_mod);
1582 			return -ENOENT;
1583 		}
1584 	}
1585 
1586 	return 0;
1587 }
1588 
static int __register_kprobe(struct kprobe *p)
1590 {
1591 	int ret;
1592 	struct kprobe *old_p;
1593 
1594 	guard(mutex)(&kprobe_mutex);
1595 
1596 	old_p = get_kprobe(p->addr);
1597 	if (old_p)
1598 		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
1599 		return register_aggr_kprobe(old_p, p);
1600 
1601 	scoped_guard(cpus_read_lock) {
1602 		/* Prevent text modification */
1603 		guard(mutex)(&text_mutex);
1604 		ret = prepare_kprobe(p);
1605 		if (ret)
1606 			return ret;
1607 	}
1608 
1609 	INIT_HLIST_NODE(&p->hlist);
1610 	hlist_add_head_rcu(&p->hlist,
1611 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1612 
1613 	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1614 		ret = arm_kprobe(p);
1615 		if (ret) {
1616 			hlist_del_rcu(&p->hlist);
1617 			synchronize_rcu();
1618 		}
1619 	}
1620 
1621 	/* Try to optimize kprobe */
1622 	try_to_optimize_kprobe(p);
1623 	return 0;
1624 }
1625 
int register_kprobe(struct kprobe *p)
1627 {
1628 	int ret;
1629 	struct module *probed_mod;
1630 	kprobe_opcode_t *addr;
1631 	bool on_func_entry;
1632 
1633 	/* Canonicalize probe address from symbol */
1634 	addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
1635 	if (IS_ERR(addr))
1636 		return PTR_ERR(addr);
1637 	p->addr = addr;
1638 
1639 	ret = warn_kprobe_rereg(p);
1640 	if (ret)
1641 		return ret;
1642 
1643 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1644 	p->flags &= KPROBE_FLAG_DISABLED;
1645 	if (on_func_entry)
1646 		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
1647 	p->nmissed = 0;
1648 	INIT_LIST_HEAD(&p->list);
1649 
1650 	ret = check_kprobe_address_safe(p, &probed_mod);
1651 	if (ret)
1652 		return ret;
1653 
1654 	ret = __register_kprobe(p);
1655 
1656 	if (probed_mod)
1657 		module_put(probed_mod);
1658 
1659 	return ret;
1660 }
1661 EXPORT_SYMBOL_GPL(register_kprobe);
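/*
 * Illustrative usage sketch (not used by this file; the probed symbol below
 * is an arbitrary example): a caller fills in either '.symbol_name' (plus an
 * optional '.offset') or '.addr', along with its handlers, and pairs
 * register_kprobe() with unregister_kprobe(). Returning 0 from the
 * pre-handler lets the probed instruction execute normally.
 *
 *	static int my_pre_handler(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("pre-handler hit at %pS\n",
 *			(void *)instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "kernel_clone",
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	err = register_kprobe(&my_kp);
 *	...
 *	unregister_kprobe(&my_kp);
 */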
1662 
1663 /* Check if all probes on the 'ap' are disabled. */
static bool aggr_kprobe_disabled(struct kprobe *ap)
1665 {
1666 	struct kprobe *kp;
1667 
1668 	lockdep_assert_held(&kprobe_mutex);
1669 
1670 	list_for_each_entry(kp, &ap->list, list)
1671 		if (!kprobe_disabled(kp))
1672 			/*
1673 			 * Since there is an active probe on the list,
1674 			 * we can't disable this 'ap'.
1675 			 */
1676 			return false;
1677 
1678 	return true;
1679 }
1680 
static struct kprobe *__disable_kprobe(struct kprobe *p)
1682 {
1683 	struct kprobe *orig_p;
1684 	int ret;
1685 
1686 	lockdep_assert_held(&kprobe_mutex);
1687 
1688 	/* Get an original kprobe for return */
1689 	orig_p = __get_valid_kprobe(p);
1690 	if (unlikely(orig_p == NULL))
1691 		return ERR_PTR(-EINVAL);
1692 
1693 	if (kprobe_disabled(p))
1694 		return orig_p;
1695 
1696 	/* Disable probe if it is a child probe */
1697 	if (p != orig_p)
1698 		p->flags |= KPROBE_FLAG_DISABLED;
1699 
1700 	/* Try to disarm and disable this/parent probe */
1701 	if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1702 		/*
1703 		 * Don't be lazy here.  Even if 'kprobes_all_disarmed'
1704 		 * is false, 'orig_p' might not have been armed yet.
1705 		 * Note arm_all_kprobes() __tries__ to arm all kprobes
1706 		 * on the best effort basis.
1707 		 */
1708 		if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
1709 			ret = disarm_kprobe(orig_p, true);
1710 			if (ret) {
1711 				p->flags &= ~KPROBE_FLAG_DISABLED;
1712 				return ERR_PTR(ret);
1713 			}
1714 		}
1715 		orig_p->flags |= KPROBE_FLAG_DISABLED;
1716 	}
1717 
1718 	return orig_p;
1719 }
1720 
1721 /*
1722  * Unregister a kprobe without a scheduler synchronization.
1723  */
static int __unregister_kprobe_top(struct kprobe *p)
1725 {
1726 	struct kprobe *ap, *list_p;
1727 
1728 	/* Disable kprobe. This will disarm it if needed. */
1729 	ap = __disable_kprobe(p);
1730 	if (IS_ERR(ap))
1731 		return PTR_ERR(ap);
1732 
1733 	WARN_ON(ap != p && !kprobe_aggrprobe(ap));
1734 
	/*
	 * If the probe is an independent (and non-optimized) kprobe
	 * (not an aggrprobe), is the last kprobe on the aggrprobe, or
	 * the kprobe is already disarmed, just remove it from the hash list.
	 */
1740 	if (ap == p ||
1741 		(list_is_singular(&ap->list) && kprobe_disarmed(ap))) {
		/*
		 * !disarmed can happen if the probe is under delayed
		 * unoptimization.
		 */
1746 		hlist_del_rcu(&ap->hlist);
1747 		return 0;
1748 	}
1749 
1750 	/* If disabling probe has special handlers, update aggrprobe */
1751 	if (p->post_handler && !kprobe_gone(p)) {
1752 		list_for_each_entry(list_p, &ap->list, list) {
1753 			if ((list_p != p) && (list_p->post_handler))
1754 				break;
1755 		}
1756 		/* No other probe has post_handler */
1757 		if (list_entry_is_head(list_p, &ap->list, list)) {
1758 			/*
1759 			 * For the kprobe-on-ftrace case, we keep the
1760 			 * post_handler setting to identify this aggrprobe
1761 			 * armed with kprobe_ipmodify_ops.
1762 			 */
1763 			if (!kprobe_ftrace(ap))
1764 				ap->post_handler = NULL;
1765 		}
1766 	}
1767 
1768 	/*
1769 	 * Remove from the aggrprobe: this path will do nothing in
1770 	 * __unregister_kprobe_bottom().
1771 	 */
1772 	list_del_rcu(&p->list);
1773 	if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1774 		/*
1775 		 * Try to optimize this probe again, because post
1776 		 * handler may have been changed.
1777 		 */
1778 		optimize_kprobe(ap);
1779 	return 0;
1780 
1781 }
1782 
static void __unregister_kprobe_bottom(struct kprobe *p)
1784 {
1785 	struct kprobe *ap;
1786 
1787 	if (list_empty(&p->list))
1788 		/* This is an independent kprobe */
1789 		arch_remove_kprobe(p);
1790 	else if (list_is_singular(&p->list)) {
1791 		/* This is the last child of an aggrprobe */
1792 		ap = list_entry(p->list.next, struct kprobe, list);
1793 		list_del(&p->list);
1794 		free_aggr_kprobe(ap);
1795 	}
1796 	/* Otherwise, do nothing. */
1797 }
1798 
1799 int register_kprobes(struct kprobe **kps, int num)
1800 {
1801 	int i, ret = 0;
1802 
1803 	if (num <= 0)
1804 		return -EINVAL;
1805 	for (i = 0; i < num; i++) {
1806 		ret = register_kprobe(kps[i]);
1807 		if (ret < 0) {
1808 			if (i > 0)
1809 				unregister_kprobes(kps, i);
1810 			break;
1811 		}
1812 	}
1813 	return ret;
1814 }
1815 EXPORT_SYMBOL_GPL(register_kprobes);
1816 
1817 void unregister_kprobe(struct kprobe *p)
1818 {
1819 	unregister_kprobes(&p, 1);
1820 }
1821 EXPORT_SYMBOL_GPL(unregister_kprobe);
1822 
1823 void unregister_kprobes(struct kprobe **kps, int num)
1824 {
1825 	int i;
1826 
1827 	if (num <= 0)
1828 		return;
1829 	scoped_guard(mutex, &kprobe_mutex) {
1830 		for (i = 0; i < num; i++)
1831 			if (__unregister_kprobe_top(kps[i]) < 0)
1832 				kps[i]->addr = NULL;
1833 	}
1834 	synchronize_rcu();
1835 	for (i = 0; i < num; i++)
1836 		if (kps[i]->addr)
1837 			__unregister_kprobe_bottom(kps[i]);
1838 }
1839 EXPORT_SYMBOL_GPL(unregister_kprobes);
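
/*
 * Illustrative sketch (not part of the implementation): a minimal user of
 * the batch register/unregister API above. The probed symbol names and the
 * pr_info() output are assumptions chosen only for the example; a real user
 * would pick its own targets and would normally live in a separate module.
 */
static int __maybe_unused example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre-handler hit at %pS\n", p->addr);
	return 0;	/* 0: let the probed instruction execute as usual */
}

static struct kprobe example_probes[] = {
	{ .symbol_name = "kernel_clone", .pre_handler = example_pre_handler },
	{ .symbol_name = "do_sys_open",  .pre_handler = example_pre_handler },
};
static struct kprobe *example_probe_ptrs[] = {
	&example_probes[0], &example_probes[1],
};

static int __maybe_unused example_register_batch(void)
{
	/* On failure, register_kprobes() unregisters the ones already added. */
	return register_kprobes(example_probe_ptrs, ARRAY_SIZE(example_probe_ptrs));
}

static void __maybe_unused example_unregister_batch(void)
{
	unregister_kprobes(example_probe_ptrs, ARRAY_SIZE(example_probe_ptrs));
}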
1840 
1841 int __weak kprobe_exceptions_notify(struct notifier_block *self,
1842 					unsigned long val, void *data)
1843 {
1844 	return NOTIFY_DONE;
1845 }
1846 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1847 
1848 static struct notifier_block kprobe_exceptions_nb = {
1849 	.notifier_call = kprobe_exceptions_notify,
1850 	.priority = 0x7fffffff /* we need to be notified first */
1851 };
1852 
1853 #ifdef CONFIG_KRETPROBES
1854 
1855 #if !defined(CONFIG_KRETPROBE_ON_RETHOOK)
1856 
1857 /* callbacks for objpool of kretprobe instances */
1858 static int kretprobe_init_inst(void *nod, void *context)
1859 {
1860 	struct kretprobe_instance *ri = nod;
1861 
1862 	ri->rph = context;
1863 	return 0;
1864 }
1865 static int kretprobe_fini_pool(struct objpool_head *head, void *context)
1866 {
1867 	kfree(context);
1868 	return 0;
1869 }
1870 
1871 static void free_rp_inst_rcu(struct rcu_head *head)
1872 {
1873 	struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
1874 	struct kretprobe_holder *rph = ri->rph;
1875 
1876 	objpool_drop(ri, &rph->pool);
1877 }
1878 NOKPROBE_SYMBOL(free_rp_inst_rcu);
1879 
1880 static void recycle_rp_inst(struct kretprobe_instance *ri)
1881 {
1882 	struct kretprobe *rp = get_kretprobe(ri);
1883 
1884 	if (likely(rp))
1885 		objpool_push(ri, &rp->rph->pool);
1886 	else
1887 		call_rcu(&ri->rcu, free_rp_inst_rcu);
1888 }
1889 NOKPROBE_SYMBOL(recycle_rp_inst);
1890 
1891 /*
1892  * This function is called from delayed_put_task_struct() when a task is
1893  * dead and cleaned up to recycle any kretprobe instances associated with
1894  * this task. These leftover instances represent probed functions that
1895  * have been called but will never return.
1896  */
1897 void kprobe_flush_task(struct task_struct *tk)
1898 {
1899 	struct kretprobe_instance *ri;
1900 	struct llist_node *node;
1901 
1902 	/* Early boot, not yet initialized. */
1903 	if (unlikely(!kprobes_initialized))
1904 		return;
1905 
1906 	kprobe_busy_begin();
1907 
1908 	node = __llist_del_all(&tk->kretprobe_instances);
1909 	while (node) {
1910 		ri = container_of(node, struct kretprobe_instance, llist);
1911 		node = node->next;
1912 
1913 		recycle_rp_inst(ri);
1914 	}
1915 
1916 	kprobe_busy_end();
1917 }
1918 NOKPROBE_SYMBOL(kprobe_flush_task);
1919 
1920 static inline void free_rp_inst(struct kretprobe *rp)
1921 {
1922 	struct kretprobe_holder *rph = rp->rph;
1923 
1924 	if (!rph)
1925 		return;
1926 	rp->rph = NULL;
1927 	objpool_fini(&rph->pool);
1928 }
1929 
1930 /* This assumes 'tsk' is either the current task or a task that is not running. */
1931 static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk,
1932 						  struct llist_node **cur)
1933 {
1934 	struct kretprobe_instance *ri = NULL;
1935 	struct llist_node *node = *cur;
1936 
1937 	if (!node)
1938 		node = tsk->kretprobe_instances.first;
1939 	else
1940 		node = node->next;
1941 
1942 	while (node) {
1943 		ri = container_of(node, struct kretprobe_instance, llist);
1944 		if (ri->ret_addr != kretprobe_trampoline_addr()) {
1945 			*cur = node;
1946 			return ri->ret_addr;
1947 		}
1948 		node = node->next;
1949 	}
1950 	return NULL;
1951 }
1952 NOKPROBE_SYMBOL(__kretprobe_find_ret_addr);
1953 
1954 /**
1955  * kretprobe_find_ret_addr -- Find correct return address modified by kretprobe
1956  * @tsk: Target task
1957  * @fp: A frame pointer
1958  * @cur: a storage of the loop cursor llist_node pointer for next call
1959  *
1960  * Find the correct return address modified by a kretprobe on @tsk, returned
1961  * as an unsigned long. If the return address is found, this returns that
1962  * address value; otherwise it returns 0.
1963  * The @tsk must be 'current' or a task which is not running. @fp is a hint
1964  * used to find the correct return address - it is compared with the
1965  * kretprobe_instance::fp field. The @cur is a loop cursor for searching the
1966  * kretprobe return addresses on the @tsk. '*@cur' should be NULL on the
1967  * first call, but '@cur' itself must NOT be NULL.
1968  */
1969 unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
1970 				      struct llist_node **cur)
1971 {
1972 	struct kretprobe_instance *ri;
1973 	kprobe_opcode_t *ret;
1974 
1975 	if (WARN_ON_ONCE(!cur))
1976 		return 0;
1977 
1978 	do {
1979 		ret = __kretprobe_find_ret_addr(tsk, cur);
1980 		if (!ret)
1981 			break;
1982 		ri = container_of(*cur, struct kretprobe_instance, llist);
1983 	} while (ri->fp != fp);
1984 
1985 	return (unsigned long)ret;
1986 }
1987 NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
1988 
1989 void __weak arch_kretprobe_fixup_return(struct pt_regs *regs,
1990 					kprobe_opcode_t *correct_ret_addr)
1991 {
1992 	/*
1993 	 * Do nothing by default. Please fill this to update the fake return
1994 	 * address on the stack with the correct one on each arch if possible.
1995 	 */
1996 }
1997 
1998 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
1999 					     void *frame_pointer)
2000 {
2001 	struct kretprobe_instance *ri = NULL;
2002 	struct llist_node *first, *node = NULL;
2003 	kprobe_opcode_t *correct_ret_addr;
2004 	struct kretprobe *rp;
2005 
2006 	/* Find correct address and all nodes for this frame. */
2007 	correct_ret_addr = __kretprobe_find_ret_addr(current, &node);
2008 	if (!correct_ret_addr) {
2009 		pr_err("kretprobe: Return address not found; cannot execute the handler. There may be a bug in the kernel.\n");
2010 		BUG_ON(1);
2011 	}
2012 
2013 	/*
2014 	 * Set the return address as the instruction pointer, because if the
2015 	 * user handler calls stack_trace_save_regs() with this 'regs',
2016 	 * the stack trace will start from the instruction pointer.
2017 	 */
2018 	instruction_pointer_set(regs, (unsigned long)correct_ret_addr);
2019 
2020 	/* Run the user handler of the nodes. */
2021 	first = current->kretprobe_instances.first;
2022 	while (first) {
2023 		ri = container_of(first, struct kretprobe_instance, llist);
2024 
2025 		if (WARN_ON_ONCE(ri->fp != frame_pointer))
2026 			break;
2027 
2028 		rp = get_kretprobe(ri);
2029 		if (rp && rp->handler) {
2030 			struct kprobe *prev = kprobe_running();
2031 
2032 			__this_cpu_write(current_kprobe, &rp->kp);
2033 			ri->ret_addr = correct_ret_addr;
2034 			rp->handler(ri, regs);
2035 			__this_cpu_write(current_kprobe, prev);
2036 		}
2037 		if (first == node)
2038 			break;
2039 
2040 		first = first->next;
2041 	}
2042 
2043 	arch_kretprobe_fixup_return(regs, correct_ret_addr);
2044 
2045 	/* Unlink all nodes for this frame. */
2046 	first = current->kretprobe_instances.first;
2047 	current->kretprobe_instances.first = node->next;
2048 	node->next = NULL;
2049 
2050 	/* Recycle free instances. */
2051 	while (first) {
2052 		ri = container_of(first, struct kretprobe_instance, llist);
2053 		first = first->next;
2054 
2055 		recycle_rp_inst(ri);
2056 	}
2057 
2058 	return (unsigned long)correct_ret_addr;
2059 }
2060 NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
2061 
2062 /*
2063  * This kprobe pre_handler is registered with every kretprobe. When the
2064  * probe hits, it will set up the return probe.
2065  */
2066 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2067 {
2068 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
2069 	struct kretprobe_holder *rph = rp->rph;
2070 	struct kretprobe_instance *ri;
2071 
2072 	ri = objpool_pop(&rph->pool);
2073 	if (!ri) {
2074 		rp->nmissed++;
2075 		return 0;
2076 	}
2077 
2078 	if (rp->entry_handler && rp->entry_handler(ri, regs)) {
2079 		objpool_push(ri, &rph->pool);
2080 		return 0;
2081 	}
2082 
2083 	arch_prepare_kretprobe(ri, regs);
2084 
2085 	__llist_add(&ri->llist, &current->kretprobe_instances);
2086 
2087 	return 0;
2088 }
2089 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2090 #else /* CONFIG_KRETPROBE_ON_RETHOOK */
2091 /*
2092  * This kprobe pre_handler is registered with every kretprobe. When the
2093  * probe hits, it will set up the return probe.
2094  */
2095 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2096 {
2097 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
2098 	struct kretprobe_instance *ri;
2099 	struct rethook_node *rhn;
2100 
2101 	rhn = rethook_try_get(rp->rh);
2102 	if (!rhn) {
2103 		rp->nmissed++;
2104 		return 0;
2105 	}
2106 
2107 	ri = container_of(rhn, struct kretprobe_instance, node);
2108 
2109 	if (rp->entry_handler && rp->entry_handler(ri, regs))
2110 		rethook_recycle(rhn);
2111 	else
2112 		rethook_hook(rhn, regs, kprobe_ftrace(p));
2113 
2114 	return 0;
2115 }
2116 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2117 
2118 static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
2119 				      unsigned long ret_addr,
2120 				      struct pt_regs *regs)
2121 {
2122 	struct kretprobe *rp = (struct kretprobe *)data;
2123 	struct kretprobe_instance *ri;
2124 	struct kprobe_ctlblk *kcb;
2125 
2126 	/* The data must NOT be NULL; if it is, the rethook data structure is broken. */
2127 	if (WARN_ON_ONCE(!data) || !rp->handler)
2128 		return;
2129 
2130 	__this_cpu_write(current_kprobe, &rp->kp);
2131 	kcb = get_kprobe_ctlblk();
2132 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
2133 
2134 	ri = container_of(rh, struct kretprobe_instance, node);
2135 	rp->handler(ri, regs);
2136 
2137 	__this_cpu_write(current_kprobe, NULL);
2138 }
2139 NOKPROBE_SYMBOL(kretprobe_rethook_handler);
2140 
2141 #endif /* !CONFIG_KRETPROBE_ON_RETHOOK */
2142 
2143 /**
2144  * kprobe_on_func_entry() -- check whether given address is function entry
2145  * @addr: Target address
2146  * @sym:  Target symbol name
2147  * @offset: The offset from the symbol or the address
2148  *
2149  * This checks whether the given @addr+@offset or @sym+@offset is a
2150  * function entry address or not.
2151  * It returns 0 if it is the function entry, or -EINVAL if it is not.
2152  * It also returns -ENOENT if the symbol or address lookup fails.
2153  * The caller must pass either @addr or @sym (the other must be NULL),
2154  * otherwise this returns -EINVAL.
2155  */
2156 int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
2157 {
2158 	bool on_func_entry;
2159 	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset, &on_func_entry);
2160 
2161 	if (IS_ERR(kp_addr))
2162 		return PTR_ERR(kp_addr);
2163 
2164 	if (!on_func_entry)
2165 		return -EINVAL;
2166 
2167 	return 0;
2168 }
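
/*
 * Illustrative sketch (assumption, not used by the core code): how a caller
 * could pre-validate a probe location the same way register_kretprobe()
 * does below. The symbol name passed in is entirely up to the caller.
 */
static int __maybe_unused example_check_func_entry(const char *sym, unsigned long offset)
{
	int err = kprobe_on_func_entry(NULL, sym, offset);

	if (err == -ENOENT)
		pr_debug("'%s' could not be resolved\n", sym);
	else if (err == -EINVAL)
		pr_debug("'%s'+0x%lx is not a function entry\n", sym, offset);

	return err;	/* 0 means @sym+@offset is a function entry */
}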
2169 
2170 int register_kretprobe(struct kretprobe *rp)
2171 {
2172 	int ret;
2173 	int i;
2174 	void *addr;
2175 
2176 	ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
2177 	if (ret)
2178 		return ret;
2179 
2180 	/* If only 'rp->kp.addr' is specified, check reregistering kprobes */
2181 	if (rp->kp.addr && warn_kprobe_rereg(&rp->kp))
2182 		return -EINVAL;
2183 
2184 	if (kretprobe_blacklist_size) {
2185 		addr = kprobe_addr(&rp->kp);
2186 		if (IS_ERR(addr))
2187 			return PTR_ERR(addr);
2188 
2189 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2190 			if (kretprobe_blacklist[i].addr == addr)
2191 				return -EINVAL;
2192 		}
2193 	}
2194 
2195 	if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
2196 		return -E2BIG;
2197 
2198 	rp->kp.pre_handler = pre_handler_kretprobe;
2199 	rp->kp.post_handler = NULL;
2200 
2201 	/* Pre-allocate memory for max kretprobe instances */
2202 	if (rp->maxactive <= 0)
2203 		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
2204 
2205 #ifdef CONFIG_KRETPROBE_ON_RETHOOK
2206 	rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler,
2207 				sizeof(struct kretprobe_instance) +
2208 				rp->data_size, rp->maxactive);
2209 	if (IS_ERR(rp->rh))
2210 		return PTR_ERR(rp->rh);
2211 
2212 	rp->nmissed = 0;
2213 	/* Establish function entry probe point */
2214 	ret = register_kprobe(&rp->kp);
2215 	if (ret != 0) {
2216 		rethook_free(rp->rh);
2217 		rp->rh = NULL;
2218 	}
2219 #else	/* !CONFIG_KRETPROBE_ON_RETHOOK */
2220 	rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
2221 	if (!rp->rph)
2222 		return -ENOMEM;
2223 
2224 	if (objpool_init(&rp->rph->pool, rp->maxactive, rp->data_size +
2225 			sizeof(struct kretprobe_instance), GFP_KERNEL,
2226 			rp->rph, kretprobe_init_inst, kretprobe_fini_pool)) {
2227 		kfree(rp->rph);
2228 		rp->rph = NULL;
2229 		return -ENOMEM;
2230 	}
2231 	rcu_assign_pointer(rp->rph->rp, rp);
2232 	rp->nmissed = 0;
2233 	/* Establish function entry probe point */
2234 	ret = register_kprobe(&rp->kp);
2235 	if (ret != 0)
2236 		free_rp_inst(rp);
2237 #endif
2238 	return ret;
2239 }
2240 EXPORT_SYMBOL_GPL(register_kretprobe);
2241 
2242 int register_kretprobes(struct kretprobe **rps, int num)
2243 {
2244 	int ret = 0, i;
2245 
2246 	if (num <= 0)
2247 		return -EINVAL;
2248 	for (i = 0; i < num; i++) {
2249 		ret = register_kretprobe(rps[i]);
2250 		if (ret < 0) {
2251 			if (i > 0)
2252 				unregister_kretprobes(rps, i);
2253 			break;
2254 		}
2255 	}
2256 	return ret;
2257 }
2258 EXPORT_SYMBOL_GPL(register_kretprobes);
2259 
2260 void unregister_kretprobe(struct kretprobe *rp)
2261 {
2262 	unregister_kretprobes(&rp, 1);
2263 }
2264 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2265 
2266 void unregister_kretprobes(struct kretprobe **rps, int num)
2267 {
2268 	int i;
2269 
2270 	if (num <= 0)
2271 		return;
2272 	for (i = 0; i < num; i++) {
2273 		guard(mutex)(&kprobe_mutex);
2274 
2275 		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2276 			rps[i]->kp.addr = NULL;
2277 #ifdef CONFIG_KRETPROBE_ON_RETHOOK
2278 		rethook_free(rps[i]->rh);
2279 #else
2280 		rcu_assign_pointer(rps[i]->rph->rp, NULL);
2281 #endif
2282 	}
2283 
2284 	synchronize_rcu();
2285 	for (i = 0; i < num; i++) {
2286 		if (rps[i]->kp.addr) {
2287 			__unregister_kprobe_bottom(&rps[i]->kp);
2288 #ifndef CONFIG_KRETPROBE_ON_RETHOOK
2289 			free_rp_inst(rps[i]);
2290 #endif
2291 		}
2292 	}
2293 }
2294 EXPORT_SYMBOL_GPL(unregister_kretprobes);
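
/*
 * Illustrative sketch (assumption, not part of the implementation): a
 * typical kretprobe user pairing an entry_handler with a return handler
 * and passing state between them through the per-instance data area.
 * The probed symbol "kernel_clone" is only an example target.
 */
static int example_entry_handler(struct kretprobe_instance *ri,
				 struct pt_regs *regs)
{
	/* Stash the entry IP in the data area sized by .data_size below. */
	*(unsigned long *)ri->data = instruction_pointer(regs);
	return 0;	/* non-zero would make this invocation be skipped */
}

static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	pr_info("probed function at %pS returned %lx\n",
		(void *)*(unsigned long *)ri->data, regs_return_value(regs));
	return 0;
}

static struct kretprobe example_kretprobe __maybe_unused = {
	.kp.symbol_name	= "kernel_clone",
	.entry_handler	= example_entry_handler,
	.handler	= example_ret_handler,
	.data_size	= sizeof(unsigned long),
	.maxactive	= 20,	/* 0 would fall back to the default above */
};
/* Pair register_kretprobe(&example_kretprobe) with unregister_kretprobe(). */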
2295 
2296 #else /* CONFIG_KRETPROBES */
2297 int register_kretprobe(struct kretprobe *rp)
2298 {
2299 	return -EOPNOTSUPP;
2300 }
2301 EXPORT_SYMBOL_GPL(register_kretprobe);
2302 
2303 int register_kretprobes(struct kretprobe **rps, int num)
2304 {
2305 	return -EOPNOTSUPP;
2306 }
2307 EXPORT_SYMBOL_GPL(register_kretprobes);
2308 
2309 void unregister_kretprobe(struct kretprobe *rp)
2310 {
2311 }
2312 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2313 
2314 void unregister_kretprobes(struct kretprobe **rps, int num)
2315 {
2316 }
2317 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2318 
2319 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2320 {
2321 	return 0;
2322 }
2323 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2324 
2325 #endif /* CONFIG_KRETPROBES */
2326 
2327 /* Set the kprobe gone and remove its instruction buffer. */
2328 static void kill_kprobe(struct kprobe *p)
2329 {
2330 	struct kprobe *kp;
2331 
2332 	lockdep_assert_held(&kprobe_mutex);
2333 
2334 	/*
2335 	 * The module is going away. We should disarm any kprobe which is
2336 	 * using ftrace, because the ftrace framework is still available at
2337 	 * the 'MODULE_STATE_GOING' notification.
2338 	 */
2339 	if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
2340 		disarm_kprobe_ftrace(p);
2341 
2342 	p->flags |= KPROBE_FLAG_GONE;
2343 	if (kprobe_aggrprobe(p)) {
2344 		/*
2345 		 * If this is an aggr_kprobe, we have to list all the
2346 		 * chained probes and mark them GONE.
2347 		 */
2348 		list_for_each_entry(kp, &p->list, list)
2349 			kp->flags |= KPROBE_FLAG_GONE;
2350 		p->post_handler = NULL;
2351 		kill_optimized_kprobe(p);
2352 	}
2353 	/*
2354 	 * Here, we can remove insn_slot safely, because no thread calls
2355 	 * the original probed function (which will be freed soon) any more.
2356 	 */
2357 	arch_remove_kprobe(p);
2358 }
2359 
2360 /* Disable one kprobe */
2361 int disable_kprobe(struct kprobe *kp)
2362 {
2363 	struct kprobe *p;
2364 
2365 	guard(mutex)(&kprobe_mutex);
2366 
2367 	/* Disable this kprobe */
2368 	p = __disable_kprobe(kp);
2369 
2370 	return IS_ERR(p) ? PTR_ERR(p) : 0;
2371 }
2372 EXPORT_SYMBOL_GPL(disable_kprobe);
2373 
2374 /* Enable one kprobe */
2375 int enable_kprobe(struct kprobe *kp)
2376 {
2377 	int ret = 0;
2378 	struct kprobe *p;
2379 
2380 	guard(mutex)(&kprobe_mutex);
2381 
2382 	/* Check whether specified probe is valid. */
2383 	p = __get_valid_kprobe(kp);
2384 	if (unlikely(p == NULL))
2385 		return -EINVAL;
2386 
2387 	if (kprobe_gone(kp))
2388 		/* This kprobe has gone; we can't enable it. */
2389 		return -EINVAL;
2390 
2391 	if (p != kp)
2392 		kp->flags &= ~KPROBE_FLAG_DISABLED;
2393 
2394 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2395 		p->flags &= ~KPROBE_FLAG_DISABLED;
2396 		ret = arm_kprobe(p);
2397 		if (ret) {
2398 			p->flags |= KPROBE_FLAG_DISABLED;
2399 			if (p != kp)
2400 				kp->flags |= KPROBE_FLAG_DISABLED;
2401 		}
2402 	}
2403 	return ret;
2404 }
2405 EXPORT_SYMBOL_GPL(enable_kprobe);
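
/*
 * Illustrative sketch (assumption): temporarily muting a probe that was
 * previously registered, without unregistering it. '@kp' stands in for
 * any kprobe the caller owns.
 */
static int __maybe_unused example_set_probe_enabled(struct kprobe *kp, bool on)
{
	int err = on ? enable_kprobe(kp) : disable_kprobe(kp);

	if (err)
		pr_warn("failed to %s probe at %pS: %d\n",
			on ? "enable" : "disable", kp->addr, err);
	return err;
}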
2406 
2407 /* The caller must NOT call this on the usual path; it is only for critical cases. */
2408 void dump_kprobe(struct kprobe *kp)
2409 {
2410 	pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n",
2411 	       kp->symbol_name, kp->offset, kp->addr);
2412 }
2413 NOKPROBE_SYMBOL(dump_kprobe);
2414 
2415 int kprobe_add_ksym_blacklist(unsigned long entry)
2416 {
2417 	struct kprobe_blacklist_entry *ent;
2418 	unsigned long offset = 0, size = 0;
2419 
2420 	if (!kernel_text_address(entry) ||
2421 	    !kallsyms_lookup_size_offset(entry, &size, &offset))
2422 		return -EINVAL;
2423 
2424 	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2425 	if (!ent)
2426 		return -ENOMEM;
2427 	ent->start_addr = entry;
2428 	ent->end_addr = entry + size;
2429 	INIT_LIST_HEAD(&ent->list);
2430 	list_add_tail(&ent->list, &kprobe_blacklist);
2431 
2432 	return (int)size;
2433 }
2434 
2435 /* Add all symbols in given area into kprobe blacklist */
2436 int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2437 {
2438 	unsigned long entry;
2439 	int ret = 0;
2440 
2441 	for (entry = start; entry < end; entry += ret) {
2442 		ret = kprobe_add_ksym_blacklist(entry);
2443 		if (ret < 0)
2444 			return ret;
2445 		if (ret == 0)	/* In case of alias symbol */
2446 			ret = 1;
2447 	}
2448 	return 0;
2449 }
2450 
2451 int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
2452 				   char *type, char *sym)
2453 {
2454 	return -ERANGE;
2455 }
2456 
2457 int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2458 		       char *sym)
2459 {
2460 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
2461 	if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
2462 		return 0;
2463 #ifdef CONFIG_OPTPROBES
2464 	if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
2465 		return 0;
2466 #endif
2467 #endif
2468 	if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
2469 		return 0;
2470 	return -ERANGE;
2471 }
2472 
2473 int __init __weak arch_populate_kprobe_blacklist(void)
2474 {
2475 	return 0;
2476 }
2477 
2478 /*
2479  * Lookup and populate the kprobe_blacklist.
2480  *
2481  * Unlike the kretprobe blacklist, we'll need to determine
2482  * the range of addresses that belong to the said functions,
2483  * since a kprobe need not necessarily be at the beginning
2484  * of a function.
2485  */
2486 static int __init populate_kprobe_blacklist(unsigned long *start,
2487 					     unsigned long *end)
2488 {
2489 	unsigned long entry;
2490 	unsigned long *iter;
2491 	int ret;
2492 
2493 	for (iter = start; iter < end; iter++) {
2494 		entry = (unsigned long)dereference_symbol_descriptor((void *)*iter);
2495 		ret = kprobe_add_ksym_blacklist(entry);
2496 		if (ret == -EINVAL)
2497 			continue;
2498 		if (ret < 0)
2499 			return ret;
2500 	}
2501 
2502 	/* Symbols in '__kprobes_text' are blacklisted */
2503 	ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2504 					(unsigned long)__kprobes_text_end);
2505 	if (ret)
2506 		return ret;
2507 
2508 	/* Symbols in 'noinstr' section are blacklisted */
2509 	ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
2510 					(unsigned long)__noinstr_text_end);
2511 
2512 	return ret ? : arch_populate_kprobe_blacklist();
2513 }
2514 
2515 #ifdef CONFIG_MODULES
2516 /* Remove all symbols in given area from kprobe blacklist */
2517 static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
2518 {
2519 	struct kprobe_blacklist_entry *ent, *n;
2520 
2521 	list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
2522 		if (ent->start_addr < start || ent->start_addr >= end)
2523 			continue;
2524 		list_del(&ent->list);
2525 		kfree(ent);
2526 	}
2527 }
2528 
2529 static void kprobe_remove_ksym_blacklist(unsigned long entry)
2530 {
2531 	kprobe_remove_area_blacklist(entry, entry + 1);
2532 }
2533 
2534 static void add_module_kprobe_blacklist(struct module *mod)
2535 {
2536 	unsigned long start, end;
2537 	int i;
2538 
2539 	if (mod->kprobe_blacklist) {
2540 		for (i = 0; i < mod->num_kprobe_blacklist; i++)
2541 			kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
2542 	}
2543 
2544 	start = (unsigned long)mod->kprobes_text_start;
2545 	if (start) {
2546 		end = start + mod->kprobes_text_size;
2547 		kprobe_add_area_blacklist(start, end);
2548 	}
2549 
2550 	start = (unsigned long)mod->noinstr_text_start;
2551 	if (start) {
2552 		end = start + mod->noinstr_text_size;
2553 		kprobe_add_area_blacklist(start, end);
2554 	}
2555 }
2556 
2557 static void remove_module_kprobe_blacklist(struct module *mod)
2558 {
2559 	unsigned long start, end;
2560 	int i;
2561 
2562 	if (mod->kprobe_blacklist) {
2563 		for (i = 0; i < mod->num_kprobe_blacklist; i++)
2564 			kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
2565 	}
2566 
2567 	start = (unsigned long)mod->kprobes_text_start;
2568 	if (start) {
2569 		end = start + mod->kprobes_text_size;
2570 		kprobe_remove_area_blacklist(start, end);
2571 	}
2572 
2573 	start = (unsigned long)mod->noinstr_text_start;
2574 	if (start) {
2575 		end = start + mod->noinstr_text_size;
2576 		kprobe_remove_area_blacklist(start, end);
2577 	}
2578 }
2579 
2580 /* Module notifier call back, checking kprobes on the module */
2581 static int kprobes_module_callback(struct notifier_block *nb,
2582 				   unsigned long val, void *data)
2583 {
2584 	struct module *mod = data;
2585 	struct hlist_head *head;
2586 	struct kprobe *p;
2587 	unsigned int i;
2588 	int checkcore = (val == MODULE_STATE_GOING);
2589 
2590 	guard(mutex)(&kprobe_mutex);
2591 
2592 	if (val == MODULE_STATE_COMING)
2593 		add_module_kprobe_blacklist(mod);
2594 
2595 	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2596 		return NOTIFY_DONE;
2597 
2598 	/*
2599 	 * When 'MODULE_STATE_GOING' is notified, both the module's '.text' and
2600 	 * '.init.text' sections will be freed. When 'MODULE_STATE_LIVE' is
2601 	 * notified, only the '.init.text' section will be freed. We need to
2602 	 * disable the kprobes which have been inserted into those sections.
2603 	 */
2604 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2605 		head = &kprobe_table[i];
2606 		hlist_for_each_entry(p, head, hlist)
2607 			if (within_module_init((unsigned long)p->addr, mod) ||
2608 			    (checkcore &&
2609 			     within_module_core((unsigned long)p->addr, mod))) {
2610 				/*
2611 				 * The vaddr this probe is installed at will soon
2612 				 * be vfreed but not synced to disk. Hence,
2613 				 * disarming the breakpoint isn't needed.
2614 				 *
2615 				 * Note, this will also move any optimized probes
2616 				 * that are pending to be removed from their
2617 				 * corresponding lists to the 'freeing_list' and
2618 				 * will not be touched by the delayed
2619 				 * kprobe_optimizer() work handler.
2620 				 */
2621 				kill_kprobe(p);
2622 			}
2623 	}
2624 	if (val == MODULE_STATE_GOING)
2625 		remove_module_kprobe_blacklist(mod);
2626 	return NOTIFY_DONE;
2627 }
2628 
2629 static struct notifier_block kprobe_module_nb = {
2630 	.notifier_call = kprobes_module_callback,
2631 	.priority = 0
2632 };
2633 
2634 static int kprobe_register_module_notifier(void)
2635 {
2636 	return register_module_notifier(&kprobe_module_nb);
2637 }
2638 #else
2639 static int kprobe_register_module_notifier(void)
2640 {
2641 	return 0;
2642 }
2643 #endif /* CONFIG_MODULES */
2644 
2645 void kprobe_free_init_mem(void)
2646 {
2647 	void *start = (void *)(&__init_begin);
2648 	void *end = (void *)(&__init_end);
2649 	struct hlist_head *head;
2650 	struct kprobe *p;
2651 	int i;
2652 
2653 	guard(mutex)(&kprobe_mutex);
2654 
2655 	/* Kill all kprobes on initmem because the target code has been freed. */
2656 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2657 		head = &kprobe_table[i];
2658 		hlist_for_each_entry(p, head, hlist) {
2659 			if (start <= (void *)p->addr && (void *)p->addr < end)
2660 				kill_kprobe(p);
2661 		}
2662 	}
2663 }
2664 
2665 static int __init init_kprobes(void)
2666 {
2667 	int i, err;
2668 
2669 	/* FIXME allocate the probe table, currently defined statically */
2670 	/* initialize all list heads */
2671 	for (i = 0; i < KPROBE_TABLE_SIZE; i++)
2672 		INIT_HLIST_HEAD(&kprobe_table[i]);
2673 
2674 	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2675 					__stop_kprobe_blacklist);
2676 	if (err)
2677 		pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err);
2678 
2679 	if (kretprobe_blacklist_size) {
2680 		/* lookup the function address from its name */
2681 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2682 			kretprobe_blacklist[i].addr =
2683 				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2684 			if (!kretprobe_blacklist[i].addr)
2685 				pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n",
2686 				       kretprobe_blacklist[i].name);
2687 		}
2688 	}
2689 
2690 	/* By default, kprobes are armed */
2691 	kprobes_all_disarmed = false;
2692 
2693 #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2694 	/* Init 'kprobe_optinsn_slots' for allocation */
2695 	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2696 #endif
2697 
2698 	err = arch_init_kprobes();
2699 	if (!err)
2700 		err = register_die_notifier(&kprobe_exceptions_nb);
2701 	if (!err)
2702 		err = kprobe_register_module_notifier();
2703 
2704 	kprobes_initialized = (err == 0);
2705 	kprobe_sysctls_init();
2706 	return err;
2707 }
2708 early_initcall(init_kprobes);
2709 
2710 #if defined(CONFIG_OPTPROBES)
2711 static int __init init_optprobes(void)
2712 {
2713 	/*
2714 	 * Enable kprobe optimization - this kicks the optimizer which
2715 	 * depends on synchronize_rcu_tasks() and ksoftirqd, that is
2716 	 * not spawned in early initcall. So delay the optimization.
2717 	 */
2718 	optimize_all_kprobes();
2719 
2720 	return 0;
2721 }
2722 subsys_initcall(init_optprobes);
2723 #endif
2724 
2725 #ifdef CONFIG_DEBUG_FS
2726 static void report_probe(struct seq_file *pi, struct kprobe *p,
2727 		const char *sym, int offset, char *modname, struct kprobe *pp)
2728 {
2729 	char *kprobe_type;
2730 	void *addr = p->addr;
2731 
2732 	if (p->pre_handler == pre_handler_kretprobe)
2733 		kprobe_type = "r";
2734 	else
2735 		kprobe_type = "k";
2736 
2737 	if (!kallsyms_show_value(pi->file->f_cred))
2738 		addr = NULL;
2739 
2740 	if (sym)
2741 		seq_printf(pi, "%px  %s  %s+0x%x  %s ",
2742 			addr, kprobe_type, sym, offset,
2743 			(modname ? modname : " "));
2744 	else	/* try to use %pS */
2745 		seq_printf(pi, "%px  %s  %pS ",
2746 			addr, kprobe_type, p->addr);
2747 
2748 	if (!pp)
2749 		pp = p;
2750 	seq_printf(pi, "%s%s%s%s\n",
2751 		(kprobe_gone(p) ? "[GONE]" : ""),
2752 		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2753 		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2754 		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2755 }
2756 
2757 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2758 {
2759 	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2760 }
2761 
2762 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2763 {
2764 	(*pos)++;
2765 	if (*pos >= KPROBE_TABLE_SIZE)
2766 		return NULL;
2767 	return pos;
2768 }
2769 
2770 static void kprobe_seq_stop(struct seq_file *f, void *v)
2771 {
2772 	/* Nothing to do */
2773 }
2774 
2775 static int show_kprobe_addr(struct seq_file *pi, void *v)
2776 {
2777 	struct hlist_head *head;
2778 	struct kprobe *p, *kp;
2779 	const char *sym;
2780 	unsigned int i = *(loff_t *) v;
2781 	unsigned long offset = 0;
2782 	char *modname, namebuf[KSYM_NAME_LEN];
2783 
2784 	head = &kprobe_table[i];
2785 	preempt_disable();
2786 	hlist_for_each_entry_rcu(p, head, hlist) {
2787 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2788 					&offset, &modname, namebuf);
2789 		if (kprobe_aggrprobe(p)) {
2790 			list_for_each_entry_rcu(kp, &p->list, list)
2791 				report_probe(pi, kp, sym, offset, modname, p);
2792 		} else
2793 			report_probe(pi, p, sym, offset, modname, NULL);
2794 	}
2795 	preempt_enable();
2796 	return 0;
2797 }
2798 
2799 static const struct seq_operations kprobes_sops = {
2800 	.start = kprobe_seq_start,
2801 	.next  = kprobe_seq_next,
2802 	.stop  = kprobe_seq_stop,
2803 	.show  = show_kprobe_addr
2804 };
2805 
2806 DEFINE_SEQ_ATTRIBUTE(kprobes);
2807 
2808 /* kprobes/blacklist -- shows which functions can not be probed */
2809 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2810 {
2811 	mutex_lock(&kprobe_mutex);
2812 	return seq_list_start(&kprobe_blacklist, *pos);
2813 }
2814 
2815 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2816 {
2817 	return seq_list_next(v, &kprobe_blacklist, pos);
2818 }
2819 
2820 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2821 {
2822 	struct kprobe_blacklist_entry *ent =
2823 		list_entry(v, struct kprobe_blacklist_entry, list);
2824 
2825 	/*
2826 	 * If '/proc/kallsyms' is not showing kernel addresses, we won't
2827 	 * show them here either.
2828 	 */
2829 	if (!kallsyms_show_value(m->file->f_cred))
2830 		seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
2831 			   (void *)ent->start_addr);
2832 	else
2833 		seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2834 			   (void *)ent->end_addr, (void *)ent->start_addr);
2835 	return 0;
2836 }
2837 
2838 static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
2839 {
2840 	mutex_unlock(&kprobe_mutex);
2841 }
2842 
2843 static const struct seq_operations kprobe_blacklist_sops = {
2844 	.start = kprobe_blacklist_seq_start,
2845 	.next  = kprobe_blacklist_seq_next,
2846 	.stop  = kprobe_blacklist_seq_stop,
2847 	.show  = kprobe_blacklist_seq_show,
2848 };
2849 DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
2850 
2851 static int arm_all_kprobes(void)
2852 {
2853 	struct hlist_head *head;
2854 	struct kprobe *p;
2855 	unsigned int i, total = 0, errors = 0;
2856 	int err, ret = 0;
2857 
2858 	guard(mutex)(&kprobe_mutex);
2859 
2860 	/* If kprobes are armed, just return */
2861 	if (!kprobes_all_disarmed)
2862 		return 0;
2863 
2864 	/*
2865 	 * optimize_kprobe() called by arm_kprobe() checks
2866 	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
2867 	 * arm_kprobe.
2868 	 */
2869 	kprobes_all_disarmed = false;
2870 	/* Arming kprobes doesn't optimize kprobe itself */
2871 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2872 		head = &kprobe_table[i];
2873 		/* Arm all kprobes on a best-effort basis */
2874 		hlist_for_each_entry(p, head, hlist) {
2875 			if (!kprobe_disabled(p)) {
2876 				err = arm_kprobe(p);
2877 				if (err)  {
2878 					errors++;
2879 					ret = err;
2880 				}
2881 				total++;
2882 			}
2883 		}
2884 	}
2885 
2886 	if (errors)
2887 		pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n",
2888 			errors, total);
2889 	else
2890 		pr_info("Kprobes globally enabled\n");
2891 
2892 	return ret;
2893 }
2894 
2895 static int disarm_all_kprobes(void)
2896 {
2897 	struct hlist_head *head;
2898 	struct kprobe *p;
2899 	unsigned int i, total = 0, errors = 0;
2900 	int err, ret = 0;
2901 
2902 	guard(mutex)(&kprobe_mutex);
2903 
2904 	/* If kprobes are already disarmed, just return */
2905 	if (kprobes_all_disarmed)
2906 		return 0;
2907 
2908 	kprobes_all_disarmed = true;
2909 
2910 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2911 		head = &kprobe_table[i];
2912 		/* Disarm all kprobes on a best-effort basis */
2913 		hlist_for_each_entry(p, head, hlist) {
2914 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2915 				err = disarm_kprobe(p, false);
2916 				if (err) {
2917 					errors++;
2918 					ret = err;
2919 				}
2920 				total++;
2921 			}
2922 		}
2923 	}
2924 
2925 	if (errors)
2926 		pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n",
2927 			errors, total);
2928 	else
2929 		pr_info("Kprobes globally disabled\n");
2930 
2931 	/* Wait for the optimizer to finish disarming all kprobes. */
2932 	wait_for_kprobe_optimizer_locked();
2933 	return ret;
2934 }
2935 
2936 /*
2937  * XXX: The debugfs bool file interface doesn't allow for callbacks
2938  * when the bool state is switched. We can reuse that facility when it
2939  * becomes available.
2940  */
2941 static ssize_t read_enabled_file_bool(struct file *file,
2942 	       char __user *user_buf, size_t count, loff_t *ppos)
2943 {
2944 	char buf[3];
2945 
2946 	if (!kprobes_all_disarmed)
2947 		buf[0] = '1';
2948 	else
2949 		buf[0] = '0';
2950 	buf[1] = '\n';
2951 	buf[2] = 0x00;
2952 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2953 }
2954 
2955 static ssize_t write_enabled_file_bool(struct file *file,
2956 	       const char __user *user_buf, size_t count, loff_t *ppos)
2957 {
2958 	bool enable;
2959 	int ret;
2960 
2961 	ret = kstrtobool_from_user(user_buf, count, &enable);
2962 	if (ret)
2963 		return ret;
2964 
2965 	ret = enable ? arm_all_kprobes() : disarm_all_kprobes();
2966 	if (ret)
2967 		return ret;
2968 
2969 	return count;
2970 }
2971 
2972 static const struct file_operations fops_kp = {
2973 	.read =         read_enabled_file_bool,
2974 	.write =        write_enabled_file_bool,
2975 	.llseek =	default_llseek,
2976 };
2977 
2978 static int __init debugfs_kprobe_init(void)
2979 {
2980 	struct dentry *dir;
2981 
2982 	dir = debugfs_create_dir("kprobes", NULL);
2983 
2984 	debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
2985 
2986 	debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);
2987 
2988 	debugfs_create_file("blacklist", 0400, dir, NULL,
2989 			    &kprobe_blacklist_fops);
2990 
2991 	return 0;
2992 }
2993 
2994 late_initcall(debugfs_kprobe_init);
2995 #endif /* CONFIG_DEBUG_FS */
2996