// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <[email protected]>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/text-patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct fregs_offset {
	const char *name;
	int offset;
};

#define FREGS_OFFSET(n, field)						\
	{								\
		.name = n,						\
		.offset = offsetof(struct __arch_ftrace_regs, field),	\
	}

static const struct fregs_offset fregs_offsets[] = {
	FREGS_OFFSET("x0", regs[0]),
	FREGS_OFFSET("x1", regs[1]),
	FREGS_OFFSET("x2", regs[2]),
	FREGS_OFFSET("x3", regs[3]),
	FREGS_OFFSET("x4", regs[4]),
	FREGS_OFFSET("x5", regs[5]),
	FREGS_OFFSET("x6", regs[6]),
	FREGS_OFFSET("x7", regs[7]),
	FREGS_OFFSET("x8", regs[8]),

	FREGS_OFFSET("x29", fp),
	FREGS_OFFSET("x30", lr),
	FREGS_OFFSET("lr", lr),

	FREGS_OFFSET("sp", sp),
	FREGS_OFFSET("pc", pc),
};

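/*
 * Resolve a register name (e.g. "x0", "lr", "sp") to its offset within
 * struct __arch_ftrace_regs. Used by the core tracing code to locate named
 * registers, e.g. when fetching probe event arguments.
 */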
int ftrace_regs_query_register_offset(const char *name)
{
	for (int i = 0; i < ARRAY_SIZE(fregs_offsets); i++) {
		const struct fregs_offset *roff = &fregs_offsets[i];
		if (!strcmp(roff->name, name))
			return roff->offset;
	}

	return -EINVAL;
}
#endif

unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * When using mcount, addr is the address of the mcount call
	 * instruction, and no adjustment is necessary.
	 */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
		return addr;

	/*
	 * When using patchable-function-entry without pre-function NOPs, addr
	 * is the address of the first NOP after the function entry point.
	 *
	 * The compiler has either generated:
	 *
	 * addr+00:	func:	NOP		// To be patched to MOV X9, LR
	 * addr+04:		NOP		// To be patched to BL <caller>
	 *
	 * Or:
	 *
	 * addr-04:		BTI	C
	 * addr+00:	func:	NOP		// To be patched to MOV X9, LR
	 * addr+04:		NOP		// To be patched to BL <caller>
	 *
	 * We must adjust addr to the address of the NOP which will be patched
	 * to `BL <caller>`, which is at `addr + 4` bytes in either case.
	 */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return addr + AARCH64_INSN_SIZE;

	/*
	 * When using patchable-function-entry with pre-function NOPs, addr is
	 * the address of the first pre-function NOP.
	 *
	 * Starting from an 8-byte aligned base, the compiler has either
	 * generated:
	 *
	 * addr+00:		NOP		// Literal (first 32 bits)
	 * addr+04:		NOP		// Literal (last 32 bits)
	 * addr+08:	func:	NOP		// To be patched to MOV X9, LR
	 * addr+12:		NOP		// To be patched to BL <caller>
	 *
	 * Or:
	 *
	 * addr+00:		NOP		// Literal (first 32 bits)
	 * addr+04:		NOP		// Literal (last 32 bits)
	 * addr+08:	func:	BTI	C
	 * addr+12:		NOP		// To be patched to MOV X9, LR
	 * addr+16:		NOP		// To be patched to BL <caller>
	 *
	 * We must adjust addr to the address of the NOP which will be patched
	 * to `BL <caller>`, which is at either addr+12 or addr+16 depending on
	 * whether there is a BTI.
	 */

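	/*
	 * The pre-function literal is an 8-byte value, so the patch site must
	 * be 8-byte aligned; addr + 8 is the function entry point, which %pS
	 * below resolves to a symbol.
	 */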
	if (!IS_ALIGNED(addr, sizeof(unsigned long))) {
		WARN_RATELIMIT(1, "Misaligned patch-site %pS\n",
			       (void *)(addr + 8));
		return 0;
	}

	/* Skip the NOPs placed before the function entry point */
	addr += 2 * AARCH64_INSN_SIZE;

	/* Skip any BTI */
	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) {
		u32 insn = le32_to_cpu(*(__le32 *)addr);

		if (aarch64_insn_is_bti(insn)) {
			addr += AARCH64_INSN_SIZE;
		} else if (insn != aarch64_insn_gen_nop()) {
			WARN_RATELIMIT(1, "unexpected insn in patch-site %pS: 0x%08x\n",
				       (void *)addr, insn);
		}
	}

	/* Skip the first NOP after function entry */
	addr += AARCH64_INSN_SIZE;

	return addr;
}

/* Convert fentry_ip to the symbol address without kallsyms */
unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
{
	u32 insn;

	/*
	 * When using patchable-function-entry without pre-function NOPs, the
	 * ftrace entry is the address of the first NOP after the function
	 * entry point.
	 *
	 * The compiler has either generated:
	 *
	 * func+00:	func:	NOP		// To be patched to MOV X9, LR
	 * func+04:		NOP		// To be patched to BL <caller>
	 *
	 * Or:
	 *
	 * func-04:		BTI	C
	 * func+00:	func:	NOP		// To be patched to MOV X9, LR
	 * func+04:		NOP		// To be patched to BL <caller>
	 *
	 * fentry_ip is the address of `BL <caller>`, which is at `func + 4`
	 * bytes in either case.
	 */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return fentry_ip - AARCH64_INSN_SIZE;

	/*
	 * When using patchable-function-entry with pre-function NOPs, the
	 * entry layout differs depending on whether a BTI was generated:
	 *
	 * func+00:	func:	NOP		// To be patched to MOV X9, LR
	 * func+04:		NOP		// To be patched to BL <caller>
	 *
	 * Or:
	 *
	 * func+00:	func:	BTI	C
	 * func+04:		NOP		// To be patched to MOV X9, LR
	 * func+08:		NOP		// To be patched to BL <caller>
	 *
	 * fentry_ip is the address of `BL <caller>`, which is at either
	 * `func + 4` or `func + 8` depending on whether there is a BTI.
	 */

	/* If there is no BTI, the func address is one instruction earlier. */
	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		return fentry_ip - AARCH64_INSN_SIZE;

	/*
	 * We want to be extra safe in case the entry ip is on the page edge,
	 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
	 */
	if ((fentry_ip & ~PAGE_MASK) < AARCH64_INSN_SIZE * 2) {
		if (get_kernel_nofault(insn, (u32 *)(fentry_ip - AARCH64_INSN_SIZE * 2)))
			return 0;
	} else {
		insn = *(u32 *)(fentry_ip - AARCH64_INSN_SIZE * 2);
	}

	if (aarch64_insn_is_bti(le32_to_cpu((__le32)insn)))
		return fentry_ip - AARCH64_INSN_SIZE * 2;

	return fentry_ip - AARCH64_INSN_SIZE;
}

/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction being replaced is first checked
 * against @old.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*(), which uses
	 * copy_from/to_kernel_nofault(), and make sure what we read is what we
	 * expected it to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}

/*
 * Replace the tracer function called from ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	/*
	 * When using CALL_OPS, the function to call is associated with the
	 * call site, and we don't have a global function pointer to update.
	 */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return 0;

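	/*
	 * The ftrace_call site already holds a branch (initially to
	 * ftrace_stub), so patch it without validating the old instruction.
	 */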
	pc = (unsigned long)ftrace_call;
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}

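/*
 * mod->arch.ftrace_trampolines is populated when the module is loaded (see
 * the arm64 module loading code), with one PLT entry branching to the ftrace
 * trampoline.
 */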
static struct plt_entry *get_ftrace_plt(struct module *mod)
{
#ifdef CONFIG_MODULES
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	return &plt[FTRACE_PLT_IDX];
#else
	return NULL;
#endif
}

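/*
 * A BL instruction encodes a signed 26-bit word offset, so it can only reach
 * targets within +/-128MiB of the branch itself.
 */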
static bool reachable_by_bl(unsigned long addr, unsigned long pc)
{
	long offset = (long)addr - (long)pc;

	return offset >= -SZ_128M && offset < SZ_128M;
}

/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of 'BL' instructions, modules may be placed too far
 * away to branch directly and must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has been
 * modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
				      struct module *mod,
				      unsigned long *addr)
{
	unsigned long pc = rec->ip;
	struct plt_entry *plt;

	/*
	 * If a custom trampoline is unreachable, rely on the ftrace_caller
	 * trampoline which knows how to indirectly reach that trampoline
	 * through ops->direct_call.
	 */
	if (*addr != FTRACE_ADDR && !reachable_by_bl(*addr, pc))
		*addr = FTRACE_ADDR;

	/*
	 * When the target is within range of the 'BL' instruction, use 'addr'
	 * as-is and branch to that directly.
	 */
	if (reachable_by_bl(*addr, pc))
		return true;

	/*
	 * When the target is outside of the range of a 'BL' instruction, we
	 * must use a PLT to reach it. We can only place PLTs for modules, and
	 * only when module PLT support is built-in.
	 */
	if (!IS_ENABLED(CONFIG_MODULES))
		return false;

	/*
	 * 'mod' is only set at module load time, but if we end up
	 * dealing with an out-of-range condition, we can assume it
	 * is due to a module being loaded far away from the kernel.
	 *
	 * NOTE: __module_text_address() must be called with preemption
	 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
	 * retains its validity throughout the remainder of this code.
	 */
	if (!mod) {
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();
	}

	if (WARN_ON(!mod))
		return false;

	plt = get_ftrace_plt(mod);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
		return false;
	}

	*addr = (unsigned long)plt;
	return true;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
static const struct ftrace_ops *arm64_rec_get_ops(struct dyn_ftrace *rec)
{
	const struct ftrace_ops *ops = NULL;

	if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
		ops = ftrace_find_unique_ops(rec);
		WARN_ON_ONCE(!ops);
	}

	if (!ops)
		ops = &ftrace_list_ops;

	return ops;
}

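/*
 * The ops pointer lives in the 8-byte literal placed immediately before the
 * function entry point. rec->ip is the `BL <caller>` patch site, which is 12
 * bytes past the start of the literal without BTI, or 16 bytes past it with
 * BTI; rounding (rec->ip - 12) down to an 8-byte boundary finds the literal
 * in either case.
 */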
static int ftrace_rec_set_ops(const struct dyn_ftrace *rec,
			      const struct ftrace_ops *ops)
{
	unsigned long literal = ALIGN_DOWN(rec->ip - 12, 8);

	return aarch64_insn_write_literal_u64((void *)literal,
					      (unsigned long)ops);
}

static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, &ftrace_nop_ops);
}

static int ftrace_rec_update_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec));
}
#else
static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; }
static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; }
#endif

/*
 * Turn on the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	int ret;

	ret = ftrace_rec_update_ops(rec);
	if (ret)
		return ret;

	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	int ret;

	ret = ftrace_rec_update_ops(rec);
	if (ret)
		return ret;

	if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
		return -EINVAL;
	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_branch_imm(pc, old_addr,
					  AARCH64_INSN_BRANCH_LINK);
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
 * The compiler has inserted two NOPs before the regular function prologue.
 * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
 * and x9-x18 are free for our use.
 *
 * At runtime we want to be able to swing a single NOP <-> BL to enable or
 * disable the ftrace call. The BL requires us to save the original LR value,
 * so here we insert a <MOV X9, LR> over the first NOP so the instructions
 * before the regular prologue are:
 *
 * | Compiled | Disabled   | Enabled    |
 * +----------+------------+------------+
 * | NOP      | MOV X9, LR | MOV X9, LR |
 * | NOP      | NOP        | BL <entry> |
 *
 * The LR value will be recovered by ftrace_caller, and restored into LR
 * before returning to the regular function prologue. When a function is not
 * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
 *
 * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
 * the BL.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
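	/* rec->ip is the `BL <caller>` site; MOV X9, LR goes one instruction earlier. */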
	unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
	u32 old, new;
	int ret;

	ret = ftrace_rec_set_nop_ops(rec);
	if (ret)
		return ret;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
					AARCH64_INSN_REG_LR,
					AARCH64_INSN_VARIANT_64BIT);
	return ftrace_modify_code(pc, old, new, true);
}
#endif

/*
 * Turn off the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old = 0, new;
	int ret;

	new = aarch64_insn_gen_nop();

	ret = ftrace_rec_set_nop_ops(rec);
	if (ret)
		return ret;

	/*
	 * When using mcount, callsites in modules may have been initialized to
	 * call an arbitrary module PLT (which redirects to the _mcount stub)
	 * rather than the ftrace PLT we'll use at runtime (which redirects to
	 * the ftrace trampoline). We can ignore the old PLT when initializing
	 * the callsite.
	 *
	 * Note: 'mod' is only set at module load time.
	 */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) && mod)
		return aarch64_insn_patch_text_nosync((void *)pc, new);

	if (!ftrace_find_callable_addr(rec, mod, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

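/*
 * FTRACE_MAY_SLEEP lets the core code cond_resched() between updates;
 * patching every callsite in one go can otherwise take long enough to stall
 * the CPU.
 */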
void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * The function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to the parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace the return address (*parent) on
 * the call stack with return_to_handler.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer,
				  (void *)frame_pointer)) {
		*parent = return_hooker;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long frame_pointer = arch_ftrace_regs(fregs)->fp;
	unsigned long *parent = &arch_ftrace_regs(fregs)->lr;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter_regs(old, ip, frame_pointer,
				       (void *)frame_pointer, fregs)) {
		*parent = return_hooker;
	}
}
#else
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */