// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <[email protected]>
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/extable.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/irq.h>
#include <asm/text-patching.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system_misc.h>
#include <asm/traps.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);

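/*
 * Populate the out-of-line (XOL) slot for a probed instruction: the original
 * instruction is copied into the slot, followed by a single-step BRK, so that
 * control traps back into the kprobe code once the instruction has been
 * stepped.
 */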
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->ainsn.xol_insn;

	/*
	 * Prepare the insn slot. As Mark Rutland points out, this depends on a
	 * couple of subtleties:
	 *
	 * - That the I-cache maintenance for these instructions is complete
	 *   *before* the kprobe BRK is written (and aarch64_insn_patch_text_nosync()
	 *   ensures this, but just omits causing a Context-Synchronization-Event
	 *   on all CPUs).
	 *
	 * - That the kprobe BRK results in an exception (and consequently a
	 *   Context-Synchronization-Event), which ensures that the CPU will
	 *   fetch the single-step slot instructions *after* this, ensuring that
	 *   the new instructions are used.
	 *
	 * Normally an ISB would be placed after patching to guarantee that the
	 * I-cache maintenance is observed on all CPUs; however, the single-step
	 * slot is installed in the BRK exception handler, so it is unnecessary
	 * to generate another Context-Synchronization-Event via ISB.
	 */
	aarch64_insn_patch_text_nosync(addr, le32_to_cpu(p->opcode));
	aarch64_insn_patch_text_nosync(addr + 1, BRK64_OPCODE_KPROBES_SS);

	/*
	 * Restore the PC to the instruction following the probe after stepping
	 * out of line.
	 */
	p->ainsn.xol_restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed out of line. No need to adjust the PC. */
	p->ainsn.xol_restore = 0;
}

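/*
 * Emulate the probed instruction via its decoded simulation handler instead
 * of executing it out of line, then run the usual post-step processing.
 */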
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler(le32_to_cpu(p->opcode), (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(p, kcb, regs);
}

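/*
 * Validate and prepare a probe: the address must be word-aligned and outside
 * any exception table range; the instruction is then decoded and either an
 * XOL slot is set up or the probe is marked for simulation.
 */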
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = *p->addr;

	if (search_exception_tables(probe_addr))
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.xol_insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.xol_insn = get_insn_slot();
		if (!p->ainsn.xol_insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.xol_insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;
	u32 insn = BRK64_OPCODE_KPROBES;

	aarch64_insn_patch_text(&addr, &insn, 1);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;
	u32 insn = le32_to_cpu(p->opcode);

	aarch64_insn_patch_text(&addr, &insn, 1);
}

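/* Release the XOL slot allocated by arch_prepare_kprobe(), if any. */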
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.xol_insn) {
		free_insn_slot(p->ainsn.xol_insn, 0);
		p->ainsn.xol_insn = NULL;
	}
}

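/*
 * Save/restore the active kprobe and its status so that a probe hit while
 * another probe is being handled (reentry) can be unwound cleanly.
 */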
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Mask all of DAIF while executing the instruction out-of-line, to keep things
 * simple and avoid nesting exceptions. Interrupts do have to be disabled since
 * the kprobe state is per-CPU and doesn't get migrated.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
	regs->pstate |= DAIF_MASK;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->pstate &= ~DAIF_MASK;
	regs->pstate |= kcb->saved_irqflag;
}

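/*
 * Begin executing the probed instruction: either redirect the PC to the XOL
 * slot (with DAIF masked via kprobes_save_local_irqflag()) or simulate the
 * instruction directly. @reenter is non-zero when this probe was hit while
 * another kprobe was already active.
 */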
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.xol_insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.xol_insn;

		kprobes_save_local_irqflag(kcb, regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

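/*
 * Handle a probe hit while another kprobe is active. Reentry from a pre- or
 * post-handler is counted as missed and stepped as usual; reentry while
 * already single-stepping indicates unrecoverable state and is fatal.
 */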
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

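/*
 * Complete handling once the probed instruction has been stepped or
 * simulated: fix up the PC, pop the previous kprobe on reentry, and invoke
 * the post handler if one is registered.
 */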
static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	/* restore the return address if the stepped insn was non-branching */
	if (cur->ainsn.xol_restore != 0)
		instruction_pointer_set(regs, cur->ainsn.xol_restore);

	/* restore the previously saved kprobe state and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler)
		cur->post_handler(cur, regs, 0);

	reset_current_kprobe();
}

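/*
 * Called from the fault handling code when a fault is taken with a kprobe
 * active. If the fault occurred while single-stepping, the PC is rolled back
 * to the probe address and the fault is left to be handled normally.
 */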
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the PC back at the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			kprobes_restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}

		break;
	}
	return 0;
}

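/*
 * BRK handler for the kprobe breakpoint: look up the probe registered for the
 * faulting address, run its pre-handler, and either single-step/simulate the
 * original instruction or bail out if the pre-handler diverted execution.
 */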
static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);
	if (WARN_ON_ONCE(!p)) {
		/*
		 * Something went wrong. This BRK used an immediate reserved
		 * for kprobes, but we couldn't find any corresponding probe.
		 */
		return DBG_HOOK_ERROR;
	}

	if (cur_kprobe) {
		/* Hit a kprobe inside another kprobe */
		if (!reenter_kprobe(p, regs, kcb))
			return DBG_HOOK_ERROR;
	} else {
		/* Probe hit */
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;

		/*
		 * If we have no pre-handler or it returned 0, we
		 * continue with normal processing. If we have a
		 * pre-handler and it returned non-zero, it has
		 * modified the execution path and does not need the
		 * single-step. Let's just reset the current kprobe
		 * and exit.
		 */
		if (!p->pre_handler || !p->pre_handler(p, regs))
			setup_singlestep(p, regs, kcb, 0);
		else
			reset_current_kprobe();
	}

	return DBG_HOOK_HANDLED;
}

static struct break_hook kprobes_break_hook = {
	.imm = KPROBES_BRK_IMM,
	.fn = kprobe_breakpoint_handler,
};

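/*
 * BRK handler for the single-step slot: the BRK placed after the copied
 * instruction in the XOL slot lands here, signalling that the out-of-line
 * step has completed and post-processing can run.
 */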
static int __kprobes
kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long addr = instruction_pointer(regs);
	struct kprobe *cur = kprobe_running();

	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
	    ((unsigned long)&cur->ainsn.xol_insn[1] == addr)) {
		kprobes_restore_local_irqflag(kcb, regs);
		post_kprobe_handler(cur, kcb, regs);

		return DBG_HOOK_HANDLED;
	}

	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}

static struct break_hook kprobes_break_ss_hook = {
	.imm = KPROBES_BRK_SS_IMM,
	.fn = kprobe_breakpoint_ss_handler,
};

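/*
 * BRK handler hit when a probed function returns through
 * __kretprobe_trampoline; the generic trampoline handler recovers the
 * original return address recorded by arch_prepare_kretprobe().
 */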
static int __kprobes
kretprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
{
	if (regs->pc != (unsigned long)__kretprobe_trampoline)
		return DBG_HOOK_ERROR;

	regs->pc = kretprobe_trampoline_handler(regs, (void *)regs->regs[29]);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kretprobes_break_hook = {
	.imm = KRETPROBES_BRK_IMM,
	.fn = kretprobe_breakpoint_handler,
};

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					(unsigned long)__entry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
					(unsigned long)__hyp_text_end);
	if (ret || is_kernel_in_hyp_mode())
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
					(unsigned long)__hyp_idmap_text_end);
	return ret;
}

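/*
 * Record the real return address and frame pointer, then divert the function
 * return through __kretprobe_trampoline.
 */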
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
	ri->fp = (void *)regs->regs[29];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&__kretprobe_trampoline;
}

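/*
 * arm64 implements the kretprobe trampoline with a dedicated BRK hook rather
 * than a trampoline kprobe, so no probe is ever the trampoline probe.
 */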
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

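/* Register the BRK hooks for kprobe, single-step and kretprobe breakpoints. */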
int __init arch_init_kprobes(void)
{
	register_kernel_break_hook(&kprobes_break_hook);
	register_kernel_break_hook(&kprobes_break_ss_hook);
	register_kernel_break_hook(&kretprobes_break_hook);

	return 0;
}