// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/paravirt.h>
#include <asm/asm-prototypes.h>
#include <asm/cfi.h>

int __read_mostly alternatives_patched;
EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

#define DA_ALL		(~0)
#define DA_ALT		0x01
#define DA_RET		0x02
#define DA_RETPOLINE	0x04
#define DA_ENDBR	0x08
#define DA_SMP		0x10

static unsigned int debug_alternative;

static int __init debug_alt(char *str)
{
	if (str && *str == '=')
		str++;

	if (!str || kstrtouint(str, 0, &debug_alternative))
		debug_alternative = DA_ALL;

	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(type, fmt, args...)					\
do {									\
	if (debug_alternative & DA_##type)				\
		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
} while (0)

#define DUMP_BYTES(type, buf, len, fmt, args...)			\
do {									\
	if (unlikely(debug_alternative & DA_##type)) {			\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

static const unsigned char x86nops[] =
{
	BYTES_NOP1,
	BYTES_NOP2,
	BYTES_NOP3,
	BYTES_NOP4,
	BYTES_NOP5,
	BYTES_NOP6,
	BYTES_NOP7,
	BYTES_NOP8,
#ifdef CONFIG_64BIT
	BYTES_NOP9,
	BYTES_NOP10,
	BYTES_NOP11,
#endif
};

const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
{
	NULL,
	x86nops,
	x86nops + 1,
	x86nops + 1 + 2,
	x86nops + 1 + 2 + 3,
	x86nops + 1 + 2 + 3 + 4,
	x86nops + 1 + 2 + 3 + 4 + 5,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
#ifdef CONFIG_64BIT
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10,
#endif
};

/*
 * Nomenclature for variable names to simplify and clarify this code and ease
 * any potential staring at it:
 *
 * @instr: source address of the original instructions in the kernel text as
 *	   generated by the compiler.
 *
 * @buf: temporary buffer on which the patching operates. This buffer is
 *	 eventually text-poked into the kernel image.
 *
 * @replacement/@repl: pointer to the opcodes which are replacing @instr,
 *		       located in the .altinstr_replacement section.
 */

/*
 * Fill the buffer with a single effective instruction of size @len.
 *
 * In order not to issue an ORC stack depth tracking CFI entry (Call Frame
 * Info) for every single-byte NOP, try to generate the maximally available
 * NOP of size <= ASM_NOP_MAX such that only a single CFI entry is generated
 * (vs. one for each single-byte NOP). If @len to fill out is > ASM_NOP_MAX,
 * pad with INT3 and *jump* over the padding instead of executing long and
 * daft NOPs.
 */
static void add_nop(u8 *buf, unsigned int len)
{
	u8 *target = buf + len;

	if (!len)
		return;

	if (len <= ASM_NOP_MAX) {
		memcpy(buf, x86_nops[len], len);
		return;
	}

	if (len < 128) {
		__text_gen_insn(buf, JMP8_INSN_OPCODE, buf, target, JMP8_INSN_SIZE);
		buf += JMP8_INSN_SIZE;
	} else {
		__text_gen_insn(buf, JMP32_INSN_OPCODE, buf, target, JMP32_INSN_SIZE);
		buf += JMP32_INSN_SIZE;
	}

	for (; buf < target; buf++)
		*buf = INT3_INSN_OPCODE;
}

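/*
 * Illustrative example (editor's sketch): assuming a 64-bit kernel where
 * ASM_NOP_MAX is 11, add_nop() cannot cover @len == 20 with a single NOP
 * and instead emits a two-byte JMP8 over INT3 padding:
 *
 *   eb 12 cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc
 *
 * The rel8 displacement 0x12 (18) equals target - (buf + JMP8_INSN_SIZE),
 * so execution lands on @target and the INT3 bytes are never executed.
 */
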
extern s32 __retpoline_sites[], __retpoline_sites_end[];
extern s32 __return_sites[], __return_sites_end[];
extern s32 __cfi_sites[], __cfi_sites_end[];
extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Matches NOP and NOPL, not any of the other possible NOPs.
 */
static bool insn_is_nop(struct insn *insn)
{
	/* Anything NOP, but no REP NOP */
	if (insn->opcode.bytes[0] == 0x90 &&
	    (!insn->prefixes.nbytes || insn->prefixes.bytes[0] != 0xF3))
		return true;

	/* NOPL */
	if (insn->opcode.bytes[0] == 0x0F && insn->opcode.bytes[1] == 0x1F)
		return true;

	/* TODO: more nops */

	return false;
}

/*
 * Find the offset of the first non-NOP instruction starting at @offset
 * but no further than @len.
 */
static int skip_nops(u8 *buf, int offset, int len)
{
	struct insn insn;

	for (; offset < len; offset += insn.length) {
		if (insn_decode_kernel(&insn, &buf[offset]))
			break;

		if (!insn_is_nop(&insn))
			break;
	}

	return offset;
}

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
{
	for (int next, i = 0; i < len; i = next) {
		struct insn insn;

		if (insn_decode_kernel(&insn, &buf[i]))
			return;

		next = i + insn.length;

		if (insn_is_nop(&insn)) {
			int nop = i;

			/* Has the NOP already been optimized? */
			if (i + insn.length == len)
				return;

			next = skip_nops(buf, next, len);

			add_nop(buf + nop, next - nop);
			DUMP_BYTES(ALT, buf, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, next);
		}
	}
}

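/*
 * Illustrative example (editor's sketch): given a patch buffer containing
 * four single-byte NOPs followed by a real instruction,
 *
 *   90 90 90 90 89 c8		; nop; nop; nop; nop; mov %ecx,%eax
 *
 * optimize_nops() collapses the run via add_nop() into one 4-byte NOP:
 *
 *   0f 1f 40 00 89 c8		; nopl 0x0(%rax); mov %ecx,%eax
 *
 * so the ORC unwinder needs only a single CFI entry for the padding.
 */
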
/*
 * In this context, "source" is where the instructions are placed in the
 * section .altinstr_replacement, for example during kernel build by the
 * toolchain.
 * "Destination" is where the instructions are being patched in by this
 * machinery.
 *
 * The source offset is:
 *
 *   src_imm = target - src_next_ip                  (1)
 *
 * and the target offset is:
 *
 *   dst_imm = target - dst_next_ip                  (2)
 *
 * so rework (1) as an expression for target like:
 *
 *   target = src_imm + src_next_ip                  (1a)
 *
 * and substitute in (2) to get:
 *
 *   dst_imm = (src_imm + src_next_ip) - dst_next_ip (3)
 *
 * Now, since the instruction stream is 'identical' at src and dst (it
 * is being copied after all) it can be stated that:
 *
 *   src_next_ip = src + ip_offset
 *   dst_next_ip = dst + ip_offset                   (4)
 *
 * Substitute (4) in (3) and observe ip_offset being cancelled out to
 * obtain:
 *
 *   dst_imm = src_imm + (src + ip_offset) - (dst + ip_offset)
 *           = src_imm + src - dst + ip_offset - ip_offset
 *           = src_imm + src - dst                   (5)
 *
 * IOW, only the relative displacement of the code block matters.
 */

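/*
 * Worked example (editor's sketch): let the replacement live at
 * src == 0x1000, the patch site at dst == 0x9000, and a 5-byte CALL at the
 * start of the block target a function at 0x5000. Then per (1):
 *
 *   src_imm = 0x5000 - 0x1005 = 0x3ffb
 *
 * and after copying, (5) gives:
 *
 *   dst_imm = 0x3ffb + 0x1000 - 0x9000 = -0x4005
 *
 * which indeed matches (2): 0x5000 - 0x9005 = -0x4005.
 */
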
#define apply_reloc_n(n_, p_, d_)				\
	do {							\
		s32 v = *(s##n_ *)(p_);				\
		v += (d_);					\
		BUG_ON((v >> 31) != (v >> (n_-1)));		\
		*(s##n_ *)(p_) = (s##n_)v;			\
	} while (0)


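/*
 * Editor's note on the BUG_ON() above: it is an overflow check. After adding
 * the displacement, every bit shifted out of the s##n_ field must still equal
 * the sign bit. E.g. for n_ == 8, v = 200 gives (v >> 31) == 0 but
 * (v >> 7) == 1, i.e. the relocated displacement no longer fits in a signed
 * byte and patching must not continue.
 */
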
static __always_inline
void apply_reloc(int n, void *ptr, uintptr_t diff)
{
	switch (n) {
	case 1: apply_reloc_n(8, ptr, diff); break;
	case 2: apply_reloc_n(16, ptr, diff); break;
	case 4: apply_reloc_n(32, ptr, diff); break;
	default: BUG();
	}
}

static __always_inline
bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
{
	u8 *target = src + offset;
	/*
	 * If the target is inside the patched block, it's relative to the
	 * block itself and does not need relocation.
	 */
	return (target < src || target > src + src_len);
}

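/*
 * Illustrative example (editor's sketch): a JMP8 at offset 0 of a 16-byte
 * replacement with rel8 == 10 targets @src + 2 + 10 == @src + 12, which lies
 * inside [src, src + 16]. need_reloc() returns false and the displacement is
 * left alone: it stays correct wherever the block is copied.
 */
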
static void __apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
{
	for (int next, i = 0; i < instrlen; i = next) {
		struct insn insn;

		if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i])))
			return;

		next = i + insn.length;

		switch (insn.opcode.bytes[0]) {
		case 0x0f:
			if (insn.opcode.bytes[1] < 0x80 ||
			    insn.opcode.bytes[1] > 0x8f)
				break;

			fallthrough;	/* Jcc.d32 */
		case 0x70 ... 0x7f:	/* Jcc.d8 */
		case JMP8_INSN_OPCODE:
		case JMP32_INSN_OPCODE:
		case CALL_INSN_OPCODE:
			if (need_reloc(next + insn.immediate.value, repl, repl_len)) {
				apply_reloc(insn.immediate.nbytes,
					    buf + i + insn_offset_immediate(&insn),
					    repl - instr);
			}

			/*
			 * Where possible, convert JMP.d32 into JMP.d8.
			 */
			if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
				s32 imm = insn.immediate.value;
				imm += repl - instr;
				imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE;
				if ((imm >> 31) == (imm >> 7)) {
					buf[i+0] = JMP8_INSN_OPCODE;
					buf[i+1] = (s8)imm;

					memset(&buf[i+2], INT3_INSN_OPCODE, insn.length - 2);
				}
			}
			break;
		}

		if (insn_rip_relative(&insn)) {
			if (need_reloc(next + insn.displacement.value, repl, repl_len)) {
				apply_reloc(insn.displacement.nbytes,
					    buf + i + insn_offset_displacement(&insn),
					    repl - instr);
			}
		}
	}
}

void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
{
	__apply_relocation(buf, instr, instrlen, repl, repl_len);
	optimize_nops(instr, buf, instrlen);
}

/* Low-level backend functions usable from alternative code replacements. */
DEFINE_ASM_FUNC(nop_func, "", .entry.text);
EXPORT_SYMBOL_GPL(nop_func);

noinstr void BUG_func(void)
{
	BUG();
}
EXPORT_SYMBOL(BUG_func);

#define CALL_RIP_REL_OPCODE	0xff
#define CALL_RIP_REL_MODRM	0x15

/*
 * Rewrite the "call BUG_func" replacement to point to the target of the
 * indirect pv_ops call "call *disp(%ip)".
 */
static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a,
			    struct module *mod)
{
	u8 *wr_instr = module_writable_address(mod, instr);
	void *target, *bug = &BUG_func;
	s32 disp;

	if (a->replacementlen != 5 || insn_buff[0] != CALL_INSN_OPCODE) {
		pr_err("ALT_FLAG_DIRECT_CALL set for a non-call replacement instruction\n");
		BUG();
	}

	if (a->instrlen != 6 ||
	    wr_instr[0] != CALL_RIP_REL_OPCODE ||
	    wr_instr[1] != CALL_RIP_REL_MODRM) {
		pr_err("ALT_FLAG_DIRECT_CALL set for unrecognized indirect call\n");
		BUG();
	}

	/* Skip CALL_RIP_REL_OPCODE and CALL_RIP_REL_MODRM */
	disp = *(s32 *)(wr_instr + 2);
#ifdef CONFIG_X86_64
	/* ff 15 00 00 00 00		call *0x0(%rip) */
	/* target address is stored at "next instruction + disp". */
	target = *(void **)(instr + a->instrlen + disp);
#else
	/* ff 15 00 00 00 00		call *0x0 */
	/* target address is stored at disp. */
	target = *(void **)disp;
#endif
	if (!target)
		target = bug;

	/* (BUG_func - .) + (target - BUG_func) := target - . */
	*(s32 *)(insn_buff + 1) += target - bug;

	if (target == &nop_func)
		return 0;

	return 5;
}

static inline u8 *instr_va(struct alt_instr *i)
{
	return (u8 *)&i->instr_offset + i->instr_offset;
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end,
						  struct module *mod)
{
	u8 insn_buff[MAX_PATCH_LEN];
	u8 *instr, *replacement;
	struct alt_instr *a, *b;

	DPRINTK(ALT, "alt table %px, -> %px", start, end);

	/*
	 * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
	 * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
	 * During the process, KASAN becomes confused seeing partial LA57
	 * conversion and triggers a false-positive out-of-bound report.
	 *
	 * Disable KASAN until the patching is complete.
	 */
	kasan_disable_current();

	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insn_buff_sz = 0;
		u8 *wr_instr, *wr_replacement;

		/*
		 * In case of nested ALTERNATIVE()s the outer alternative might
		 * add more padding. To ensure consistent patching find the max
		 * padding for all alt_instr entries for this site (nested
		 * alternatives result in consecutive entries).
		 */
		for (b = a+1; b < end && instr_va(b) == instr_va(a); b++) {
			u8 len = max(a->instrlen, b->instrlen);
			a->instrlen = b->instrlen = len;
		}

		instr = instr_va(a);
		wr_instr = module_writable_address(mod, instr);

		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		wr_replacement = module_writable_address(mod, replacement);

		BUG_ON(a->instrlen > sizeof(insn_buff));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);

		/*
		 * Patch if either:
		 * - feature is present
		 * - feature not present but ALT_FLAG_NOT is set to mean,
		 *   patch if feature is *NOT* present.
		 */
		if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
			memcpy(insn_buff, wr_instr, a->instrlen);
			optimize_nops(instr, insn_buff, a->instrlen);
			text_poke_early(wr_instr, insn_buff, a->instrlen);
			continue;
		}

		DPRINTK(ALT, "feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d) flags: 0x%x",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen, a->flags);

		memcpy(insn_buff, wr_replacement, a->replacementlen);
		insn_buff_sz = a->replacementlen;

		if (a->flags & ALT_FLAG_DIRECT_CALL) {
			insn_buff_sz = alt_replace_call(instr, insn_buff, a,
							mod);
			if (insn_buff_sz < 0)
				continue;
		}

		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
			insn_buff[insn_buff_sz] = 0x90;

		apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);

		DUMP_BYTES(ALT, wr_instr, a->instrlen, "%px:   old_insn: ", instr);
		DUMP_BYTES(ALT, replacement, a->replacementlen, "%px:   rpl_insn: ", replacement);
		DUMP_BYTES(ALT, insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

		text_poke_early(wr_instr, insn_buff, insn_buff_sz);
	}

	kasan_enable_current();
}

static inline bool is_jcc32(struct insn *insn)
{
	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
}

#if defined(CONFIG_MITIGATION_RETPOLINE) && defined(CONFIG_OBJTOOL)

/*
 * CALL/JMP *%\reg
 */
static int emit_indirect(int op, int reg, u8 *bytes)
{
	int i = 0;
	u8 modrm;

	switch (op) {
	case CALL_INSN_OPCODE:
		modrm = 0x10; /* Reg = 2; CALL r/m */
		break;

	case JMP32_INSN_OPCODE:
		modrm = 0x20; /* Reg = 4; JMP r/m */
		break;

	default:
		WARN_ON_ONCE(1);
		return -1;
	}

	if (reg >= 8) {
		bytes[i++] = 0x41; /* REX.B prefix */
		reg -= 8;
	}

	modrm |= 0xc0; /* Mod = 3 */
	modrm += reg;

	bytes[i++] = 0xff; /* opcode */
	bytes[i++] = modrm;

	return i;
}

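/*
 * Illustrative example (editor's sketch): for op == CALL_INSN_OPCODE and
 * reg == 11 (%r11), the register needs a REX.B prefix (reg >= 8) and the
 * ModRM byte becomes (0x10 | 0xc0) + 3 == 0xd3, yielding:
 *
 *   41 ff d3		; call *%r11
 *
 * For op == JMP32_INSN_OPCODE and reg == 0 (%rax) no prefix is needed:
 *
 *   ff e0		; jmp *%rax
 */
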
static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
{
	u8 op = insn->opcode.bytes[0];
	int i = 0;

	/*
	 * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional
	 * tail-calls. Deal with them.
	 */
	if (is_jcc32(insn)) {
		bytes[i++] = op;
		op = insn->opcode.bytes[1];
		goto clang_jcc;
	}

	if (insn->length == 6)
		bytes[i++] = 0x2e; /* CS-prefix */

	switch (op) {
	case CALL_INSN_OPCODE:
		__text_gen_insn(bytes+i, op, addr+i,
				__x86_indirect_call_thunk_array[reg],
				CALL_INSN_SIZE);
		i += CALL_INSN_SIZE;
		break;

	case JMP32_INSN_OPCODE:
clang_jcc:
		__text_gen_insn(bytes+i, op, addr+i,
				__x86_indirect_jump_thunk_array[reg],
				JMP32_INSN_SIZE);
		i += JMP32_INSN_SIZE;
		break;

	default:
		WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr);
		return -1;
	}

	WARN_ON_ONCE(i != insn->length);

	return i;
}

/*
 * Rewrite the compiler generated retpoline thunk calls.
 *
 * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
 * indirect instructions, avoiding the extra indirection.
 *
 * For example, convert:
 *
 *   CALL __x86_indirect_thunk_\reg
 *
 * into:
 *
 *   CALL *%\reg
 *
 * It also tries to inline spectre_v2=retpoline,lfence when size permits.
 */
static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
{
	retpoline_thunk_t *target;
	int reg, ret, i = 0;
	u8 op, cc;

	target = addr + insn->length + insn->immediate.value;
	reg = target - __x86_indirect_thunk_array;

	if (WARN_ON_ONCE(reg & ~0xf))
		return -1;

	/* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
	BUG_ON(reg == 4);

	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
	    !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
			return emit_call_track_retpoline(addr, insn, reg, bytes);

		return -1;
	}

	op = insn->opcode.bytes[0];

	/*
	 * Convert:
	 *
	 *   Jcc.d32 __x86_indirect_thunk_\reg
	 *
	 * into:
	 *
	 *   Jncc.d8 1f
	 *   [ LFENCE ]
	 *   JMP *%\reg
	 *   [ NOP ]
	 * 1:
	 */
	if (is_jcc32(insn)) {
		cc = insn->opcode.bytes[1] & 0xf;
		cc ^= 1; /* invert condition */

		bytes[i++] = 0x70 + cc;        /* Jcc.d8 */
		bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */

		/* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */
		op = JMP32_INSN_OPCODE;
	}

	/*
	 * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
	 */
	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		bytes[i++] = 0x0f;
		bytes[i++] = 0xae;
		bytes[i++] = 0xe8; /* LFENCE */
	}

	ret = emit_indirect(op, reg, bytes + i);
	if (ret < 0)
		return ret;
	i += ret;

	/*
	 * The compiler is supposed to EMIT an INT3 after every unconditional
	 * JMP instruction due to AMD BTC. However, if the compiler is too old
	 * or MITIGATION_SLS isn't enabled, we still need an INT3 after
	 * indirect JMPs even on Intel.
	 */
	if (op == JMP32_INSN_OPCODE && i < insn->length)
		bytes[i++] = INT3_INSN_OPCODE;

	for (; i < insn->length;)
		bytes[i++] = BYTES_NOP1;

	return i;
}

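/*
 * Illustrative example (editor's sketch): with retpolines disabled, a 6-byte
 * Clang-style conditional tail-call
 *
 *   0f 85 xx xx xx xx	; jne.d32 __x86_indirect_thunk_rax
 *
 * is rewritten by patch_retpoline() into:
 *
 *   74 04 ff e0 cc 90	; je.d8 1f; jmp *%rax; int3; nop; 1:
 *
 * i.e. the inverted condition skips the four remaining bytes, the indirect
 * JMP gets its INT3 (AMD BTC), and the rest is NOP padding.
 */
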
/*
 * Generated by 'objtool --retpoline'.
 */
void __init_or_module noinline apply_retpolines(s32 *start, s32 *end,
						struct module *mod)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		void *wr_addr = module_writable_address(mod, addr);
		struct insn insn;
		int len, ret;
		u8 bytes[16];
		u8 op1, op2;

		ret = insn_decode_kernel(&insn, wr_addr);
		if (WARN_ON_ONCE(ret < 0))
			continue;

		op1 = insn.opcode.bytes[0];
		op2 = insn.opcode.bytes[1];

		switch (op1) {
		case CALL_INSN_OPCODE:
		case JMP32_INSN_OPCODE:
			break;

		case 0x0f: /* escape */
			if (op2 >= 0x80 && op2 <= 0x8f)
				break;
			fallthrough;
		default:
			WARN_ON_ONCE(1);
			continue;
		}

		DPRINTK(RETPOLINE, "retpoline at: %pS (%px) len: %d to: %pS",
			addr, addr, insn.length,
			addr + insn.length + insn.immediate.value);

		len = patch_retpoline(addr, &insn, bytes);
		if (len == insn.length) {
			optimize_nops(addr, bytes, len);
			DUMP_BYTES(RETPOLINE, ((u8*)wr_addr), len, "%px: orig: ", addr);
			DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
			text_poke_early(wr_addr, bytes, len);
		}
	}
}

#ifdef CONFIG_MITIGATION_RETHUNK

/*
 * Rewrite the compiler generated return thunk tail-calls.
 *
 * For example, convert:
 *
 *   JMP __x86_return_thunk
 *
 * into:
 *
 *   RET
 */
static int patch_return(void *addr, struct insn *insn, u8 *bytes)
{
	int i = 0;

	/* Patch the custom return thunks... */
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
		i = JMP32_INSN_SIZE;
		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
	} else {
		/* ... or patch them out if not needed. */
		bytes[i++] = RET_INSN_OPCODE;
	}

	for (; i < insn->length;)
		bytes[i++] = INT3_INSN_OPCODE;
	return i;
}

void __init_or_module noinline apply_returns(s32 *start, s32 *end,
					     struct module *mod)
{
	s32 *s;

	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		static_call_force_reinit();

	for (s = start; s < end; s++) {
		void *dest = NULL, *addr = (void *)s + *s;
		void *wr_addr = module_writable_address(mod, addr);
		struct insn insn;
		int len, ret;
		u8 bytes[16];
		u8 op;

		ret = insn_decode_kernel(&insn, wr_addr);
		if (WARN_ON_ONCE(ret < 0))
			continue;

		op = insn.opcode.bytes[0];
		if (op == JMP32_INSN_OPCODE)
			dest = addr + insn.length + insn.immediate.value;

		if (__static_call_fixup(addr, op, dest) ||
		    WARN_ONCE(dest != &__x86_return_thunk,
			      "missing return thunk: %pS-%pS: %*ph",
			      addr, dest, 5, addr))
			continue;

		DPRINTK(RET, "return thunk at: %pS (%px) len: %d to: %pS",
			addr, addr, insn.length,
			addr + insn.length + insn.immediate.value);

		len = patch_return(addr, &insn, bytes);
		if (len == insn.length) {
			DUMP_BYTES(RET, ((u8*)wr_addr), len, "%px: orig: ", addr);
			DUMP_BYTES(RET, ((u8*)bytes), len, "%px: repl: ", addr);
			text_poke_early(wr_addr, bytes, len);
		}
	}
}
#else
void __init_or_module noinline apply_returns(s32 *start, s32 *end,
					     struct module *mod) { }
#endif /* CONFIG_MITIGATION_RETHUNK */

#else /* !CONFIG_MITIGATION_RETPOLINE || !CONFIG_OBJTOOL */

void __init_or_module noinline apply_retpolines(s32 *start, s32 *end,
						struct module *mod) { }
void __init_or_module noinline apply_returns(s32 *start, s32 *end,
					     struct module *mod) { }

#endif /* CONFIG_MITIGATION_RETPOLINE && CONFIG_OBJTOOL */


#ifdef CONFIG_X86_KERNEL_IBT

static void poison_cfi(void *addr, void *wr_addr);

static void __init_or_module poison_endbr(void *addr, void *wr_addr, bool warn)
{
	u32 endbr, poison = gen_endbr_poison();

	if (WARN_ON_ONCE(get_kernel_nofault(endbr, wr_addr)))
		return;

	if (!is_endbr(endbr)) {
		WARN_ON_ONCE(warn);
		return;
	}

	DPRINTK(ENDBR, "ENDBR at: %pS (%px)", addr, addr);

	/*
	 * When we have IBT, the lack of ENDBR will trigger #CP
	 */
	DUMP_BYTES(ENDBR, ((u8*)addr), 4, "%px: orig: ", addr);
	DUMP_BYTES(ENDBR, ((u8*)&poison), 4, "%px: repl: ", addr);
	text_poke_early(wr_addr, &poison, 4);
}

/*
 * Generated by: objtool --ibt
 *
 * Seal the functions for indirect calls by clobbering the ENDBR instructions
 * and the kCFI hash value.
 */
void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end, struct module *mod)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		void *wr_addr = module_writable_address(mod, addr);

		poison_endbr(addr, wr_addr, true);
		if (IS_ENABLED(CONFIG_FINEIBT))
			poison_cfi(addr - 16, wr_addr - 16);
	}
}

#else

void __init_or_module apply_seal_endbr(s32 *start, s32 *end, struct module *mod) { }

#endif /* CONFIG_X86_KERNEL_IBT */


#ifdef CONFIG_CFI_AUTO_DEFAULT
#define __CFI_DEFAULT CFI_AUTO
#elif defined(CONFIG_CFI_CLANG)
#define __CFI_DEFAULT CFI_KCFI
#else
#define __CFI_DEFAULT CFI_OFF
#endif

enum cfi_mode cfi_mode __ro_after_init = __CFI_DEFAULT;

#ifdef CONFIG_CFI_CLANG
struct bpf_insn;

/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
extern unsigned int __bpf_prog_runX(const void *ctx,
				    const struct bpf_insn *insn);

/*
 * Force a reference to the external symbol so the compiler generates
 * __kcfi_typeid.
 */
__ADDRESSABLE(__bpf_prog_runX);

/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
asm (
"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
"	.type	cfi_bpf_hash,@object				\n"
"	.globl	cfi_bpf_hash					\n"
"	.p2align	2, 0x0					\n"
"cfi_bpf_hash:							\n"
"	.long	__kcfi_typeid___bpf_prog_runX			\n"
"	.size	cfi_bpf_hash, 4					\n"
"	.popsection						\n"
);

/* Must match bpf_callback_t */
extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);

__ADDRESSABLE(__bpf_callback_fn);

/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
asm (
"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
"	.type	cfi_bpf_subprog_hash,@object			\n"
"	.globl	cfi_bpf_subprog_hash				\n"
"	.p2align	2, 0x0					\n"
"cfi_bpf_subprog_hash:						\n"
"	.long	__kcfi_typeid___bpf_callback_fn			\n"
"	.size	cfi_bpf_subprog_hash, 4				\n"
"	.popsection						\n"
);

u32 cfi_get_func_hash(void *func)
{
	u32 hash;

	func -= cfi_get_offset();
	switch (cfi_mode) {
	case CFI_FINEIBT:
		func += 7;
		break;
	case CFI_KCFI:
		func += 1;
		break;
	default:
		return 0;
	}

	if (get_kernel_nofault(hash, func))
		return 0;

	return hash;
}
#endif


#ifdef CONFIG_FINEIBT

static bool cfi_rand __ro_after_init = true;
static u32  cfi_seed __ro_after_init;

/*
 * Re-hash the CFI hash with a boot-time seed while making sure the result is
 * not a valid ENDBR instruction.
 */
static u32 cfi_rehash(u32 hash)
{
	hash ^= cfi_seed;
	while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
		bool lsb = hash & 1;
		hash >>= 1;
		if (lsb)
			hash ^= 0x80200003;
	}
	return hash;
}

static __init int cfi_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');
		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "auto")) {
			cfi_mode = CFI_AUTO;
		} else if (!strcmp(str, "off")) {
			cfi_mode = CFI_OFF;
			cfi_rand = false;
		} else if (!strcmp(str, "kcfi")) {
			cfi_mode = CFI_KCFI;
		} else if (!strcmp(str, "fineibt")) {
			cfi_mode = CFI_FINEIBT;
		} else if (!strcmp(str, "norand")) {
			cfi_rand = false;
		} else {
			pr_err("Ignoring unknown cfi option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("cfi", cfi_parse_cmdline);


/*
 * kCFI						FineIBT
 *
 * __cfi_\func:					__cfi_\func:
 *	movl   $0x12345678,%eax	// 5		endbr64			// 4
 *	nop					subl   $0x12345678,%r10d // 7
 *	nop					jz     1f		// 2
 *	nop					ud2			// 2
 *	nop				1:	nop			// 1
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *
 *
 * caller:					caller:
 *	movl	$(-0x12345678),%r10d	// 6	movl   $0x12345678,%r10d // 6
 *	addl	-15(%r11),%r10d		// 4	sub    $16,%r11		// 4
 *	je	1f			// 2	nop4			// 4
 *	ud2				// 2
 * 1:	call	__x86_indirect_thunk_r11 // 5	call   *%r11; nop2;	// 5
 */

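/*
 * Worked example (editor's sketch) of how the kCFI check above adds up: for
 * hash 0x12345678 the caller materializes -0x12345678 == 0xedcba988 in %r10d
 * and adds the hash stored in the movl immediate at -15(%r11); on a match,
 * 0xedcba988 + 0x12345678 wraps to 0 and the JE skips the UD2. In the
 * FineIBT form the callee instead does subl $0x12345678, %r10d against the
 * caller-loaded hash, with JZ meaning "hashes matched".
 */
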
asm( ".pushsection .rodata			\n"
     "fineibt_preamble_start:			\n"
     "	endbr64					\n"
     "	subl	$0x12345678, %r10d		\n"
     "	je	fineibt_preamble_end		\n"
     "	ud2					\n"
     "	nop					\n"
     "fineibt_preamble_end:			\n"
     ".popsection\n"
);

extern u8 fineibt_preamble_start[];
extern u8 fineibt_preamble_end[];

#define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
#define fineibt_preamble_hash 7

asm( ".pushsection .rodata			\n"
     "fineibt_caller_start:			\n"
     "	movl	$0x12345678, %r10d		\n"
     "	sub	$16, %r11			\n"
     ASM_NOP4
     "fineibt_caller_end:			\n"
     ".popsection				\n"
);

extern u8 fineibt_caller_start[];
extern u8 fineibt_caller_end[];

#define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start)
#define fineibt_caller_hash 2

#define fineibt_caller_jmp (fineibt_caller_size - 2)

static u32 decode_preamble_hash(void *addr)
{
	u8 *p = addr;

	/* b8 78 56 34 12		mov    $0x12345678,%eax */
	if (p[0] == 0xb8)
		return *(u32 *)(addr + 1);

	return 0; /* invalid hash value */
}

static u32 decode_caller_hash(void *addr)
{
	u8 *p = addr;

	/* 41 ba 78 56 34 12		mov    $0x12345678,%r10d */
	if (p[0] == 0x41 && p[1] == 0xba)
		return -*(u32 *)(addr + 2);

	/* eb 0c 78 56 34 12		jmp.d8 +12 */
	if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp)
		return -*(u32 *)(addr + 2);

	return 0; /* invalid hash value */
}


/* .retpoline_sites */
static int cfi_disable_callers(s32 *start, s32 *end, struct module *mod)
{
	/*
	 * Disable kCFI by patching in a JMP.d8, this leaves the hash immediate
	 * intact for later usage. Also see decode_caller_hash() and
	 * cfi_rewrite_callers().
	 */
	const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp };
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		void *wr_addr;
		u32 hash;

		addr -= fineibt_caller_size;
		wr_addr = module_writable_address(mod, addr);
		hash = decode_caller_hash(wr_addr);

		if (!hash) /* nocfi callers */
			continue;

		text_poke_early(wr_addr, jmp, 2);
	}

	return 0;
}

static int cfi_enable_callers(s32 *start, s32 *end, struct module *mod)
{
	/*
	 * Re-enable kCFI, undo what cfi_disable_callers() did.
	 */
	const u8 mov[] = { 0x41, 0xba };
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		void *wr_addr;
		u32 hash;

		addr -= fineibt_caller_size;
		wr_addr = module_writable_address(mod, addr);
		hash = decode_caller_hash(wr_addr);
		if (!hash) /* nocfi callers */
			continue;

		text_poke_early(wr_addr, mov, 2);
	}

	return 0;
}

/* .cfi_sites */
static int cfi_rand_preamble(s32 *start, s32 *end, struct module *mod)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		void *wr_addr = module_writable_address(mod, addr);
		u32 hash;

		hash = decode_preamble_hash(wr_addr);
		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
			 addr, addr, 5, addr))
			return -EINVAL;

		hash = cfi_rehash(hash);
		text_poke_early(wr_addr + 1, &hash, 4);
	}

	return 0;
}

static int cfi_rewrite_preamble(s32 *start, s32 *end, struct module *mod)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		void *wr_addr = module_writable_address(mod, addr);
		u32 hash;

		hash = decode_preamble_hash(wr_addr);
		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
			 addr, addr, 5, addr))
			return -EINVAL;

		text_poke_early(wr_addr, fineibt_preamble_start, fineibt_preamble_size);
		WARN_ON(*(u32 *)(wr_addr + fineibt_preamble_hash) != 0x12345678);
		text_poke_early(wr_addr + fineibt_preamble_hash, &hash, 4);
	}

	return 0;
}

static void cfi_rewrite_endbr(s32 *start, s32 *end, struct module *mod)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		void *wr_addr = module_writable_address(mod, addr);

		poison_endbr(addr + 16, wr_addr + 16, false);
	}
}

/* .retpoline_sites */
static int cfi_rand_callers(s32 *start, s32 *end, struct module *mod)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		void *wr_addr;
		u32 hash;

		addr -= fineibt_caller_size;
		wr_addr = module_writable_address(mod, addr);
		hash = decode_caller_hash(wr_addr);
		if (hash) {
			hash = -cfi_rehash(hash);
			text_poke_early(wr_addr + 2, &hash, 4);
		}
	}

	return 0;
}

static int cfi_rewrite_callers(s32 *start, s32 *end, struct module *mod)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		void *wr_addr;
		u32 hash;

		addr -= fineibt_caller_size;
		wr_addr = module_writable_address(mod, addr);
		hash = decode_caller_hash(wr_addr);
		if (hash) {
			text_poke_early(wr_addr, fineibt_caller_start, fineibt_caller_size);
			WARN_ON(*(u32 *)(wr_addr + fineibt_caller_hash) != 0x12345678);
			text_poke_early(wr_addr + fineibt_caller_hash, &hash, 4);
		}
		/* rely on apply_retpolines() */
	}

	return 0;
}


static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
			    s32 *start_cfi, s32 *end_cfi, struct module *mod)
{
	bool builtin = mod ? false : true;
	int ret;

	if (WARN_ONCE(fineibt_preamble_size != 16,
		      "FineIBT preamble wrong size: %ld", fineibt_preamble_size))
		return;

	if (cfi_mode == CFI_AUTO) {
		cfi_mode = CFI_KCFI;
		if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
			cfi_mode = CFI_FINEIBT;
	}

	/*
	 * Rewrite the callers to not use the __cfi_ stubs, such that we might
	 * rewrite them. This disables all CFI. If this succeeds but any of the
	 * later stages fails, we're without CFI.
	 */
	ret = cfi_disable_callers(start_retpoline, end_retpoline, mod);
	if (ret)
		goto err;

	if (cfi_rand) {
		if (builtin) {
			cfi_seed = get_random_u32();
			cfi_bpf_hash = cfi_rehash(cfi_bpf_hash);
			cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash);
		}

		ret = cfi_rand_preamble(start_cfi, end_cfi, mod);
		if (ret)
			goto err;

		ret = cfi_rand_callers(start_retpoline, end_retpoline, mod);
		if (ret)
			goto err;
	}

	switch (cfi_mode) {
	case CFI_OFF:
		if (builtin)
			pr_info("Disabling CFI\n");
		return;

	case CFI_KCFI:
		ret = cfi_enable_callers(start_retpoline, end_retpoline, mod);
		if (ret)
			goto err;

		if (builtin)
			pr_info("Using kCFI\n");
		return;

	case CFI_FINEIBT:
		/* place the FineIBT preamble at func()-16 */
		ret = cfi_rewrite_preamble(start_cfi, end_cfi, mod);
		if (ret)
			goto err;

		/* rewrite the callers to target func()-16 */
		ret = cfi_rewrite_callers(start_retpoline, end_retpoline, mod);
		if (ret)
			goto err;

		/* now that nobody targets func()+0, remove ENDBR there */
		cfi_rewrite_endbr(start_cfi, end_cfi, mod);

		if (builtin)
			pr_info("Using FineIBT CFI\n");
		return;

	default:
		break;
	}

err:
	pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
}

static inline void poison_hash(void *addr)
{
	*(u32 *)addr = 0;
}

static void poison_cfi(void *addr, void *wr_addr)
{
	switch (cfi_mode) {
	case CFI_FINEIBT:
		/*
		 * __cfi_\func:
		 *	osp nopl (%rax)
		 *	subl	$0, %r10d
		 *	jz	1f
		 *	ud2
		 * 1:	nop
		 */
		poison_endbr(addr, wr_addr, false);
		poison_hash(wr_addr + fineibt_preamble_hash);
		break;

	case CFI_KCFI:
		/*
		 * __cfi_\func:
		 *	movl	$0, %eax
		 *	.skip	11, 0x90
		 */
		poison_hash(wr_addr + 1);
		break;

	default:
		break;
	}
}

#else

static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
			    s32 *start_cfi, s32 *end_cfi, struct module *mod)
{
}

#ifdef CONFIG_X86_KERNEL_IBT
static void poison_cfi(void *addr, void *wr_addr) { }
#endif

#endif

void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
		   s32 *start_cfi, s32 *end_cfi, struct module *mod)
{
	return __apply_fineibt(start_retpoline, end_retpoline,
			       start_cfi, end_cfi, mod);
}


#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}

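/*
 * Illustrative example (editor's sketch): on a UP system the LOCK prefix of
 * e.g. "lock incl (%ecx)" (f0 ff 01) is rewritten to a functionally inert DS
 * segment override (3e ff 01), and flipped back by alternatives_enable_smp()
 * should more CPUs come online. Only the single prefix byte recorded in
 * .smp_locks is ever touched.
 */
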
struct smp_alt_module {
	/* what is this ??? */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK(SMP, "locks %p -> %p, text %p -> %p, name %s\n",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */


/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S for more details.
 */

/*
 * We define the int3_magic() function in assembly to control the calling
 * convention such that we can 'call' it from assembly.
 */

extern void int3_magic(unsigned int *ptr); /* defined in asm */

asm (
"	.pushsection	.init.text, \"ax\", @progbits\n"
"	.type		int3_magic, @function\n"
"int3_magic:\n"
	ANNOTATE_NOENDBR
"	movl	$1, (%" _ASM_ARG1 ")\n"
	ASM_RET
"	.size		int3_magic, .-int3_magic\n"
"	.popsection\n"
);

extern void int3_selftest_ip(void); /* defined in asm below */

static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long selftest = (unsigned long)&int3_selftest_ip;
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	OPTIMIZER_HIDE_VAR(selftest);

	if (!regs || user_mode(regs))
		return NOTIFY_DONE;

	if (val != DIE_INT3)
		return NOTIFY_DONE;

	if (regs->ip - INT3_INSN_SIZE != selftest)
		return NOTIFY_DONE;

	int3_emulate_call(regs, (unsigned long)&int3_magic);
	return NOTIFY_STOP;
}

/* Must be noinline to ensure uniqueness of int3_selftest_ip. */
static noinline void __init int3_selftest(void)
{
	static __initdata struct notifier_block int3_exception_nb = {
		.notifier_call	= int3_exception_notify,
		.priority	= INT_MAX-1, /* last */
	};
	unsigned int val = 0;

	BUG_ON(register_die_notifier(&int3_exception_nb));

	/*
	 * Basically: int3_magic(&val); but really complicated :-)
	 *
	 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
	 * notifier above will emulate CALL for us.
	 */
	asm volatile ("int3_selftest_ip:\n\t"
		      ANNOTATE_NOENDBR
		      "    int3; nop; nop; nop; nop\n\t"
		      : ASM_CALL_CONSTRAINT
		      : __ASM_SEL_RAW(a, D) (&val)
		      : "memory");

	BUG_ON(val != 1);

	unregister_die_notifier(&int3_exception_nb);
}

static __initdata int __alt_reloc_selftest_addr;

extern void __init __alt_reloc_selftest(void *arg);
__visible noinline void __init __alt_reloc_selftest(void *arg)
{
	WARN_ON(arg != &__alt_reloc_selftest_addr);
}

static noinline void __init alt_reloc_selftest(void)
{
	/*
	 * Tests apply_relocation().
	 *
	 * This has a relative immediate (CALL) in a place other than the first
	 * instruction and additionally on x86_64 we get a RIP-relative LEA:
	 *
	 *   lea 0x0(%rip),%rdi	# 5d0: R_X86_64_PC32	.init.data+0x5566c
	 *   call +0		# 5d5: R_X86_64_PLT32	__alt_reloc_selftest-0x4
	 *
	 * Getting this wrong will either crash and burn or tickle the WARN
	 * above.
	 */
	asm_inline volatile (
		ALTERNATIVE("", "lea %[mem], %%" _ASM_ARG1 "; call __alt_reloc_selftest;", X86_FEATURE_ALWAYS)
		: ASM_CALL_CONSTRAINT
		: [mem] "m" (__alt_reloc_selftest_addr)
		: _ASM_ARG1
	);
}


void __init alternative_instructions(void)
{
	int3_selftest();

	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the code being patched.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	/*
	 * Make sure to set (artificial) features depending on used paravirt
	 * functions which can later influence alternative patching.
	 */
	paravirt_set_cap();

	__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
			__cfi_sites, __cfi_sites_end, NULL);

	/*
	 * Rewrite the retpolines, must be done before alternatives since
	 * those can rewrite the retpoline thunks.
	 */
	apply_retpolines(__retpoline_sites, __retpoline_sites_end, NULL);
	apply_returns(__return_sites, __return_sites_end, NULL);

	apply_alternatives(__alt_instructions, __alt_instructions_end, NULL);

	/*
	 * Now all calls are established. Apply the call thunks if
	 * required.
	 */
	callthunks_patch_builtin_calls();

	/*
	 * Seal all functions that do not have their address taken.
	 */
	apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end, NULL);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1) {
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	}
#endif

	restart_nmi();
	alternatives_patched = 1;

	alt_reloc_selftest();
}


/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Modules text is marked initially as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		sync_core();
		local_irq_restore(flags);

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}


typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm allows setting temporary mappings that are not
 * accessible by other CPUs. Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs. Using a temporary mm also avoids TLB
 * shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core. To
 *          harden security IRQs must be disabled while the temporary mm is
 *          loaded, thereby preventing interrupt handler bugs from overriding
 *          the kernel memory protection.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();

	/*
	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
	 * with a stale address space WITHOUT being in lazy mode after
	 * restoring the previous mm.
	 */
	if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
		leave_mm();

	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used. Userspace might set up watchpoints on addresses that are used
	 * in the temporary mm, which would lead to wrong signals being sent or
	 * crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also causes
	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
	 * undesirable, but still seems reasonable as the code that runs in the
	 * temporary mm should be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}

__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();

	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
	cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(poking_mm));

	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}

1876
text_poke_memcpy(void * dst,const void * src,size_t len)1877 static void text_poke_memcpy(void *dst, const void *src, size_t len)
1878 {
1879 memcpy(dst, src, len);
1880 }
1881
text_poke_memset(void * dst,const void * src,size_t len)1882 static void text_poke_memset(void *dst, const void *src, size_t len)
1883 {
1884 int c = *(const int *)src;
1885
1886 memset(dst, c, len);
1887 }
1888
1889 typedef void text_poke_f(void *dst, const void *src, size_t len);
1890
__text_poke(text_poke_f func,void * addr,const void * src,size_t len)1891 static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t len)
1892 {
1893 bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
1894 struct page *pages[2] = {NULL};
1895 temp_mm_state_t prev;
1896 unsigned long flags;
1897 pte_t pte, *ptep;
1898 spinlock_t *ptl;
1899 pgprot_t pgprot;
1900
1901 /*
1902 * While boot memory allocator is running we cannot use struct pages as
1903 * they are not yet initialized. There is no way to recover.
1904 */
1905 BUG_ON(!after_bootmem);
1906
1907 if (!core_kernel_text((unsigned long)addr)) {
1908 pages[0] = vmalloc_to_page(addr);
1909 if (cross_page_boundary)
1910 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
1911 } else {
1912 pages[0] = virt_to_page(addr);
1913 WARN_ON(!PageReserved(pages[0]));
1914 if (cross_page_boundary)
1915 pages[1] = virt_to_page(addr + PAGE_SIZE);
1916 }
1917 /*
1918 * If something went wrong, crash and burn since recovery paths are not
1919 * implemented.
1920 */
1921 BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
1922
1923 /*
1924 * Map the page without the global bit, as TLB flushing is done with
1925 * flush_tlb_mm_range(), which is intended for non-global PTEs.
1926 */
1927 pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
1928
1929 /*
1930 * The lock is not really needed, but this allows to avoid open-coding.
1931 */
1932 ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
1933
1934 /*
1935 * This must not fail; preallocated in poking_init().
1936 */
1937 VM_BUG_ON(!ptep);
1938
1939 local_irq_save(flags);
1940
1941 pte = mk_pte(pages[0], pgprot);
1942 set_pte_at(poking_mm, poking_addr, ptep, pte);
1943
1944 if (cross_page_boundary) {
1945 pte = mk_pte(pages[1], pgprot);
1946 set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
1947 }
1948
1949 /*
1950 * Loading the temporary mm behaves as a compiler barrier, which
1951 * guarantees that the PTE will be set at the time memcpy() is done.
1952 */
1953 prev = use_temporary_mm(poking_mm);
1954
1955 kasan_disable_current();
1956 func((u8 *)poking_addr + offset_in_page(addr), src, len);
1957 kasan_enable_current();
1958
1959 /*
1960 * Use a compiler barrier to ensure that the PTE is cleared only after
1961 * the copy instructions have been issued.
1962 */
1963 barrier();
1964
1965 pte_clear(poking_mm, poking_addr, ptep);
1966 if (cross_page_boundary)
1967 pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
1968
1969 /*
1970 * Loading the previous page-table hierarchy requires a serializing
1971 * instruction that already allows the core to see the updated version.
1972 * Xen-PV is assumed to serialize execution in a similar manner.
1973 */
1974 unuse_temporary_mm(prev);
1975
1976 /*
1977 * Flushing the TLB might involve IPIs, which would require enabled
1978 * IRQs, but none are needed since the mm is not in use at this point.
1979 */
1980 flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
1981 (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
1982 PAGE_SHIFT, false);
1983
1984 if (func == text_poke_memcpy) {
1985 /*
1986 * If the text does not match what we just wrote then something is
1987 * fundamentally screwy; there's nothing we can really do about that.
1988 */
1989 BUG_ON(memcmp(addr, src, len));
1990 }
1991
1992 local_irq_restore(flags);
1993 pte_unmap_unlock(ptep, ptl);
1994 return addr;
1995 }
1996
1997 /**
1998 * text_poke - Update instructions on a live kernel
1999 * @addr: address to modify
2000 * @opcode: source of the copy
2001 * @len: length to copy
2002 *
2003 * Only an atomic text poke/set is allowed when not doing early patching.
2004 * This means the size must be writable atomically and the address must be
2005 * aligned in a way that permits an atomic write. It also makes sure the
2006 * write fits on a single page.
2007 *
2008 * Note that the caller must ensure that if the modified code is part of a
2009 * module, the module is not removed while the code is being poked. This can be achieved
2010 * by registering a module notifier, and ordering module removal and patching
2011 * through a mutex.
2012 */
2013 void *text_poke(void *addr, const void *opcode, size_t len)
2014 {
2015 lockdep_assert_held(&text_mutex);
2016
2017 return __text_poke(text_poke_memcpy, addr, opcode, len);
2018 }
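/*
 * Illustrative sketch (not part of this file) of the module-notifier scheme
 * the comment above suggests: a hypothetical patching client orders module
 * removal against its pokes with its own mutex. The names my_patch_mutex
 * and my_module_going are made up.
 */
static DEFINE_MUTEX(my_patch_mutex);

static int __maybe_unused my_module_going(struct notifier_block *nb,
					  unsigned long action, void *data)
{
	if (action != MODULE_STATE_GOING)
		return NOTIFY_DONE;

	/*
	 * Anyone poking one of our sites holds my_patch_mutex, so taking it
	 * here waits for in-flight pokes and blocks new ones while the sites
	 * inside the departing module are invalidated.
	 */
	mutex_lock(&my_patch_mutex);
	/* ... drop patch sites that live in the departing module ... */
	mutex_unlock(&my_patch_mutex);

	return NOTIFY_OK;
}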
2019
2020 /**
2021 * text_poke_kgdb - Update instructions on a live kernel by kgdb
2022 * @addr: address to modify
2023 * @opcode: source of the copy
2024 * @len: length to copy
2025 *
2026 * Only an atomic text poke/set is allowed when not doing early patching.
2027 * This means the size must be writable atomically and the address must be
2028 * aligned in a way that permits an atomic write. It also makes sure the
2029 * write fits on a single page.
2030 *
2031 * Context: should only be used by kgdb, which ensures that no other core is
2032 * running, even though it does not hold the text_mutex.
2033 */
2034 void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
2035 {
2036 return __text_poke(text_poke_memcpy, addr, opcode, len);
2037 }
2038
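/*
 * Like text_poke(), but for ranges of arbitrary length: the range is written
 * in chunks that never span more than the two pages that __text_poke() can
 * map at once.
 */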
2039 void *text_poke_copy_locked(void *addr, const void *opcode, size_t len,
2040 bool core_ok)
2041 {
2042 unsigned long start = (unsigned long)addr;
2043 size_t patched = 0;
2044
2045 if (WARN_ON_ONCE(!core_ok && core_kernel_text(start)))
2046 return NULL;
2047
2048 while (patched < len) {
2049 unsigned long ptr = start + patched;
2050 size_t s;
2051
2052 s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2053
2054 __text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s);
2055 patched += s;
2056 }
2057 return addr;
2058 }
2059
2060 /**
2061 * text_poke_copy - Copy instructions into (an unused part of) RX memory
2062 * @addr: address to modify
2063 * @opcode: source of the copy
2064 * @len: length to copy, could be more than 2x PAGE_SIZE
2065 *
2066 * Not safe against concurrent execution; useful for JITs to dump
2067 * new code blocks into unused regions of RX memory. Can be used in
2068 * conjunction with synchronize_rcu_tasks() to wait for existing
2069 * execution to quiesce after having made sure no existing function
2070 * pointers are live.
2071 */
2072 void *text_poke_copy(void *addr, const void *opcode, size_t len)
2073 {
2074 mutex_lock(&text_mutex);
2075 addr = text_poke_copy_locked(addr, opcode, len, false);
2076 mutex_unlock(&text_mutex);
2077 return addr;
2078 }
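/*
 * Illustrative sketch (hypothetical names, assumes CONFIG_TASKS_RCU): how a
 * JIT might recycle an RX region per the comment above. The caller first
 * unpublishes every function pointer into the region, waits for execution to
 * quiesce, and only then dumps new code into it with text_poke_copy().
 */
static void __maybe_unused jit_reuse_slot_example(void *rx_slot,
						  const u8 *new_code, size_t len)
{
	/* No new entries exist; wait out tasks still running inside the slot. */
	synchronize_rcu_tasks();

	/* Now the slot is quiescent and can safely be rewritten. */
	text_poke_copy(rx_slot, new_code, len);
}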
2079
2080 /**
2081 * text_poke_set - memset into (an unused part of) RX memory
2082 * @addr: address to modify
2083 * @c: the byte to fill the area with
2084 * @len: length to copy, could be more than 2x PAGE_SIZE
2085 *
2086 * This is useful for overwriting unused regions of RX memory with illegal
2087 * instructions.
2088 */
2089 void *text_poke_set(void *addr, int c, size_t len)
2090 {
2091 unsigned long start = (unsigned long)addr;
2092 size_t patched = 0;
2093
2094 if (WARN_ON_ONCE(core_kernel_text(start)))
2095 return NULL;
2096
2097 mutex_lock(&text_mutex);
2098 while (patched < len) {
2099 unsigned long ptr = start + patched;
2100 size_t s;
2101
2102 s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2103
2104 __text_poke(text_poke_memset, (void *)ptr, (void *)&c, s);
2105 patched += s;
2106 }
2107 mutex_unlock(&text_mutex);
2108 return addr;
2109 }
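/*
 * Illustrative sketch (hypothetical helper): poisoning a retired RX region
 * with INT3 so that any stale jump into it traps instead of executing
 * leftover bytes.
 */
static void __maybe_unused poison_rx_region_example(void *region, size_t size)
{
	text_poke_set(region, INT3_INSN_OPCODE, size);
}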
2110
2111 static void do_sync_core(void *info)
2112 {
2113 sync_core();
2114 }
2115
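/*
 * Force every CPU through a serializing instruction (via IPI) so that all of
 * them fetch the freshly written instruction bytes.
 */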
2116 void text_poke_sync(void)
2117 {
2118 on_each_cpu(do_sync_core, NULL, 1);
2119 }
2120
2121 /*
2122 * NOTE: crazy scheme to allow patching Jcc.d32 without increasing the size
2123 * of this struct. When len == 6 everything is prefixed with 0x0f and we map
2124 * the opcode to its Jcc.d8 form, using len to distinguish the two.
2125 */
2126 struct text_poke_loc {
2127 /* addr := _stext + rel_addr */
2128 s32 rel_addr;
2129 s32 disp;
2130 u8 len;
2131 u8 opcode;
2132 const u8 text[POKE_MAX_OPCODE_SIZE];
2133 /* see text_poke_bp_batch() */
2134 u8 old;
2135 };
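/*
 * Example of the scheme above: "jz" with a 32-bit displacement encodes as
 * 0f 84 <d32> (6 bytes). It is stored in @text with the 0f prefix stripped,
 * and @opcode holds the Jcc.d8 form 0x74 (0x84 - 0x10); len == 6 tells the
 * patching code to re-emit the 0f byte.
 */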
2136
2137 struct bp_patching_desc {
2138 struct text_poke_loc *vec;
2139 int nr_entries;
2140 atomic_t refs;
2141 };
2142
2143 static struct bp_patching_desc bp_desc;
2144
2145 static __always_inline
2146 struct bp_patching_desc *try_get_desc(void)
2147 {
2148 struct bp_patching_desc *desc = &bp_desc;
2149
2150 if (!raw_atomic_inc_not_zero(&desc->refs))
2151 return NULL;
2152
2153 return desc;
2154 }
2155
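/*
 * Pairs with atomic_cond_read_acquire() in text_poke_bp_batch(): the full
 * barrier makes sure all our reads of the descriptor complete before the
 * reference drop becomes visible.
 */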
2156 static __always_inline void put_desc(void)
2157 {
2158 struct bp_patching_desc *desc = &bp_desc;
2159
2160 smp_mb__before_atomic();
2161 raw_atomic_dec(&desc->refs);
2162 }
2163
2164 static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
2165 {
2166 return _stext + tp->rel_addr;
2167 }
2168
2169 static __always_inline int patch_cmp(const void *key, const void *elt)
2170 {
2171 struct text_poke_loc *tp = (struct text_poke_loc *) elt;
2172
2173 if (key < text_poke_addr(tp))
2174 return -1;
2175 if (key > text_poke_addr(tp))
2176 return 1;
2177 return 0;
2178 }
2179
2180 noinstr int poke_int3_handler(struct pt_regs *regs)
2181 {
2182 struct bp_patching_desc *desc;
2183 struct text_poke_loc *tp;
2184 int ret = 0;
2185 void *ip;
2186
2187 if (user_mode(regs))
2188 return 0;
2189
2190 /*
2191 * Having observed our INT3 instruction, we now must observe
2192 * bp_desc with non-zero refcount:
2193 *
2194 * bp_desc.refs = 1 INT3
2195 * WMB RMB
2196 * write INT3 if (bp_desc.refs != 0)
2197 */
2198 smp_rmb();
2199
2200 desc = try_get_desc();
2201 if (!desc)
2202 return 0;
2203
2204 /*
2205 * Discount the INT3. See text_poke_bp_batch().
2206 */
2207 ip = (void *) regs->ip - INT3_INSN_SIZE;
2208
2209 /*
2210 * Skip the binary search if there is a single member in the vector.
2211 */
2212 if (unlikely(desc->nr_entries > 1)) {
2213 tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
2214 sizeof(struct text_poke_loc),
2215 patch_cmp);
2216 if (!tp)
2217 goto out_put;
2218 } else {
2219 tp = desc->vec;
2220 if (text_poke_addr(tp) != ip)
2221 goto out_put;
2222 }
2223
2224 ip += tp->len;
2225
2226 switch (tp->opcode) {
2227 case INT3_INSN_OPCODE:
2228 /*
2229 * Someone poked an explicit INT3; they'll want to handle it,
2230 * so do not consume.
2231 */
2232 goto out_put;
2233
2234 case RET_INSN_OPCODE:
2235 int3_emulate_ret(regs);
2236 break;
2237
2238 case CALL_INSN_OPCODE:
2239 int3_emulate_call(regs, (long)ip + tp->disp);
2240 break;
2241
2242 case JMP32_INSN_OPCODE:
2243 case JMP8_INSN_OPCODE:
2244 int3_emulate_jmp(regs, (long)ip + tp->disp);
2245 break;
2246
2247 case 0x70 ... 0x7f: /* Jcc */
2248 int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
2249 break;
2250
2251 default:
2252 BUG();
2253 }
2254
2255 ret = 1;
2256
2257 out_put:
2258 put_desc();
2259 return ret;
2260 }
2261
2262 #define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
2263 static struct text_poke_loc tp_vec[TP_VEC_MAX];
2264 static int tp_vec_nr;
2265
2266 /**
2267 * text_poke_bp_batch() -- update instructions on live kernel on SMP
2268 * @tp: vector of instructions to patch
2269 * @nr_entries: number of entries in the vector
2270 *
2271 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
2272 * We completely avoid stop_machine() here, and achieve the
2273 * synchronization using the int3 breakpoint.
2274 *
2275 * The way it is done:
2276 * - For each entry in the vector:
2277 * - add an int3 trap to the address that will be patched
2278 * - sync cores
2279 * - For each entry in the vector:
2280 * - update all but the first byte of the patched range
2281 * - sync cores
2282 * - For each entry in the vector:
2283 * - replace the first byte (int3) with the first byte of the
2284 * replacement opcode
2285 * - sync cores
2286 */
2287 static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
2288 {
2289 unsigned char int3 = INT3_INSN_OPCODE;
2290 unsigned int i;
2291 int do_sync;
2292
2293 lockdep_assert_held(&text_mutex);
2294
2295 bp_desc.vec = tp;
2296 bp_desc.nr_entries = nr_entries;
2297
2298 /*
2299 * Corresponds to the implicit memory barrier in try_get_desc() to
2300 * ensure reading a non-zero refcount provides up-to-date bp_desc data.
2301 */
2302 atomic_set_release(&bp_desc.refs, 1);
2303
2304 /*
2305 * Function tracing can enable thousands of places that need to be
2306 * updated. This can take quite some time, and with full kernel debugging
2307 * enabled, this could cause the softlockup watchdog to trigger.
2308 * This function gets called every 256 entries added to be patched.
2309 * Call cond_resched() here to make sure that other tasks can get scheduled
2310 * while processing all the functions being patched.
2311 */
2312 cond_resched();
2313
2314 /*
2315 * The corresponding read barrier is in the int3 handler, making sure
2316 * that nr_entries and the descriptor data are correctly ordered wrt. patching.
2317 */
2318 smp_wmb();
2319
2320 /*
2321 * First step: add an int3 trap to the address that will be patched.
2322 */
2323 for (i = 0; i < nr_entries; i++) {
2324 tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
2325 text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
2326 }
2327
2328 text_poke_sync();
2329
2330 /*
2331 * Second step: update all but the first byte of the patched range.
2332 */
2333 for (do_sync = 0, i = 0; i < nr_entries; i++) {
2334 u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
2335 u8 _new[POKE_MAX_OPCODE_SIZE+1];
2336 const u8 *new = tp[i].text;
2337 int len = tp[i].len;
2338
2339 if (len - INT3_INSN_SIZE > 0) {
2340 memcpy(old + INT3_INSN_SIZE,
2341 text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2342 len - INT3_INSN_SIZE);
2343
2344 if (len == 6) {
2345 _new[0] = 0x0f;
2346 memcpy(_new + 1, new, 5);
2347 new = _new;
2348 }
2349
2350 text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2351 new + INT3_INSN_SIZE,
2352 len - INT3_INSN_SIZE);
2353
2354 do_sync++;
2355 }
2356
2357 /*
2358 * Emit a perf event to record the text poke, primarily to
2359 * support Intel PT decoding which must walk the executable code
2360 * to reconstruct the trace. The flow up to here is:
2361 * - write INT3 byte
2362 * - IPI-SYNC
2363 * - write instruction tail
2364 * At this point the actual control flow will be through the
2365 * INT3 and handler and not hit the old or new instruction.
2366 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
2367 * can still be decoded. Subsequently:
2368 * - emit RECORD_TEXT_POKE with the new instruction
2369 * - IPI-SYNC
2370 * - write first byte
2371 * - IPI-SYNC
2372 * So before the text poke event timestamp, the decoder will see
2373 * either the old instruction flow or FUP/TIP of INT3. After the
2374 * text poke event timestamp, the decoder will see either the
2375 * new instruction flow or FUP/TIP of INT3. Thus decoders can
2376 * use the timestamp as the point at which to modify the
2377 * executable code.
2378 * The old instruction is recorded so that the event can be
2379 * processed forwards or backwards.
2380 */
2381 perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
2382 }
2383
2384 if (do_sync) {
2385 /*
2386 * According to Intel, this core syncing is very likely
2387 * not necessary and we'd be safe even without it. But
2388 * better safe than sorry (plus there's not only Intel).
2389 */
2390 text_poke_sync();
2391 }
2392
2393 /*
2394 * Third step: replace the first byte (int3) with the first byte of the
2395 * replacement opcode.
2396 */
2397 for (do_sync = 0, i = 0; i < nr_entries; i++) {
2398 u8 byte = tp[i].text[0];
2399
2400 if (tp[i].len == 6)
2401 byte = 0x0f;
2402
2403 if (byte == INT3_INSN_OPCODE)
2404 continue;
2405
2406 text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
2407 do_sync++;
2408 }
2409
2410 if (do_sync)
2411 text_poke_sync();
2412
2413 /*
2414 * Drop the initial reference and wait for all refs to reach zero.
2415 */
2416 if (!atomic_dec_and_test(&bp_desc.refs))
2417 atomic_cond_read_acquire(&bp_desc.refs, !VAL);
2418 }
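/*
 * Worked example (illustrative) of the three steps above: patching a 5-byte
 * NOP (0f 1f 44 00 00) into "call foo" (e8 <rel32>):
 *
 *   1) cc 1f 44 00 00  - int3 armed; the handler emulates the new call
 *   2) cc <rel32>      - tail written while the int3 still guards byte 0
 *   3) e8 <rel32>      - first byte completes the new instruction
 *
 * The sync_core() IPIs between the steps make sure no CPU can execute a
 * torn mix of old and new bytes.
 */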
2419
2420 static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
2421 const void *opcode, size_t len, const void *emulate)
2422 {
2423 struct insn insn;
2424 int ret, i = 0;
2425
2426 if (len == 6)
2427 i = 1;
2428 memcpy((void *)tp->text, opcode+i, len-i);
2429 if (!emulate)
2430 emulate = opcode;
2431
2432 ret = insn_decode_kernel(&insn, emulate);
2433 BUG_ON(ret < 0);
2434
2435 tp->rel_addr = addr - (void *)_stext;
2436 tp->len = len;
2437 tp->opcode = insn.opcode.bytes[0];
2438
2439 if (is_jcc32(&insn)) {
2440 /*
2441 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
2442 */
2443 tp->opcode = insn.opcode.bytes[1] - 0x10;
2444 }
2445
2446 switch (tp->opcode) {
2447 case RET_INSN_OPCODE:
2448 case JMP32_INSN_OPCODE:
2449 case JMP8_INSN_OPCODE:
2450 /*
2451 * Control flow instructions without implied execution of the
2452 * next instruction can be padded with INT3.
2453 */
2454 for (i = insn.length; i < len; i++)
2455 BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
2456 break;
2457
2458 default:
2459 BUG_ON(len != insn.length);
2460 }
2461
2462 switch (tp->opcode) {
2463 case INT3_INSN_OPCODE:
2464 case RET_INSN_OPCODE:
2465 break;
2466
2467 case CALL_INSN_OPCODE:
2468 case JMP32_INSN_OPCODE:
2469 case JMP8_INSN_OPCODE:
2470 case 0x70 ... 0x7f: /* Jcc */
2471 tp->disp = insn.immediate.value;
2472 break;
2473
2474 default: /* assume NOP */
2475 switch (len) {
2476 case 2: /* NOP2 -- emulate as JMP8+0 */
2477 BUG_ON(memcmp(emulate, x86_nops[len], len));
2478 tp->opcode = JMP8_INSN_OPCODE;
2479 tp->disp = 0;
2480 break;
2481
2482 case 5: /* NOP5 -- emulate as JMP32+0 */
2483 BUG_ON(memcmp(emulate, x86_nops[len], len));
2484 tp->opcode = JMP32_INSN_OPCODE;
2485 tp->disp = 0;
2486 break;
2487
2488 default: /* unknown instruction */
2489 BUG();
2490 }
2491 break;
2492 }
2493 }
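/*
 * On the NOP emulation above: a NOP has no architectural effect, so a CPU
 * that traps on the int3 while the site is being rewritten can simply be
 * resumed at the next instruction, which is exactly what a JMP +0 of the
 * same length does.
 */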
2494
2495 /*
2496 * We rely hard on the tp_vec being ordered; ensure this is so by flushing
2497 * early if needed.
2498 */
2499 static bool tp_order_fail(void *addr)
2500 {
2501 struct text_poke_loc *tp;
2502
2503 if (!tp_vec_nr)
2504 return false;
2505
2506 if (!addr) /* force */
2507 return true;
2508
2509 tp = &tp_vec[tp_vec_nr - 1];
2510 if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
2511 return true;
2512
2513 return false;
2514 }
2515
2516 static void text_poke_flush(void *addr)
2517 {
2518 if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
2519 text_poke_bp_batch(tp_vec, tp_vec_nr);
2520 tp_vec_nr = 0;
2521 }
2522 }
2523
2524 void text_poke_finish(void)
2525 {
2526 text_poke_flush(NULL);
2527 }
2528
2529 void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
2530 {
2531 struct text_poke_loc *tp;
2532
2533 text_poke_flush(addr);
2534
2535 tp = &tp_vec[tp_vec_nr++];
2536 text_poke_loc_init(tp, addr, opcode, len, emulate);
2537 }
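/*
 * Illustrative sketch (hypothetical caller): queueing a sorted batch of
 * sites, ftrace-style. Sites must arrive in ascending address order or
 * tp_order_fail() forces an early flush; text_poke_finish() flushes the
 * remainder. Here every site is rewritten to a 5-byte NOP, which
 * text_poke_loc_init() emulates as JMP32+0.
 */
static void __maybe_unused example_nop_out_sites(void **sorted_sites, int nr)
{
	int i;

	lockdep_assert_held(&text_mutex);

	for (i = 0; i < nr; i++)
		text_poke_queue(sorted_sites[i], x86_nops[5], 5, NULL);

	text_poke_finish();
}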
2538
2539 /**
2540 * text_poke_bp() -- update instructions on live kernel on SMP
2541 * @addr: address to patch
2542 * @opcode: opcode of new instruction
2543 * @len: length to copy
2544 * @emulate: instruction to be emulated
2545 *
2546 * Update a single instruction with a vector on the stack, avoiding
2547 * dynamically allocated memory. This function should be used when it is
2548 * not possible to allocate memory.
2549 */
2550 void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
2551 {
2552 struct text_poke_loc tp;
2553
2554 text_poke_loc_init(&tp, addr, opcode, len, emulate);
2555 text_poke_bp_batch(&tp, 1);
2556 }
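/*
 * Illustrative sketch (hypothetical names): rewriting a 5-byte site into a
 * direct call with text_poke_bp(). The caller must hold text_mutex.
 */
static void __maybe_unused example_patch_call(void *site, void *target)
{
	u8 insn[CALL_INSN_SIZE];
	s32 disp = (s32)((long)target - ((long)site + CALL_INSN_SIZE));

	insn[0] = CALL_INSN_OPCODE;
	memcpy(insn + 1, &disp, sizeof(disp));

	text_poke_bp(site, insn, sizeof(insn), NULL);
}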
2557