Lines Matching +full:alternative +full:- +full:a

1 /* SPDX-License-Identifier: GPL-2.0 */
10 #include <asm/alternative.h>
12 #include <asm/msr-index.h>
21 * The tracking does not use a counter. It uses arithmetic shift
36 * After a return buffer fill the depth is credited 12 calls before the
39 * There is an inaccuracy for situations like this:
49 * but there is still a cushion vs. the RSB depth. The algorithm does not
78 #include <asm/asm-offsets.h>
81 movq $-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);
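Putting these fragments together: the depth word is seeded fully credited (the movq $-1 above), calls shift it arithmetically so the credit saturates, and returns shift it logically until the credit runs out and the RSB has to be stuffed again. Below is a user-space C model of that accounting, assuming the shift count of 5 described in this header's full comment; the stop condition is an illustrative reading, not kernel code.

	#include <stdint.h>
	#include <stdio.h>

	#define CD_SHIFT 5			/* assumed shift count per call/return */

	static uint64_t depth;			/* models pcpu_hot.call_depth */

	/* RSB freshly stuffed: full credit, mirrors the movq $-1 above */
	static void credit(void)  { depth = (uint64_t)-1; }
	/* call: arithmetic shift (as gcc provides on signed), sign bit sticks */
	static void on_call(void) { depth = (uint64_t)((int64_t)depth >> CD_SHIFT); }
	/* return: logical shift, zeroes eat the credit from the right */
	static void on_ret(void)  { depth <<= CD_SHIFT; }

	int main(void)
	{
		int rets = 0;

		credit();
		on_call();			/* saturated: stays all-ones */
		do {
			on_ret();
			rets++;
		} while (depth);		/* zero: credit exhausted, stuff again */

		/* prints 13: twelve returns ride on the fill, the 13th forces
		 * a stuff, matching "credited 12 calls" in the comment above */
		printf("stuff needed on return %d\n", rets);
		return 0;
	}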
108 * Each entry in the RSB, if used for a speculative 'ret', contains an
111 * This is required in various cases for retpoline and IBRS-based
117 * We define a CPP macro such that it can be used from both .S files and
118 * inline assembly. It's possible to do a .macro and then include that
119 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
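The point of a CPP macro, rather than an assembler .macro, is that the same token sequence can be dropped into a .S file directly and fed to inline asm via __stringify(). A toy demonstration of the pattern; the macro name here is invented, not from this header:

	#include <linux/stringify.h>

	/* body written as raw assembler text */
	#define EXAMPLE_TWO_NOPS	\
		nop;			\
		nop

	#ifdef __ASSEMBLY__
		EXAMPLE_TWO_NOPS	/* a .S file expands it in place */
	#else
	static inline void example_two_nops(void)
	{
		/* C turns the same tokens into an inline-asm string */
		asm volatile(__stringify(EXAMPLE_TWO_NOPS));
	}
	#endif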
137 * Google experimented with loop-unrolling and this turned out to be
138 * the optimal version - two calls, each with their own speculation
139 * trap should their return address end up getting used, in a loop.
157 * do a loop.
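Unrolling two fill calls per iteration halves the loop overhead while still planting a safe target in every RSB entry; each call is chased by a trap so a consumed entry cannot speculate anywhere useful. Roughly the shape such a loop takes; the register choice, label numbers, and stack fix-up are illustrative:

	mov	$(nr/2), reg		/* two RSB entries per iteration */
771:
	call	772f			/* plant a safe return target */
	int3				/* speculation trap if the entry is used */
772:
	call	773f
	int3
773:
	add	$(2 * 8), %rsp		/* pop the two stale return addresses */
	dec	reg
	jnz	771b
	lfence				/* keep the loop branch from speculating past */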
167 * Stuff a single RSB slot.
169 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
170 * forced to retire before letting a RET instruction execute.
172 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
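For the single-slot case, one call plants the safe entry and a serializing LFENCE guarantees the CALL has retired before any later RET can execute, which is exactly the ordering the comment demands on PBRSB-vulnerable parts. An illustrative sequence (label number made up):

	call	772f		/* occupy one RSB entry with a safe target */
	int3			/* trap for any speculative consumer */
772:
	lfence			/* CALL must retire before a RET may run */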
189 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
201 * Emits a conditional CS prefix that is compatible with
202 * -mindirect-branch-cs-prefix.
213 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
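In .S files these take the target register as an argument and expand, via alternatives, to either a bare indirect branch or a branch through the per-register thunk. Illustrative call sites, not lifted from this header:

	CALL_NOSPEC rdi		/* instead of: call *%rdi */
	JMP_NOSPEC rax		/* instead of: jmp *%rax */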
241 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
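The macro form keeps call sites to one line and only patches the fill in when the feature bit is set. A representative invocation, shown as a sketch; the register is clobbered as the loop counter:

	FILL_RETURN_BUFFER %rbx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW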
255 * must be the target of a CALL instruction instead of indirectly
256 * jumping to a wrapper which then calls it. Therefore, this macro is
275 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
276 * where we have a stack but before any RET instruction.
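The constraint is about ordering: the untraining call needs kernel page tables (hence after *SWITCH_TO_KERNEL_CR3), a usable stack, and it must run before the first RET. A sketch of that placement in an entry path, with macro names as in current kernels and an illustrative scratch register:

	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* kernel mappings now in place */
	/* stack is valid here and no RET has executed yet */
	UNTRAIN_RET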
300 ALTERNATIVE "", \
307 * attacks such as MDS. On affected systems a microcode update overloaded VERW
314 ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
317 * In 32-bit mode, the memory operand must be a %cs reference. The data
321 ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
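Both the 64-bit and 32-bit variants hang off an empty-original ALTERNATIVE, so unaffected CPUs see only NOPs while affected ones execute VERW against mds_verw_sel. In current kernels the assembly wrapper is CLEAR_CPU_BUFFERS; an illustrative call site in an exit-to-user path:

	/* flush CPU buffers right before register state is restored */
	CLEAR_CPU_BUFFERS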
327 ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
331 ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
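The branch-history clearing follows the same empty-original pattern, patched to a call of clear_bhb_loop; one feature bit covers the normal entry paths and a separate one covers VM exit only. In kernels carrying this mitigation the wrappers are named along these lines:

	CLEAR_BRANCH_HISTORY		/* on syscall and interrupt entry */
	CLEAR_BRANCH_HISTORY_VMEXIT	/* on VM exit, VMEXIT-only flavor */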
386 ALTERNATIVE("", \
407 #include <asm/GEN-for-each-reg.h>
412 #include <asm/GEN-for-each-reg.h>
417 #include <asm/GEN-for-each-reg.h>
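Each of the three includes above is preceded by a different definition of GEN(reg); asm/GEN-for-each-reg.h expands GEN once per general-purpose register and is then re-included for the next symbol family. The shape of the pattern, with the extern shown as a plausible instance rather than a quote from the header:

	#define GEN(reg) \
		extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
	#include <asm/GEN-for-each-reg.h>
	#undef GEN

	/* redefine GEN and include again for the next family of symbols */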
423 * Emits a conditional CS prefix that is compatible with
424 * -mindirect-branch-cs-prefix.
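On the C side the prefix has to be emitted from inline assembly. A plausible reconstruction of the macro: it emits the 0x2e (CS) prefix byte only when the target register is one of r8..r15, matching what -mindirect-branch-cs-prefix makes the compiler do for its own thunk calls:

	#define __CS_PREFIX(reg)				\
		".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n"	\
		".ifc \\rs," reg "\n"				\
		".byte 0x2e\n"					\
		".endif\n"					\
		".endr\n"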
448 * For i386 we use the original ret-equivalent retpoline, because
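The ret-equivalent retpoline captures a safe return target with a call, parks speculation in a pause/lfence trap, and architecturally overwrites the return address before the final ret. An illustrative sequence for a branch through %eax, with made-up labels:

	call	.Ldo_rop
.Lspec_trap:
	pause			/* speculative path spins harmlessly here */
	lfence
	jmp	.Lspec_trap
.Ldo_rop:
	mov	%eax, (%esp)	/* replace captured return address with target */
	ret			/* "return" jumps to the original target */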
512 asm volatile(ALTERNATIVE("", "wrmsr", %c[feature]) in alternative_msr_write()
514 "a" ((u32)val), in alternative_msr_write()
572 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
575 * combination with microcode which triggers a CPU buffer flush when the
583 * Has to be the memory-operand variant because only that in mds_clear_cpu_buffers()
585 * documentation. The register-operand variant does not. in mds_clear_cpu_buffers()
586 * Works with any segment selector, but a valid writable in mds_clear_cpu_buffers()
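Assembling the visible pieces, the helper boils down to one VERW with a memory operand; a plausible reconstruction follows, where the __KERNEL_DS choice reflects the comment's advice that a valid writable data segment is the fastest variant:

	static __always_inline void mds_clear_cpu_buffers(void)
	{
		static const u16 ds = __KERNEL_DS;

		/* memory-operand form is the one documented to flush the
		 * CPU buffers; "cc" is clobbered because VERW writes ZF */
		asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
	}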
595 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability