Lines Matching: "sp", "disabled", "ports"

1 // SPDX-License-Identifier: GPL-2.0
21 #include <linux/user-return-notifier.h>
27 #include <linux/elf-randomize.h>
31 #include <linux/entry-common.h>
48 #include <asm/spec-ctrl.h>
60 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
61 * no more per-task TSS's. The TSS size is kept cacheline-aligned
63 * section. Since TSS's are completely CPU-local, we want them
64 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
74 .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
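The INIT_THREAD initializer above deliberately poisons .sp0: on a 64-bit build the expression evaluates to 0x8000000000000001, a non-canonical, misaligned address, so any premature use of the boot task's sp0 faults loudly instead of silently working. A minimal userspace sketch (plain C, not kernel code; sizeof stands in for BITS_PER_LONG) that computes the same constant:

    #include <stdio.h>

    int main(void)
    {
        /* mirrors (1UL << (BITS_PER_LONG - 1)) + 1 from INIT_THREAD */
        unsigned long sp0 = (1UL << (sizeof(unsigned long) * 8 - 1)) + 1;

        printf("init sp0 poison: %#lx\n", sp0);  /* 0x8000000000000001 */
        return 0;
    }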
103 dst->thread.vm86 = NULL; in arch_dup_task_struct()
106 dst->thread.fpu.fpstate = NULL; in arch_dup_task_struct()
115 fpstate_free(&tsk->thread.fpu); in arch_release_task_struct()
124 struct thread_struct *t = &tsk->thread; in exit_thread()
125 struct fpu *fpu = &t->fpu; in exit_thread()
141 return do_set_thread_area(p, -1, utls, 0); in set_new_tls()
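set_new_tls() above forwards clone()'s TLS pointer to do_set_thread_area() with index -1, which means "pick any free GDT TLS slot". The same contract is visible from userspace through the x86-only set_thread_area(2) syscall; a hedged sketch (the descriptor field values here are illustrative only):

    #include <asm/ldt.h>          /* struct user_desc */
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct user_desc desc;

        memset(&desc, 0, sizeof(desc));
        desc.entry_number = -1;   /* -1: kernel chooses a free slot */
        desc.limit = 0xfffff;
        desc.seg_32bit = 1;
        desc.limit_in_pages = 1;
        desc.useable = 1;

        if (syscall(SYS_set_thread_area, &desc) == 0)
            printf("kernel picked TLS slot %u\n", desc.entry_number);
        else
            perror("set_thread_area");
        return 0;
    }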
159 regs->ax = 0; in ret_from_fork()
167 unsigned long clone_flags = args->flags; in copy_thread()
168 unsigned long sp = args->stack; in copy_thread() local
169 unsigned long tls = args->tls; in copy_thread()
178 frame = &fork_frame->frame; in copy_thread()
180 frame->bp = encode_frame_pointer(childregs); in copy_thread()
181 frame->ret_addr = (unsigned long) ret_from_fork_asm; in copy_thread()
182 p->thread.sp = (unsigned long) fork_frame; in copy_thread()
183 p->thread.io_bitmap = NULL; in copy_thread()
184 p->thread.iopl_warn = 0; in copy_thread()
185 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); in copy_thread()
189 p->thread.fsindex = current->thread.fsindex; in copy_thread()
190 p->thread.fsbase = current->thread.fsbase; in copy_thread()
191 p->thread.gsindex = current->thread.gsindex; in copy_thread()
192 p->thread.gsbase = current->thread.gsbase; in copy_thread()
194 savesegment(es, p->thread.es); in copy_thread()
195 savesegment(ds, p->thread.ds); in copy_thread()
197 if (p->mm && (clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM) in copy_thread()
198 set_bit(MM_CONTEXT_LOCK_LAM, &p->mm->context.flags); in copy_thread()
200 p->thread.sp0 = (unsigned long) (childregs + 1); in copy_thread()
201 savesegment(gs, p->thread.gs); in copy_thread()
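savesegment() above is a one-instruction wrapper: it moves a segment selector register into memory so copy_thread() can snapshot the parent's es/ds (and gs on 32-bit) for the child. A userspace sketch of the same instruction (x86 only; read_seg_es is a hypothetical helper name, not a kernel symbol):

    #include <stdio.h>

    static unsigned short read_seg_es(void)
    {
        unsigned short sel;

        /* what savesegment(es, ...) boils down to: mov %es, <dst> */
        __asm__ volatile("movw %%es, %0" : "=rm"(sel));
        return sel;
    }

    int main(void)
    {
        printf("current es selector: %#hx\n", read_seg_es());
        return 0;
    }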
208 frame->flags = X86_EFLAGS_FIXED; in copy_thread()
213 * is disabled, new_ssp will remain 0, and fpu_clone() will know not to in copy_thread()
216 new_ssp = shstk_alloc_thread_stack(p, clone_flags, args->stack_size); in copy_thread()
220 fpu_clone(p, clone_flags, args->fn, new_ssp); in copy_thread()
223 if (unlikely(p->flags & PF_KTHREAD)) { in copy_thread()
224 p->thread.pkru = pkru_get_init_value(); in copy_thread()
226 kthread_frame_init(frame, args->fn, args->fn_arg); in copy_thread()
231 * Clone current's PKRU value from hardware. tsk->thread.pkru in copy_thread()
234 p->thread.pkru = read_pkru(); in copy_thread()
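The PKRU handling above distinguishes kernel threads, which get the init value, from user threads, which inherit the value currently in hardware because current's in-memory copy is only valid when scheduled out. From userspace, per-thread PKRU state is driven through protection keys; a small sketch using the pkey_alloc(2)/pkey_free(2) wrappers (glibc 2.27+ and PKU-capable hardware assumed):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* each allocated key occupies two bits in this thread's PKRU */
        int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);

        if (pkey < 0) {
            perror("pkey_alloc");   /* no PKU on this CPU/kernel */
            return 1;
        }
        printf("pkey %d allocated, writes disabled via PKRU\n", pkey);
        pkey_free(pkey);
        return 0;
    }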
236 frame->bx = 0; in copy_thread()
238 childregs->ax = 0; in copy_thread()
239 if (sp) in copy_thread()
240 childregs->sp = sp; in copy_thread()
242 if (unlikely(args->fn)) { in copy_thread()
253 childregs->sp = 0; in copy_thread()
254 childregs->ip = 0; in copy_thread()
255 kthread_frame_init(frame, args->fn, args->fn_arg); in copy_thread()
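When args->fn is set, copy_thread() does not build real user registers: it zeroes childregs->sp/ip and instead plants args->fn and args->fn_arg in the switch frame, so ret_from_fork() calls the function rather than returning to user mode. A loose userspace analogy of that hand-off (struct fork_frame_demo, trampoline, and worker are illustrative names, not kernel identifiers):

    #include <stdio.h>

    struct fork_frame_demo {
        int (*fn)(void *);    /* what kthread_frame_init() plants */
        void *fn_arg;
    };

    /* plays the role of ret_from_fork(): run fn if one was planted */
    static void trampoline(struct fork_frame_demo *frame)
    {
        if (frame->fn)
            frame->fn(frame->fn_arg);
    }

    static int worker(void *arg)
    {
        printf("thread entry with arg: %s\n", (const char *)arg);
        return 0;
    }

    int main(void)
    {
        struct fork_frame_demo frame = { .fn = worker, .fn_arg = "hello" };

        trampoline(&frame);
        return 0;
    }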
283 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); in flush_thread()
332 return -EINVAL; in set_tsc_mode()
384 return -ENODEV; in set_cpuid_mode()
399 /* If cpuid was previously disabled for this task, re-enable it. */ in arch_setup_new_exec()
415 mm_reset_untag_mask(current->mm); in arch_setup_new_exec()
436 * covers the permitted I/O ports. in tss_copy_io_bitmap()
442 memcpy(tss->io_bitmap.bitmap, iobm->bitmap, in tss_copy_io_bitmap()
443 max(tss->io_bitmap.prev_max, iobm->max)); in tss_copy_io_bitmap()
449 tss->io_bitmap.prev_max = iobm->max; in tss_copy_io_bitmap()
450 tss->io_bitmap.prev_sequence = iobm->sequence; in tss_copy_io_bitmap()
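tss_copy_io_bitmap() avoids copying up to 8 KiB on every exit to user mode by caching a sequence number: the copy runs only when the source bitmap's sequence differs from the one recorded at the last copy, and its length is the max of the old and new watermarks so bits set beyond a shrunken max get cleared too. A self-contained sketch of that invalidation pattern (all _demo types are illustrative):

    #include <stdint.h>
    #include <string.h>

    #define IOBM_BYTES 1024

    struct io_bitmap_demo {
        uint64_t sequence;          /* bumped on every bitmap change */
        unsigned int max;           /* watermark: bytes actually used */
        unsigned char bitmap[IOBM_BYTES];
    };

    struct tss_cache_demo {
        uint64_t prev_sequence;     /* sequence seen at the last copy */
        unsigned int prev_max;
        unsigned char bitmap[IOBM_BYTES];
    };

    static void update_cache(struct tss_cache_demo *tss,
                             const struct io_bitmap_demo *iobm)
    {
        unsigned int len;

        if (tss->prev_sequence == iobm->sequence)
            return;                 /* cache still valid, skip the copy */

        /* cover max(old, new) so stale bits past a smaller max go away */
        len = tss->prev_max > iobm->max ? tss->prev_max : iobm->max;
        memcpy(tss->bitmap, iobm->bitmap, len);
        tss->prev_max = iobm->max;
        tss->prev_sequence = iobm->sequence;
    }

    int main(void)
    {
        static struct io_bitmap_demo iobm = { .sequence = 1, .max = 128 };
        static struct tss_cache_demo tss;

        update_cache(&tss, &iobm);  /* 0 != 1: copies 128 bytes */
        update_cache(&tss, &iobm);  /* sequence matches: no copy */
        return 0;
    }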
454 * native_tss_update_io_bitmap - Update I/O bitmap before exiting to user mode
459 struct thread_struct *t = &current->thread; in native_tss_update_io_bitmap()
460 u16 *base = &tss->x86_tss.io_bitmap_base; in native_tss_update_io_bitmap()
467 if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) { in native_tss_update_io_bitmap()
470 struct io_bitmap *iobm = t->io_bitmap; in native_tss_update_io_bitmap()
476 if (tss->io_bitmap.prev_sequence != iobm->sequence) in native_tss_update_io_bitmap()
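native_tss_update_io_bitmap() is the consumer of that cache: just before returning to user mode it either exposes an all-access bitmap for emulated iopl(3) or refreshes the TSS copy when the task's bitmap sequence has changed. The task-side bitmaps come from the ioperm(2) syscall; a short userspace sketch (x86 only, typically needs CAP_SYS_RAWIO):

    #include <stdio.h>
    #include <sys/io.h>

    int main(void)
    {
        /* ask the kernel to set one bit in this task's I/O bitmap */
        if (ioperm(0x80, 1, 1) != 0) {
            perror("ioperm");       /* usually means: not privileged */
            return 1;
        }
        outb(0x42, 0x80);           /* port 0x80: harmless POST port */
        ioperm(0x80, 1, 0);         /* drop the permission again */
        return 0;
    }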
514 st->local_state = 0; in speculative_store_bypass_ht_init()
520 if (st->shared_state) in speculative_store_bypass_ht_init()
523 raw_spin_lock_init(&st->lock); in speculative_store_bypass_ht_init()
537 st->shared_state = per_cpu(ssb_state, cpu).shared_state; in speculative_store_bypass_ht_init()
547 st->shared_state = st; in speculative_store_bypass_ht_init()
573 if (__test_and_set_bit(LSTATE_SSB, &st->local_state)) in amd_set_core_ssb_state()
578 raw_spin_lock(&st->shared_state->lock); in amd_set_core_ssb_state()
580 if (!st->shared_state->disable_state) in amd_set_core_ssb_state()
582 st->shared_state->disable_state++; in amd_set_core_ssb_state()
583 raw_spin_unlock(&st->shared_state->lock); in amd_set_core_ssb_state()
585 if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state)) in amd_set_core_ssb_state()
588 raw_spin_lock(&st->shared_state->lock); in amd_set_core_ssb_state()
589 st->shared_state->disable_state--; in amd_set_core_ssb_state()
590 if (!st->shared_state->disable_state) in amd_set_core_ssb_state()
592 raw_spin_unlock(&st->shared_state->lock); in amd_set_core_ssb_state()
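The locking above implements a per-core refcount: on these AMD parts the SSBD control MSR is shared by all SMT siblings of a core, so the first thread that needs speculative store bypass disabled writes the MSR while later ones only increment disable_state, and the MSR is restored when the count returns to zero. A compact sketch of the pattern, with a pthread mutex in place of the raw spinlock (wrmsrl_demo and the _demo types are stand-ins):

    #include <pthread.h>

    struct ssb_state_demo {
        pthread_mutex_t lock;
        int disable_count;          /* plays shared_state->disable_state */
    };

    static struct ssb_state_demo core_state = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
    };

    static void wrmsrl_demo(int ssbd_on) { (void)ssbd_on; /* MSR write */ }

    static void ssb_disable(struct ssb_state_demo *st)
    {
        pthread_mutex_lock(&st->lock);
        if (st->disable_count == 0)
            wrmsrl_demo(1);         /* first user flips the shared MSR */
        st->disable_count++;
        pthread_mutex_unlock(&st->lock);
    }

    static void ssb_enable(struct ssb_state_demo *st)
    {
        pthread_mutex_lock(&st->lock);
        if (--st->disable_count == 0)
            wrmsrl_demo(0);         /* last user re-enables SSB */
        pthread_mutex_unlock(&st->lock);
    }

    int main(void)
    {
        ssb_disable(&core_state);   /* sibling A: writes the "MSR" */
        ssb_disable(&core_state);   /* sibling B: bumps the count only */
        ssb_enable(&core_state);
        ssb_enable(&core_state);    /* count hits 0: "MSR" restored */
        return 0;
    }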
824 * to SME active (or vice-versa). The cache must be cleared so that in stop_this_cpu()
833 if (c->extended_cpuid_level >= 0x8000001f && (cpuid_eax(0x8000001f) & BIT(0))) in stop_this_cpu()
912 clflush((void *)&current_thread_info()->flags); in mwait_idle()
916 __monitor((void *)&current_thread_info()->flags, 0, 0); in mwait_idle()
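mwait_idle() first clflushes the monitored cache line on CPUs with the MONITOR erratum, then arms MONITOR on the thread's flags word and re-checks for pending work before MWAIT, closing the race where a reschedule request arrives between the check and the sleep. Userspace cannot execute MWAIT, so here is a loose analogy in which a futex plays MONITOR/MWAIT's role (flags_demo and need_resched_demo are illustrative):

    #include <linux/futex.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static uint32_t flags_demo;     /* stands in for thread_info->flags */

    static int need_resched_demo(void)
    {
        return __atomic_load_n(&flags_demo, __ATOMIC_ACQUIRE) != 0;
    }

    static void mwait_idle_demo(void)
    {
        /* "__monitor(&flags, 0, 0)": sample the word we will wait on */
        uint32_t armed = __atomic_load_n(&flags_demo, __ATOMIC_ACQUIRE);

        /* the re-check before "__mwait()": work may just have arrived */
        if (need_resched_demo())
            return;

        /* "__mwait()": sleep until someone stores to the watched word */
        syscall(SYS_futex, &flags_demo, FUTEX_WAIT, armed, NULL, NULL, 0);
    }

    int main(void)
    {
        __atomic_store_n(&flags_demo, 1, __ATOMIC_RELEASE); /* pending */
        mwait_idle_demo();          /* returns at once: re-check fires */
        return 0;
    }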
987 return -EINVAL; in idle_setup()
994 /* 'idle=halt' HALT for idle. C-states are disabled. */ in idle_setup()
1000 return -EINVAL; in idle_setup()
1007 unsigned long arch_align_stack(unsigned long sp) in arch_align_stack() argument
1009 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) in arch_align_stack()
1010 sp -= get_random_u32_below(8192); in arch_align_stack()
1011 return sp & ~0xf; in arch_align_stack()
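arch_align_stack() adds up to 8 KiB of downward jitter to the initial user stack pointer, unless the task opted out via ADDR_NO_RANDOMIZE, then masks the result back to the ABI's 16-byte stack alignment. The arithmetic in isolation (random_below stands in for the kernel's get_random_u32_below(); rand() is demo-grade only):

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long random_below(unsigned long n)
    {
        return (unsigned long)rand() % n;   /* not crypto-grade */
    }

    static unsigned long align_stack_demo(unsigned long sp)
    {
        sp -= random_below(8192);   /* up to 8 KiB of jitter */
        return sp & ~0xfUL;         /* restore 16-byte alignment */
    }

    int main(void)
    {
        printf("randomized sp: %#lx\n",
               align_stack_demo(0x7ffffffde000UL));
        return 0;
    }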
1017 return randomize_page(mm->brk, SZ_32M); in arch_randomize_brk()
1019 return randomize_page(mm->brk, SZ_1G); in arch_randomize_brk()
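arch_randomize_brk() picks the heap start within 32 MiB above mm->brk for 32-bit tasks and within 1 GiB for 64-bit ones. A page-granular sketch of randomizing an address within such a range (randomize_page_demo is illustrative, not the kernel's randomize_page()):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE_DEMO 4096UL

    static unsigned long randomize_page_demo(unsigned long start,
                                             unsigned long range)
    {
        unsigned long pages = range / PAGE_SIZE_DEMO;

        /* pick a page-aligned offset somewhere inside [start, start+range) */
        return start + ((unsigned long)rand() % pages) * PAGE_SIZE_DEMO;
    }

    int main(void)
    {
        printf("randomized brk: %#lx\n",
               randomize_page_demo(0x555556000000UL, 1UL << 30 /* SZ_1G */));
        return 0;
    }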
1066 return -EINVAL; in do_arch_prctl_common()