Lines Matching "half-precision"

1 // SPDX-License-Identifier: GPL-2.0-only
37 * Dual-use variable.
38 * Used in startup: set to non-zero if VFP checks fail
67 * choice here as bottom half processing is always in thread context on RT
68 * kernels so it implicitly prevents bottom half processing as well.
88 * Must be called from non-preemptible context.
93 if (thread->vfpstate.hard.cpu != cpu) in vfp_state_in_hw()
96 return vfp_current_hw_state[cpu] == &thread->vfpstate; in vfp_state_in_hw()
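    Note: a sketch of how the two matched lines above combine in context (reconstructed, not verbatim; the CONFIG_SMP guard is assumed from the hard.cpu bookkeeping elsewhere in this file):

        static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
        {
        #ifdef CONFIG_SMP
                /* The thread may have last run, and left its state, on another CPU. */
                if (thread->vfpstate.hard.cpu != cpu)
                        return false;
        #endif
                /* True only if this CPU's VFP registers hold this thread's state. */
                return vfp_current_hw_state[cpu] == &thread->vfpstate;
        }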
102 * clear vfp_current_hw_state. Must be called from non-preemptible context.
111 thread->vfpstate.hard.cpu = NR_CPUS; in vfp_force_reload()
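    Note: a sketch of the surrounding function, assuming the usual shape: disable VFP if this thread owns the hardware state, then invalidate the ownership marker (NR_CPUS is never a valid CPU number, so it marks "not resident anywhere"). The fmrx()/fmxr() accessors are taken from the file's conventions, not from the matched lines:

        static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
        {
                if (vfp_state_in_hw(cpu, thread)) {
                        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);  /* disable VFP access */
                        vfp_current_hw_state[cpu] = NULL;
                }
        #ifdef CONFIG_SMP
                thread->vfpstate.hard.cpu = NR_CPUS;           /* state not in any hw */
        #endif
        }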
116 * Per-thread VFP initialization.
120 union vfp_state *vfp = &thread->vfpstate; in vfp_thread_flush()
139 vfp->hard.fpexc = FPEXC_EN; in vfp_thread_flush()
140 vfp->hard.fpscr = FPSCR_ROUND_NEAREST; in vfp_thread_flush()
142 vfp->hard.cpu = NR_CPUS; in vfp_thread_flush()
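    Note: the three matched assignments are the heart of the flush: the state is zeroed, then set to a clean default (VFP enabled, round-to-nearest, no owning CPU). A sketch of that initialisation, with the memset and CONFIG_SMP guard assumed from the file's conventions:

        union vfp_state *vfp = &thread->vfpstate;

        memset(vfp, 0, sizeof(union vfp_state));
        vfp->hard.fpexc = FPEXC_EN;              /* enable VFP for the thread   */
        vfp->hard.fpscr = FPSCR_ROUND_NEAREST;   /* default IEEE rounding mode  */
        #ifdef CONFIG_SMP
        vfp->hard.cpu = NR_CPUS;                 /* not resident on any CPU yet */
        #endif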
148 /* release case: Per-thread VFP cleanup. */ in vfp_thread_exit()
149 union vfp_state *vfp = &thread->vfpstate; in vfp_thread_exit()
162 thread->vfpstate = parent->vfpstate; in vfp_thread_copy()
164 thread->vfpstate.hard.cpu = NR_CPUS; in vfp_thread_copy()
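    Note: a sketch of the copy path these lines belong to: the parent's live hardware state is flushed to memory first, then duplicated, and the child's copy is marked as not resident on any CPU:

        static int vfp_thread_copy(struct thread_info *thread)
        {
                struct thread_info *parent = current_thread_info();

                /* Flush the parent's live state to memory before duplicating it. */
                vfp_sync_hwstate(parent);
                thread->vfpstate = parent->vfpstate;
        #ifdef CONFIG_SMP
                thread->vfpstate.hard.cpu = NR_CPUS;
        #endif
                return 0;
        }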
172 * - the previously running thread will not be scheduled onto another CPU.
173 * - the next thread to be run (v) will not be running on another CPU.
174 * - thread->cpu is the local CPU number
175 * - not preemptible as we're called in the middle of a thread switch
177 * - the thread (v) will be running on the local CPU, so
179 * - thread->cpu is the local CPU number at the time it is accessed,
181 * - we could be preempted if tree preempt rcu is enabled, so
182 * it is unsafe to use thread->cpu.
184 * - we could be preempted if tree preempt rcu is enabled, so
185 * it is unsafe to use thread->cpu.
200 cpu = thread->cpu; in vfp_notifier()
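    Note: given the guarantees listed above, thread->cpu can be read safely inside the THREAD_NOTIFY_SWITCH case. A sketch of the SMP branch of the notifier's switch statement (assumed shape; only line 200 is verbatim, and fpexc/cpu are locals declared earlier in the function):

        case THREAD_NOTIFY_SWITCH:
                fpexc = fmrx(FPEXC);
        #ifdef CONFIG_SMP
                cpu = thread->cpu;

                /* Save the old thread's state if it still owns the hardware. */
                if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
                        vfp_save_state(vfp_current_hw_state[cpu], fpexc);

                /* After migration the in-memory copy is the only valid one. */
                vfp_current_hw_state[cpu] = NULL;
        #endif
                /* Disable VFP so the next use traps and reloads lazily. */
                fmxr(FPEXC, fpexc & ~FPEXC_EN);
                break;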
248 current->thread.error_code = 0; in vfp_raise_sigfpe()
249 current->thread.trap_no = 6; in vfp_raise_sigfpe()
252 (void __user *)(instruction_pointer(regs) - 4), in vfp_raise_sigfpe()
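    Note: a sketch of the signal-raising path the matched lines come from: trap_no 6 is the ARM undefined-instruction trap, and the fault address points back at the trapping VFP instruction (one ARM word before the saved PC). send_sig_fault() is the standard kernel helper:

        static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
        {
                current->thread.error_code = 0;
                current->thread.trap_no = 6;

                send_sig_fault(SIGFPE, sicode,
                               (void __user *)(instruction_pointer(regs) - 4),
                               current);
        }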
343 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc); in vfp_emulate_instruction()
362 * 0 1 x - synchronous exception in VFP_bounce()
363 * 1 x 0 - asynchronous exception in VFP_bounce()
364 * 1 x 1 - synchronous on VFP subarch 1 and asynchronous on later in VFP_bounce()
365 * 0 0 1 - synchronous on VFP9 (non-standard subarch 1 in VFP_bounce()
393 regs->ARM_pc -= 4; in VFP_bounce()
415 fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT); in VFP_bounce()
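    Note: the shift on line 415 relocates the vector-length (LEN) field from its FPEXC bit position to its FPSCR bit position. Assuming the vfp.h values FPEXC_LENGTH_BIT == 8 and FPSCR_LENGTH_BIT == 16, the net shift is 8 bits. A sketch of the enclosing block:

        if (fpexc & (FPEXC_EX | FPEXC_VV)) {
                u32 len;

                len = fpexc + (1 << FPEXC_LENGTH_BIT);   /* iterations remaining */

                fpscr &= ~FPSCR_LENGTH_MASK;
                fpscr |= (len & FPEXC_LENGTH_MASK)
                             << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);  /* 16-8=8 */
        }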
488 vfp_save_state(&ti->vfpstate, fpexc); in vfp_pm_suspend()
492 } else if (vfp_current_hw_state[ti->cpu]) { in vfp_pm_suspend()
495 vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc); in vfp_pm_suspend()
501 vfp_current_hw_state[ti->cpu] = NULL; in vfp_pm_suspend()
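    Note: a sketch of the suspend path these lines come from: if VFP is enabled, the current thread's state is saved and VFP disabled; on UP, a disabled VFP may still hold another thread's last state, which must be saved too; either way the ownership pointer is dropped so resume reloads from memory:

        static int vfp_pm_suspend(void)
        {
                struct thread_info *ti = current_thread_info();
                u32 fpexc = fmrx(FPEXC);

                if (fpexc & FPEXC_EN) {
                        vfp_save_state(&ti->vfpstate, fpexc);

                        /* disable, just in case */
                        fmxr(FPEXC, fpexc & ~FPEXC_EN);
                } else if (vfp_current_hw_state[ti->cpu]) {
        #ifndef CONFIG_SMP
                        fmxr(FPEXC, fpexc | FPEXC_EN);
                        vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
                        fmxr(FPEXC, fpexc);
        #endif
                }

                /* clear any information we had about last context state */
                vfp_current_hw_state[ti->cpu] = NULL;

                return 0;
        }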
544 * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
558 vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN); in vfp_sync_hwstate()
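    Note: a sketch of the sync path (one historical shape of this function; newer kernels wrap it in their own VFP locking): if the current CPU's registers hold this thread's state, force VFP on, flush the registers to thread->vfpstate, then restore the original enable bit:

        void vfp_sync_hwstate(struct thread_info *thread)
        {
                unsigned int cpu = get_cpu();

                if (vfp_state_in_hw(cpu, thread)) {
                        u32 fpexc = fmrx(FPEXC);

                        /* Save the last VFP state on this CPU. */
                        fmxr(FPEXC, fpexc | FPEXC_EN);
                        vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
                        fmxr(FPEXC, fpexc);
                }

                put_cpu();
        }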
583 struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; in vfp_preserve_user_clear_hwstate()
585 /* Ensure that the saved hwstate is up-to-date. */ in vfp_preserve_user_clear_hwstate()
592 memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs)); in vfp_preserve_user_clear_hwstate()
597 ufp->fpscr = hwstate->fpscr; in vfp_preserve_user_clear_hwstate()
602 ufp_exc->fpexc = hwstate->fpexc; in vfp_preserve_user_clear_hwstate()
603 ufp_exc->fpinst = hwstate->fpinst; in vfp_preserve_user_clear_hwstate()
604 ufp_exc->fpinst2 = hwstate->fpinst2; in vfp_preserve_user_clear_hwstate()
613 hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK); in vfp_preserve_user_clear_hwstate()
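    Note: a sketch of how the matched lines at 583-613 fit together when building a signal frame (reconstructed; comments paraphrased). The state is synced from hardware, copied out to the user structures, VFP is disabled, and the LEN/STRIDE bits are cleared per the procedure call standard:

        int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
                                            struct user_vfp_exc *ufp_exc)
        {
                struct thread_info *thread = current_thread_info();
                struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;

                /* Ensure that the saved hwstate is up-to-date. */
                vfp_sync_hwstate(thread);

                /* Copy the register file and the status/control register. */
                memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
                ufp->fpscr = hwstate->fpscr;

                /* Copy the exception registers. */
                ufp_exc->fpexc = hwstate->fpexc;
                ufp_exc->fpinst = hwstate->fpinst;
                ufp_exc->fpinst2 = hwstate->fpinst2;

                /* Ensure that VFP is disabled. */
                vfp_flush_hwstate(thread);

                /*
                 * As per the PCS, clear the length and stride bits for function
                 * entry to the signal handler.
                 */
                hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);

                return 0;
        }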
621 struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; in vfp_restore_user_hwstate()
631 memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs)); in vfp_restore_user_hwstate()
635 hwstate->fpscr = ufp->fpscr; in vfp_restore_user_hwstate()
640 fpexc = ufp_exc->fpexc; in vfp_restore_user_hwstate()
647 hwstate->fpexc = fpexc; in vfp_restore_user_hwstate()
649 hwstate->fpinst = ufp_exc->fpinst; in vfp_restore_user_hwstate()
650 hwstate->fpinst2 = ufp_exc->fpinst2; in vfp_restore_user_hwstate()
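    Note: the mirror image on sigreturn, sketched from the matched lines at 621-650: flush the live hardware state first so it cannot clobber the restore, copy the user-supplied state in, and sanitise FPEXC so the restored thread cannot smuggle in a pending exception:

        int vfp_restore_user_hwstate(struct user_vfp *ufp,
                                     struct user_vfp_exc *ufp_exc)
        {
                struct thread_info *thread = current_thread_info();
                struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
                unsigned long fpexc;

                /* Disable VFP to avoid corrupting the new thread state. */
                vfp_flush_hwstate(thread);

                /* Copy the register file and the status/control register. */
                memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
                hwstate->fpscr = ufp->fpscr;

                /* Sanitise and restore the exception registers. */
                fpexc = ufp_exc->fpexc;
                fpexc |= FPEXC_EN;                  /* ensure VFP is enabled  */
                fpexc &= ~(FPEXC_EX | FPEXC_FP2V);  /* clear exception state  */
                hwstate->fpexc = fpexc;

                hwstate->fpinst = ufp_exc->fpinst;
                hwstate->fpinst2 = ufp_exc->fpinst2;

                return 0;
        }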
660 * a thread's VFP state. When a CPU starts up, we re-enable access to the
699 * vfp_support_entry - Handle VFP exception
712 return -ENODEV; in vfp_support_entry()
742 if (!vfp_state_in_hw(ti->cpu, ti)) { in vfp_support_entry()
744 vfp_current_hw_state[ti->cpu] != NULL) { in vfp_support_entry()
750 vfp_save_state(vfp_current_hw_state[ti->cpu], in vfp_support_entry()
758 fpexc = vfp_load_state(&ti->vfpstate); in vfp_support_entry()
759 vfp_current_hw_state[ti->cpu] = &ti->vfpstate; in vfp_support_entry()
765 ti->vfpstate.hard.cpu = ti->cpu; in vfp_support_entry()
798 return -ENOEXEC; in vfp_support_entry()
803 bounce: regs->ARM_pc += 4; in vfp_support_entry()
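    Note: the cluster of matched lines at 742-765 is the lazy context switch inside vfp_support_entry(). A sketch of that block (reconstructed; comments paraphrased):

        if (!vfp_state_in_hw(ti->cpu, ti)) {
                if (!IS_ENABLED(CONFIG_SMP) &&
                    vfp_current_hw_state[ti->cpu] != NULL) {
                        /*
                         * On UP the hardware may still hold another thread's
                         * state: save it before overwriting.
                         */
                        vfp_save_state(vfp_current_hw_state[ti->cpu],
                                       fpexc);
                }

                /* Load this thread's state and record the new owner. */
                fpexc = vfp_load_state(&ti->vfpstate);
                vfp_current_hw_state[ti->cpu] = &ti->vfpstate;
        #ifdef CONFIG_SMP
                /* Remember which CPU holds the state, to detect staleness. */
                ti->vfpstate.hard.cpu = ti->cpu;
        #endif
        }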
864 * Kernel-side NEON support functions
890 vfp_save_state(&thread->vfpstate, fpexc); in kernel_neon_begin()
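    Note: a sketch of why kernel_neon_begin() performs the save at line 890: the user-visible register contents must reach memory before the kernel clobbers the NEON/VFP registers, and under UP the hardware owner may be a task other than current (one historical shape; newer kernels use a dedicated VFP lock here):

        void kernel_neon_begin(void)
        {
                struct thread_info *thread = current_thread_info();
                unsigned int cpu;
                u32 fpexc;

                cpu = get_cpu();

                /* Enable VFP so the registers can be saved. */
                fpexc = fmrx(FPEXC) | FPEXC_EN;
                fmxr(FPEXC, fpexc);

                /* Save the userland NEON/VFP state. */
                if (vfp_state_in_hw(cpu, thread))
                        vfp_save_state(&thread->vfpstate, fpexc);
        #ifndef CONFIG_SMP
                else if (vfp_current_hw_state[cpu] != NULL)
                        vfp_save_state(vfp_current_hw_state[cpu], fpexc);
        #endif
                /* The hardware contents are now kernel scratch, owned by no one. */
                vfp_current_hw_state[cpu] = NULL;
        }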
912 regs->ARM_pc += 4; in vfp_detect()
962 * precision floating point operations. Only check in vfp_init()
983 /* also v4-D16 */ in vfp_init()
1006 * half-precision multiplication instructions. in vfp_init()
1023 /* Extract the architecture version on pre-cpuid scheme */ in vfp_init()
1026 pr_cont("no double precision support\n"); in vfp_init()