1 /*
2 * Copyright (c) 2013-2015 Travis Geiselbrecht
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <arch/arm.h>
24 #include <assert.h>
25 #include <trace.h>
26 #include <stdbool.h>
27 #include <string.h>
28 #include <kernel/thread.h>
29
30 #define LOCAL_TRACE 0
31
32 #if ARM_WITH_VFP
/*
 * Query MVFR0 (Media and VFP Feature Register 0) to determine whether this
 * VFP implementation provides only 16 double registers (d0-d15) rather
 * than the full 32-register bank.
 */
static inline bool is_16regs(void)
{
    uint32_t feature_reg;
    __asm__ volatile("vmrs %0, MVFR0" : "=r"(feature_reg));

    /* A_SIMD register bank field (bits [3:0]): 1 == 16 x 64-bit registers */
    uint32_t bank = feature_reg & 0xf;
    return bank == 1;
}
40
/*
 * Read the FPEXC (Floating-Point Exception Control) register.
 *
 * The asm is marked volatile so the compiler cannot cache or reorder the
 * read: FPEXC is modified as a hidden side effect of write_fpexc() /
 * arm_fpu_set_enable(), which the compiler cannot see through those asm
 * statements.  The sibling accessors here already carry volatile; this
 * one was missing it.
 */
static inline uint32_t read_fpexc(void)
{
    uint32_t val;
    __asm__ volatile("vmrs %0, fpexc" : "=r" (val));
    return val;
}
47
/* Write the FPEXC (Floating-Point Exception Control) register. */
static inline void write_fpexc(uint32_t val)
{
    __asm__ volatile("vmsr fpexc, %0" :: "r" (val));
}
52
/*
 * Turn the VFP/NEON unit on or off by writing FPEXC.
 * Bit 30 (EN) is the global enable; all other bits are cleared.
 */
void arm_fpu_set_enable(bool enable)
{
    uint32_t fpexc = 0;

    if (enable)
        fpexc = (1<<30);   /* FPEXC.EN */

    write_fpexc(fpexc);
}
58
arm_fpu_undefined_instruction(struct arm_iframe * frame)59 void arm_fpu_undefined_instruction(struct arm_iframe *frame)
60 {
61 thread_t *t = get_current_thread();
62
63 if (unlikely(arch_in_int_handler())) {
64 #if WITH_SMP
65 /*
66 * arch_in_int_handler is currently not SMP safe and can give both
67 * false positive and false negative results. Retry to work around
68 * the most likely false positive result.
69 */
70 int i;
71 dprintf(CRITICAL, "floating point code while some cpu is in irq context. pc 0x%x\n", frame->pc);
72 for (i = 0; i < 1000; i++) {
73 if (!arch_in_int_handler()) {
74 dprintf(CRITICAL, "arch_in_int_handler status cleared after %d reads\n", i);
75 goto false_alarm;
76 }
77 /* spin 10us to allow time for the interrupt handler to finish */
78 spin(10);
79 }
80 #endif
81 panic("floating point code in irq context. pc 0x%x\n", frame->pc);
82 }
83 false_alarm:
84
85 LTRACEF("enabling fpu on thread %p\n", t);
86
87 t->arch.fpused = true;
88 arm_fpu_thread_swap(NULL, t);
89
90 /* make sure the irq glue leaves the floating point unit enabled on the way out */
91 frame->fpexc |= (1<<30);
92 }
93
arm_fpu_thread_initialize(struct thread * t)94 void arm_fpu_thread_initialize(struct thread *t)
95 {
96 /* zero the fpu register state */
97 memset(t->arch.fpregs, 0, sizeof(t->arch.fpregs));
98
99 t->arch.fpexc = (1<<30);
100 t->arch.fpscr = 0;
101 t->arch.fpused = false;
102 }
103
/*
 * Context-switch hook: save the VFP/NEON state of oldthread and restore
 * that of newthread.  Either argument may be NULL (the lazy-enable path in
 * arm_fpu_undefined_instruction() passes NULL for oldthread).
 *
 * State is only saved/restored for threads that have actually used the FPU
 * (arch.fpused).  For a non-user newthread the unit is left disabled so its
 * first FPU instruction traps and triggers lazy enable.
 */
void arm_fpu_thread_swap(struct thread *oldthread, struct thread *newthread)
{
    LTRACEF("old %p (%d), new %p (%d)\n",
            oldthread, oldthread ? oldthread->arch.fpused : 0,
            newthread, newthread ? newthread->arch.fpused : 0);

    if (oldthread) {
        if (oldthread->arch.fpused) {
            /* save the old state */
            uint32_t fpexc;
            fpexc = read_fpexc();

            /* capture FPEXC before we overwrite it with the enable bit below */
            oldthread->arch.fpexc = fpexc;

            /* make sure that the fpu is enabled, so the next instructions won't fault */
            arm_fpu_set_enable(true);

            __asm__ volatile("vmrs %0, fpscr" : "=r" (oldthread->arch.fpscr));
            __asm__ volatile("vstm %0, { d0-d15 }" :: "r" (&oldthread->arch.fpregs[0]));
            /* second half of the bank only exists on 32-register implementations */
            if (!is_16regs()) {
                __asm__ volatile("vstm %0, { d16-d31 }" :: "r" (&oldthread->arch.fpregs[16]));
            }

            arm_fpu_set_enable(false);
        }
    }

    if (newthread) {
        if (newthread->arch.fpused) {
            // load the new state
            arm_fpu_set_enable(true);
            __asm__ volatile("vmsr fpscr, %0" :: "r" (newthread->arch.fpscr));

            __asm__ volatile("vldm %0, { d0-d15 }" :: "r" (&newthread->arch.fpregs[0]));
            if (!is_16regs()) {
                __asm__ volatile("vldm %0, { d16-d31 }" :: "r" (&newthread->arch.fpregs[16]));
            }
            /* restore the thread's saved FPEXC last; it carries the real enable state */
            write_fpexc(newthread->arch.fpexc);
        } else {
            arm_fpu_set_enable(false);
        }
    }
}
147 #endif
148