/*
 * Copyright (c) 2019 LK Trusty Authors. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <arch/arch_ops.h>
#include <arch/x86.h>
#include <arch/usercopy.h>
#include <assert.h>
#include <string.h>
#include <kernel/vm.h>
#include <kernel/thread.h>
#include <err.h>

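/*
 * Check the MMU flags of the page mapped at user_addr. The mapping must be
 * user-accessible; for a copy to user memory it must also not be read-only.
 */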
static bool is_permission_legal(vmm_aspace_t *vmm_aspace, vaddr_t user_addr,
                                bool copy_to) {
    arch_aspace_t aspace;
    status_t ret = NO_ERROR;
    uint flags = 0;

    aspace = vmm_aspace->arch_aspace;

    ret = arch_mmu_query(&aspace, user_addr, NULL, &flags);
    if (NO_ERROR != ret) {
        return false;
    }

    /*
     * ARCH_MMU_FLAG_PERM_USER must always be set.
     * For a copy from user memory, no further check is needed.
     * For a copy to user memory, ARCH_MMU_FLAG_PERM_RO must not be set.
     */
    if (!(flags & ARCH_MMU_FLAG_PERM_USER)) {
        return false;
    }
    if (copy_to && (flags & ARCH_MMU_FLAG_PERM_RO)) {
        return false;
    }
    return true;
}

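/*
 * Walk the user regions covering [addr, addr + len) and verify that every
 * page has the permissions required for the requested copy direction.
 * Returns NO_ERROR if the whole range may be accessed, ERR_FAULT otherwise.
 */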
static status_t is_valid_to_copy(vmm_aspace_t *aspace, vaddr_t addr,
                                 bool copy_to, size_t len) {
    vmm_region_t* region;
    vaddr_t src = addr;
    size_t rest = len;
    size_t check_len = len;
    status_t ret = NO_ERROR;

    while (rest) {
        region = vmm_find_region(aspace, src);

        if (region && is_permission_legal(aspace, src, copy_to)) {
            check_len = MIN(rest, (region->obj_slice.size - (src - region->base)));
        } else {
            ret = ERR_FAULT;
            break;
        }

        rest = rest - check_len;
        src += check_len;
    }

    return ret;
}

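/*
 * Copy len bytes from user address usrc into the kernel buffer kdest.
 * On failure the kernel buffer is zeroed and ERR_FAULT is returned.
 */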
status_t arch_copy_from_user(void *kdest, user_addr_t usrc, size_t len)
{
    status_t ret = NO_ERROR;
    vmm_aspace_t *aspace = get_current_thread()->aspace;

    vmm_lock_aspace(aspace);

    /*
     * The address space needs to be locked so that it does not change
     * from the start of is_valid_to_copy to the end of memcpy.
     */
    ret = is_valid_to_copy(aspace, usrc, false, len);

    if (NO_ERROR == ret) {
        /*
         * Add a memory fence to guard against speculative execution.
         * Ensure nothing is copied unless the permission check has
         * passed, since out-of-order execution might lead to data
         * leakage.
         */
        smp_mb();
        x86_allow_explicit_smap();
        memcpy(kdest, (void *)usrc, len);
        x86_disallow_explicit_smap();
    } else {
        memset(kdest, 0, len);
    }

    vmm_unlock_aspace(aspace);

    return ret;
}

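/*
 * Copy len bytes from the kernel buffer ksrc to user address udest.
 * Returns ERR_FAULT if the destination is not writable user memory.
 */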
status_t arch_copy_to_user(user_addr_t udest, const void *ksrc, size_t len)
{
    status_t ret = NO_ERROR;
    vmm_aspace_t *aspace = get_current_thread()->aspace;

    vmm_lock_aspace(aspace);

    /*
     * The address space needs to be locked so that it does not change
     * from the start of is_valid_to_copy to the end of memcpy.
     */
    ret = is_valid_to_copy(aspace, udest, true, len);

    if (NO_ERROR == ret) {
        /*
         * Add a memory fence to guard against speculative execution.
         * Ensure nothing is copied unless the permission check has
         * passed, since out-of-order execution might lead to data
         * leakage.
         */
        smp_mb();
        x86_allow_explicit_smap();
        memcpy((void *)udest, ksrc, len);
        x86_disallow_explicit_smap();
    }

    vmm_unlock_aspace(aspace);

    return ret;
}

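/*
 * Scan a user string for its '\0' terminator, one mapped region at a time.
 * Returns the string length on success, or ERR_FAULT if the scan leaves
 * readable user memory before a terminator is found.
 */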
static ssize_t x86_user_strlen(vmm_aspace_t *aspace, const char *src_in) {
    size_t scan_len = 0;
    vmm_region_t *region;
    bool continue_scan = false;
    const char *src = src_in;

    do {
        region = vmm_find_region(aspace, (vaddr_t)src);
        if (!region) {
            return ERR_FAULT;
        }

        if (!is_permission_legal(aspace, (vaddr_t)src, false)) {
            return ERR_FAULT;
        }

        scan_len = region->obj_slice.size - ((vaddr_t)src - region->base);
        DEBUG_ASSERT(scan_len > 0);

        x86_allow_explicit_smap();
        while (scan_len && (*src++ != '\0')) {
            scan_len--;
        }

        continue_scan = *(src - 1) != '\0';

        x86_disallow_explicit_smap();
    } while (continue_scan);

    return src - src_in - 1;
}

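/*
 * Copy a '\0'-terminated string from user address usrc into kdst, writing at
 * most len bytes including the terminator. Returns the length of the user
 * string (which may exceed len - 1), or ERR_FAULT on failure.
 */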
ssize_t arch_strlcpy_from_user(char *kdst, user_addr_t usrc, size_t len)
{
    size_t copy_len = len;
    ssize_t user_str_len = 0;
    char* src = (char *)usrc;
    char* dest = kdst;
    vmm_aspace_t *aspace = get_current_thread()->aspace;

    vmm_lock_aspace(aspace);

    /*
     * The address space needs to be locked so that it does not change
     * from the start of x86_user_strlen to the end of the string copy.
     *
     * Check whether the user string is legal to copy. If it is,
     * x86_user_strlen returns the length of the user string; otherwise
     * it returns ERR_FAULT.
     */
    user_str_len = x86_user_strlen(aspace, src);

    if ((len == 0) || (user_str_len < 0)) {
        memset(kdst, 0, len);
        goto err;
    }

    /*
     * Calculate the number of non-null characters to copy; a null
     * terminator is always placed at the end of the destination string.
     */
    copy_len = MIN((size_t)user_str_len, len - 1);

    /*
     * Add a memory fence to guard against speculative execution.
     * Ensure nothing is copied unless the permission check has passed,
     * since out-of-order execution might lead to data leakage.
     */
    smp_mb();

    x86_allow_explicit_smap();
    while (copy_len) {
        if ((*dest++ = *src++) == '\0') {
            break;
        }
        copy_len--;
    }
    x86_disallow_explicit_smap();

    /* Ensure the kernel buffer is always 0-terminated. */
    *dest = '\0';

    /*
     * If the pages readable from user space contain a 0-terminated string,
     * strlcpy_from_user returns the length of that string. If the contents
     * of the user pages changed so that a shorter string was copied, return
     * the length actually copied instead.
     */
    if (copy_len) {
        user_str_len = (dest - kdst - 1);
    }

err:
    vmm_unlock_aspace(aspace);

    return user_str_len;
}