/*
 * Copyright (c) 2009 Corey Tabaka
 * Copyright (c) 2015-2018 Intel Corporation
 * Copyright (c) 2016 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <debug.h>
#include <trace.h>
#include <sys/types.h>
#include <compiler.h>
#include <arch.h>
#include <arch/x86.h>
#include <arch/x86/mmu.h>
#include <stdlib.h>
#include <string.h>
#include <arch/mmu.h>
#include <assert.h>
#include <err.h>
#include <arch/arch_ops.h>
#include <kernel/vm.h>

#define LOCAL_TRACE 0

/* top level kernel page tables, initialized in start.S */
#ifdef PAE_MODE_ENABLED
map_addr_t pdp[NO_OF_PT_ENTRIES] __ALIGNED(PAGE_SIZE);
map_addr_t pdpt[NO_OF_PT_ENTRIES] __ALIGNED(PAGE_SIZE);
#endif
map_addr_t pd[NO_OF_PT_ENTRIES] __ALIGNED(PAGE_SIZE);

#ifdef PAE_MODE_ENABLED
/* The PDP table address is 32 bits wide in PAE mode, but the PDP entries are 64 bits wide */
static inline map_addr_t get_pdp_entry_from_pdp_table(vaddr_t vaddr, map_addr_t pdpt)
{
    uint32_t pdp_index;
    map_addr_t *pdp_table;

    pdp_index = ((vaddr >> PDP_SHIFT) & ((1ul << PDPT_ADDR_OFFSET) - 1));
    pdp_table = (map_addr_t *)(pdpt & X86_PDPT_ADDR_MASK);
    return X86_PHYS_TO_VIRT(pdp_table[pdp_index]);
}

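/* Extract the 2MB page frame from a large-page directory entry, translated via X86_PHYS_TO_VIRT */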
static inline map_addr_t get_pfn_from_pt(map_addr_t pt)
{
    map_addr_t pfn;

    pfn = (pt & X86_2MB_PAGE_FRAME);
    return X86_PHYS_TO_VIRT(pfn);
}

#else
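/* Extract the 4MB page frame from a large-page directory entry, translated via X86_PHYS_TO_VIRT */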
static inline map_addr_t get_pfn_from_pde(map_addr_t pde)
{
    map_addr_t pfn;

    pfn = (pde & X86_4MB_PAGE_FRAME);
    return X86_PHYS_TO_VIRT(pfn);
}
#endif

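/* Return the page directory entry for vaddr, translated via X86_PHYS_TO_VIRT */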
static inline map_addr_t get_pd_entry_from_pd_table(vaddr_t vaddr, map_addr_t pdt)
{
    uint32_t pd_index;
    map_addr_t *pd_table;

    pd_index = ((vaddr >> PD_SHIFT) & ((1 << ADDR_OFFSET) - 1));
    pd_table = (map_addr_t *)(pdt & X86_PG_FRAME);
    return X86_PHYS_TO_VIRT(pd_table[pd_index]);
}

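/* Return the page table entry for vaddr, translated via X86_PHYS_TO_VIRT */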
static inline map_addr_t get_pt_entry_from_page_table(vaddr_t vaddr, map_addr_t pt)
{
    uint32_t pt_index;
    map_addr_t *pt_table;

    pt_index = ((vaddr >> PT_SHIFT) & ((1 << ADDR_OFFSET) - 1));
    pt_table = (map_addr_t *)(pt & X86_PG_FRAME);
    return X86_PHYS_TO_VIRT(pt_table[pt_index]);
}

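/* Extract the 4KB page frame from a page table entry, translated via X86_PHYS_TO_VIRT */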
static inline map_addr_t get_pfn_from_pte(map_addr_t pte)
{
    map_addr_t pfn;

    pfn = (pte & X86_PG_FRAME);
    return X86_PHYS_TO_VIRT(pfn);
}

/**
 * @brief Return the x86 arch flags corresponding to the generic mmu flags
 */
arch_flags_t get_x86_arch_flags(arch_flags_t flags)
{
    arch_flags_t arch_flags = 0;

    if (!(flags & ARCH_MMU_FLAG_PERM_RO))
        arch_flags |= X86_MMU_PG_RW;

    if (flags & ARCH_MMU_FLAG_PERM_USER)
        arch_flags |= X86_MMU_PG_U;

    if (flags & ARCH_MMU_FLAG_UNCACHED)
        arch_flags |= X86_MMU_CACHE_DISABLE;

#ifdef PAE_MODE_ENABLED
    if (flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE)
        arch_flags |= X86_MMU_PG_NX;
#endif
    return arch_flags;
}

/**
 * @brief Return the generic mmu flags corresponding to the x86 arch flags
 */
uint get_arch_mmu_flags(arch_flags_t flags)
{
    arch_flags_t mmu_flags = 0;

    if (!(flags & X86_MMU_PG_RW))
        mmu_flags |= ARCH_MMU_FLAG_PERM_RO;

    if (flags & X86_MMU_PG_U)
        mmu_flags |= ARCH_MMU_FLAG_PERM_USER;

    if (flags & X86_MMU_CACHE_DISABLE)
        mmu_flags |= ARCH_MMU_FLAG_UNCACHED;

#ifdef PAE_MODE_ENABLED
    if (flags & X86_MMU_PG_NX)
        mmu_flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
#endif
    return (uint)mmu_flags;
}

/**
 * @brief Walk the page table structures - supported for both PAE & non-PAE modes
 *
 */
status_t x86_mmu_get_mapping(map_addr_t init_table, vaddr_t vaddr, uint32_t *ret_level,
                             arch_flags_t *mmu_flags, map_addr_t *last_valid_entry)
{
    map_addr_t pt, pte, pdt;
#ifdef PAE_MODE_ENABLED
    map_addr_t pdpt;
#endif

    DEBUG_ASSERT(init_table);
    if ((!ret_level) || (!last_valid_entry) || (!mmu_flags)) {
        return ERR_INVALID_ARGS;
    }

    *mmu_flags = 0;

#ifdef PAE_MODE_ENABLED
    pdpt = init_table; /* First level table in PAE mode is pdpt */
    *ret_level = PDP_L;
    *last_valid_entry = pdpt;

    pdt = get_pdp_entry_from_pdp_table(vaddr, pdpt);
    if ((pdt & X86_MMU_PG_P) == 0) {
        *ret_level = PDP_L;
        *last_valid_entry = pdpt;
        return ERR_NOT_FOUND;
    }

    pt = get_pd_entry_from_pd_table(vaddr, pdt);
    if ((pt & X86_MMU_PG_P) == 0) {
        *ret_level = PD_L;
        *last_valid_entry = pdt;
        return ERR_NOT_FOUND;
    }
#else
    pdt = init_table; /* First table in non PAE mode is pdt */
    *ret_level = PD_L;
    *last_valid_entry = pdt;

    pt = get_pd_entry_from_pd_table(vaddr, pdt);
    if ((pt & X86_MMU_PG_P) == 0)
        return ERR_NOT_FOUND;
#endif

    /* 4 MB pages (non PAE mode) and 2 MB pages (PAE mode) */
    /* In this case, the page directory entry is NOT actually a PT (page table) */
    if (pt & X86_MMU_PG_PS) {
#ifdef PAE_MODE_ENABLED
        /* Get the page frame and add the 2MB page offset from the vaddr */
        *last_valid_entry = get_pfn_from_pt(pt) + (vaddr & PAGE_OFFSET_MASK_2MB);
#else
        /* Get the page frame and add the 4MB page offset from the vaddr */
        *last_valid_entry = get_pfn_from_pde(pt) + (vaddr & PAGE_OFFSET_MASK_4MB);
#endif
        *mmu_flags = get_arch_mmu_flags((X86_PHYS_TO_VIRT(pt)) & X86_FLAGS_MASK);
        goto last;
    }

    /* 4 KB pages */
    pte = get_pt_entry_from_page_table(vaddr, pt);
    if ((pte & X86_MMU_PG_P) == 0) {
        *ret_level = PT_L;
        *last_valid_entry = pt;
        return ERR_NOT_FOUND;
    }

    /* Get the page frame and add the 4KB page offset from the vaddr */
    *last_valid_entry = get_pfn_from_pte(pte) + (vaddr & PAGE_OFFSET_MASK_4KB);
    *mmu_flags = get_arch_mmu_flags((X86_PHYS_TO_VIRT(pte)) & X86_FLAGS_MASK);
last:
    *ret_level = PF_L;
    return NO_ERROR;
}

/**
 * Walk the page table structures to see if the mapping between a virtual address
 * and a physical address exists. Also, check the flags.
 *
 */
status_t x86_mmu_check_mapping(map_addr_t init_table, map_addr_t paddr,
                               vaddr_t vaddr, arch_flags_t in_flags,
                               uint32_t *ret_level, arch_flags_t *ret_flags,
                               map_addr_t *last_valid_entry)
{
    status_t status;
    arch_flags_t existing_flags = 0;

    DEBUG_ASSERT(init_table);
    if ((!ret_level) || (!last_valid_entry) || (!ret_flags) ||
            (!IS_ALIGNED(vaddr, PAGE_SIZE)) ||
            (!IS_ALIGNED(paddr, PAGE_SIZE))) {
        return ERR_INVALID_ARGS;
    }

    status = x86_mmu_get_mapping(init_table, vaddr, ret_level, &existing_flags, last_valid_entry);
    if (status || ((*last_valid_entry) != paddr)) {
        /* The mapping does not exist (or maps a different paddr), so we never got as far
         * as checking the access flags; return the requested flags unchanged.
         */
        *ret_flags = in_flags;
        return ERR_NOT_FOUND;
    }

    /* Check the access flags for the mapped address. If the result is non-zero, the
     * access flags differ, and the returned value holds exactly the access bits that differ.
     */
    *ret_flags = (in_flags ^ get_x86_arch_flags(existing_flags)) & X86_DIRTY_ACCESS_MASK;

    if (!(*ret_flags))
        return NO_ERROR;

    return ERR_NOT_FOUND;
}

#ifdef PAE_MODE_ENABLED
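/* Point the PDP entry for vaddr at the newly allocated page directory 'm' and mark it present.
 * Only the present bit is set here, so the flags argument is currently unused.
 */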
static void update_pdp_entry(vaddr_t vaddr, map_addr_t pdpt, map_addr_t *m, arch_flags_t flags)
{
    uint32_t pdp_index;

    map_addr_t *pdp_table = (map_addr_t *)(pdpt & X86_PG_FRAME);
    pdp_index = ((vaddr >> PDP_SHIFT) & ((1ul << PDPT_ADDR_OFFSET) - 1));
    pdp_table[pdp_index] = (map_addr_t)m;
    pdp_table[pdp_index] |= X86_MMU_PG_P;
}
#endif

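/* Fill the page table entry for vaddr with the target paddr and the requested access flags */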
static void update_pt_entry(vaddr_t vaddr, map_addr_t paddr, map_addr_t pt, arch_flags_t flags)
{
    uint32_t pt_index;

    map_addr_t *pt_table = (map_addr_t *)(pt & X86_PG_FRAME);
    pt_index = ((vaddr >> PT_SHIFT) & ((1 << ADDR_OFFSET) - 1));
    pt_table[pt_index] = paddr;
    pt_table[pt_index] |= flags | X86_MMU_PG_P; /* last level - actual page being mapped */
    if (!(flags & X86_MMU_PG_U))
        pt_table[pt_index] |= X86_MMU_PG_G; /* setting global flag for kernel pages */
}

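/* Point the page directory entry for vaddr at the page table 'm' and mark it present & writable */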
static void update_pd_entry(vaddr_t vaddr, map_addr_t pdt, paddr_t m, arch_flags_t flags)
{
    uint32_t pd_index;

    map_addr_t *pd_table = (map_addr_t *)(pdt & X86_PG_FRAME);
    pd_index = ((vaddr >> PD_SHIFT) & ((1 << ADDR_OFFSET) - 1));
    pd_table[pd_index] = m;
    pd_table[pd_index] |= X86_MMU_PG_P | X86_MMU_PG_RW;
    if (flags & X86_MMU_PG_U)
        pd_table[pd_index] |= X86_MMU_PG_U;
    else
        pd_table[pd_index] |= X86_MMU_PG_G; /* setting global flag for kernel pages */
}

/**
 * @brief Allocate and zero a new page table page
 */
static map_addr_t *_map_alloc_page(void)
{
    map_addr_t *page_ptr = pmm_alloc_kpage();
    DEBUG_ASSERT(page_ptr);

    if (page_ptr)
        memset(page_ptr, 0, PAGE_SIZE);

    return page_ptr;
}

/**
 * @brief Add a new mapping for the given virtual address & physical address
 *
 * This API handles the mapping between a virtual address and a physical address,
 * either by checking that the mapping already exists and is valid, or by adding a
 * new mapping with the required flags.
 *
 */
status_t x86_mmu_add_mapping(map_addr_t init_table, map_addr_t paddr,
                             vaddr_t vaddr, arch_flags_t mmu_flags)
{
#ifdef PAE_MODE_ENABLED
    map_addr_t pdt;
    uint32_t pd_new = 0;
#endif
    map_addr_t pt, *m = NULL;
    status_t ret = NO_ERROR;

    DEBUG_ASSERT(init_table);
    if ((!IS_ALIGNED(vaddr, PAGE_SIZE)) || (!IS_ALIGNED(paddr, PAGE_SIZE)))
        return ERR_INVALID_ARGS;

#ifdef PAE_MODE_ENABLED
#error fix map_alloc_page to translate to physical
    pdt = get_pdp_entry_from_pdp_table(vaddr, init_table);
    if ((pdt & X86_MMU_PG_P) == 0) {
        /* Creating a new pd table */
        m = _map_alloc_page();
        if (m == NULL) {
            ret = ERR_NO_MEMORY;
            goto clean;
        }
        update_pdp_entry(vaddr, init_table, m, get_x86_arch_flags(mmu_flags));
        pdt = (map_addr_t)m;
        pd_new = 1;
    }

    if (!pd_new)
        pt = get_pd_entry_from_pd_table(vaddr, pdt);

    if (pd_new || (pt & X86_MMU_PG_P) == 0) {
        /* Creating a new pt */
        m = _map_alloc_page();
        if (m == NULL) {
            ret = ERR_NO_MEMORY;
            if (pd_new)
                goto clean_pd;
            goto clean;
        }

        update_pd_entry(vaddr, pdt, m, get_x86_arch_flags(mmu_flags));
        pt = (map_addr_t)m;
    }
#else
    pt = get_pd_entry_from_pd_table(vaddr, init_table);
    if ((pt & X86_MMU_PG_P) == 0) {
        /* Creating a new pt */
        m = _map_alloc_page();
        if (m == NULL) {
            ret = ERR_NO_MEMORY;
            goto clean;
        }

        paddr_t pd_paddr = vaddr_to_paddr(m);
        DEBUG_ASSERT(pd_paddr);

        update_pd_entry(vaddr, init_table, pd_paddr, get_x86_arch_flags(mmu_flags));
        pt = (map_addr_t)m;
    }
#endif

    /* Updating the page table entry with the paddr and access flags required for the mapping */
    update_pt_entry(vaddr, paddr, pt, get_x86_arch_flags(mmu_flags));
    ret = NO_ERROR;
#ifdef PAE_MODE_ENABLED
    goto clean;

clean_pd:
    if (pd_new)
        free((map_addr_t *)pdt);
#endif
clean:
    return ret;
}

/**
 * @brief x86 MMU unmap an entry in the page tables recursively and clear out tables
 *
 */
static void x86_mmu_unmap_entry(vaddr_t vaddr, int level, map_addr_t table_entry)
{
    uint32_t offset = 0, next_level_offset = 0;
    map_addr_t *table, *next_table_addr, value;

    next_table_addr = NULL;
    table = (map_addr_t *)(X86_VIRT_TO_PHYS(table_entry) & X86_PG_FRAME);

    switch (level) {
#ifdef PAE_MODE_ENABLED
        case PDP_L:
            offset = ((vaddr >> PDP_SHIFT) & ((1 << PDPT_ADDR_OFFSET) - 1));
            next_table_addr = (map_addr_t *)X86_PHYS_TO_VIRT(table[offset]);
            if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
                return;
            break;
#endif
        case PD_L:
            offset = ((vaddr >> PD_SHIFT) & ((1 << ADDR_OFFSET) - 1));
            next_table_addr = (map_addr_t *)X86_PHYS_TO_VIRT(table[offset]);
            if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
                return;
            break;
        case PT_L:
            offset = ((vaddr >> PT_SHIFT) & ((1 << ADDR_OFFSET) - 1));
            next_table_addr = (map_addr_t *)X86_PHYS_TO_VIRT(table[offset]);
            if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
                return;
            break;
        case PF_L:
            /* Reached page frame, Let's go back */
        default:
            return;
    }

    level -= 1;
    x86_mmu_unmap_entry(vaddr, level, (map_addr_t)next_table_addr);
    level += 1;

    next_table_addr = (map_addr_t *)((map_addr_t)(X86_VIRT_TO_PHYS(next_table_addr)) & X86_PG_FRAME);
    if (level > PT_L) {
        /* Check all entries of next level table for present bit */
        for (next_level_offset = 0; next_level_offset < NO_OF_PT_ENTRIES; next_level_offset++) {
            if ((next_table_addr[next_level_offset] & X86_MMU_PG_P) != 0)
                return; /* There is an entry in the next level table */
        }
        free(next_table_addr);
    }
    /* All present bits for all entries in next level table for this address are 0 */
    if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) != 0) {
        arch_disable_ints();
        value = table[offset];
        value = value & X86_PTE_NOT_PRESENT;
        table[offset] = value;
        arch_enable_ints();
    }
}

status_t x86_mmu_unmap(map_addr_t init_table, vaddr_t vaddr, size_t count)
{
    vaddr_t next_aligned_v_addr;

    DEBUG_ASSERT(init_table);
    if (!IS_ALIGNED(vaddr, PAGE_SIZE))
        return ERR_INVALID_ARGS;

    if (count == 0)
        return NO_ERROR;

    next_aligned_v_addr = vaddr;
    while (count > 0) {
#ifdef PAE_MODE_ENABLED
        x86_mmu_unmap_entry(next_aligned_v_addr, X86_PAE_PAGING_LEVELS, init_table);
#else
        x86_mmu_unmap_entry(next_aligned_v_addr, X86_PAGING_LEVELS, init_table);
#endif
        next_aligned_v_addr += PAGE_SIZE;
        count--;
    }
    return NO_ERROR;
}

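/**
 * @brief Generic arch unmap hook - removes 'count' pages starting at vaddr from the
 * page tables currently referenced by CR3.
 */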
int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, size_t count)
{
    map_addr_t init_table_from_cr3;

    DEBUG_ASSERT(aspace);

    if (!IS_ALIGNED(vaddr, PAGE_SIZE))
        return ERR_INVALID_ARGS;

    if (count == 0)
        return NO_ERROR;

    DEBUG_ASSERT(x86_get_cr3());
    init_table_from_cr3 = x86_get_cr3();

    return (x86_mmu_unmap(X86_PHYS_TO_VIRT(init_table_from_cr3), vaddr, count));
}

/**
 * @brief Mapping a section/range with specific permissions
 *
 */
status_t x86_mmu_map_range(map_addr_t init_table, struct map_range *range, arch_flags_t flags)
{
    vaddr_t next_aligned_v_addr;
    map_addr_t next_aligned_p_addr;
    status_t map_status;
    uint32_t no_of_pages, index;

    TRACEF("table 0x%x, range vaddr 0x%lx paddr 0x%lx size %u\n", init_table, range->start_vaddr, range->start_paddr, range->size);

    DEBUG_ASSERT(init_table);
    if (!range)
        return ERR_INVALID_ARGS;

    /* Calculating the number of 4k pages */
    if (IS_ALIGNED(range->size, PAGE_SIZE))
        no_of_pages = (range->size) >> PAGE_DIV_SHIFT;
    else
        no_of_pages = ((range->size) >> PAGE_DIV_SHIFT) + 1;

    next_aligned_v_addr = range->start_vaddr;
    next_aligned_p_addr = range->start_paddr;

    for (index = 0; index < no_of_pages; index++) {
        map_status = x86_mmu_add_mapping(init_table, next_aligned_p_addr, next_aligned_v_addr, flags);
        if (map_status) {
            dprintf(SPEW, "Add mapping failed with err=%d\n", map_status);
            /* Unmap the partial mapping - if any */
            x86_mmu_unmap(init_table, range->start_vaddr, index);
            return map_status;
        }
        next_aligned_v_addr += PAGE_SIZE;
        next_aligned_p_addr += PAGE_SIZE;
    }

    return NO_ERROR;
}

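/**
 * @brief Generic arch query hook - returns the physical address and mmu flags that
 * vaddr is currently mapped with in the page tables referenced by CR3.
 */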
status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags)
{
    uint32_t ret_level, current_cr3_val;
    map_addr_t last_valid_entry;
    arch_flags_t ret_flags;
    status_t stat;

    LTRACEF("aspace %p, vaddr 0x%lx, paddr %p, flags %p\n", aspace, vaddr, paddr, flags);

    DEBUG_ASSERT(aspace);

    if (!paddr)
        return ERR_INVALID_ARGS;

    DEBUG_ASSERT(x86_get_cr3());
    current_cr3_val = (map_addr_t)x86_get_cr3();

    stat = x86_mmu_get_mapping(X86_PHYS_TO_VIRT(current_cr3_val), vaddr, &ret_level, &ret_flags, &last_valid_entry);
    if (stat)
        return stat;

    *paddr = (paddr_t)last_valid_entry;

    /* the flags returned by x86_mmu_get_mapping are already converted to generic arch mmu flags */
    if (flags)
        *flags = ret_flags;

    return NO_ERROR;
}

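/**
 * @brief Generic arch map hook - maps 'count' contiguous pages starting at paddr to vaddr
 * in the page tables currently referenced by CR3.
 */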
int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, size_t count, uint flags)
{
    uint32_t current_cr3_val;
    struct map_range range;

    DEBUG_ASSERT(aspace);

    if ((!IS_ALIGNED(paddr, PAGE_SIZE)) || (!IS_ALIGNED(vaddr, PAGE_SIZE)))
        return ERR_INVALID_ARGS;

    if (count == 0)
        return NO_ERROR;

    DEBUG_ASSERT(x86_get_cr3());
    current_cr3_val = (map_addr_t)x86_get_cr3();

    range.start_vaddr = vaddr;
    range.start_paddr = (map_addr_t)paddr;
    range.size = count * PAGE_SIZE;

    return (x86_mmu_map_range(X86_PHYS_TO_VIRT(current_cr3_val), &range, flags));
}

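/**
 * @brief Early MMU init - enables supervisor write protection (plus SMEP/SMAP/NX when
 * built with PAE), removes the low identity mapping used during early boot and flushes the TLB.
 */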
void x86_mmu_early_init(void)
{
    volatile uint32_t cr0;

    /* Set WP bit in CR0 */
    cr0 = x86_get_cr0();
    cr0 |= X86_CR0_WP;
    x86_set_cr0(cr0);

#ifdef PAE_MODE_ENABLED
    volatile uint32_t efer_msr, cr4;

    /* Set the SMEP & SMAP bits in CR4 */
    cr4 = x86_get_cr4();
    if (check_smep_avail())
        cr4 |= X86_CR4_SMEP;
    if (check_smap_avail())
        cr4 |= X86_CR4_SMAP;
    x86_set_cr4(cr4);

    /* Set NXE bit in MSR_EFER */
    efer_msr = read_msr(X86_MSR_EFER);
    efer_msr |= X86_EFER_NXE;
    write_msr(X86_MSR_EFER, efer_msr);
#endif

    /* unmap the lower identity mapping */
    for (uint i = 0; i < (1024*1024*1024) / (4*1024*1024); i++) {
        pd[i] = 0;
    }

    /* tlb flush */
    x86_set_cr3(x86_get_cr3());
}

void x86_mmu_init(void)
{
}

/*
 * x86 does not support multiple address spaces at the moment, so fail if these APIs
 * are used for it.
 */
status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags)
{
    DEBUG_ASSERT(aspace);

    if ((flags & ARCH_ASPACE_FLAG_KERNEL) == 0) {
        return ERR_NOT_SUPPORTED;
    }

    return NO_ERROR;
}

status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace)
{
    return NO_ERROR;
}

void arch_mmu_context_switch(arch_aspace_t *aspace)
{
    if (aspace != NULL) {
        PANIC_UNIMPLEMENTED;
    }
}