1 /*
2 * Copyright (c) 2014 Travis Geiselbrecht
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <arch/ops.h>
25 #include <assert.h>
26 #include <err.h>
27 #include <kernel/mutex.h>
28 #include <kernel/vm.h>
29 #include <lib/console.h>
30 #include <lib/rand/rand.h>
31 #include <string.h>
32 #include <trace.h>
33 #include <inttypes.h>
34
35 #include "res_group.h"
36 #include "vm_priv.h"
37
38 #define LOCAL_TRACE 0
39
40 static struct list_node aspace_list = LIST_INITIAL_VALUE(aspace_list);
41 static mutex_t vmm_lock = MUTEX_INITIAL_VALUE(vmm_lock);
42
43 vmm_aspace_t _kernel_aspace;
44
45 static void dump_aspace(const vmm_aspace_t* a);
46 static void dump_region(const vmm_region_t* r);
47
48 void vmm_lock_aspace(vmm_aspace_t *aspace) {
49 mutex_acquire(&vmm_lock);
50 }
51
52 void vmm_unlock_aspace(vmm_aspace_t *aspace) {
53 mutex_release(&vmm_lock);
54 }
55
56 static inline uint get_arch_aspace_flags(const uint vmm_aspace_flags) {
57 uint arch_flags = 0;
58
59 if (vmm_aspace_flags & VMM_ASPACE_FLAG_KERNEL) {
60 arch_flags |= ARCH_ASPACE_FLAG_KERNEL;
61 }
62
63 if (vmm_aspace_flags & VMM_ASPACE_FLAG_BTI) {
64 arch_flags |= ARCH_ASPACE_FLAG_BTI;
65 }
66
67 return arch_flags;
68 }
69
70 static vmm_region_t* vmm_find_region_in_bst(const struct bst_root* region_tree,
71 vaddr_t vaddr, size_t size);
72
73 static int vmm_res_obj_check_flags(struct vmm_obj *obj, uint *arch_mmu_flags)
74 {
75 return 0; /* Allow any flags until RO reserved regions are implemented. */
76 }
77
78 static inline struct vmm_res_obj* vmm_obj_to_res_vmm_obj(struct vmm_obj *vmm_obj)
79 {
80 ASSERT(vmm_obj->ops->check_flags == vmm_res_obj_check_flags);
81 return containerof(vmm_obj, struct vmm_res_obj, vmm_obj);
82 }
83
84 static int vmm_res_obj_get_page(struct vmm_obj *obj, size_t offset,
85 paddr_t *paddr, size_t *paddr_size)
86 {
87 /* This is not implemented until we map pages on faults. */
88 return ERR_INVALID_ARGS;
89 }
90
91 static void vmm_res_obj_destroy(struct vmm_obj *obj)
92 {
93 struct vmm_res_obj* vmm_res_obj = vmm_obj_to_res_vmm_obj(obj);
94 vmm_region_t* region;
95 bst_for_every_entry_delete(&vmm_res_obj->regions, region, vmm_region_t, node) {
96 vmm_obj_slice_release(&region->obj_slice);
97 free(region);
98 }
99 free(obj);
100 }
101
102 static struct vmm_obj_ops vmm_res_obj_ops = {
103 .check_flags = vmm_res_obj_check_flags,
104 .get_page = vmm_res_obj_get_page,
105 .destroy = vmm_res_obj_destroy,
106 };
107
108 void vmm_init_preheap(void) {
109 /* initialize the kernel address space */
110 strlcpy(_kernel_aspace.name, "kernel", sizeof(_kernel_aspace.name));
111 _kernel_aspace.base = KERNEL_ASPACE_BASE;
112 _kernel_aspace.size = KERNEL_ASPACE_SIZE;
113 _kernel_aspace.flags = VMM_ASPACE_FLAG_KERNEL;
114
115 #ifdef KERNEL_BTI_ENABLED
116 if (arch_bti_supported()) {
117 _kernel_aspace.flags |= VMM_ASPACE_FLAG_BTI;
118 }
119 #endif
120
121 bst_root_initialize(&_kernel_aspace.regions);
122
123 arch_mmu_init_aspace(&_kernel_aspace.arch_aspace, KERNEL_ASPACE_BASE,
124 KERNEL_ASPACE_SIZE,
125 get_arch_aspace_flags(_kernel_aspace.flags));
126
127 list_add_head(&aspace_list, &_kernel_aspace.node);
128 }
129
130 void vmm_init(void) {}
131
132 static inline bool range_contains_range(vaddr_t range_base,
133 size_t range_size,
134 vaddr_t query_base,
135 size_t query_size) {
136 vaddr_t range_last;
137 vaddr_t query_last;
138
139 ASSERT(range_size > 0);
140 ASSERT(query_size > 0);
141
142 ASSERT(!__builtin_add_overflow(range_base, range_size - 1, &range_last));
143 ASSERT(!__builtin_add_overflow(query_base, query_size - 1, &query_last));
144
145 return range_base <= query_base && query_last <= range_last;
146 }
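/*
 * Worked example (illustrative values only):
 *
 *   range_contains_range(0x1000, 0x2000, 0x2000, 0x1000) == true
 *
 * The query's last byte (0x2fff) does not exceed the range's last byte
 * (0x2fff). Comparing inclusive last bytes instead of exclusive end
 * addresses is what lets a range touch the very top of the address space
 * without the arithmetic overflowing.
 */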
147
148 static inline bool is_inside_aspace(const vmm_aspace_t* aspace, vaddr_t vaddr) {
149 DEBUG_ASSERT(aspace);
150 return range_contains_range(aspace->base, aspace->size, vaddr, 1);
151 }
152
153 /*
154 * returns true iff, after potentially adding a guard page at the end of the
155 * region, it fits inside the address space pointed to by the first argument.
156 */
157 static bool is_region_inside_aspace(const vmm_aspace_t* aspace,
158 const vmm_region_t* r) {
159 size_t aspace_size = aspace->size;
160
161 DEBUG_ASSERT(aspace);
162 DEBUG_ASSERT(aspace->base >= PAGE_SIZE);
163 DEBUG_ASSERT(aspace->size > PAGE_SIZE);
164
165 if (!(r->flags & VMM_FLAG_NO_END_GUARD)) {
166 /*
167 * rather than adding to the region size, shrink the address space
168 * size; the former operation can overflow but the latter cannot.
169 */
170 aspace_size -= PAGE_SIZE;
171
172 /*
173 * We do not have to handle the symmetric case for start guards
174 * because {KERNEL,USER}_ASPACE_BASE >= PAGE_SIZE must hold.
175 * See also vmm_create_aspace.
176 */
177 }
178
179 return range_contains_range(aspace->base, aspace_size, r->base,
180 r->obj_slice.size);
181 }
182
183 static bool is_inside_region(const vmm_region_t* r, vaddr_t vaddr) {
184 DEBUG_ASSERT(r);
185 return range_contains_range(r->base, r->obj_slice.size, vaddr, 1);
186 }
187
188 static bool is_range_inside_region(const vmm_region_t* r,
189 vaddr_t vaddr,
190 size_t size) {
191 DEBUG_ASSERT(r);
192 return range_contains_range(r->base, r->obj_slice.size, vaddr, size);
193 }
194
195 static size_t trim_to_aspace(const vmm_aspace_t* aspace,
196 vaddr_t vaddr,
197 size_t size) {
198 DEBUG_ASSERT(aspace);
199 DEBUG_ASSERT(is_inside_aspace(aspace, vaddr));
200
201 if (size == 0)
202 return size;
203
204 size_t offset = vaddr - aspace->base;
205
206 // LTRACEF("vaddr 0x%lx size 0x%zx offset 0x%zx aspace base 0x%lx aspace
207 // size 0x%zx\n",
208 // vaddr, size, offset, aspace->base, aspace->size);
209
210 if (offset + size < offset)
211 size = ULONG_MAX - offset - 1;
212
213 // LTRACEF("size now 0x%zx\n", size);
214
215 if (offset + size >= aspace->size - 1)
216 size = aspace->size - offset;
217
218 // LTRACEF("size now 0x%zx\n", size);
219
220 return size;
221 }
222
223 void vmm_obj_slice_init(struct vmm_obj_slice *slice) {
224 slice->obj = NULL;
225 obj_ref_init(&slice->obj_ref);
226 slice->offset = 0;
227 slice->size = 0;
228 }
229
230 /*
231 * This will not invoke the destructor on the vmm_obj even if the reference
232 * dropped here is the last one, because the vmm lock is held. If the object
233 * would need to be destroyed, we assert-fail in debug builds and leak in NDEBUG builds.
234 */
235 static void vmm_obj_slice_release_locked(struct vmm_obj_slice *slice) {
236 bool dead = false;
237 if (slice->obj) {
238 dead = obj_del_ref(&slice->obj->obj, &slice->obj_ref, NULL);
239 slice->obj = NULL;
240 }
241 ASSERT(!dead);
242 }
243
244 void vmm_obj_slice_release(struct vmm_obj_slice *slice) {
245 if (slice->obj) {
246 vmm_obj_del_ref(slice->obj, &slice->obj_ref);
247 slice->obj = NULL;
248 }
249 }
250
251 static void vmm_obj_slice_bind_locked(struct vmm_obj_slice *slice,
252 struct vmm_obj *obj,
253 size_t offset,
254 size_t size) {
255 DEBUG_ASSERT(!slice->obj);
256 slice->obj = obj;
257 /* Use obj_add_ref directly to avoid acquiring the vmm lock. */
258 obj_add_ref(&slice->obj->obj, &slice->obj_ref);
259 slice->offset = offset;
260 slice->size = size;
261 }
262
263 void vmm_obj_slice_bind(struct vmm_obj_slice *slice, struct vmm_obj *obj,
264 size_t offset, size_t size) {
265 mutex_acquire(&vmm_lock);
266 vmm_obj_slice_bind_locked(slice, obj, offset, size);
267 mutex_release(&vmm_lock);
268 }
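/*
 * Typical slice lifecycle (sketch only; error handling omitted and "obj" is
 * assumed to be a valid, already-referenced vmm_obj):
 *
 *   struct vmm_obj_slice slice;
 *   vmm_obj_slice_init(&slice);
 *   vmm_obj_slice_bind(&slice, obj, 0, PAGE_SIZE);   // takes a reference
 *   ...
 *   vmm_obj_slice_release(&slice);                   // drops the reference
 */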
269
270 static vmm_region_t* alloc_region_struct(const char* name,
271 vaddr_t base,
272 size_t size,
273 uint flags,
274 uint arch_mmu_flags) {
275 DEBUG_ASSERT(name);
276
277 vmm_region_t* r = calloc(1, sizeof(vmm_region_t));
278 if (!r)
279 return NULL;
280
281 strlcpy(r->name, name, sizeof(r->name));
282 r->base = base;
283 r->flags = flags;
284 r->arch_mmu_flags = arch_mmu_flags;
285 vmm_obj_slice_init(&r->obj_slice);
286 r->obj_slice.size = size;
287
288 return r;
289 }
290
291 static size_t vmm_flags_guard(uint low_flags, uint high_flags) {
292 if ((low_flags & VMM_FLAG_NO_END_GUARD) &&
293 (high_flags & VMM_FLAG_NO_START_GUARD)) {
294 /*
295 * Both regions have reported that they don't need a guard page on the
296 * potentially touching side.
297 */
298 return 0;
299 }
300
301 return PAGE_SIZE;
302 }
303
304 static size_t vmm_rguard(vmm_region_t *low, vmm_region_t *high) {
305 if (low->base >= high->base) {
306 /*
307 * Skip returning a guard page if the regions are out of order, to avoid
308 * possible overflow on the last region in the address space.
309 */
310 return 0;
311 }
312 return vmm_flags_guard(low->flags, high->flags);
313 }
314
315 /* Match any regions that overlap */
316 static int vmm_region_cmp(struct bst_node *_a, struct bst_node *_b) {
317 vmm_region_t *a = containerof(_a, vmm_region_t, node);
318 vmm_region_t *b = containerof(_b, vmm_region_t, node);
319
320 if (b->base > a->base + (a->obj_slice.size - 1) + vmm_rguard(a, b)) {
321 return 1;
322 }
323 if (a->base > b->base + (b->obj_slice.size - 1) + vmm_rguard(b, a)) {
324 return -1;
325 }
326 return 0;
327 }
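/*
 * Comparator example (illustrative addresses, 4 KiB pages, default guard
 * flags): region A at [0x10000, 0x10fff] and region B based at 0x11000
 * compare as colliding (return 0) because A's implicit end guard page
 * covers 0x11000. Moving B up to 0x12000 clears the guard page, so
 * vmm_region_cmp() returns non-zero and both regions can coexist in the
 * tree.
 */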
328
329 static status_t add_region_to_bst(struct bst_root* bst, vmm_region_t* r) {
330 if (bst_insert(bst, &r->node, vmm_region_cmp)) {
331 return NO_ERROR;
332 }
333
334 LTRACEF("couldn't find spot\n");
335 vmm_region_t *r_coll = bst_search_type(bst, r,
336 vmm_region_cmp, vmm_region_t, node);
337 LTRACEF("colliding r %p base %p size 0x%zx flags 0x%x\n",
338 r_coll, (void*)r_coll->base, r_coll->obj_slice.size, r_coll->flags);
339 return ERR_NO_MEMORY;
340 }
341
342 /* add a region to the appropriate spot in the address space list,
343 * testing to see if there's a space */
344 static status_t add_region_to_aspace(vmm_aspace_t* aspace, vmm_region_t* r) {
345 DEBUG_ASSERT(aspace);
346 DEBUG_ASSERT(r);
347
348 LTRACEF("aspace %p base 0x%" PRIxVADDR " size 0x%zx r %p base 0x%" PRIxVADDR " size 0x%zx flags 0x%x\n",
349 aspace, aspace->base, aspace->size, r, r->base, r->obj_slice.size,
350 r->flags);
351
352 /* only try if the region will at least fit in the address space */
353 if (r->obj_slice.size == 0 ||
354 !is_region_inside_aspace(aspace, r)) {
355 LTRACEF("region was out of range\n");
356 return ERR_OUT_OF_RANGE;
357 }
358
359 return add_region_to_bst(&aspace->regions, r);
360 }
361
362 /* add a region to the appropriate spot in the vmm_res_obj,
363 * testing to see if there's a space */
364 static status_t add_region_to_vmm_res_obj(struct vmm_res_obj* vmm_res_obj, vmm_region_t* r) {
365 DEBUG_ASSERT(vmm_res_obj);
366 DEBUG_ASSERT(r);
367
368 LTRACEF("vmm_res_obj %p r %p base 0x%" PRIxVADDR " size 0x%zx flags 0x%x\n",
369 vmm_res_obj, r, r->base, r->obj_slice.size, r->flags);
370
371 return add_region_to_bst(&vmm_res_obj->regions, r);
372 }
373
374 /*
375 * Try to pick a spot within the specified gap.
376 *
377 * The arch layer can override this to impose its own restrictions.
378 */
379 __WEAK vaddr_t arch_mmu_pick_spot(arch_aspace_t* aspace,
380 vaddr_t base,
381 uint prev_region_arch_mmu_flags,
382 vaddr_t end,
383 uint next_region_arch_mmu_flags,
384 vaddr_t alignment,
385 size_t size,
386 uint arch_mmu_flags) {
387 /* just align it by default */
388 return align(base, alignment);
389 }
390
391 /**
392 * next_spot() - Finds the next valid mapping location in a range
393 * @low: Lowest virtual address available for use
394 * @high: Highest virtual address available for use
395 * @align: Virtual address alignment requested
396 * @size: Size of region requested
397 * @arch_mmu_flags: Flags to pass to the mmu in case of restrictions.
398 * @out: Output parameter for the base of a range matching the
399 * requirements, of size @size. Only valid if next_spot()
400 * returns true.
401 *
402 * Finds the lowest region available in a range subject to alignment, size,
403 * and MMU constraints.
404 *
405 * Return: Whether a region was found. If false, *@out is invalid. If
406 * true, *@out is the base of a legal range to map at.
407 */
408 static inline bool next_spot(arch_aspace_t* aspace,
409 uint prev_region_arch_mmu_flags,
410 uint next_region_arch_mmu_flags,
411 vaddr_t low,
412 vaddr_t high,
413 vaddr_t align,
414 size_t size,
415 uint arch_mmu_flags,
416 vaddr_t* out) {
417 DEBUG_ASSERT(aspace);
418 DEBUG_ASSERT(out);
419
420 vaddr_t candidate = arch_mmu_pick_spot(
421 aspace, low, prev_region_arch_mmu_flags, high,
422 next_region_arch_mmu_flags, align, size, arch_mmu_flags);
423
424 if ((candidate < low) || (candidate > high)) {
425 /* arch_mmu_pick_spot sent the base address out of range */
426 return false;
427 }
428
429 vaddr_t candidate_end;
430 if (__builtin_add_overflow(candidate, size - 1, &candidate_end)) {
431 /* Virtual address region would wrap around */
432 return false;
433 }
434
435 if (candidate_end > high) {
436 /* Virtual address stretches out of range */
437 return false;
438 }
439
440 *out = candidate;
441 return true;
442 }
443
444 /**
445 * extract_gap() - Finds the gap between two used regions
446 * @aspace: The address space we are working in
447 * @low: The lower virtual region. May be null to indicate the area below
448 * the first region.
449 * @high: The higher virtual region. May be null to indicate the area above
450 * the last region.
451 * @gap_low: Output parameter for the lowest open address.
452 * @gap_high: Output parameter for the highest open address.
453 *
454 * Finds the largest gap of open (unused) addresses inside an address space
455 * @aspace that is separated from any adjacent virtual regions (@low, @high)
456 * by a guard page. When there is no higher adjacent virtual region, the gap
457 * is still separated from the end of the address space by one guard page.
458 * Calculating a pointer to the element one past the end of an allocation can
459 * therefore only trigger a pointer overflow if the element size is greater
460 * than or equal to a guard page.
461 *
462 * Return: Whether a gap was found. If the return value is false, the output
463 * parameters may be invalid. If true, all addresses between
464 * *@gap_low and *@gap_high inclusive are unmapped.
465 */
466 static inline bool extract_gap(vmm_aspace_t* aspace,
467 vmm_region_t* low,
468 vmm_region_t* high,
469 vaddr_t* gap_low,
470 vaddr_t* gap_high) {
471 vaddr_t gap_high_val;
472
473 DEBUG_ASSERT(aspace);
474 DEBUG_ASSERT(gap_low);
475 DEBUG_ASSERT(gap_high);
476 DEBUG_ASSERT(aspace->size != 0);
477
478 if (low) {
479 if (__builtin_add_overflow(low->base, low->obj_slice.size, gap_low)) {
480 /* No valid address exists above the low region */
481 return false;
482 }
483 if (__builtin_add_overflow(*gap_low,
484 PAGE_SIZE,
485 gap_low)) {
486 /* No valid address exists above the low region + guard page */
487 return false;
488 }
489 } else {
490 *gap_low = aspace->base;
491 /* Assume no adjacent address space so no guard page needed */
492 }
493
494 if (high) {
495 DEBUG_ASSERT(high->base != 0);
496 gap_high_val = high->base - 1;
497 } else {
498 gap_high_val = aspace->base + (aspace->size - 1);
499 }
500
501 /*
502 * Add a guard page even when the area is above the highest region. We do so
503 * because it is common and legal to calculate a pointer just beyond a
504 * memory allocation. If we place an allocation at the very end of a
505 * virtual address space, calculating a pointer just beyond the allocation
506 * causes the pointer to wrap which is undefined behavior.
507 */
508 if (__builtin_sub_overflow(gap_high_val,
509 PAGE_SIZE,
510 &gap_high_val)) {
511 /*
512 * No valid address exists below the high region + guard page (OR the
513 * virtual address space is unexpectedly smaller than one guard page)
514 */
515 return false;
516 }
517
518 if ((*gap_low) > gap_high_val) {
519 /* No gap available */
520 return false;
521 }
522
523 *gap_high = gap_high_val;
524
525 return true;
526 }
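/*
 * Worked example (illustrative, 4 KiB pages): with @low ending at 0x10fff
 * and @high based at 0x20000, extract_gap() reports the usable gap
 * [0x12000, 0x1efff]: one guard page is skipped above @low and one below
 * @high.
 */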
527
528 /**
529 * scan_gap() - Searches between two vm regions for usable spots
530 * @aspace: The address space to search in
531 * @low: The vm region below the search area. May be null to
532 * indicate the bottom of the address space.
533 * @high: The vm region above the search area. May be null to
534 * indicate the top of the address space.
535 * @alignment: The required alignment for the new region
536 * @size: How large the new region needs to be
537 * @arch_mmu_flags: Architecture specific MMU flags for the new region
538 *
539 * Finds the number of different candidate offsets for a new region to be
540 * created between two others.
541 *
542 * The result can overestimate the true count if arch_mmu_pick_spot() imposes
543 * exotic requirements, but any value less than the return value of scan_gap()
544 * will still be valid for spot_in_gap().
545 *
546 * Return: The number of different places the region could be created within
547 * the gap.
548 */
549 static inline size_t scan_gap(vmm_aspace_t* aspace,
550 vmm_region_t* low,
551 vmm_region_t* high,
552 vaddr_t alignment,
553 size_t size,
554 uint arch_mmu_flags) {
555 vaddr_t low_addr;
556 vaddr_t high_addr;
557 if (!extract_gap(aspace, low, high, &low_addr, &high_addr)) {
558 /* There's no gap, so there are no available positions */
559 return 0;
560 }
561
562 uint low_flags = low ? low->arch_mmu_flags : ARCH_MMU_FLAG_INVALID;
563 uint high_flags = high ? high->arch_mmu_flags : ARCH_MMU_FLAG_INVALID;
564
565 vaddr_t first_base;
566 arch_aspace_t* arch_aspace = &aspace->arch_aspace;
567 if (!next_spot(arch_aspace, low_flags, high_flags, low_addr, high_addr,
568 alignment, size, arch_mmu_flags, &first_base)) {
569 /*
570 * We couldn't find a first place, so there are no available
571 * positions.
572 */
573 return 0;
574 }
575
576 /* Estimate that the last position will be the last page aligned slot */
577 vaddr_t final_base = round_down(high_addr - (size - 1), PAGE_SIZE);
578 /* If we can't map at that address, shrink it by a page each time. */
579 while (!next_spot(arch_aspace, low_flags, high_flags, final_base, high_addr,
580 alignment, size, arch_mmu_flags, &final_base)) {
581 if ((final_base - first_base) < PAGE_SIZE) {
582 /* There's only one location available in the region. */
583 break;
584 }
585 final_base -= PAGE_SIZE;
586 }
587
588 /*
589 * first_base and final_base now point to the lower and upper mapping
590 * bounds.
591 * We assume that every page in between would be a legal mapping. If it
592 * would not, the worst consequence will be having less randomness than
593 * expected since we know all addresses in the range will have a
594 * valid next_spot().
595 */
596
597 return ((final_base - first_base) >> PAGE_SIZE_SHIFT) + 1;
598 }
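/*
 * Continuing the example above (illustrative): for the gap
 * [0x12000, 0x1efff], a request of size 0x2000 with page alignment has
 * first_base = 0x12000 and final_base = 0x1d000, so scan_gap() returns
 * ((0x1d000 - 0x12000) >> PAGE_SIZE_SHIFT) + 1 = 12 candidate positions,
 * assuming the default arch_mmu_pick_spot() that only aligns the base.
 */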
599
600 /**
601 * spot_in_gap() - Pick a specific available mapping range
602 * @aspace: The address space in which the mapping will take place
603 * @low: The lower virtual region. May be null to indicate the
604 * area below the first region.
605 * @high: The higher virtual region. May be null to indicate the
606 * area above the last region.
607 * @align: The requested alignment of the region
608 * @size: The requested size of the region
609 * @arch_mmu_flags: The requested MMU flags (RWX etc)
610 * @index: Which possibility to map the region at. This value must
611 * be less than the value returned by scan_gap() for the
612 * same query.
613 *
614 * spot_in_gap() picks one of several possible regions within a gap, using the
615 * provided index to select which one.
616 *
617 * This function is intended to be used in concert with scan_gap().
618 * After running scan_gap(), the count it returns is an exclusive upper bound
619 * on the @index value passed to this function, which should then not fail.
620 *
621 * Return: The virtual address that the mapping should be performed at.
622 */
623 static inline vaddr_t spot_in_gap(vmm_aspace_t* aspace,
624 vmm_region_t* low,
625 vmm_region_t* high,
626 vaddr_t align,
627 size_t size,
628 uint arch_mmu_flags,
629 size_t index) {
630 vaddr_t low_addr;
631 vaddr_t high_addr;
632 if (!extract_gap(aspace, low, high, &low_addr, &high_addr)) {
633 panic("spot_in_gap() called on a 0-size region\n");
634 }
635
636 uint low_flags = low ? low->arch_mmu_flags : ARCH_MMU_FLAG_INVALID;
637 uint high_flags = high ? high->arch_mmu_flags : ARCH_MMU_FLAG_INVALID;
638
639 vaddr_t base;
640 arch_aspace_t* arch_aspace = &aspace->arch_aspace;
641 if (!next_spot(arch_aspace, low_flags, high_flags, low_addr, high_addr,
642 align, size, arch_mmu_flags, &base)) {
643 panic("spot_in_gap() called on a region with no available mappings\n");
644 }
645
646 base += index * PAGE_SIZE;
647
648 if (!next_spot(arch_aspace, low_flags, high_flags, base, high_addr, align,
649 size, arch_mmu_flags, &base)) {
650 panic("spot_in_gap() with an index with no mapping option\n");
651 }
652
653 return base;
654 }
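/*
 * Continuing the example above (illustrative): with 12 candidates in the
 * gap [0x12000, 0x1efff] for a 0x2000 mapping, spot_in_gap() with
 * @index == 3 returns 0x12000 + 3 * PAGE_SIZE = 0x15000, again assuming
 * the default arch_mmu_pick_spot().
 */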
655
656 /**
657 * alloc_spot() - Find a place in the address space for a new virtual region
658 * @aspace: The address space to search within
659 * @size: How large of a spot is required
660 * @align_pow2: Alignment requirements for the gap in bits
661 * @arch_mmu_flags: Architecture-specific MMU flags (RWX etc)
662 *
663 * Finds a space in the virtual memory space which is currently unoccupied,
664 * is legal to map according to the MMU, is at least as large as @size,
665 * and is aligned to 2^@align_pow2 bytes.
666 *
667 * If ASLR is enabled, this spot will also be *randomized* from amongst all
668 * legal positions.
669 * If ASLR is disabled, it will bias towards the lowest legal virtual address.
670 *
671 * This function does not actually mutate the aspace and reserve the region.
672 * That is the responsibility of the caller.
673 *
674 * Return: The value of the first address for the new region if one was found,
675 * or -1 if no region was found.
676 */
677 static vaddr_t alloc_spot(vmm_aspace_t* aspace,
678 size_t size,
679 uint8_t align_pow2,
680 uint arch_mmu_flags) {
681 DEBUG_ASSERT(aspace);
682 DEBUG_ASSERT(size > 0 && IS_PAGE_ALIGNED(size));
683
684 LTRACEF("aspace %p size 0x%zx align %hhu\n", aspace, size, align_pow2);
685
686 if (align_pow2 < PAGE_SIZE_SHIFT)
687 align_pow2 = PAGE_SIZE_SHIFT;
688 vaddr_t align = 1UL << align_pow2;
689
690 vaddr_t spot;
691 vmm_region_t* left = NULL;
692 vmm_region_t* right;
693
694 /*
695 * TODO: When ASLR is enabled, pick a random address and check if it is
696 * available with bst_search before falling back to examine every region.
697 */
698
699 /* Figure out how many options we have to size randomness appropriately */
700 size_t choices = 0;
701 bst_for_every_entry(&aspace->regions, right, vmm_region_t, node) {
702 choices += scan_gap(aspace, left, right, align, size, arch_mmu_flags);
703 left = right;
704 }
705 right = NULL;
706 choices += scan_gap(aspace, left, right, align, size, arch_mmu_flags);
707 if (!choices) {
708 /* No available choices, bail */
709 return (vaddr_t)-1;
710 }
711
712 /* Grab the index through all choices */
713 #ifdef ASLR
714 size_t index = rand_get_size(choices - 1);
715 #else
716 size_t index = 0;
717 #endif
718 left = NULL;
719 bst_for_every_entry(&aspace->regions, right, vmm_region_t, node) {
720 size_t local_spots =
721 scan_gap(aspace, left, right, align, size, arch_mmu_flags);
722 if (local_spots > index) {
723 spot = spot_in_gap(aspace, left, right, align, size,
724 arch_mmu_flags, index);
725 goto done;
726 } else {
727 index -= local_spots;
728 }
729 left = right;
730 }
731 right = NULL;
732 spot = spot_in_gap(aspace, left, right, align, size, arch_mmu_flags,
733 index);
734
735 done:
736 return spot;
737 }
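/*
 * Selection sketch (illustrative): if the address space has two gaps
 * offering 12 and 5 candidate spots and ASLR draws index 14, the walk in
 * alloc_spot() above consumes the first gap (index becomes 2) and then
 * calls spot_in_gap() on the second gap with index 2. With ASLR disabled
 * the index is 0, so the lowest legal address in the first usable gap wins.
 */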
738
739 bool vmm_find_spot(vmm_aspace_t* aspace, size_t size, vaddr_t* out) {
740 mutex_acquire(&vmm_lock);
741 *out = alloc_spot(aspace, size, PAGE_SIZE_SHIFT, 0);
742 mutex_release(&vmm_lock);
743 return *out != (vaddr_t)(-1);
744 }
745
746 /* allocate a region structure and stick it in the address space */
747 static status_t alloc_region(vmm_aspace_t* aspace,
748 const char* name,
749 size_t size,
750 vaddr_t vaddr,
751 uint8_t align_pow2,
752 uint vmm_flags,
753 uint region_flags,
754 uint arch_mmu_flags,
755 vmm_region_t** out,
756 struct bst_root** out_root) {
757 DEBUG_ASSERT((vmm_flags & VMM_REGION_FLAG_INTERNAL_MASK) == 0);
758 DEBUG_ASSERT(vaddr == arch_adjusted_vaddr(vaddr, aspace->flags &
759 ARCH_ASPACE_FLAG_KERNEL));
760 /* make a region struct for it and stick it in the list */
761 vmm_region_t* r = alloc_region_struct(name, vaddr, size,
762 region_flags | vmm_flags,
763 arch_mmu_flags);
764 if (!r)
765 return ERR_NO_MEMORY;
766
767 struct bst_root* root;
768
769 /* if they ask us for a specific spot, put it there */
770 if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
771 status_t ret;
772 vmm_region_t *reserved_region = vmm_find_region_in_bst(&aspace->regions, vaddr, size);
773 if (reserved_region) {
774 if (reserved_region->flags & VMM_FLAG_NO_PHYSICAL) {
775 if (!is_range_inside_region(reserved_region, vaddr, size)) {
776 ret = ERR_INVALID_ARGS;
777 } else {
778 /*
779 * Allocations from a NO_PHYSICAL region are always specific.
780 * The caller is responsible for managing guard pages.
781 */
782 r->flags |= VMM_FLAG_NO_START_GUARD | VMM_FLAG_NO_END_GUARD;
783 struct vmm_res_obj *res_obj = vmm_obj_to_res_vmm_obj(reserved_region->obj_slice.obj);
784 ret = add_region_to_vmm_res_obj(res_obj, r);
785 root = &res_obj->regions;
786 }
787 } else {
788 ret = ERR_NO_MEMORY;
789 }
790 } else {
791 /* stick it in the list, checking to see if it fits */
792 ret = add_region_to_aspace(aspace, r);
793 root = &aspace->regions;
794 }
795 if (ret < 0) {
796 /* didn't fit */
797 free(r);
798 return ret;
799 }
800 } else {
801 /* allocate a virtual slot for it */
802 if ((vmm_flags & VMM_FLAG_NO_START_GUARD) ||
803 (vmm_flags & VMM_FLAG_NO_END_GUARD)) {
804 LTRACEF("invalid allocation request: only requests for a specific"
805 " spot may disable guard pages before/after allocation\n");
806 free(r);
807 return ERR_INVALID_ARGS;
808 }
809
810 vaddr = alloc_spot(aspace, size, align_pow2, arch_mmu_flags);
811 LTRACEF("alloc_spot returns 0x%" PRIxVADDR "\n", vaddr);
812
813 if (vaddr == (vaddr_t)-1) {
814 LTRACEF("failed to find spot\n");
815 free(r);
816 return ERR_NO_MEMORY;
817 }
818
819 r->base = (vaddr_t)vaddr;
820
821 /* add it to the region list */
822 ASSERT(bst_insert(&aspace->regions, &r->node, vmm_region_cmp));
823 root = &aspace->regions;
824 }
825
826 if (out) {
827 *out = r;
828 }
829 if (out_root) {
830 *out_root = root;
831 }
832 return NO_ERROR;
833 }
834
835 static status_t vmm_map_obj_locked(vmm_aspace_t* aspace, vmm_region_t* r,
836 uint arch_mmu_flags, bool replace) {
837 /* map all of the pages */
838 /* XXX use smarter algorithm that tries to build runs */
839 status_t err;
840 size_t off = 0;
841 struct vmm_obj *vmm_obj = r->obj_slice.obj;
842 while (off < r->obj_slice.size) {
843 paddr_t pa;
844 vaddr_t va;
845 size_t pa_size;
846 err = vmm_obj->ops->get_page(vmm_obj, off + r->obj_slice.offset, &pa,
847 &pa_size);
848 if (err) {
849 goto err_map_loop;
850 }
851 pa_size = MIN(pa_size, r->obj_slice.size - off);
852
853 DEBUG_ASSERT(IS_PAGE_ALIGNED(pa));
854 DEBUG_ASSERT(pa_size);
855 DEBUG_ASSERT(IS_PAGE_ALIGNED(pa_size));
856
857 if (__builtin_add_overflow(r->base, off, &va)) {
858 DEBUG_ASSERT(false);
859 }
860 DEBUG_ASSERT(IS_PAGE_ALIGNED(va));
861 DEBUG_ASSERT(va <= r->base + (r->obj_slice.size - 1));
862 if (replace) {
863 err = arch_mmu_map_replace(&aspace->arch_aspace, va, pa,
864 pa_size / PAGE_SIZE, arch_mmu_flags);
865 if (err) {
866 TRACEF("replace mapping failed, unmapping existing mapping\n");
867 off = r->obj_slice.size;
868 }
869 } else {
870 err = arch_mmu_map(&aspace->arch_aspace, va, pa,
871 pa_size / PAGE_SIZE, arch_mmu_flags);
872 }
873 if (err) {
874 goto err_map_loop;
875 }
876 off += pa_size;
877 }
878
879 return NO_ERROR;
880
881 err_map_loop:
882 arch_mmu_unmap(&aspace->arch_aspace, r->base, off / PAGE_SIZE);
883 return err;
884 }
885
886
887 status_t vmm_reserve_space(vmm_aspace_t* aspace,
888 const char* name,
889 size_t size,
890 vaddr_t vaddr) {
891 status_t ret;
892
893 LTRACEF("aspace %p name '%s' size 0x%zx vaddr 0x%" PRIxVADDR "\n", aspace, name, size,
894 vaddr);
895
896 DEBUG_ASSERT(aspace);
897 DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
898 DEBUG_ASSERT(IS_PAGE_ALIGNED(size));
899
900 if (!name)
901 name = "";
902
903 if (!aspace)
904 return ERR_INVALID_ARGS;
905 if (size == 0)
906 return NO_ERROR;
907 if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(size))
908 return ERR_INVALID_ARGS;
909
910 if (!is_inside_aspace(aspace, vaddr))
911 return ERR_OUT_OF_RANGE;
912
913 /* trim the size */
914 size = trim_to_aspace(aspace, vaddr, size);
915
916 mutex_acquire(&vmm_lock);
917
918 /* look up how it's already mapped */
919 uint arch_mmu_flags = 0;
920 arch_mmu_query(&aspace->arch_aspace, vaddr, NULL, &arch_mmu_flags);
921
922 /* build a new region structure */
923 ret = alloc_region(aspace, name, size, vaddr, 0, VMM_FLAG_VALLOC_SPECIFIC,
924 VMM_REGION_FLAG_RESERVED, arch_mmu_flags, NULL, NULL);
925
926 mutex_release(&vmm_lock);
927 return ret;
928 }
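/*
 * Usage sketch (hypothetical region name and "fb_vaddr" address): reserve
 * an already-mapped boot framebuffer so later allocations cannot land on
 * top of it:
 *
 *   status_t rc = vmm_reserve_space(vmm_get_kernel_aspace(), "fb",
 *                                   4 * 1024 * 1024, fb_vaddr);
 */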
929
930 void vmm_obj_add_ref(struct vmm_obj* obj, struct obj_ref* ref) {
931 mutex_acquire(&vmm_lock);
932 obj_add_ref(&obj->obj, ref);
933 mutex_release(&vmm_lock);
934 }
935
936 void vmm_obj_del_ref(struct vmm_obj* obj, struct obj_ref* ref) {
937 bool destroy;
938 mutex_acquire(&vmm_lock);
939 destroy = obj_del_ref(&obj->obj, ref, NULL);
940 mutex_release(&vmm_lock);
941 if (destroy) {
942 obj->ops->destroy(obj);
943 }
944 }
945
946 bool vmm_obj_has_only_ref(struct vmm_obj* obj, struct obj_ref* ref) {
947 bool has_only_ref;
948 mutex_acquire(&vmm_lock);
949 has_only_ref = obj_has_only_ref(&obj->obj, ref);
950 mutex_release(&vmm_lock);
951 return has_only_ref;
952 }
953
954 __WEAK void arch_clear_pages_and_tags(vaddr_t addr, size_t size) {
955 panic("weak arch_clear_pages_and_tags called");
956 }
957
958 __WEAK bool arch_tagging_enabled(void) {
959 return false;
960 }
961
962 __WEAK bool arch_bti_supported(void) {
963 return false;
964 }
965
966 __WEAK bool arch_sve_supported(void) {
967 return false;
968 }
969
970 __WEAK uint64_t arch_enable_sve(void) {
971 return 0L;
972 }
973
974 __WEAK uint64_t arch_disable_sve(void) {
975 return 0L;
976 }
977
978 status_t vmm_alloc_obj(vmm_aspace_t* aspace, const char* name,
979 struct vmm_obj* vmm_obj, size_t offset, size_t size,
980 void** ptr, uint8_t align_log2, uint vmm_flags,
981 uint arch_mmu_flags) {
982 status_t ret;
983
984 LTRACEF("aspace %p name '%s' obj %p offset 0x%zx size 0x%zx\n",
985 aspace, name, vmm_obj, offset, size);
986 LTRACEF("ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n",
987 ptr ? *ptr : 0, align_log2, vmm_flags, arch_mmu_flags);
988
989 DEBUG_ASSERT(aspace);
990 DEBUG_ASSERT(vmm_obj);
991 DEBUG_ASSERT(vmm_obj->ops);
992 DEBUG_ASSERT(IS_PAGE_ALIGNED(offset));
993 DEBUG_ASSERT(IS_PAGE_ALIGNED(size));
994 DEBUG_ASSERT(ptr);
995
996 if (!ptr) {
997 ret = ERR_INVALID_ARGS;
998 goto err_missing_ptr;
999 }
1000
1001 if (!name) {
1002 name = "";
1003 }
1004
1005 vaddr_t vaddr = 0;
1006
1007 /* if they're asking for a specific spot, copy the address */
1008 if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
1009 vaddr = (vaddr_t)*ptr;
1010 }
1011
1012 ret = vmm_obj->ops->check_flags(vmm_obj, &arch_mmu_flags);
1013 if (ret) {
1014 LTRACEF("check_flags failed\n");
1015 goto err_check_flags;
1016 }
1017
1018 mutex_acquire(&vmm_lock);
1019
1020 /* allocate a region and put it in the aspace list */
1021 vmm_region_t* r;
1022 struct bst_root *region_root = NULL;
1023 ret = alloc_region(aspace, name, size, vaddr, align_log2,
1024 vmm_flags, VMM_REGION_FLAG_PHYSICAL,
1025 arch_mmu_flags, &r, &region_root);
1026 if (ret) {
1027 LTRACEF("alloc_region failed\n");
1028 goto err_alloc_region;
1029 }
1030
1031 vmm_obj_slice_bind_locked(&r->obj_slice, vmm_obj, offset, size);
1032
1033 if (!(vmm_flags & VMM_FLAG_NO_PHYSICAL)) {
1034 /*
1035 * For tagged memory, pmm_alloc won't have zeroed the pages, so do that
1036 * here along with the tags
1037 */
1038 bool tagged = arch_mmu_flags & ARCH_MMU_FLAG_TAGGED;
1039 bool allow_tagged = pmm_vmm_is_pmm_that_allows_tagged(vmm_obj);
1040 bool needs_clear = pmm_vmm_is_pmm_that_needs_clear(vmm_obj);
1041 if (tagged && !allow_tagged) {
1042 TRACEF("obj not allowed to be tagged\n");
1043 ret = ERR_INVALID_ARGS;
1044 goto err_map_obj;
1045 }
1046 if (needs_clear) {
1047 uint tmpflags = ARCH_MMU_FLAG_PERM_NO_EXECUTE;
1048 if (allow_tagged) {
1049 tmpflags |= ARCH_MMU_FLAG_TAGGED;
1050 }
1051 ret = vmm_map_obj_locked(aspace, r, tmpflags, false);
1052 if (ret) {
1053 goto err_map_obj;
1054 }
1055 if (allow_tagged) {
1056 arch_clear_pages_and_tags(r->base, size);
1057 } else {
1058 memset((void*)r->base, 0, size);
1059 }
1060 pmm_set_cleared(vmm_obj, offset, size);
1061 }
1062 ret = vmm_map_obj_locked(aspace, r, arch_mmu_flags, needs_clear);
1063 if (ret) {
1064 goto err_map_obj;
1065 }
1066 if (tagged) {
1067 /* only allow mapping as tagged once */
1068 pmm_set_tagged(vmm_obj);
1069 }
1070 }
1071
1072 /* return the vaddr */
1073 *ptr = (void*)r->base;
1074
1075 mutex_release(&vmm_lock);
1076 return NO_ERROR;
1077
1078 err_map_obj:
1079 vmm_obj_slice_release_locked(&r->obj_slice);
1080 ASSERT(region_root);
1081 ASSERT(r);
1082 bst_delete(region_root, &r->node);
1083 free(r);
1084 err_alloc_region:
1085 mutex_release(&vmm_lock);
1086 err_check_flags:
1087 err_missing_ptr:
1088 return ret;
1089 }
1090
1091 status_t vmm_alloc_physical_etc(vmm_aspace_t* aspace,
1092 const char* name,
1093 size_t size,
1094 void** ptr,
1095 uint8_t align_log2,
1096 const paddr_t* paddr,
1097 uint paddr_count,
1098 uint vmm_flags,
1099 uint arch_mmu_flags) {
1100 status_t ret;
1101 uint i;
1102 size_t page_size;
1103
1104 LTRACEF("aspace %p name '%s' size 0x%zx ptr %p paddr 0x%" PRIxPADDR "... vmm_flags 0x%x "
1105 "arch_mmu_flags 0x%x\n",
1106 aspace, name, size, ptr ? *ptr : 0, paddr[0], vmm_flags,
1107 arch_mmu_flags);
1108
1109 DEBUG_ASSERT(aspace);
1110 DEBUG_ASSERT(ptr);
1111 for (i = 0; i < paddr_count; i++) {
1112 DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr[i]));
1113 }
1114 DEBUG_ASSERT(IS_PAGE_ALIGNED(size));
1115
1116 if (!name)
1117 name = "";
1118
1119 if (!aspace)
1120 return ERR_INVALID_ARGS;
1121 if (size == 0)
1122 return NO_ERROR;
1123 if (!paddr_count)
1124 return ERR_INVALID_ARGS;
1125 page_size = size / paddr_count;
1126 if (!IS_PAGE_ALIGNED(paddr[0]) || !IS_PAGE_ALIGNED(page_size))
1127 return ERR_INVALID_ARGS;
1128
1129 if (!ptr) {
1130 return ERR_INVALID_ARGS;
1131 }
1132
1133 vaddr_t vaddr = 0;
1134
1135 /* if they're asking for a specific spot, copy the address */
1136 if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
1137 vaddr = (vaddr_t)*ptr;
1138 }
1139
1140 mutex_acquire(&vmm_lock);
1141
1142 /* allocate a region and put it in the aspace list */
1143 vmm_region_t* r;
1144 ret = alloc_region(aspace, name, size, vaddr, align_log2, vmm_flags,
1145 VMM_REGION_FLAG_PHYSICAL, arch_mmu_flags, &r, NULL);
1146 if (ret) {
1147 goto err_alloc_region;
1148 }
1149
1150 /* return the vaddr */
1151 *ptr = (void*)r->base;
1152
1153 /* map all of the pages */
1154 for (i = 0; i < paddr_count; i++) {
1155 int err = arch_mmu_map(&aspace->arch_aspace, r->base + i * page_size,
1156 paddr[i], page_size / PAGE_SIZE, arch_mmu_flags);
1157 LTRACEF("arch_mmu_map returns %d\n", err);
1158 }
1159
1160 ret = NO_ERROR;
1161
1162 err_alloc_region:
1163 mutex_release(&vmm_lock);
1164 return ret;
1165 }
1166
1167 static status_t vmm_alloc_pmm(vmm_aspace_t* aspace,
1168 const char* name,
1169 size_t size,
1170 void** ptr,
1171 uint8_t align_pow2,
1172 uint vmm_flags,
1173 uint arch_mmu_flags,
1174 uint32_t pmm_alloc_flags,
1175 uint8_t pmm_alloc_align_pow2) {
1176 status_t ret;
1177 struct vmm_obj *vmm_obj;
1178 struct obj_ref vmm_obj_ref = OBJ_REF_INITIAL_VALUE(vmm_obj_ref);
1179 ASSERT(!(vmm_flags & VMM_FLAG_NO_PHYSICAL));
1180
1181 size = round_up(size, PAGE_SIZE);
1182 size_t num_pages = size / PAGE_SIZE;
1183 if (size == 0)
1184 return ERR_INVALID_ARGS;
1185
1186 if (arch_mmu_flags & ARCH_MMU_FLAG_TAGGED) {
1187 if (!arch_tagging_enabled()) {
1188 return ERR_INVALID_ARGS;
1189 }
1190 /*
1191 * Indicate to pmm that memory doesn't need to be cleared
1192 * because we'll do that later along with the tags
1193 */
1194 pmm_alloc_flags |=
1195 (PMM_ALLOC_FLAG_NO_CLEAR | PMM_ALLOC_FLAG_ALLOW_TAGGED);
1196 }
1197
1198 struct res_group* res_group = NULL;
1199
1200 if (vmm_flags & VMM_FLAG_QUOTA && !aspace->quota_res_group) {
1201 LTRACEF("the address space does not support QUOTA allocations!\n");
1202 return ERR_INVALID_ARGS;
1203 }
1204
1205 if (vmm_flags & VMM_FLAG_QUOTA) {
1206 res_group = aspace->quota_res_group;
1207 pmm_alloc_flags |= PMM_ALLOC_FLAG_FROM_RESERVED;
1208 }
1209 ret = pmm_alloc_from_res_group(&vmm_obj, &vmm_obj_ref, res_group, num_pages,
1210 pmm_alloc_flags, pmm_alloc_align_pow2);
1211 if (ret) {
1212 LTRACEF("failed to allocate enough pages (asked for %zu)\n",
1213 size / PAGE_SIZE);
1214 return ret;
1215 }
1216 ret = vmm_alloc_obj(aspace, name, vmm_obj, 0, size, ptr, align_pow2,
1217 vmm_flags, arch_mmu_flags);
1218 vmm_obj_del_ref(vmm_obj, &vmm_obj_ref);
1219 return ret;
1220 }
1221
1222 status_t vmm_alloc_contiguous(vmm_aspace_t* aspace,
1223 const char* name,
1224 size_t size,
1225 void** ptr,
1226 uint8_t align_pow2,
1227 uint vmm_flags,
1228 uint arch_mmu_flags) {
1229 return vmm_alloc_pmm(aspace, name, size, ptr, align_pow2, vmm_flags,
1230 arch_mmu_flags, PMM_ALLOC_FLAG_CONTIGUOUS, align_pow2);
1231 }
1232
1233 status_t vmm_alloc(vmm_aspace_t* aspace,
1234 const char* name,
1235 size_t size,
1236 void** ptr,
1237 uint8_t align_pow2,
1238 uint vmm_flags,
1239 uint arch_mmu_flags) {
1240 /* reserve a region without mapping physical pages */
1241 if (vmm_flags & VMM_FLAG_NO_PHYSICAL) {
1242 return vmm_alloc_no_physical(aspace, name, size, ptr, align_pow2,
1243 vmm_flags, arch_mmu_flags);
1244 }
1245
1246 /* allocate physical pages */
1247 return vmm_alloc_pmm(aspace, name, size, ptr, align_pow2, vmm_flags,
1248 arch_mmu_flags, 0, 0);
1249 }
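/*
 * Typical call (sketch): allocate one page of kernel memory, letting the
 * VMM pick the virtual address, then free it again:
 *
 *   void* ptr = NULL;
 *   status_t rc = vmm_alloc(vmm_get_kernel_aspace(), "example", PAGE_SIZE,
 *                           &ptr, PAGE_SIZE_SHIFT, 0, 0);
 *   if (rc == NO_ERROR)
 *       vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)ptr);
 */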
1250
1251 vmm_region_t* vmm_find_region(const vmm_aspace_t* aspace,
1252 vaddr_t vaddr) {
1253 DEBUG_ASSERT(aspace);
1254 DEBUG_ASSERT(is_mutex_held(&vmm_lock));
1255
1256 if (!aspace)
1257 return NULL;
1258
1259 if (!is_mutex_held(&vmm_lock)) {
1260 return NULL;
1261 }
1262
1263 return vmm_find_region_in_bst(&aspace->regions, vaddr, PAGE_SIZE);
1264 }
1265
1266 void vmm_get_address_description(vaddr_t vaddr, char *name, size_t name_size) {
1267 /*
1268 * If the vmm lock is already held by the current thread, or cannot be
1269 * acquired immediately, return right away to avoid blocking or deadlocking
1270 * the caller.
1271 */
1272 if(is_mutex_held(&vmm_lock) ||
1273 mutex_acquire_timeout(&vmm_lock, 0) != NO_ERROR) {
1274 snprintf(name, name_size, "<unavailable>");
1275 return;
1276 }
1277
1278 vmm_region_t* region = NULL;
1279 vmm_aspace_t* aspace = vaddr_to_aspace((void*)vaddr);
1280
1281 if(aspace) {
1282 vaddr = arch_adjusted_vaddr(vaddr,
1283 aspace->flags & ARCH_ASPACE_FLAG_KERNEL);
1284 region = vmm_find_region_in_bst(&aspace->regions, vaddr, 0);
1285 }
1286 if (region) {
1287 snprintf(name, name_size, "%s", region->name);
1288 } else {
1289 vaddr_t next, prev;
1290 vmm_region_t* before = NULL;
1291 vmm_region_t* after = NULL;
1292 if(!__builtin_add_overflow(vaddr, PAGE_SIZE, &next)) {
1293 before = vmm_find_region_in_bst(&aspace->regions, next, 0);
1294 }
1295 if (!__builtin_sub_overflow(vaddr, PAGE_SIZE, &prev)) {
1296 after = vmm_find_region_in_bst(&aspace->regions, prev, 0);
1297 }
1298 if (before && after) {
1299 snprintf(name, name_size, "%" PRIdVADDR " bytes after %s, %"
1300 PRIdVADDR " bytes before %s",
1301 vaddr - (after->base + after->obj_slice.size),
1302 after->name,
1303 before->base - vaddr,
1304 before->name);
1305 } else if (before) {
1306 snprintf(name, name_size, "%" PRIdVADDR " bytes before %s",
1307 before->base - vaddr,
1308 before->name);
1309 } else if (after) {
1310 snprintf(name, name_size, "%" PRIdVADDR " bytes after %s",
1311 vaddr - (after->base + after->obj_slice.size),
1312 after->name);
1313 } else {
1314 snprintf(name, name_size, "<no region>");
1315 }
1316 }
1317
1318 mutex_release(&vmm_lock);
1319 }
1320
1321 static vmm_region_t* vmm_find_region_in_bst(
1322 const struct bst_root* region_tree,
1323 vaddr_t vaddr, size_t size) {
1324 vmm_region_t* r;
1325
1326 vaddr = round_down(vaddr, PAGE_SIZE);
1327
1328 /* search the region list */
1329 vmm_region_t r_ref;
1330 r_ref.flags = VMM_FLAG_NO_START_GUARD | VMM_FLAG_NO_END_GUARD;
1331 r_ref.base = vaddr;
1332 r_ref.obj_slice.size = size ? size : PAGE_SIZE;
1333 r = bst_search_type(region_tree, &r_ref, vmm_region_cmp, vmm_region_t,
1334 node);
1335 if (!r) {
1336 return NULL;
1337 }
1338 if (!is_inside_region(r, vaddr)) {
1339 DEBUG_ASSERT(vaddr == r->base - PAGE_SIZE || vaddr == r->base + r->obj_slice.size);
1340 /* don't return regions that only overlap with guard page */
1341 return NULL;
1342 }
1343 return r;
1344 }
1345
1346 status_t vmm_alloc_no_physical(vmm_aspace_t* aspace,
1347 const char* name,
1348 size_t size,
1349 void** ptr,
1350 uint8_t align_pow2,
1351 uint vmm_flags,
1352 uint arch_mmu_flags) {
1353 struct vmm_res_obj *vmm_res_obj;
1354 if (arch_mmu_flags & ARCH_MMU_FLAG_PERM_RO) {
1355 LTRACEF("a NO_PHYSICAL allocation cannot be read-only\n");
1356 return ERR_INVALID_ARGS;
1357 }
1358 size = round_up(size, PAGE_SIZE);
1359 if (size == 0)
1360 return ERR_INVALID_ARGS;
1361
1362 struct obj_ref vmm_obj_ref = OBJ_REF_INITIAL_VALUE(vmm_obj_ref);
1363 vmm_res_obj = calloc(1, sizeof(*vmm_res_obj));
1364 if (!vmm_res_obj) {
1365 return ERR_NO_MEMORY;
1366 }
1367 vmm_obj_init(&vmm_res_obj->vmm_obj, &vmm_obj_ref, &vmm_res_obj_ops);
1368 bst_root_initialize(&vmm_res_obj->regions);
1369
1370
1371 status_t ret = vmm_alloc_obj(aspace, name, &vmm_res_obj->vmm_obj, 0, size, ptr, align_pow2,
1372 vmm_flags | VMM_FLAG_NO_PHYSICAL, arch_mmu_flags);
1373 vmm_obj_del_ref(&vmm_res_obj->vmm_obj, &vmm_obj_ref);
1374 return ret;
1375 }
1376
1377 status_t vmm_get_obj(const vmm_aspace_t *aspace, vaddr_t vaddr, size_t size,
1378 struct vmm_obj_slice *slice) {
1379 status_t ret = NO_ERROR;
1380
1381 DEBUG_ASSERT(slice);
1382
1383 if (size == 0) {
1384 return ERR_INVALID_ARGS;
1385 }
1386
1387 mutex_acquire(&vmm_lock);
1388
1389 vaddr = arch_adjusted_vaddr(vaddr, aspace->flags & ARCH_ASPACE_FLAG_KERNEL);
1390 struct vmm_region *region = vmm_find_region(aspace, vaddr);
1391 if (!region) {
1392 ret = ERR_NOT_FOUND;
1393 goto out;
1394 }
1395
1396
1397 if (region->flags & VMM_FLAG_NO_PHYSICAL) {
1398 /* this region doesn't map any pages, look inside instead */
1399 struct vmm_res_obj* obj = vmm_obj_to_res_vmm_obj(region->obj_slice.obj);
1400 region = vmm_find_region_in_bst(&obj->regions, vaddr, size);
1401 if (!region) {
1402 ret = ERR_NOT_FOUND;
1403 goto out;
1404 }
1405 }
1406
1407 /* vmm_find_region already checked that vaddr is in region */
1408 vaddr_t last;
1409 if (__builtin_add_overflow(vaddr, size - 1, &last)) {
1410 /* vaddr + size overflows, this can't be a valid mapping */
1411 ret = ERR_INVALID_ARGS;
1412 goto out;
1413 }
1414
1415 /*
1416 * region base / size should already be invariant checked, so we
1417 * need not check for overflow
1418 */
1419 vaddr_t region_last = region->base + (region->obj_slice.size - 1);
1420 if (region_last < last) {
1421 /* the range hit a region, but it is not entirely inside that region */
1422 ret = ERR_OUT_OF_RANGE;
1423 goto out;
1424 }
1425
1426 if (!region->obj_slice.obj) {
1427 /* while the range is inside a region, there's no backing obj */
1428 ret = ERR_OUT_OF_RANGE;
1429 goto out;
1430 }
1431
1432 /*
1433 * This should not overflow since the region is mapped already and our
1434 * vmm_obj uses size_t for its get_page() offset calculation. If we
1435 * extend to a larger type on 32-bit systems, we will need to switch to
1436 * using another type for slice representation.
1437 */
1438 size_t offset = (vaddr - region->base) + region->obj_slice.offset;
1439
1440 /* all checks passed, we can update slice now */
1441
1442 slice->obj = region->obj_slice.obj;
1443 slice->size = size;
1444 slice->offset = offset;
1445 /* direct use of obj_add_ref to operate inside the vmm mutex */
1446 obj_add_ref(&slice->obj->obj, &slice->obj_ref);
1447
1448 out:
1449 mutex_release(&vmm_lock);
1450 return ret;
1451 }
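/*
 * Caller sketch (illustrative): translate a mapped range back to its
 * backing object and drop the reference when done:
 *
 *   struct vmm_obj_slice slice;
 *   vmm_obj_slice_init(&slice);
 *   if (vmm_get_obj(aspace, vaddr, size, &slice) == NO_ERROR) {
 *       // use slice.obj with slice.offset / slice.size
 *       vmm_obj_slice_release(&slice);
 *   }
 */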
1452
1453 static bool vmm_region_is_match(vmm_region_t* r,
1454 vaddr_t va,
1455 size_t size,
1456 uint32_t flags) {
1457 if (!r) {
1458 return false;
1459 }
1460 if (flags & VMM_FREE_REGION_FLAG_EXPAND) {
1461 return is_range_inside_region(r, va, size);
1462 } else {
1463 return r->base == va && r->obj_slice.size == size;
1464 }
1465 }
1466
1467 status_t vmm_free_region_etc(vmm_aspace_t* aspace,
1468 vaddr_t vaddr,
1469 size_t size,
1470 uint32_t flags) {
1471 DEBUG_ASSERT(aspace);
1472 DEBUG_ASSERT(vaddr == arch_adjusted_vaddr(vaddr, aspace->flags &
1473 ARCH_ASPACE_FLAG_KERNEL));
1474
1475 mutex_acquire(&vmm_lock);
1476
1477 vmm_region_t* r = vmm_find_region(aspace, vaddr);
1478 if (!vmm_region_is_match(r, vaddr, size, flags)) {
1479 mutex_release(&vmm_lock);
1480 return ERR_NOT_FOUND;
1481 }
1482
1483 /* remove it from aspace */
1484 bst_delete(&aspace->regions, &r->node);
1485
1486 /* unmap it */
1487 arch_mmu_unmap(&aspace->arch_aspace, r->base,
1488 r->obj_slice.size / PAGE_SIZE);
1489
1490 mutex_release(&vmm_lock);
1491
1492 /* release our hold on the backing object, if any */
1493 vmm_obj_slice_release(&r->obj_slice);
1494
1495 /* free it */
1496 free(r);
1497
1498 return NO_ERROR;
1499 }
1500
1501 status_t vmm_free_region(vmm_aspace_t* aspace, vaddr_t vaddr) {
1502 return vmm_free_region_etc(aspace, vaddr, 1, VMM_FREE_REGION_FLAG_EXPAND);
1503 }
1504
1505 status_t vmm_create_aspace_with_quota(vmm_aspace_t** _aspace,
1506 const char* name,
1507 size_t size,
1508 uint flags) {
1509 status_t err;
1510
1511 /* Make sure the kernel and user address spaces are not adjacent */
1512 STATIC_ASSERT(USER_ASPACE_BASE >= PAGE_SIZE);
1513 STATIC_ASSERT(KERNEL_ASPACE_BASE >= PAGE_SIZE);
1514 STATIC_ASSERT(((KERNEL_ASPACE_BASE < USER_ASPACE_BASE) &&
1515 (KERNEL_ASPACE_BASE + KERNEL_ASPACE_SIZE) <=
1516 (USER_ASPACE_BASE - PAGE_SIZE)) ||
1517 ((USER_ASPACE_BASE < KERNEL_ASPACE_BASE) &&
1518 (USER_ASPACE_BASE + USER_ASPACE_SIZE) <=
1519 (KERNEL_ASPACE_BASE - PAGE_SIZE)));
1520
1521 DEBUG_ASSERT(_aspace);
1522
1523 vmm_aspace_t* aspace = calloc(1, sizeof(vmm_aspace_t));
1524 if (!aspace)
1525 return ERR_NO_MEMORY;
1526
1527 if (name)
1528 strlcpy(aspace->name, name, sizeof(aspace->name));
1529 else
1530 strlcpy(aspace->name, "unnamed", sizeof(aspace->name));
1531
1532 aspace->flags = flags;
1533
1534 if (aspace->flags & VMM_ASPACE_FLAG_KERNEL) {
1535 aspace->base = KERNEL_ASPACE_BASE;
1536 aspace->size = KERNEL_ASPACE_SIZE;
1537 } else {
1538 aspace->base = USER_ASPACE_BASE;
1539 aspace->size = USER_ASPACE_SIZE;
1540 }
1541
1542 /* initialize the arch specific component to our address space */
1543 err = arch_mmu_init_aspace(&aspace->arch_aspace, aspace->base, aspace->size,
1544 get_arch_aspace_flags(aspace->flags));
1545 if (err < 0) {
1546 free(aspace);
1547 return err;
1548 }
1549
1550 list_clear_node(&aspace->node);
1551 bst_root_initialize(&aspace->regions);
1552 if (size) {
1553 uint num_pages = round_up(size, PAGE_SIZE) / PAGE_SIZE;
1554 obj_ref_init(&aspace->quota_res_group_ref);
1555 struct res_group* new_res_group = res_group_create(num_pages,
1556 &aspace->quota_res_group_ref);
1557 if (!new_res_group) {
1558 if (!(aspace->flags & VMM_ASPACE_FLAG_KERNEL)) {
1559 arch_mmu_destroy_aspace(&aspace->arch_aspace);
1560 }
1561 free(aspace);
1562 return ERR_NO_MEMORY;
1563 }
1564 aspace->quota_res_group = new_res_group;
1565 }
1566
1567 mutex_acquire(&vmm_lock);
1568 list_add_head(&aspace_list, &aspace->node);
1569 mutex_release(&vmm_lock);
1570
1571 *_aspace = aspace;
1572
1573 return NO_ERROR;
1574 }
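/*
 * Usage sketch (hypothetical name and quota): create a user address space
 * with a 1 MiB physical-memory quota and tear it down when done:
 *
 *   vmm_aspace_t* aspace;
 *   status_t rc = vmm_create_aspace_with_quota(&aspace, "app",
 *                                              1024 * 1024, 0);
 *   if (rc == NO_ERROR) {
 *       ...
 *       vmm_free_aspace(aspace);
 *   }
 */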
1575
1576 status_t vmm_free_aspace(vmm_aspace_t* aspace) {
1577 DEBUG_ASSERT(aspace);
1578
1579 /* pop it out of the global aspace list */
1580 mutex_acquire(&vmm_lock);
1581 if (!list_in_list(&aspace->node)) {
1582 mutex_release(&vmm_lock);
1583 return ERR_INVALID_ARGS;
1584 }
1585 list_delete(&aspace->node);
1586
1587 /* free all of the regions */
1588
1589 vmm_region_t* r;
1590 bst_for_every_entry(&aspace->regions, r, vmm_region_t, node) {
1591 /* unmap it */
1592 arch_mmu_unmap(&aspace->arch_aspace, r->base,
1593 r->obj_slice.size / PAGE_SIZE);
1594
1595 /* mark it as unmapped (only used for debug assert below) */
1596 r->obj_slice.size = 0;
1597 }
1598 mutex_release(&vmm_lock);
1599
1600 /* without the vmm lock held, free all of the pmm pages and the structure */
1601 bst_for_every_entry(&aspace->regions, r, vmm_region_t, node) {
1602 DEBUG_ASSERT(!r->obj_slice.size);
1603 bst_delete(&aspace->regions, &r->node);
1604
1605 /* release our hold on the backing object, if any */
1606 vmm_obj_slice_release(&r->obj_slice);
1607
1608 /* free it */
1609 free(r);
1610 }
1611
1612 /* make sure the current thread does not map the aspace */
1613 thread_t* current_thread = get_current_thread();
1614 if (current_thread->aspace == aspace) {
1615 THREAD_LOCK(state);
1616 current_thread->aspace = NULL;
1617 vmm_context_switch(aspace, NULL);
1618 THREAD_UNLOCK(state);
1619 }
1620
1621 /* destroy the arch portion of the aspace */
1622 arch_mmu_destroy_aspace(&aspace->arch_aspace);
1623
1624 if (aspace->quota_res_group) {
1625 res_group_shutdown(aspace->quota_res_group);
1626 res_group_del_ref(aspace->quota_res_group, &aspace->quota_res_group_ref);
1627 }
1628
1629 /* free the aspace */
1630 free(aspace);
1631
1632 return NO_ERROR;
1633 }
1634
1635 void vmm_context_switch(vmm_aspace_t* oldspace, vmm_aspace_t* newaspace) {
1636 DEBUG_ASSERT(thread_lock_held());
1637
1638 arch_mmu_context_switch(newaspace ? &newaspace->arch_aspace : NULL);
1639 }
1640
1641 void vmm_set_active_aspace(vmm_aspace_t* aspace) {
1642 LTRACEF("aspace %p\n", aspace);
1643
1644 thread_t* t = get_current_thread();
1645 DEBUG_ASSERT(t);
1646
1647 if (aspace == t->aspace)
1648 return;
1649
1650 /* grab the thread lock and switch to the new address space */
1651 THREAD_LOCK(state);
1652 vmm_aspace_t* old = t->aspace;
1653 t->aspace = aspace;
1654 vmm_context_switch(old, t->aspace);
1655 THREAD_UNLOCK(state);
1656 }
1657
1658 static void dump_region(const vmm_region_t* r) {
1659 DEBUG_ASSERT(r);
1660
1661 printf("\tregion %p: name '%s' range 0x%" PRIxVADDR " - 0x%" PRIxVADDR " size 0x%zx flags 0x%x "
1662 "mmu_flags 0x%x\n",
1663 r, r->name, r->base, r->base + (r->obj_slice.size - 1),
1664 r->obj_slice.size, r->flags, r->arch_mmu_flags);
1665 }
1666
1667 static void dump_aspace(const vmm_aspace_t* a) {
1668 DEBUG_ASSERT(a);
1669
1670 printf("aspace %p: name '%s' range 0x%" PRIxVADDR " - 0x%" PRIxVADDR " size 0x%zx flags 0x%x\n",
1671 a, a->name, a->base, a->base + (a->size - 1), a->size, a->flags);
1672
1673 printf("regions:\n");
1674 vmm_region_t* r;
1675 bst_for_every_entry(&a->regions, r, vmm_region_t, node) {
1676 dump_region(r);
1677 }
1678 }
1679
1680 static int cmd_vmm(int argc, const cmd_args* argv) {
1681 if (argc < 2) {
1682 notenoughargs:
1683 printf("not enough arguments\n");
1684 usage:
1685 printf("usage:\n");
1686 printf("%s aspaces\n", argv[0].str);
1687 printf("%s alloc <size> <align_pow2>\n", argv[0].str);
1688 printf("%s alloc_physical <paddr> <size> <align_pow2>\n", argv[0].str);
1689 printf("%s alloc_contig <size> <align_pow2>\n", argv[0].str);
1690 printf("%s free_region <address>\n", argv[0].str);
1691 printf("%s create_aspace\n", argv[0].str);
1692 printf("%s create_test_aspace\n", argv[0].str);
1693 printf("%s free_aspace <address>\n", argv[0].str);
1694 printf("%s set_test_aspace <address>\n", argv[0].str);
1695 return ERR_GENERIC;
1696 }
1697
1698 static vmm_aspace_t* test_aspace;
1699 if (!test_aspace)
1700 test_aspace = vmm_get_kernel_aspace();
1701
1702 if (!strcmp(argv[1].str, "aspaces")) {
1703 vmm_aspace_t* a;
1704 list_for_every_entry(&aspace_list, a, vmm_aspace_t, node) {
1705 dump_aspace(a);
1706 }
1707 } else if (!strcmp(argv[1].str, "alloc")) {
1708 if (argc < 4)
1709 goto notenoughargs;
1710
1711 void* ptr = (void*)0x99;
1712 status_t err = vmm_alloc(test_aspace, "alloc test", argv[2].u, &ptr,
1713 argv[3].u, 0, 0);
1714 printf("vmm_alloc returns %d, ptr %p\n", err, ptr);
1715 } else if (!strcmp(argv[1].str, "alloc_physical")) {
1716 if (argc < 4)
1717 goto notenoughargs;
1718
1719 void* ptr = (void*)0x99;
1720 status_t err = vmm_alloc_physical(test_aspace, "physical test",
1721 argv[3].u, &ptr, argv[4].u, argv[2].u,
1722 0, ARCH_MMU_FLAG_UNCACHED_DEVICE |
1723 ARCH_MMU_FLAG_PERM_NO_EXECUTE);
1724 printf("vmm_alloc_physical returns %d, ptr %p\n", err, ptr);
1725 } else if (!strcmp(argv[1].str, "alloc_contig")) {
1726 if (argc < 4)
1727 goto notenoughargs;
1728
1729 void* ptr = (void*)0x99;
1730 status_t err = vmm_alloc_contiguous(test_aspace, "contig test",
1731 argv[2].u, &ptr, argv[3].u, 0, 0);
1732 printf("vmm_alloc_contig returns %d, ptr %p\n", err, ptr);
1733 } else if (!strcmp(argv[1].str, "free_region")) {
1734 if (argc < 2)
1735 goto notenoughargs;
1736
1737 status_t err = vmm_free_region(test_aspace, (vaddr_t)argv[2].u);
1738 printf("vmm_free_region returns %d\n", err);
1739 } else if (!strcmp(argv[1].str, "create_aspace")) {
1740 vmm_aspace_t* aspace;
1741 status_t err = vmm_create_aspace(&aspace, "test", 0);
1742 printf("vmm_create_aspace returns %d, aspace %p\n", err, aspace);
1743 } else if (!strcmp(argv[1].str, "create_test_aspace")) {
1744 vmm_aspace_t* aspace;
1745 status_t err = vmm_create_aspace(&aspace, "test", 0);
1746 printf("vmm_create_aspace returns %d, aspace %p\n", err, aspace);
1747 if (err < 0)
1748 return err;
1749
1750 test_aspace = aspace;
1751 get_current_thread()->aspace = aspace;
1752 thread_sleep(1); // XXX hack to force it to reschedule and thus load
1753 // the aspace
1754 } else if (!strcmp(argv[1].str, "free_aspace")) {
1755 if (argc < 2)
1756 goto notenoughargs;
1757
1758 vmm_aspace_t* aspace = (void*)argv[2].u;
1759 if (test_aspace == aspace)
1760 test_aspace = NULL;
1761
1762 if (get_current_thread()->aspace == aspace) {
1763 get_current_thread()->aspace = NULL;
1764 thread_sleep(1); // hack
1765 }
1766
1767 status_t err = vmm_free_aspace(aspace);
1768 printf("vmm_free_aspace returns %d\n", err);
1769 } else if (!strcmp(argv[1].str, "set_test_aspace")) {
1770 if (argc < 2)
1771 goto notenoughargs;
1772
1773 test_aspace = (void*)argv[2].u;
1774 get_current_thread()->aspace = test_aspace;
1775 thread_sleep(1); // XXX hack to force it to reschedule and thus load
1776 // the aspace
1777 } else {
1778 printf("unknown command\n");
1779 goto usage;
1780 }
1781
1782 return NO_ERROR;
1783 }
1784
1785 STATIC_COMMAND_START
1786 #if LK_DEBUGLEVEL > 0
1787 STATIC_COMMAND("vmm", "virtual memory manager", &cmd_vmm)
1788 #endif
1789 STATIC_COMMAND_END(vmm);
1790