/*
 * Copyright (c) 2022 Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

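/*
 * res_group: reference-counted groups of reserved physical pages.
 *
 * A res_group reserves a fixed number of pages from the PMM when it is
 * created. Callers charge pages against that reservation with
 * res_group_take_mem() and return them with res_group_release_mem().
 * res_group_shutdown() hands any never-taken pages back to the PMM; the
 * remainder is unreserved when the last reference is dropped.
 *
 * Rough usage sketch (given a struct obj_ref ref for the initial
 * reference; error handling omitted):
 *
 *     struct res_group* grp = res_group_create(16, &ref);
 *     if (grp && res_group_take_mem(grp, 4) == NO_ERROR) {
 *         // ... allocate up to 4 pages backed by this reservation ...
 *         res_group_release_mem(grp, 4);
 *     }
 *     res_group_shutdown(grp);
 *     res_group_del_ref(grp, &ref);
 */
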
#include "res_group.h"
#include <err.h>
#include <kernel/mutex.h>
#include <kernel/vm.h>
#include <stdlib.h>
#include <trace.h>
#include "vm_priv.h"

#define LOCAL_TRACE 0

static mutex_t res_group_lock = MUTEX_INITIAL_VALUE(res_group_lock);

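/*
 * Called when the last reference is dropped. The group must already be shut
 * down and have no pages in use; any pages still reserved are returned to
 * the PMM.
 */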
static status_t res_group_destroy(struct res_group* res_group) {
    ASSERT(res_group);
    ASSERT(res_group->is_shutdown);
    ASSERT(!res_group->used_pages);
    ASSERT(!obj_has_ref(&res_group->obj));
    if (res_group->reserved_pages) {
        pmm_unreserve_pages(res_group->reserved_pages);
    }
    free(res_group);
    return NO_ERROR;
}

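/*
 * Allocate a new res_group backed by a reservation of |pages| physical
 * pages, with |ref| holding the initial reference. Returns NULL if either
 * the allocation or the PMM reservation fails.
 */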
struct res_group* res_group_create(size_t pages, struct obj_ref* ref) {
    ASSERT(ref);
    struct res_group* new_grp = calloc(1, sizeof(struct res_group));
    if (!new_grp) {
        return NULL;
    }
    if (pmm_reserve_pages(pages)) {
        free(new_grp);
        return NULL;
    }
    obj_init(&new_grp->obj, ref);
    new_grp->reserved_pages = pages;

    return new_grp;
}

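/* Take an additional reference on |res_group|. */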
void res_group_add_ref(struct res_group* res_group, struct obj_ref* ref) {
    ASSERT(ref);
    mutex_acquire(&res_group_lock);
    obj_add_ref(&res_group->obj, ref);
    mutex_release(&res_group_lock);
}

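/*
 * Drop a reference on |res_group|; the group is destroyed when the last
 * reference goes away.
 */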
void res_group_del_ref(struct res_group* res_group, struct obj_ref* ref) {
    ASSERT(ref);
    bool destroy;
    mutex_acquire(&res_group_lock);
    destroy = obj_del_ref(&res_group->obj, ref, NULL);
    mutex_release(&res_group_lock);
    if (destroy) {
        res_group_destroy(res_group);
    }
}

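/*
 * Mark |res_group| as shut down and return its unused reserved pages to the
 * PMM. Pages currently in use stay reserved until they are released and the
 * group is destroyed; no further res_group_take_mem() calls will succeed.
 */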
status_t res_group_shutdown(struct res_group* res_group) {
    ASSERT(res_group);
    mutex_acquire(&res_group_lock);
    ASSERT(!res_group->is_shutdown);
    res_group->is_shutdown = true;
    size_t unused_pages = res_group->reserved_pages - res_group->used_pages;
    res_group->reserved_pages -= unused_pages;
    mutex_release(&res_group_lock);
    pmm_unreserve_pages(unused_pages);
    return NO_ERROR;
}

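/*
 * Check whether |pages| more pages fit within the group's reservation,
 * guarding against overflow of the page counter. Caller must hold
 * res_group_lock.
 */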
static status_t check_take(struct res_group* res_group, size_t pages) {
    if (res_group->is_shutdown) {
        return ERR_OBJECT_DESTROYED;
    }
    size_t total_pages;
    if (__builtin_add_overflow(res_group->used_pages, pages, &total_pages)) {
        return ERR_NO_MEMORY;
    }
    if (total_pages > res_group->reserved_pages) {
        return ERR_NO_MEMORY;
    }
    return NO_ERROR;
}

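/*
 * Charge |pages| pages against the group's reservation. Fails with
 * ERR_NO_MEMORY if the reservation would be exceeded, or
 * ERR_OBJECT_DESTROYED if the group has been shut down.
 */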
status_t res_group_take_mem(struct res_group* res_group, size_t pages) {
    ASSERT(res_group);
    mutex_acquire(&res_group_lock);
    status_t ret = check_take(res_group, pages);
    if (!ret) {
        res_group->used_pages += pages;
    }
    mutex_release(&res_group_lock);
    return ret;
}

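/* Return |pages| previously taken pages to the group's reservation. */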
void res_group_release_mem(struct res_group* res_group, size_t pages) {
    ASSERT(res_group);
    mutex_acquire(&res_group_lock);
    ASSERT(res_group->used_pages >= pages);
    res_group->used_pages -= pages;
    mutex_release(&res_group_lock);
}