/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-06-10     Bernard      first version
 */

#include <rtthread.h>
#include <lwp.h>

#define DBG_ENABLE
#define DBG_SECTION_NAME    "LWPMEM"
#define DBG_COLOR
#define DBG_LEVEL           DBG_WARNING
#include <rtdbg.h>

/* TODO: remove repeated code */
#define RT_MEMHEAP_SIZE     RT_ALIGN(sizeof(struct rt_lwp_memheap_item), RT_ALIGN_SIZE)
#define MEMITEM_SIZE(item)  ((rt_uint32_t)(item)->next - (rt_uint32_t)(item) - RT_MEMHEAP_SIZE)

#ifndef LWP_MEM_PAGE_SIZE
#define LWP_MEM_PAGE_SIZE       (4 * 1024)
#endif

#ifndef LWP_MEM_MAX_PAGE_COUNT
#define LWP_MEM_MAX_PAGE_COUNT  (256 * 4)
#endif

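/*
 * Allocate a chunk of npages * LWP_MEM_PAGE_SIZE bytes from the kernel heap,
 * wrap it in a new per-process memheap and link that memheap into the lwp's
 * heap list. Returns the chunk address, or RT_NULL when the per-process page
 * limit would be exceeded or the kernel heap is exhausted.
 */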
static void *rt_lwp_malloc_page(struct rt_lwp *lwp, rt_size_t npages)
{
    void *chunk;
    char name[8];
    struct rt_lwp_memheap *lwp_heap;
    rt_size_t page_cnt;

    RT_ASSERT(lwp != RT_NULL);

    /* refuse to grow beyond the per-process page limit */
    page_cnt = lwp->heap_cnt + npages;
    if (page_cnt > LWP_MEM_MAX_PAGE_COUNT)
    {
        dbg_log(DBG_ERROR, "alloc new page failed, lwp memory size out of limit: %d\n", page_cnt);
        return RT_NULL;
    }

    lwp_heap = rt_malloc(sizeof(struct rt_lwp_memheap));
    if (lwp_heap == RT_NULL)
    {
        dbg_log(DBG_ERROR, "alloc new page head failed, out of memory: %d\n", page_cnt);
        return RT_NULL;
    }

    chunk = rt_malloc(npages * LWP_MEM_PAGE_SIZE);
    if (chunk == RT_NULL)
    {
        dbg_log(DBG_ERROR, "alloc new page buffer failed, out of memory: %d\n", page_cnt);
        rt_free(lwp_heap);
        return RT_NULL;
    }

    dbg_log(DBG_LOG, "lwp alloc page: %d\n", npages);

    /* name the heap after the current page count; the buffer must hold "lwp",
     * up to three hex digits and the terminating NUL */
    rt_sprintf(name, "lwp%02x", lwp->heap_cnt);
    rt_lwp_memheap_init(lwp_heap, name, chunk, npages * LWP_MEM_PAGE_SIZE);

    rt_list_insert_before(&lwp->hlist, &lwp_heap->mlist);

    lwp->heap_cnt += npages;

    return chunk;
}

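/*
 * Detach a per-process memheap from the lwp's heap list and return both its
 * page buffer and its control block to the kernel heap.
 */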
static void rt_lwp_free_page(struct rt_lwp *lwp, struct rt_lwp_memheap *lwp_heap)
{
    rt_size_t npages;

    RT_ASSERT(lwp != RT_NULL);
    RT_ASSERT(lwp_heap != RT_NULL);
    RT_ASSERT(lwp_heap->start_addr != RT_NULL);

    npages = lwp_heap->pool_size / LWP_MEM_PAGE_SIZE;
    lwp->heap_cnt -= npages;

    dbg_log(DBG_LOG, "lwp free page: %d\n", npages);

    rt_list_remove(&lwp_heap->mlist);

    rt_free(lwp_heap->start_addr);
    rt_free(lwp_heap);
}

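/* Initialize the lwp's (initially empty) list of per-process memheaps. */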
void rt_lwp_mem_init(struct rt_lwp *lwp)
{
    RT_ASSERT(lwp != RT_NULL);
    rt_list_init(&lwp->hlist);
}

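/* Release every per-process memheap still attached to the lwp. */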
void rt_lwp_mem_deinit(struct rt_lwp *lwp)
{
    struct rt_list_node *node;

    RT_ASSERT(lwp != RT_NULL);

    node = lwp->hlist.next;

    while (node != &(lwp->hlist))
    {
        struct rt_lwp_memheap *lwp_heap;

        lwp_heap = rt_list_entry(node, struct rt_lwp_memheap, mlist);
        RT_ASSERT(lwp_heap != RT_NULL);

        /* advance to the next node before the current page set is freed */
        node = node->next;

        rt_lwp_free_page(lwp, lwp_heap);
    }
}

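/*
 * Allocate size bytes for the current lwp. Every memheap already owned by the
 * process is tried first; if none can satisfy the request, new pages are taken
 * from the kernel heap and the allocation is retried.
 */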
void *rt_lwp_mem_malloc(rt_uint32_t size)
{
    struct rt_lwp *lwp;
    struct rt_list_node *node;
    void *addr = RT_NULL;
    rt_uint32_t npages;

    if (size == 0)
        return RT_NULL;

    lwp = rt_lwp_self();
    RT_ASSERT(lwp != RT_NULL);

    /* try every memheap already attached to this process */
    for (node = lwp->hlist.next; node != &(lwp->hlist); node = node->next)
    {
        struct rt_lwp_memheap *lwp_heap;
        lwp_heap = rt_list_entry(node, struct rt_lwp_memheap, mlist);

        addr = rt_lwp_memheap_alloc(lwp_heap, size);
        if (addr != RT_NULL)
        {
            dbg_log(DBG_LOG, "lwp alloc 0x%x/%d\n", addr, size);
            return addr;
        }
    }

    /* no existing heap could satisfy the request: allocate enough new pages to
     * cover the size plus the memheap bookkeeping overhead, then retry */
    npages = (size + rt_lwp_memheap_unavailable_size_get() + LWP_MEM_PAGE_SIZE) / LWP_MEM_PAGE_SIZE;
    if (RT_NULL != rt_lwp_malloc_page(lwp, npages))
        return rt_lwp_mem_malloc(size);
    else
        return RT_NULL;
}

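/*
 * Return a block to the per-process memheap it was allocated from; if that
 * memheap then becomes empty, its pages are released back to the kernel heap.
 */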
void rt_lwp_mem_free(void *addr)
{
    struct rt_lwp_memheap_item *header_ptr;
    struct rt_lwp_memheap *lwp_heap;

    if (addr == RT_NULL)
        return;

    /* get memory item */
    header_ptr = (struct rt_lwp_memheap_item *)((rt_uint8_t *)addr - RT_MEMHEAP_SIZE);
    RT_ASSERT(header_ptr);

    lwp_heap = header_ptr->pool_ptr;
    RT_ASSERT(lwp_heap);

    dbg_log(DBG_LOG, "lwp free 0x%x\n", addr);
    rt_lwp_memheap_free((void *)addr);

    /* once the memheap holds no allocations, hand its pages back to the kernel heap */
    if (rt_lwp_memheap_is_empty(lwp_heap))
    {
        rt_lwp_free_page(rt_lwp_self(), lwp_heap);
    }
}

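/*
 * Resize a block, preferring its current memheap; if that fails, allocate from
 * another (possibly new) memheap, copy the old contents over and free the old
 * block.
 */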
void *rt_lwp_mem_realloc(void *rmem, rt_size_t newsize)
{
    void *new_ptr;
    struct rt_lwp_memheap_item *header_ptr;

    if (rmem == RT_NULL)
        return rt_lwp_mem_malloc(newsize);

    if (newsize == 0)
    {
        rt_lwp_mem_free(rmem);
        return RT_NULL;
    }

    /* get old memory item */
    header_ptr = (struct rt_lwp_memheap_item *)
                 ((rt_uint8_t *)rmem - RT_MEMHEAP_SIZE);

    new_ptr = rt_lwp_memheap_realloc(header_ptr->pool_ptr, rmem, newsize);
    if (new_ptr == RT_NULL)
    {
        /* the old memheap cannot grow the block: allocate from another
         * (possibly new) memheap and copy the old contents over */
        new_ptr = rt_lwp_mem_malloc(newsize);
        if (new_ptr != RT_NULL)
        {
            rt_size_t oldsize;

            /* copy the smaller of the old and new sizes, then release the old block */
            oldsize = MEMITEM_SIZE(header_ptr);
            if (newsize > oldsize)
                rt_memcpy(new_ptr, rmem, oldsize);
            else
                rt_memcpy(new_ptr, rmem, newsize);

            dbg_log(DBG_LOG, "lwp realloc with memcpy 0x%x -> 0x%x/%d\n", rmem, new_ptr, newsize);
            rt_lwp_mem_free(rmem);
        }
    }
    else
    {
        dbg_log(DBG_LOG, "lwp realloc in the same memheap 0x%x -> 0x%x/%d\n", rmem, new_ptr, newsize);
    }

    return new_ptr;
}