/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-04-10     Bernard      first implementation
 * 2012-10-16     Bernard      add the mutex lock for heap object.
 * 2012-12-29     Bernard      memheap can be used as system heap.
 *                             change mutex lock to semaphore lock.
 * 2013-04-10     Bernard      add rt_lwp_memheap_realloc function.
 * 2013-05-24     Bernard      fix the rt_lwp_memheap_realloc issue.
 * 2013-07-11     Grissiom     fix the memory block splitting issue.
 * 2013-07-15     Grissiom     optimize rt_lwp_memheap_realloc
 */
17
18 #include <rthw.h>
19 #include <rtthread.h>
20 #include <lwp.h>
21
/* dynamic pool magic and mask */
#define RT_MEMHEAP_MAGIC        0x1ea01ea0
#define RT_MEMHEAP_MASK         0xfffffffe
#define RT_MEMHEAP_USED         0x01
#define RT_MEMHEAP_FREED        0x00

#define RT_MEMHEAP_IS_USED(i)   ((i)->magic & RT_MEMHEAP_USED)
#define RT_MEMHEAP_MINIALLOC    12

/* aligned size of one block header (struct rt_lwp_memheap_item) */
#define RT_MEMHEAP_SIZE         RT_ALIGN(sizeof(struct rt_lwp_memheap_item), RT_ALIGN_SIZE)
/* usable payload size of a block: distance to the next block header minus
 * this block's own header. Cast through rt_ubase_t (pointer-sized) instead
 * of rt_uint32_t so the pointer arithmetic is not truncated on 64-bit
 * targets; the macro argument is parenthesized to avoid expansion issues. */
#define MEMITEM_SIZE(item)      ((rt_ubase_t)((item)->next) - (rt_ubase_t)(item) - RT_MEMHEAP_SIZE)
33
/*
 * The initialized memory pool will be:
 * +-----------------------------------+--------------------------+
 * | whole freed memory block          | Used Memory Block Tailer |
 * +-----------------------------------+--------------------------+
 *
 * block_list --> whole freed memory block
 *
 * The length of the Used Memory Block Tailer is 0,
 * which prevents block merging across the end of the pool.
 */
rt_lwp_memheap_init(struct rt_lwp_memheap * memheap,const char * name,void * start_addr,rt_uint32_t size)45 rt_err_t rt_lwp_memheap_init(struct rt_lwp_memheap *memheap,
46 const char *name,
47 void *start_addr,
48 rt_uint32_t size)
49 {
50 struct rt_lwp_memheap_item *item;
51
52 RT_ASSERT(memheap != RT_NULL);
53
54 /* initialize pool object */
55 memheap->start_addr = start_addr;
56 memheap->pool_size = RT_ALIGN_DOWN(size, RT_ALIGN_SIZE);
57 memheap->available_size = memheap->pool_size - (2 * RT_MEMHEAP_SIZE);
58 memheap->max_used_size = memheap->pool_size - memheap->available_size;
59
60 /* initialize the free list header */
61 item = &(memheap->free_header);
62 item->magic = RT_MEMHEAP_MAGIC;
63 item->pool_ptr = memheap;
64 item->next = RT_NULL;
65 item->prev = RT_NULL;
66 item->next_free = item;
67 item->prev_free = item;
68
69 /* set the free list to free list header */
70 memheap->free_list = item;
71
72 /* initialize the first big memory block */
73 item = (struct rt_lwp_memheap_item *)start_addr;
74 item->magic = RT_MEMHEAP_MAGIC;
75 item->pool_ptr = memheap;
76 item->next = RT_NULL;
77 item->prev = RT_NULL;
78 item->next_free = item;
79 item->prev_free = item;
80
81 item->next = (struct rt_lwp_memheap_item *)
82 ((rt_uint8_t *)item + memheap->available_size + RT_MEMHEAP_SIZE);
83 item->prev = item->next;
84
85 /* block list header */
86 memheap->block_list = item;
87
88 /* place the big memory block to free list */
89 item->next_free = memheap->free_list->next_free;
90 item->prev_free = memheap->free_list;
91 memheap->free_list->next_free->prev_free = item;
92 memheap->free_list->next_free = item;
93
94 /* move to the end of memory pool to build a small tailer block,
95 * which prevents block merging
96 */
97 item = item->next;
98 /* it's a used memory block */
99 item->magic = RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED;
100 item->pool_ptr = memheap;
101 item->next = (struct rt_lwp_memheap_item *)start_addr;
102 item->prev = (struct rt_lwp_memheap_item *)start_addr;
103 /* not in free list */
104 item->next_free = item->prev_free = RT_NULL;
105
106 /* initialize semaphore lock */
107 rt_sem_init(&(memheap->lock), name, 1, RT_IPC_FLAG_FIFO);
108
109 RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
110 ("memory heap: start addr 0x%08x, size %d, free list header 0x%08x\n",
111 start_addr, size, &(memheap->free_header)));
112
113 return RT_EOK;
114 }
115
/**
 * Allocate a memory block of at least @p size bytes from @p heap.
 *
 * The free list is scanned first-fit under the heap lock. A found block is
 * split when the remainder can hold a header plus a minimal allocation;
 * otherwise the whole block is handed out.
 *
 * @param heap the memheap to allocate from (must not be RT_NULL)
 * @param size requested size in bytes; rounded up to RT_ALIGN_SIZE and to
 *             at least RT_MEMHEAP_MINIALLOC
 *
 * @return pointer to the usable memory just past the block header, or
 *         RT_NULL when no block is large enough or the lock cannot be taken
 *         (errno is set in the latter case).
 */
void *rt_lwp_memheap_alloc(struct rt_lwp_memheap *heap, rt_uint32_t size)
{
    rt_err_t result;
    rt_uint32_t free_size;
    struct rt_lwp_memheap_item *header_ptr;

    RT_ASSERT(heap != RT_NULL);

    /* align allocated size */
    size = RT_ALIGN(size, RT_ALIGN_SIZE);
    if (size < RT_MEMHEAP_MINIALLOC)
        size = RT_MEMHEAP_MINIALLOC;

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("allocate %d on heap:%8.*s",
                                    size, RT_NAME_MAX, heap->parent.name));

    if (size < heap->available_size)
    {
        /* search on free list */
        free_size = 0;

        /* lock memheap */
        result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
        if (result != RT_EOK)
        {
            rt_set_errno(result);

            return RT_NULL;
        }

        /* first-fit scan: stop at the first block big enough, or when we
         * wrap back to the sentinel free-list header */
        header_ptr = heap->free_list->next_free;
        while (header_ptr != heap->free_list && free_size < size)
        {
            /* get current freed memory block size */
            free_size = MEMITEM_SIZE(header_ptr);
            if (free_size < size)
            {
                /* move to next free memory block */
                header_ptr = header_ptr->next_free;
            }
        }

        /* determine if the memory is available. */
        if (free_size >= size)
        {
            /* a block that satisfies the request has been found. */

            /* split only when the remainder can hold a header plus a
             * minimal allocation; otherwise hand out the whole block */
            if (free_size >= (size + RT_MEMHEAP_SIZE + RT_MEMHEAP_MINIALLOC))
            {
                struct rt_lwp_memheap_item *new_ptr;

                /* split the block. */
                new_ptr = (struct rt_lwp_memheap_item *)
                          (((rt_uint8_t *)header_ptr) + size + RT_MEMHEAP_SIZE);

                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("split: block[0x%08x] nextm[0x%08x] prevm[0x%08x] to new[0x%08x]\n",
                              header_ptr,
                              header_ptr->next,
                              header_ptr->prev,
                              new_ptr));

                /* mark the new block as a memory block and freed. */
                new_ptr->magic = RT_MEMHEAP_MAGIC;

                /* put the pool pointer into the new block. */
                new_ptr->pool_ptr = heap;

                /* link the remainder into the block list after header_ptr */
                new_ptr->prev          = header_ptr;
                new_ptr->next          = header_ptr->next;
                header_ptr->next->prev = new_ptr;
                header_ptr->next       = new_ptr;

                /* remove header ptr from free list */
                header_ptr->next_free->prev_free = header_ptr->prev_free;
                header_ptr->prev_free->next_free = header_ptr->next_free;
                header_ptr->next_free            = RT_NULL;
                header_ptr->prev_free            = RT_NULL;

                /* insert new_ptr to free list */
                new_ptr->next_free = heap->free_list->next_free;
                new_ptr->prev_free = heap->free_list;
                heap->free_list->next_free->prev_free = new_ptr;
                heap->free_list->next_free            = new_ptr;
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new ptr: next_free 0x%08x, prev_free 0x%08x\n",
                                                new_ptr->next_free,
                                                new_ptr->prev_free));

                /* decrement the available byte count (payload + new header) */
                heap->available_size = heap->available_size -
                                       size -
                                       RT_MEMHEAP_SIZE;
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;
            }
            else
            {
                /* decrement the entire free size from the available bytes count. */
                heap->available_size = heap->available_size - free_size;
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;

                /* remove header_ptr from free list */
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("one block: block[0x%08x], next_free 0x%08x, prev_free 0x%08x\n",
                              header_ptr,
                              header_ptr->next_free,
                              header_ptr->prev_free));

                header_ptr->next_free->prev_free = header_ptr->prev_free;
                header_ptr->prev_free->next_free = header_ptr->next_free;
                header_ptr->next_free            = RT_NULL;
                header_ptr->prev_free            = RT_NULL;
            }

            /* Mark the allocated block as not available. */
            header_ptr->magic |= RT_MEMHEAP_USED;

            /* release lock */
            rt_sem_release(&(heap->lock));

            /* Return a memory address to the caller. */
            RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                         ("alloc mem: memory[0x%08x], heap[0x%08x], size: %d\n",
                          (void *)((rt_uint8_t *)header_ptr + RT_MEMHEAP_SIZE),
                          header_ptr,
                          size));

            return (void *)((rt_uint8_t *)header_ptr + RT_MEMHEAP_SIZE);
        }

        /* release lock */
        rt_sem_release(&(heap->lock));
    }

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("allocate memory: failed\n"));

    /* Return the completion status. */
    return RT_NULL;
}
259
/**
 * Resize a previously allocated block.
 *
 * Semantics follow realloc(): a RT_NULL @p ptr behaves as alloc, a zero
 * @p newsize behaves as free. Growing first tries to absorb an adjacent
 * free block in place; otherwise a new block is allocated and the data is
 * copied. Shrinking splits the block and returns the tail to the free list
 * (merging it with a free right neighbor when possible).
 *
 * @param heap    the memheap the block belongs to
 * @param ptr     block previously returned by rt_lwp_memheap_alloc/realloc,
 *                or RT_NULL
 * @param newsize requested size in bytes
 *
 * @return pointer to the (possibly moved) block, or RT_NULL on failure or
 *         when @p newsize is 0.
 */
void *rt_lwp_memheap_realloc(struct rt_lwp_memheap *heap, void *ptr, rt_size_t newsize)
{
    rt_err_t result;
    rt_size_t oldsize;
    struct rt_lwp_memheap_item *header_ptr;
    struct rt_lwp_memheap_item *new_ptr;

    if (newsize == 0)
    {
        /* realloc(ptr, 0) acts as free() */
        rt_lwp_memheap_free(ptr);

        return RT_NULL;
    }
    /* align allocated size */
    newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
    if (newsize < RT_MEMHEAP_MINIALLOC)
        newsize = RT_MEMHEAP_MINIALLOC;

    if (ptr == RT_NULL)
    {
        /* realloc(NULL, n) acts as malloc(n) */
        return rt_lwp_memheap_alloc(heap, newsize);
    }

    /* get memory block header and get the size of memory block */
    header_ptr = (struct rt_lwp_memheap_item *)
                 ((rt_uint8_t *)ptr - RT_MEMHEAP_SIZE);
    oldsize = MEMITEM_SIZE(header_ptr);
    /* grow: try to expand in place, else re-allocate */
    if (newsize > oldsize)
    {
        /* renamed from new_ptr so it no longer shadows the outer new_ptr */
        void *new_mem;
        struct rt_lwp_memheap_item *next_ptr;

        /* lock memheap */
        result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
        if (result != RT_EOK)
        {
            rt_set_errno(result);
            return RT_NULL;
        }

        next_ptr = header_ptr->next;

        /* header_ptr should not be the tail */
        RT_ASSERT(next_ptr > header_ptr);

        /* check whether the following free space is enough to expand */
        if (!RT_MEMHEAP_IS_USED(next_ptr))
        {
            rt_int32_t nextsize;

            nextsize = MEMITEM_SIZE(next_ptr);
            /* fixed: assert the computed size, not the pointer (the original
             * RT_ASSERT(next_ptr > 0) was vacuously true here) */
            RT_ASSERT(nextsize > 0);

            /* Here is the ASCII art of the situation that we can make use of
             * the next free node without alloc/memcpy, |*| is the control
             * block:
             *
             *     oldsize           free node
             * |*|-----------|*|----------------------|*|
             *         newsize          >= minialloc
             * |*|----------------|*|-----------------|*|
             */
            if (nextsize + oldsize > newsize + RT_MEMHEAP_MINIALLOC)
            {
                /* decrement the entire free size from the available bytes count. */
                heap->available_size = heap->available_size - (newsize - oldsize);
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;

                /* remove next_ptr from free list */
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("remove block: block[0x%08x], next_free 0x%08x, prev_free 0x%08x",
                              next_ptr,
                              next_ptr->next_free,
                              next_ptr->prev_free));

                next_ptr->next_free->prev_free = next_ptr->prev_free;
                next_ptr->prev_free->next_free = next_ptr->next_free;
                next_ptr->next->prev = next_ptr->prev;
                next_ptr->prev->next = next_ptr->next;

                /* build a new one on the right place */
                next_ptr = (struct rt_lwp_memheap_item *)((char *)ptr + newsize);

                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("new free block: block[0x%08x] nextm[0x%08x] prevm[0x%08x]",
                              next_ptr,
                              next_ptr->next,
                              next_ptr->prev));

                /* mark the new block as a memory block and freed. */
                next_ptr->magic = RT_MEMHEAP_MAGIC;

                /* put the pool pointer into the new block. */
                next_ptr->pool_ptr = heap;

                next_ptr->prev = header_ptr;
                next_ptr->next = header_ptr->next;
                header_ptr->next->prev = next_ptr;
                header_ptr->next       = next_ptr;

                /* insert next_ptr to free list */
                next_ptr->next_free = heap->free_list->next_free;
                next_ptr->prev_free = heap->free_list;
                heap->free_list->next_free->prev_free = next_ptr;
                heap->free_list->next_free            = next_ptr;
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new ptr: next_free 0x%08x, prev_free 0x%08x",
                                                next_ptr->next_free,
                                                next_ptr->prev_free));

                /* release lock */
                rt_sem_release(&(heap->lock));

                return ptr;
            }
        }

        /* release lock */
        rt_sem_release(&(heap->lock));

        /* cannot expand in place: re-allocate, copy, free the old block */
        new_mem = (void *)rt_lwp_memheap_alloc(heap, newsize);
        if (new_mem != RT_NULL)
        {
            rt_memcpy(new_mem, ptr, oldsize < newsize ? oldsize : newsize);
            rt_lwp_memheap_free(ptr);
        }

        return new_mem;
    }

    /* shrink: don't split when there is less than one node space left */
    if (newsize + RT_MEMHEAP_SIZE + RT_MEMHEAP_MINIALLOC >= oldsize)
        return ptr;

    /* lock memheap */
    result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
    if (result != RT_EOK)
    {
        rt_set_errno(result);

        return RT_NULL;
    }

    /* split the block. */
    new_ptr = (struct rt_lwp_memheap_item *)
              (((rt_uint8_t *)header_ptr) + newsize + RT_MEMHEAP_SIZE);

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                 ("split: block[0x%08x] nextm[0x%08x] prevm[0x%08x] to new[0x%08x]\n",
                  header_ptr,
                  header_ptr->next,
                  header_ptr->prev,
                  new_ptr));

    /* mark the new block as a memory block and freed. */
    new_ptr->magic = RT_MEMHEAP_MAGIC;
    /* put the pool pointer into the new block. */
    new_ptr->pool_ptr = heap;

    /* break down the block list */
    new_ptr->prev          = header_ptr;
    new_ptr->next          = header_ptr->next;
    header_ptr->next->prev = new_ptr;
    header_ptr->next       = new_ptr;

    /* determine if the freed tail can be merged with the next neighbor. */
    if (!RT_MEMHEAP_IS_USED(new_ptr->next))
    {
        struct rt_lwp_memheap_item *free_ptr;

        /* merge block with next neighbor. */
        free_ptr = new_ptr->next;
        /* temporarily remove the neighbor's payload from the count; it is
         * re-added below as part of the merged block */
        heap->available_size = heap->available_size - MEMITEM_SIZE(free_ptr);

        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                     ("merge: right node 0x%08x, next_free 0x%08x, prev_free 0x%08x\n",
                      header_ptr, header_ptr->next_free, header_ptr->prev_free));

        free_ptr->next->prev = new_ptr;
        new_ptr->next        = free_ptr->next;

        /* remove free ptr from free list */
        free_ptr->next_free->prev_free = free_ptr->prev_free;
        free_ptr->prev_free->next_free = free_ptr->next_free;
    }

    /* insert the split block to free list */
    new_ptr->next_free = heap->free_list->next_free;
    new_ptr->prev_free = heap->free_list;
    heap->free_list->next_free->prev_free = new_ptr;
    heap->free_list->next_free            = new_ptr;
    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new free ptr: next_free 0x%08x, prev_free 0x%08x\n",
                                    new_ptr->next_free,
                                    new_ptr->prev_free));

    /* increment the available byte count. */
    heap->available_size = heap->available_size + MEMITEM_SIZE(new_ptr);

    /* release lock */
    rt_sem_release(&(heap->lock));

    /* return the old memory block */
    return ptr;
}
466
rt_lwp_memheap_free(void * ptr)467 void rt_lwp_memheap_free(void *ptr)
468 {
469 rt_err_t result;
470 struct rt_lwp_memheap *heap;
471 struct rt_lwp_memheap_item *header_ptr, *new_ptr;
472 rt_uint32_t insert_header;
473
474 /* NULL check */
475 if (ptr == RT_NULL) return;
476
477 /* set initial status as OK */
478 insert_header = 1;
479 new_ptr = RT_NULL;
480 header_ptr = (struct rt_lwp_memheap_item *)
481 ((rt_uint8_t *)ptr - RT_MEMHEAP_SIZE);
482
483 RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("free memory: memory[0x%08x], block[0x%08x]\n",
484 ptr, header_ptr));
485
486 /* check magic */
487 RT_ASSERT((header_ptr->magic & RT_MEMHEAP_MASK) == RT_MEMHEAP_MAGIC);
488 RT_ASSERT(header_ptr->magic & RT_MEMHEAP_USED);
489 /* check whether this block of memory has been over-written. */
490 RT_ASSERT((header_ptr->next->magic & RT_MEMHEAP_MASK) == RT_MEMHEAP_MAGIC);
491
492 /* get pool ptr */
493 heap = header_ptr->pool_ptr;
494
495 /* lock memheap */
496 result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
497 if (result != RT_EOK)
498 {
499 rt_set_errno(result);
500
501 return ;
502 }
503
504 /* Mark the memory as available. */
505 header_ptr->magic &= ~RT_MEMHEAP_USED;
506 /* Adjust the available number of bytes. */
507 heap->available_size = heap->available_size + MEMITEM_SIZE(header_ptr);
508
509 /* Determine if the block can be merged with the previous neighbor. */
510 if (!RT_MEMHEAP_IS_USED(header_ptr->prev))
511 {
512 RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("merge: left node 0x%08x\n",
513 header_ptr->prev));
514
515 /* adjust the available number of bytes. */
516 heap->available_size = heap->available_size + RT_MEMHEAP_SIZE;
517
518 /* yes, merge block with previous neighbor. */
519 (header_ptr->prev)->next = header_ptr->next;
520 (header_ptr->next)->prev = header_ptr->prev;
521
522 /* move header pointer to previous. */
523 header_ptr = header_ptr->prev;
524 /* don't insert header to free list */
525 insert_header = 0;
526 }
527
528 /* determine if the block can be merged with the next neighbor. */
529 if (!RT_MEMHEAP_IS_USED(header_ptr->next))
530 {
531 /* adjust the available number of bytes. */
532 heap->available_size = heap->available_size + RT_MEMHEAP_SIZE;
533
534 /* merge block with next neighbor. */
535 new_ptr = header_ptr->next;
536
537 RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
538 ("merge: right node 0x%08x, next_free 0x%08x, prev_free 0x%08x\n",
539 new_ptr, new_ptr->next_free, new_ptr->prev_free));
540
541 new_ptr->next->prev = header_ptr;
542 header_ptr->next = new_ptr->next;
543
544 /* remove new ptr from free list */
545 new_ptr->next_free->prev_free = new_ptr->prev_free;
546 new_ptr->prev_free->next_free = new_ptr->next_free;
547 }
548
549 if (insert_header)
550 {
551 /* no left merge, insert to free list */
552 header_ptr->next_free = heap->free_list->next_free;
553 header_ptr->prev_free = heap->free_list;
554 heap->free_list->next_free->prev_free = header_ptr;
555 heap->free_list->next_free = header_ptr;
556
557 RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
558 ("insert to free list: next_free 0x%08x, prev_free 0x%08x\n",
559 header_ptr->next_free, header_ptr->prev_free));
560 }
561
562 /* release lock */
563 rt_sem_release(&(heap->lock));
564 }
565
rt_lwp_memheap_is_empty(struct rt_lwp_memheap * memheap)566 rt_bool_t rt_lwp_memheap_is_empty(struct rt_lwp_memheap *memheap)
567 {
568 RT_ASSERT(memheap != RT_NULL);
569
570 return (memheap->available_size + 2 * sizeof(struct rt_lwp_memheap_item)) == memheap->pool_size;
571 }
572
rt_lwp_memheap_unavailable_size_get(void)573 rt_bool_t rt_lwp_memheap_unavailable_size_get(void)
574 {
575 return 2 * RT_MEMHEAP_SIZE + 3;
576 }
577