xref: /nrf52832-nimble/rt-thread/src/mem.c (revision 104654410c56c573564690304ae786df310c91fc)
1 /*
2  * Copyright (c) 2006-2018, RT-Thread Development Team
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Change Logs:
7  * Date           Author       Notes
8  * 2008-7-12      Bernard      the first version
9  * 2010-06-09     Bernard      fix the end stub of heap
10  *                             fix memory check in rt_realloc function
11  * 2010-07-13     Bernard      fix RT_ALIGN issue found by kuronca
12  * 2010-10-14     Bernard      fix rt_realloc issue when realloc a NULL pointer.
13  * 2017-07-14     armink       fix rt_realloc issue when new size is 0
14  * 2018-10-02     Bernard      Add 64bit support
15  */
16 
17 /*
18  * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
19  * All rights reserved.
20  *
21  * Redistribution and use in source and binary forms, with or without modification,
22  * are permitted provided that the following conditions are met:
23  *
24  * 1. Redistributions of source code must retain the above copyright notice,
25  *    this list of conditions and the following disclaimer.
26  * 2. Redistributions in binary form must reproduce the above copyright notice,
27  *    this list of conditions and the following disclaimer in the documentation
28  *    and/or other materials provided with the distribution.
29  * 3. The name of the author may not be used to endorse or promote products
30  *    derived from this software without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
33  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
34  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
35  * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
36  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
37  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
38  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
39  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
41  * OF SUCH DAMAGE.
42  *
43  * This file is part of the lwIP TCP/IP stack.
44  *
45  * Author: Adam Dunkels <[email protected]>
46  *         Simon Goldschmidt
47  *
48  */
49 
50 #include <rthw.h>
51 #include <rtthread.h>
52 
53 #ifndef RT_USING_MEMHEAP_AS_HEAP
54 
55 /* #define RT_MEM_DEBUG */
56 #define RT_MEM_STATS
57 
58 #if defined (RT_USING_HEAP) && defined (RT_USING_SMALL_MEM)
59 #ifdef RT_USING_HOOK
/* user-installed hooks, invoked after each successful allocation and on
 * each free; both may be RT_NULL (only compiled in with RT_USING_HOOK) */
static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
static void (*rt_free_hook)(void *ptr);
62 
63 /**
64  * @addtogroup Hook
65  */
66 
67 /**@{*/
68 
69 /**
70  * This function will set a hook function, which will be invoked when a memory
71  * block is allocated from heap memory.
72  *
73  * @param hook the hook function
74  */
/**
 * Install a hook that fires whenever a block is handed out from the heap.
 *
 * @param hook callback receiving the user pointer and the aligned size;
 *             pass RT_NULL to remove the current hook
 */
void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
{
    rt_malloc_hook = hook;
}
79 
80 /**
81  * This function will set a hook function, which will be invoked when a memory
82  * block is released to heap memory.
83  *
84  * @param hook the hook function
85  */
rt_free_sethook(void (* hook)(void * ptr))86 void rt_free_sethook(void (*hook)(void *ptr))
87 {
88     rt_free_hook = hook;
89 }
90 
91 /**@}*/
92 
93 #endif
94 
/* magic number stamped into every block header; used to detect corruption */
#define HEAP_MAGIC 0x1ea0

/* header placed immediately in front of every heap block (used or free);
 * blocks are linked by byte offsets relative to heap_ptr, not by pointers */
struct heap_mem
{
    /* magic and used flag */
    rt_uint16_t magic;      /* must equal HEAP_MAGIC while the header is intact */
    rt_uint16_t used;       /* 1: block allocated, 0: block free */
#ifdef ARCH_CPU_64BIT
    rt_uint32_t resv;       /* explicit padding before the 64-bit offsets */
#endif

    /* byte offsets (from heap_ptr) of the next / previous block header */
    rt_size_t next, prev;

#ifdef RT_USING_MEMTRACE
#ifdef ARCH_CPU_64BIT
    rt_uint8_t thread[8];
#else
    rt_uint8_t thread[4];   /* thread name */
#endif
#endif
};

/** pointer to the heap: for alignment, heap_ptr is now a pointer instead of an array */
static rt_uint8_t *heap_ptr;

/** the last entry, always unused! */
static struct heap_mem *heap_end;

/* smallest payload a block may carry, before RT_ALIGN_SIZE alignment */
#ifdef ARCH_CPU_64BIT
#define MIN_SIZE 24
#else
#define MIN_SIZE 12
#endif

#define MIN_SIZE_ALIGNED     RT_ALIGN(MIN_SIZE, RT_ALIGN_SIZE)
#define SIZEOF_STRUCT_MEM    RT_ALIGN(sizeof(struct heap_mem), RT_ALIGN_SIZE)

static struct heap_mem *lfree;   /* pointer to the lowest free block */

/* semaphore serializing all heap mutations (taken in malloc/realloc/free) */
static struct rt_semaphore heap_sem;
/* usable heap size in bytes, excluding the two boundary headers */
static rt_size_t mem_size_aligned;

#ifdef RT_MEM_STATS
static rt_size_t used_mem, max_mem;   /* current / peak bytes in use (incl. headers) */
#endif
139 #ifdef RT_USING_MEMTRACE
rt_mem_setname(struct heap_mem * mem,const char * name)140 rt_inline void rt_mem_setname(struct heap_mem *mem, const char *name)
141 {
142     int index;
143     for (index = 0; index < sizeof(mem->thread); index ++)
144     {
145         if (name[index] == '\0') break;
146         mem->thread[index] = name[index];
147     }
148 
149     for (; index < sizeof(mem->thread); index ++)
150     {
151         mem->thread[index] = ' ';
152     }
153 }
154 #endif
155 
/* Coalesce the free block 'mem' with its physically adjacent neighbours when
 * they are also free, so two free blocks never sit side by side.
 * Caller must hold heap_sem and must have already cleared mem->used
 * (asserted below).  'lfree' is kept pointing at the lowest free block. */
static void plug_holes(struct heap_mem *mem)
{
    struct heap_mem *nmem;
    struct heap_mem *pmem;

    RT_ASSERT((rt_uint8_t *)mem >= heap_ptr);
    RT_ASSERT((rt_uint8_t *)mem < (rt_uint8_t *)heap_end);
    RT_ASSERT(mem->used == 0);

    /* plug hole forward */
    nmem = (struct heap_mem *)&heap_ptr[mem->next];
    if (mem != nmem &&
        nmem->used == 0 &&
        (rt_uint8_t *)nmem != (rt_uint8_t *)heap_end)
    {
        /* if mem->next is unused and not end of heap_ptr,
         * combine mem and mem->next
         */
        if (lfree == nmem)
        {
            lfree = mem;
        }
        /* absorb nmem: skip over it, then fix the back-link of the block after it */
        mem->next = nmem->next;
        ((struct heap_mem *)&heap_ptr[nmem->next])->prev = (rt_uint8_t *)mem - heap_ptr;
    }

    /* plug hole backward */
    pmem = (struct heap_mem *)&heap_ptr[mem->prev];
    if (pmem != mem && pmem->used == 0)
    {
        /* if mem->prev is unused, combine mem and mem->prev */
        if (lfree == mem)
        {
            lfree = pmem;
        }
        /* absorb mem into pmem and fix the back-link of the following block */
        pmem->next = mem->next;
        ((struct heap_mem *)&heap_ptr[mem->next])->prev = (rt_uint8_t *)pmem - heap_ptr;
    }
}
195 
196 /**
197  * @ingroup SystemInit
198  *
199  * This function will initialize system heap memory.
200  *
201  * @param begin_addr the beginning address of system heap memory.
202  * @param end_addr the end address of system heap memory.
203  */
/**
 * @ingroup SystemInit
 *
 * This function will initialize system heap memory.
 *
 * The region is laid out as: one header for the single initial free block
 * covering everything, plus a permanently "used" sentinel header (heap_end)
 * at the top; hence 2 * SIZEOF_STRUCT_MEM of overhead.
 *
 * @param begin_addr the beginning address of system heap memory.
 * @param end_addr the end address of system heap memory.
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    struct heap_mem *mem;
    /* round the start up and the end down so all block headers are aligned */
    rt_ubase_t begin_align = RT_ALIGN((rt_ubase_t)begin_addr, RT_ALIGN_SIZE);
    rt_ubase_t end_align   = RT_ALIGN_DOWN((rt_ubase_t)end_addr, RT_ALIGN_SIZE);

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment addr */
    if ((end_align > (2 * SIZEOF_STRUCT_MEM)) &&
        ((end_align - 2 * SIZEOF_STRUCT_MEM) >= begin_align))
    {
        /* calculate the aligned memory size */
        mem_size_aligned = end_align - begin_align - 2 * SIZEOF_STRUCT_MEM;
    }
    else
    {
        /* region too small (or inverted) to hold even the two headers */
        rt_kprintf("mem init, error begin address 0x%x, and end address 0x%x\n",
                   (rt_ubase_t)begin_addr, (rt_ubase_t)end_addr);

        return;
    }

    /* point to begin address of heap */
    heap_ptr = (rt_uint8_t *)begin_align;

    RT_DEBUG_LOG(RT_DEBUG_MEM, ("mem init, heap begin address 0x%x, size %d\n",
                                (rt_ubase_t)heap_ptr, mem_size_aligned));

    /* initialize the start of the heap: one free block spanning everything */
    mem        = (struct heap_mem *)heap_ptr;
    mem->magic = HEAP_MAGIC;
    mem->next  = mem_size_aligned + SIZEOF_STRUCT_MEM;
    mem->prev  = 0;
    mem->used  = 0;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(mem, "INIT");
#endif

    /* initialize the end of the heap: sentinel marked used so it is never
     * allocated or merged; next/prev point at itself */
    heap_end        = (struct heap_mem *)&heap_ptr[mem->next];
    heap_end->magic = HEAP_MAGIC;
    heap_end->used  = 1;
    heap_end->next  = mem_size_aligned + SIZEOF_STRUCT_MEM;
    heap_end->prev  = mem_size_aligned + SIZEOF_STRUCT_MEM;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(heap_end, "INIT");
#endif

    rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_FIFO);

    /* initialize the lowest-free pointer to the start of the heap */
    lfree = (struct heap_mem *)heap_ptr;
}
258 
259 /**
260  * @addtogroup MM
261  */
262 
263 /**@{*/
264 
265 /**
266  * Allocate a block of memory with a minimum of 'size' bytes.
267  *
268  * @param size is the minimum size of the requested block in bytes.
269  *
270  * @return pointer to allocated memory or NULL if no free memory was found.
271  */
/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * First-fit scan starting at 'lfree' (the lowest free block); a free block
 * large enough to split is divided in two, otherwise it is handed out whole.
 *
 * @param size is the minimum size of the requested block in bytes.
 *
 * @return pointer to allocated memory or NULL if no free memory was found.
 */
void *rt_malloc(rt_size_t size)
{
    rt_size_t ptr, ptr2;
    struct heap_mem *mem, *mem2;

    if (size == 0)
        return RT_NULL;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (size != RT_ALIGN(size, RT_ALIGN_SIZE))
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d, but align to %d\n",
                                    size, RT_ALIGN(size, RT_ALIGN_SIZE)));
    else
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d\n", size));

    /* alignment size */
    size = RT_ALIGN(size, RT_ALIGN_SIZE);

    if (size > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("no memory\n"));

        return RT_NULL;
    }

    /* every data block must be at least MIN_SIZE_ALIGNED long */
    if (size < MIN_SIZE_ALIGNED)
        size = MIN_SIZE_ALIGNED;

    /* take memory semaphore */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* first-fit walk over the block list; 'ptr' is a byte offset from heap_ptr */
    for (ptr = (rt_uint8_t *)lfree - heap_ptr;
         ptr < mem_size_aligned - size;
         ptr = ((struct heap_mem *)&heap_ptr[ptr])->next)
    {
        mem = (struct heap_mem *)&heap_ptr[ptr];

        if ((!mem->used) && (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size)
        {
            /* mem is not used and at least perfect fit is possible:
             * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

            if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >=
                (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED))
            {
                /* (in addition to the above, we test if another struct heap_mem (SIZEOF_STRUCT_MEM) containing
                 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
                 * -> split large block, create empty remainder,
                 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
                 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
                 * struct heap_mem would fit in but no data between mem2 and mem2->next
                 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
                 *       region that couldn't hold data, but when mem->next gets freed,
                 *       the 2 regions would be combined, resulting in more free memory
                 */
                ptr2 = ptr + SIZEOF_STRUCT_MEM + size;

                /* create mem2 struct: the free remainder after the split */
                mem2       = (struct heap_mem *)&heap_ptr[ptr2];
                mem2->magic = HEAP_MAGIC;
                mem2->used = 0;
                mem2->next = mem->next;
                mem2->prev = ptr;
#ifdef RT_USING_MEMTRACE
                rt_mem_setname(mem2, "    ");
#endif

                /* and insert it between mem and mem->next */
                mem->next = ptr2;
                mem->used = 1;

                /* fix the back-link of the following block, unless it is heap_end */
                if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
                {
                    ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
                }
#ifdef RT_MEM_STATS
                used_mem += (size + SIZEOF_STRUCT_MEM);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif
            }
            else
            {
                /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
                 * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
                 * take care of this).
                 * -> near fit or excact fit: do not split, no mem2 creation
                 * also can't move mem->next directly behind mem, since mem->next
                 * will always be used at this point!
                 */
                mem->used = 1;
#ifdef RT_MEM_STATS
                used_mem += mem->next - ((rt_uint8_t *)mem - heap_ptr);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif
            }
            /* set memory block magic */
            mem->magic = HEAP_MAGIC;
#ifdef RT_USING_MEMTRACE
            if (rt_thread_self())
                rt_mem_setname(mem, rt_thread_self()->name);
            else
                rt_mem_setname(mem, "NONE");
#endif

            if (mem == lfree)
            {
                /* Find next free block after mem and update lowest free pointer */
                while (lfree->used && lfree != heap_end)
                    lfree = (struct heap_mem *)&heap_ptr[lfree->next];

                RT_ASSERT(((lfree == heap_end) || (!lfree->used)));
            }

            rt_sem_release(&heap_sem);
            RT_ASSERT((rt_ubase_t)mem + SIZEOF_STRUCT_MEM + size <= (rt_ubase_t)heap_end);
            RT_ASSERT((rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM) % RT_ALIGN_SIZE == 0);
            RT_ASSERT((((rt_ubase_t)mem) & (RT_ALIGN_SIZE - 1)) == 0);

            RT_DEBUG_LOG(RT_DEBUG_MEM,
                         ("allocate memory at 0x%x, size: %d\n",
                          (rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM),
                          (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));

            RT_OBJECT_HOOK_CALL(rt_malloc_hook,
                                (((void *)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM)), size));

            /* return the memory data except mem struct */
            return (rt_uint8_t *)mem + SIZEOF_STRUCT_MEM;
        }
    }

    /* no fitting block found */
    rt_sem_release(&heap_sem);

    return RT_NULL;
}
RTM_EXPORT(rt_malloc);
412 
413 /**
414  * This function will change the previously allocated memory block.
415  *
416  * @param rmem pointer to memory allocated by rt_malloc
417  * @param newsize the required new size
418  *
419  * @return the changed memory block address
420  */
/**
 * This function will change the previously allocated memory block.
 *
 * Shrinks in place (splitting off the tail as a new free block) when the
 * saving is big enough; otherwise falls back to malloc + copy + free.
 *
 * @param rmem pointer to memory allocated by rt_malloc
 * @param newsize the required new size
 *
 * @return the changed memory block address
 */
void *rt_realloc(void *rmem, rt_size_t newsize)
{
    rt_size_t size;
    rt_size_t ptr, ptr2;
    struct heap_mem *mem, *mem2;
    void *nmem;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment size */
    newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
    if (newsize > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("realloc: out of memory\n"));

        return RT_NULL;
    }
    else if (newsize == 0)
    {
        /* realloc(p, 0) behaves like free(p) */
        rt_free(rmem);
        return RT_NULL;
    }

    /* allocate a new memory block */
    if (rmem == RT_NULL)
        return rt_malloc(newsize);

    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr ||
        (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end)
    {
        /* illegal memory: pointer is outside this heap, hand it back untouched */
        rt_sem_release(&heap_sem);

        return rmem;
    }

    /* recover the block header sitting just before the user data */
    mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);

    ptr = (rt_uint8_t *)mem - heap_ptr;
    size = mem->next - ptr - SIZEOF_STRUCT_MEM;
    if (size == newsize)
    {
        /* the size is the same as */
        rt_sem_release(&heap_sem);

        return rmem;
    }

    /* shrink in place only if the freed tail can hold a header plus MIN_SIZE */
    if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE < size)
    {
        /* split memory block */
#ifdef RT_MEM_STATS
        used_mem -= (size - newsize);
#endif

        ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
        mem2 = (struct heap_mem *)&heap_ptr[ptr2];
        mem2->magic = HEAP_MAGIC;
        mem2->used = 0;
        mem2->next = mem->next;
        mem2->prev = ptr;
#ifdef RT_USING_MEMTRACE
        rt_mem_setname(mem2, "    ");
#endif
        mem->next = ptr2;
        /* fix the back-link of the following block, unless it is heap_end */
        if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
        {
            ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
        }

        /* merge the new tail with its neighbours if they are free */
        plug_holes(mem2);

        rt_sem_release(&heap_sem);

        return rmem;
    }
    rt_sem_release(&heap_sem);

    /* expand memory: allocate a fresh block, copy the payload, free the old one */
    nmem = rt_malloc(newsize);
    if (nmem != RT_NULL) /* check memory */
    {
        rt_memcpy(nmem, rmem, size < newsize ? size : newsize);
        rt_free(rmem);
    }

    return nmem;
}
RTM_EXPORT(rt_realloc);
512 
513 /**
514  * This function will contiguously allocate enough space for count objects
515  * that are size bytes of memory each and returns a pointer to the allocated
516  * memory.
517  *
518  * The allocated memory is filled with bytes of value zero.
519  *
520  * @param count number of objects to allocate
521  * @param size size of the objects to allocate
522  *
523  * @return pointer to allocated memory / NULL pointer if there is an error
524  */
/**
 * This function will contiguously allocate enough space for count objects
 * that are size bytes of memory each and returns a pointer to the allocated
 * memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 *
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *rt_calloc(rt_size_t count, rt_size_t size)
{
    void *p;

    /* reject requests whose total byte count would overflow rt_size_t:
     * without this guard count * size wraps around and a block far smaller
     * than requested is returned, which callers then overrun (CERT MEM07-C) */
    if (count != 0 && size > ((rt_size_t)-1) / count)
        return RT_NULL;

    /* allocate 'count' objects of size 'size' */
    p = rt_malloc(count * size);

    /* zero the memory */
    if (p)
        rt_memset(p, 0, count * size);

    return p;
}
RTM_EXPORT(rt_calloc);
539 
540 /**
541  * This function will release the previously allocated memory block by
542  * rt_malloc. The released memory block is taken back to system heap.
543  *
544  * @param rmem the address of memory which will be released
545  */
/**
 * This function will release the previously allocated memory block by
 * rt_malloc. The released memory block is taken back to system heap.
 *
 * @param rmem the address of memory which will be released
 */
void rt_free(void *rmem)
{
    struct heap_mem *mem;

    /* freeing a NULL pointer is a harmless no-op */
    if (rmem == RT_NULL)
        return;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* pointer must be aligned and must lie inside this heap */
    RT_ASSERT((((rt_ubase_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
    RT_ASSERT((rt_uint8_t *)rmem >= (rt_uint8_t *)heap_ptr &&
              (rt_uint8_t *)rmem < (rt_uint8_t *)heap_end);

    RT_OBJECT_HOOK_CALL(rt_free_hook, (rmem));

    /* same range check again for builds where RT_ASSERT is compiled out */
    if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr ||
        (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("illegal memory\n"));

        return;
    }

    /* Get the corresponding struct heap_mem ... */
    mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);

    RT_DEBUG_LOG(RT_DEBUG_MEM,
                 ("release memory 0x%x, size: %d\n",
                  (rt_ubase_t)rmem,
                  (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));


    /* protect the heap from concurrent access */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* ... which has to be in a used state ... */
    if (!mem->used || mem->magic != HEAP_MAGIC)
    {
        /* double free or header corruption: report, then assert below */
        rt_kprintf("to free a bad data block:\n");
        rt_kprintf("mem: 0x%08x, used flag: %d, magic code: 0x%04x\n", mem, mem->used, mem->magic);
    }
    RT_ASSERT(mem->used);
    RT_ASSERT(mem->magic == HEAP_MAGIC);
    /* ... and is now unused. */
    mem->used  = 0;
    mem->magic = HEAP_MAGIC;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(mem, "    ");
#endif

    if (mem < lfree)
    {
        /* the newly freed struct is now the lowest */
        lfree = mem;
    }

#ifdef RT_MEM_STATS
    used_mem -= (mem->next - ((rt_uint8_t *)mem - heap_ptr));
#endif

    /* finally, see if prev or next are free also */
    plug_holes(mem);
    rt_sem_release(&heap_sem);
}
RTM_EXPORT(rt_free);
611 
612 #ifdef RT_MEM_STATS
/**
 * Report heap statistics.  Every out-parameter is optional: pass RT_NULL
 * for any value the caller is not interested in.
 *
 * @param total    receives the usable heap size in bytes
 * @param used     receives the bytes currently allocated (incl. headers)
 * @param max_used receives the peak allocation watermark
 */
void rt_memory_info(rt_uint32_t *total,
                    rt_uint32_t *used,
                    rt_uint32_t *max_used)
{
    if (total)
        *total = mem_size_aligned;

    if (used)
        *used = used_mem;

    if (max_used)
        *max_used = max_mem;
}
624 
625 #ifdef RT_USING_FINSH
626 #include <finsh.h>
627 
/* FinSH shell command: print total, used and peak heap usage to the console. */
void list_mem(void)
{
    rt_kprintf("total memory: %d\n", mem_size_aligned);
    rt_kprintf("used memory : %d\n", used_mem);
    rt_kprintf("maximum allocated memory: %d\n", max_mem);
}
FINSH_FUNCTION_EXPORT(list_mem, list memory usage information)
635 
636 #ifdef RT_USING_MEMTRACE
/* MSH shell command: walk the whole block list with interrupts disabled and
 * verify every header (offset in range, magic intact, used flag 0/1).
 * Prints the first corrupted block found.  Always returns 0. */
int memcheck(void)
{
    int position;       /* NOTE(review): int, not rt_ubase_t — could truncate on very large heaps; confirm */
    rt_ubase_t level;
    struct heap_mem *mem;
    /* lock out everything so the list cannot change mid-walk */
    level = rt_hw_interrupt_disable();
    for (mem = (struct heap_mem *)heap_ptr; mem != heap_end; mem = (struct heap_mem *)&heap_ptr[mem->next])
    {
        position = (rt_ubase_t)mem - (rt_ubase_t)heap_ptr;
        if (position < 0) goto __exit;
        if (position > mem_size_aligned) goto __exit;
        if (mem->magic != HEAP_MAGIC) goto __exit;
        if (mem->used != 0 && mem->used != 1) goto __exit;
    }
    rt_hw_interrupt_enable(level);

    return 0;
__exit:
    /* dump the offending block, then restore interrupts */
    rt_kprintf("Memory block wrong:\n");
    rt_kprintf("address: 0x%08x\n", mem);
    rt_kprintf("  magic: 0x%04x\n", mem->magic);
    rt_kprintf("   used: %d\n", mem->used);
    rt_kprintf("  size: %d\n", mem->next - position - SIZEOF_STRUCT_MEM);
    rt_hw_interrupt_enable(level);

    return 0;
}
MSH_CMD_EXPORT(memcheck, check memory data);
665 
/* MSH shell command: dump overall usage, key heap pointers, and one line per
 * block (address, human-readable size, first 4 chars of the owning thread's
 * name; "***" marks a corrupted magic).  Walks the list without locking —
 * intended for interactive debugging only. */
int memtrace(int argc, char **argv)
{
    struct heap_mem *mem;

    list_mem();

    rt_kprintf("\nmemory heap address:\n");
    rt_kprintf("heap_ptr: 0x%08x\n", heap_ptr);
    rt_kprintf("lfree   : 0x%08x\n", lfree);
    rt_kprintf("heap_end: 0x%08x\n", heap_end);

    rt_kprintf("\n--memory item information --\n");
    for (mem = (struct heap_mem *)heap_ptr; mem != heap_end; mem = (struct heap_mem *)&heap_ptr[mem->next])
    {
        int position = (rt_ubase_t)mem - (rt_ubase_t)heap_ptr;
        int size;

        rt_kprintf("[0x%08x - ", mem);

        /* payload size, scaled to bytes / KiB / MiB for display */
        size = mem->next - position - SIZEOF_STRUCT_MEM;
        if (size < 1024)
            rt_kprintf("%5d", size);
        else if (size < 1024 * 1024)
            rt_kprintf("%4dK", size / 1024);
        else
            rt_kprintf("%4dM", size / (1024 * 1024));

        rt_kprintf("] %c%c%c%c", mem->thread[0], mem->thread[1], mem->thread[2], mem->thread[3]);
        if (mem->magic != HEAP_MAGIC)
            rt_kprintf(": ***\n");
        else
            rt_kprintf("\n");
    }

    return 0;
}
MSH_CMD_EXPORT(memtrace, dump memory trace information);
703 #endif /* end of RT_USING_MEMTRACE */
704 #endif /* end of RT_USING_FINSH    */
705 
706 #endif
707 
708 /**@}*/
709 
710 #endif /* end of RT_USING_HEAP */
711 #endif /* end of RT_USING_MEMHEAP_AS_HEAP */
712