/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * File      : slab.c
 *
 * Change Logs:
 * Date           Author       Notes
 * 2008-07-12     Bernard      the first version
 * 2010-07-13     Bernard      fix RT_ALIGN issue found by kuronca
 * 2010-10-23     yi.qiu       add module memory allocator
 * 2010-12-18     yi.qiu       fix zone release bug
 */

/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <rthw.h>
#include <rtthread.h>

#define RT_MEM_STATS

#if defined (RT_USING_HEAP) && defined (RT_USING_SLAB)
/* statistical variables */
#ifdef RT_MEM_STATS
static rt_size_t used_mem, max_mem;
#endif

#ifdef RT_USING_HOOK
static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
static void (*rt_free_hook)(void *ptr);

/**
 * @addtogroup Hook
 */

/**@{*/

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is allocated from heap memory.
 *
 * @param hook the hook function
 */
void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
{
    rt_malloc_hook = hook;
}
RTM_EXPORT(rt_malloc_sethook);

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is released to heap memory.
 *
 * @param hook the hook function
 */
void rt_free_sethook(void (*hook)(void *ptr))
{
    rt_free_hook = hook;
}
RTM_EXPORT(rt_free_sethook);
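
/*
 * Illustrative usage of the hooks (requires RT_USING_HOOK; the trace
 * functions below are hypothetical application code):
 *
 *     static void malloc_trace(void *ptr, rt_size_t size)
 *     {
 *         rt_kprintf("malloc %d bytes at 0x%x\n", size, (rt_uint32_t)ptr);
 *     }
 *
 *     static void free_trace(void *ptr)
 *     {
 *         rt_kprintf("free 0x%x\n", (rt_uint32_t)ptr);
 *     }
 *
 *     rt_malloc_sethook(malloc_trace);
 *     rt_free_sethook(free_trace);
 */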

/**@}*/

#endif

/*
 * slab allocator implementation
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *      Alloc Size      Chunking        Number of zones
 *      0-127           8               16
 *      128-255         16              8
 *      256-511         32              8
 *      512-1023        64              8
 *      1024-2047       128             8
 *      2048-4095       256             8
 *      4096-8191       512             8
 *      8192-16383      1024            8
 *      16384-32767     2048            8
 *      (if RT_MM_PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *      Allocations >= zone_limit go directly to kmem.
 *
 *                      API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-RT_NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */
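
/*
 * Worked example of the table above: a 200-byte request falls in the
 * 128-255 range, so it is rounded up to the next 16-byte boundary
 * (208 bytes) and served from zone index 20; a 5000-byte request is
 * rounded up to 5120 bytes (512-byte chunking, zone index 57); requests
 * at or above zone_limit (at most 16KB) bypass the zones and go straight
 * to the page allocator.
 */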

/*
 * Chunk structure for free elements
 */
typedef struct slab_chunk
{
    struct slab_chunk *c_next;
} slab_chunk;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 */
typedef struct slab_zone
{
    rt_int32_t  z_magic;        /* magic number for sanity check */
    rt_int32_t  z_nfree;        /* total free chunks / ualloc space in zone */
    rt_int32_t  z_nmax;         /* maximum free chunks */

    struct slab_zone *z_next;   /* zoneary[] link if z_nfree non-zero */
    rt_uint8_t  *z_baseptr;     /* pointer to start of chunk array */

    rt_int32_t  z_uindex;       /* current initial allocation index */
    rt_int32_t  z_chunksize;    /* chunk size for validation */

    rt_int32_t  z_zoneindex;    /* zone index */
    slab_chunk  *z_freechunk;   /* free chunk list */
} slab_zone;

#define ZALLOC_SLAB_MAGIC       0x51ab51ab
#define ZALLOC_ZONE_LIMIT       (16 * 1024)     /* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE    (32 * 1024)     /* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE    (128 * 1024)    /* maximum zone size */
#define NZONES                  72              /* number of zones */
#define ZONE_RELEASE_THRESH     2               /* threshold number of zones */

static slab_zone *zone_array[NZONES];   /* linked list of zones NFree > 0 */
static slab_zone *zone_free;            /* whole zones that have become free */

static int zone_free_cnt;
static int zone_size;
static int zone_limit;
static int zone_page_cnt;

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * RT_MM_PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define MIN_CHUNK_SIZE      8       /* in bytes */
#define MIN_CHUNK_MASK      (MIN_CHUNK_SIZE - 1)

/*
 * Array of descriptors that describe the contents of each page
 */
#define PAGE_TYPE_FREE      0x00
#define PAGE_TYPE_SMALL     0x01
#define PAGE_TYPE_LARGE     0x02
struct memusage
{
    rt_uint32_t type: 2;        /* page type */
    rt_uint32_t size: 30;       /* pages allocated or offset from zone */
};
static struct memusage *memusage = RT_NULL;
#define btokup(addr)    \
    (&memusage[((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])
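
/*
 * For example, with heap_start at a hypothetical 0x20000000 and 4KB pages,
 * btokup((void *)0x20003800) evaluates to &memusage[3], the descriptor of
 * the page that contains that address.
 */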

static rt_uint32_t heap_start, heap_end;

/* page allocator */
struct rt_page_head
{
    struct rt_page_head *next;      /* next valid page */
    rt_size_t page;                 /* number of page */

    /* dummy */
    char dummy[RT_MM_PAGE_SIZE - (sizeof(struct rt_page_head *) + sizeof(rt_size_t))];
};
static struct rt_page_head *rt_page_list;
static struct rt_semaphore heap_sem;

void *rt_page_alloc(rt_size_t npages)
{
    struct rt_page_head *b, *n;
    struct rt_page_head **prev;

    if (npages == 0)
        return RT_NULL;

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        if (b->page > npages)
        {
            /* split pages */
            n = b + npages;
            n->next = b->next;
            n->page = b->page - npages;
            *prev = n;
            break;
        }

        if (b->page == npages)
        {
            /* this node fits, remove this node */
            *prev = b->next;
            break;
        }
    }

    /* unlock heap */
    rt_sem_release(&heap_sem);

    return b;
}

void rt_page_free(void *addr, rt_size_t npages)
{
    struct rt_page_head *b, *n;
    struct rt_page_head **prev;

    RT_ASSERT(addr != RT_NULL);
    RT_ASSERT((rt_uint32_t)addr % RT_MM_PAGE_SIZE == 0);
    RT_ASSERT(npages != 0);

    n = (struct rt_page_head *)addr;

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        RT_ASSERT(b->page > 0);
        RT_ASSERT(b > n || b + b->page <= n);

        if (b + b->page == n)
        {
            if (b + (b->page += npages) == b->next)
            {
                b->page += b->next->page;
                b->next = b->next->next;
            }

            goto _return;
        }

        if (b == n + npages)
        {
            n->page = b->page + npages;
            n->next = b->next;
            *prev = n;

            goto _return;
        }

        if (b > n + npages)
            break;
    }

    n->page = npages;
    n->next = b;
    *prev = n;

_return:
    /* unlock heap */
    rt_sem_release(&heap_sem);
}
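
/*
 * Illustrative use of the page-level allocator: a 9000-byte buffer needs
 * three pages when RT_MM_PAGE_SIZE is 4KB (the size is an arbitrary example).
 *
 *     rt_size_t npages = RT_ALIGN(9000, RT_MM_PAGE_SIZE) >> RT_MM_PAGE_BITS;
 *     void *buf = rt_page_alloc(npages);
 *     if (buf != RT_NULL)
 *         rt_page_free(buf, npages);
 */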

/*
 * Initialize the page allocator
 */
static void rt_page_init(void *addr, rt_size_t npages)
{
    RT_ASSERT(addr != RT_NULL);
    RT_ASSERT(npages != 0);

    rt_page_list = RT_NULL;
    rt_page_free(addr, npages);
}

/**
 * @ingroup SystemInit
 *
 * This function will initialize the system heap.
 *
 * @param begin_addr the beginning address of the heap memory
 * @param end_addr the end address of the heap memory
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    rt_uint32_t limsize, npages;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* align begin and end addr to page boundaries */
    heap_start = RT_ALIGN((rt_uint32_t)begin_addr, RT_MM_PAGE_SIZE);
    heap_end = RT_ALIGN_DOWN((rt_uint32_t)end_addr, RT_MM_PAGE_SIZE);

    if (heap_start >= heap_end)
    {
        rt_kprintf("rt_system_heap_init, wrong address[0x%x - 0x%x]\n",
                   (rt_uint32_t)begin_addr, (rt_uint32_t)end_addr);

        return;
    }

    limsize = heap_end - heap_start;
    npages = limsize / RT_MM_PAGE_SIZE;

    /* initialize heap semaphore */
    rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_FIFO);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("heap[0x%x - 0x%x], size 0x%x, 0x%x pages\n",
                                 heap_start, heap_end, limsize, npages));

    /* init pages */
    rt_page_init((void *)heap_start, npages);

    /* calculate zone size */
    zone_size = ZALLOC_MIN_ZONE_SIZE;
    while (zone_size < ZALLOC_MAX_ZONE_SIZE && (zone_size << 1) < (limsize / 1024))
        zone_size <<= 1;

    zone_limit = zone_size / 4;
    if (zone_limit > ZALLOC_ZONE_LIMIT)
        zone_limit = ZALLOC_ZONE_LIMIT;

    zone_page_cnt = zone_size / RT_MM_PAGE_SIZE;

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("zone size 0x%x, zone page count 0x%x\n",
                                 zone_size, zone_page_cnt));

    /* allocate memusage array */
    limsize = npages * sizeof(struct memusage);
    limsize = RT_ALIGN(limsize, RT_MM_PAGE_SIZE);
    memusage = rt_page_alloc(limsize / RT_MM_PAGE_SIZE);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("memusage 0x%x, size 0x%x\n",
                                 (rt_uint32_t)memusage, limsize));
}
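
/*
 * Typical startup usage (HEAP_BEGIN and HEAP_END are board-specific
 * placeholders, usually provided by the BSP):
 *
 *     rt_system_heap_init((void *)HEAP_BEGIN, (void *)HEAP_END);
 */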

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
rt_inline int zoneindex(rt_uint32_t *bytes)
{
    /* unsigned for shift opt */
    rt_uint32_t n = (rt_uint32_t)*bytes;

    if (n < 128)
    {
        *bytes = n = (n + 7) & ~7;

        /* 8 byte chunks, 16 zones */
        return (n / 8 - 1);
    }
    if (n < 256)
    {
        *bytes = n = (n + 15) & ~15;

        return (n / 16 + 7);
    }
    if (n < 8192)
    {
        if (n < 512)
        {
            *bytes = n = (n + 31) & ~31;

            return (n / 32 + 15);
        }
        if (n < 1024)
        {
            *bytes = n = (n + 63) & ~63;

            return (n / 64 + 23);
        }
        if (n < 2048)
        {
            *bytes = n = (n + 127) & ~127;

            return (n / 128 + 31);
        }
        if (n < 4096)
        {
            *bytes = n = (n + 255) & ~255;

            return (n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;

        return (n / 512 + 47);
    }
    if (n < 16384)
    {
        *bytes = n = (n + 1023) & ~1023;

        return (n / 1024 + 55);
    }

    rt_kprintf("Unexpected byte count %d", n);

    return 0;
}

/**
 * @addtogroup MM
 */

/**@{*/

/**
 * This function will allocate a block from the system heap.
 *
 * If the requested size is zero, or no memory block large enough is
 * available, RT_NULL is returned.
 *
 * @param size the size of memory to be allocated
 *
 * @return the allocated memory block, or RT_NULL on failure
 */
void *rt_malloc(rt_size_t size)
{
    slab_zone *z;
    rt_int32_t zi;
    slab_chunk *chunk;
    struct memusage *kup;

    /* zero size, return RT_NULL */
    if (size == 0)
        return RT_NULL;

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     */
    if (size >= zone_limit)
    {
        size = RT_ALIGN(size, RT_MM_PAGE_SIZE);

        chunk = rt_page_alloc(size >> RT_MM_PAGE_BITS);
        if (chunk == RT_NULL)
            return RT_NULL;

        /* set kup */
        kup = btokup(chunk);
        kup->type = PAGE_TYPE_LARGE;
        kup->size = size >> RT_MM_PAGE_BITS;

        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("malloc a large memory 0x%x, page cnt %d, kup %d\n",
                      size,
                      size >> RT_MM_PAGE_BITS,
                      ((rt_uint32_t)chunk - heap_start) >> RT_MM_PAGE_BITS));

        /* lock heap */
        rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

#ifdef RT_MEM_STATS
        used_mem += size;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif
        goto done;
    }

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    RT_ASSERT(zi < NZONES);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("try to malloc 0x%x on zone: %d\n", size, zi));

    if ((z = zone_array[zi]) != RT_NULL)
    {
        RT_ASSERT(z->z_nfree > 0);

        /* Remove us from the zone_array[] when we become empty */
        if (--z->z_nfree == 0)
        {
            zone_array[zi] = z->z_next;
            z->z_next = RT_NULL;
        }

        /*
         * No chunks are available but nfree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by uindex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit rt_kprintf rather than a KASSERT.
         */
        if (z->z_uindex + 1 != z->z_nmax)
        {
            z->z_uindex = z->z_uindex + 1;
            chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);
        }
        else
        {
            /* find on free chunk list */
            chunk = z->z_freechunk;

            /* remove this chunk from list */
            z->z_freechunk = z->z_freechunk->c_next;
        }

#ifdef RT_MEM_STATS
        used_mem += z->z_chunksize;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif

        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        rt_int32_t off;

        if ((z = zone_free) != RT_NULL)
        {
            /* remove zone from free zone list */
            zone_free = z->z_next;
            -- zone_free_cnt;
        }
        else
        {
            /* unlock heap, since the page allocator takes the lock itself */
            rt_sem_release(&heap_sem);

            /* allocate a zone from the page allocator */
            z = rt_page_alloc(zone_size / RT_MM_PAGE_SIZE);
            if (z == RT_NULL)
            {
                chunk = RT_NULL;
                goto __exit;
            }

            /* lock heap */
            rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

            RT_DEBUG_LOG(RT_DEBUG_SLAB, ("alloc a new zone: 0x%x\n",
                                         (rt_uint32_t)z));

            /* set memory usage */
            for (off = 0, kup = btokup(z); off < zone_page_cnt; off ++)
            {
                kup->type = PAGE_TYPE_SMALL;
                kup->size = off;

                kup ++;
            }
        }

        /* clear to zero */
        rt_memset(z, 0, sizeof(slab_zone));

        /* offset of slab zone struct in zone */
        off = sizeof(slab_zone);

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (off + size - 1) & ~(size - 1);
        else
            off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;

        z->z_magic = ZALLOC_SLAB_MAGIC;
        z->z_zoneindex = zi;
        z->z_nmax = (zone_size - off) / size;
        z->z_nfree = z->z_nmax - 1;
        z->z_baseptr = (rt_uint8_t *)z + off;
        z->z_uindex = 0;
        z->z_chunksize = size;

        chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);

        /* link to zone array */
        z->z_next = zone_array[zi];
        zone_array[zi] = z;

#ifdef RT_MEM_STATS
        used_mem += z->z_chunksize;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif
    }

done:
    rt_sem_release(&heap_sem);
    RT_OBJECT_HOOK_CALL(rt_malloc_hook, ((char *)chunk, size));

__exit:
    return chunk;
}
RTM_EXPORT(rt_malloc);
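
/*
 * Illustrative allocation from the slab heap (struct sample is a
 * hypothetical application type):
 *
 *     struct sample *obj = (struct sample *)rt_malloc(sizeof(struct sample));
 *     if (obj == RT_NULL)
 *         return -RT_ENOMEM;
 *     rt_memset(obj, 0, sizeof(struct sample));
 *
 * and, once the object is no longer needed:
 *
 *     rt_free(obj);
 */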

/**
 * This function will change the size of a previously allocated memory block.
 *
 * @param ptr the previously allocated memory block
 * @param size the new size of the memory block
 *
 * @return the reallocated memory block
 */
void *rt_realloc(void *ptr, rt_size_t size)
{
    void *nptr;
    slab_zone *z;
    struct memusage *kup;

    if (ptr == RT_NULL)
        return rt_malloc(size);
    if (size == 0)
    {
        rt_free(ptr);

        return RT_NULL;
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_size_t osize;

        osize = kup->size << RT_MM_PAGE_BITS;
        if ((nptr = rt_malloc(size)) == RT_NULL)
            return RT_NULL;
        rt_memcpy(nptr, ptr, size > osize ? osize : size);
        rt_free(ptr);

        return nptr;
    }
    else if (kup->type == PAGE_TYPE_SMALL)
    {
        z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) -
                          kup->size * RT_MM_PAGE_SIZE);
        RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

        zoneindex(&size);
        if (z->z_chunksize == size)
            return (ptr); /* same chunk */

        /*
         * Allocate memory for the new request size.  Note that zoneindex has
         * already adjusted the request size to the appropriate chunk size, which
         * should optimize our bcopy().  Then copy and return the new pointer.
         */
        if ((nptr = rt_malloc(size)) == RT_NULL)
            return RT_NULL;

        rt_memcpy(nptr, ptr, size > z->z_chunksize ? z->z_chunksize : size);
        rt_free(ptr);

        return nptr;
    }

    return RT_NULL;
}
RTM_EXPORT(rt_realloc);
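
/*
 * A safe grow idiom with rt_realloc: keep the old pointer until the new
 * allocation succeeds (buf and new_size are hypothetical).
 *
 *     void *tmp = rt_realloc(buf, new_size);
 *     if (tmp != RT_NULL)
 *         buf = tmp;
 */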

/**
 * This function will contiguously allocate enough space for count objects
 * that are size bytes of memory each and return a pointer to the allocated
 * memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 *
 * @return pointer to the allocated memory, or RT_NULL if there is an error
 */
void *rt_calloc(rt_size_t count, rt_size_t size)
{
    void *p;

    /* allocate 'count' objects of size 'size' */
    p = rt_malloc(count * size);

    /* zero the memory */
    if (p)
        rt_memset(p, 0, count * size);

    return p;
}
RTM_EXPORT(rt_calloc);
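
/*
 * Example: a zero-filled array of 16 counters (the element count is
 * arbitrary).
 *
 *     rt_uint32_t *counters = (rt_uint32_t *)rt_calloc(16, sizeof(rt_uint32_t));
 */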

/**
 * This function will release a memory block previously allocated by rt_malloc.
 * The released memory block is returned to the system heap.
 *
 * @param ptr the address of the memory block to be released
 */
void rt_free(void *ptr)
{
    slab_zone *z;
    slab_chunk *chunk;
    struct memusage *kup;

    /* free a RT_NULL pointer */
    if (ptr == RT_NULL)
        return;

    RT_OBJECT_HOOK_CALL(rt_free_hook, (ptr));

    /* get memory usage */
#if RT_DEBUG_SLAB
    {
        rt_uint32_t addr = ((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("free a memory 0x%x and align to 0x%x, kup index %d\n",
                      (rt_uint32_t)ptr,
                      (rt_uint32_t)addr,
                      ((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS));
    }
#endif

    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
    /* release large allocation */
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_uint32_t size;

        /* lock heap */
        rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
        /* clear page counter */
        size = kup->size;
        kup->size = 0;

#ifdef RT_MEM_STATS
        used_mem -= size * RT_MM_PAGE_SIZE;
#endif
        rt_sem_release(&heap_sem);

        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("free large memory block 0x%x, page count %d\n",
                      (rt_uint32_t)ptr, size));

        /* free this page */
        rt_page_free(ptr, size);

        return;
    }

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* zone case: get the zone that owns this chunk */
    z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) -
                      kup->size * RT_MM_PAGE_SIZE);
    RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

    chunk = (slab_chunk *)ptr;
    chunk->c_next = z->z_freechunk;
    z->z_freechunk = chunk;

#ifdef RT_MEM_STATS
    used_mem -= z->z_chunksize;
#endif

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_nfree++ == 0)
    {
        z->z_next = zone_array[z->z_zoneindex];
        zone_array[z->z_zoneindex] = z;
    }

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_nfree == z->z_nmax &&
        (z->z_next || zone_array[z->z_zoneindex] != z))
    {
        slab_zone **pz;

        RT_DEBUG_LOG(RT_DEBUG_SLAB, ("free zone 0x%x, index %d\n",
                                     (rt_uint32_t)z, z->z_zoneindex));

        /* remove zone from zone array list */
        for (pz = &zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)
            ;
        *pz = z->z_next;

        /* reset zone */
        z->z_magic = -1;

        /* insert to free zone list */
        z->z_next = zone_free;
        zone_free = z;

        ++ zone_free_cnt;

        /* release zone to page allocator */
        if (zone_free_cnt > ZONE_RELEASE_THRESH)
        {
            register rt_base_t i;

            z = zone_free;
            zone_free = z->z_next;
            -- zone_free_cnt;

            /* set memory usage */
            for (i = 0, kup = btokup(z); i < zone_page_cnt; i ++)
            {
                kup->type = PAGE_TYPE_FREE;
                kup->size = 0;
                kup ++;
            }

            /* unlock heap */
            rt_sem_release(&heap_sem);

            /* release pages */
            rt_page_free(z, zone_size / RT_MM_PAGE_SIZE);

            return;
        }
    }
    /* unlock heap */
    rt_sem_release(&heap_sem);
}
RTM_EXPORT(rt_free);

#ifdef RT_MEM_STATS
void rt_memory_info(rt_uint32_t *total,
                    rt_uint32_t *used,
                    rt_uint32_t *max_used)
{
    if (total != RT_NULL)
        *total = heap_end - heap_start;

    if (used != RT_NULL)
        *used = used_mem;

    if (max_used != RT_NULL)
        *max_used = max_mem;
}

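/*
 * Example of querying the statistics (illustrative only):
 *
 *     rt_uint32_t total, used, max_used;
 *     rt_memory_info(&total, &used, &max_used);
 *     rt_kprintf("heap: %d of %d bytes used, %d max\n", used, total, max_used);
 */
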
#ifdef RT_USING_FINSH
#include <finsh.h>

void list_mem(void)
{
    rt_kprintf("total memory: %d\n", heap_end - heap_start);
    rt_kprintf("used memory : %d\n", used_mem);
    rt_kprintf("maximum allocated memory: %d\n", max_mem);
}
FINSH_FUNCTION_EXPORT(list_mem, list memory usage information)
#endif
#endif

/**@}*/

#endif