/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * File      : slab.c
 *
 * Change Logs:
 * Date           Author       Notes
 * 2008-07-12     Bernard      the first version
 * 2010-07-13     Bernard      fix RT_ALIGN issue found by kuronca
 * 2010-10-23     yi.qiu       add module memory allocator
 * 2010-12-18     yi.qiu       fix zone release bug
 */

/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <rthw.h>
#include <rtthread.h>

#define RT_MEM_STATS

#if defined (RT_USING_HEAP) && defined (RT_USING_SLAB)
/* some statistical variables */
#ifdef RT_MEM_STATS
static rt_size_t used_mem, max_mem;
#endif

#ifdef RT_USING_HOOK
static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
static void (*rt_free_hook)(void *ptr);

/**
 * @addtogroup Hook
 */

/**@{*/

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is allocated from heap memory.
 *
 * @param hook the hook function
 */
void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
{
    rt_malloc_hook = hook;
}
RTM_EXPORT(rt_malloc_sethook);

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is released to heap memory.
 *
 * @param hook the hook function
 */
void rt_free_sethook(void (*hook)(void *ptr))
{
    rt_free_hook = hook;
}
RTM_EXPORT(rt_free_sethook);

/**@}*/

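/*
 * Example (illustrative sketch, not part of the original file): registering
 * simple trace hooks with rt_malloc_sethook()/rt_free_sethook() when
 * RT_USING_HOOK is enabled.  The hook and function names below are
 * hypothetical application code.
 *
 *     static void my_malloc_trace(void *ptr, rt_size_t size)
 *     {
 *         rt_kprintf("malloc %d bytes at 0x%08x\n", size, (rt_uint32_t)ptr);
 *     }
 *
 *     static void my_free_trace(void *ptr)
 *     {
 *         rt_kprintf("free 0x%08x\n", (rt_uint32_t)ptr);
 *     }
 *
 *     void trace_init(void)
 *     {
 *         rt_malloc_sethook(my_malloc_trace);
 *         rt_free_sethook(my_free_trace);
 *     }
 */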
#endif

/*
 * slab allocator implementation
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.   In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *  Alloc Size  Chunking        Number of zones
 *  0-127       8               16
 *  128-255     16              8
 *  256-511     32              8
 *  512-1023    64              8
 *  1024-2047   128             8
 *  2048-4095   256             8
 *  4096-8191   512             8
 *  8192-16383  1024            8
 *  16384-32767 2048            8
 *  (if RT_MM_PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *  Allocations >= zone_limit go directly to kmem.
 *
 *          API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-RT_NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

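/*
 * Worked example (added for clarity, not part of the original source):
 * following the table above, a request is first rounded up to the chunk
 * size of its band and then mapped to a zone index (see zoneindex() below).
 *
 *     rt_malloc(100)  -> rounded to 104 bytes  (8-byte chunking),   zone 12
 *     rt_malloc(200)  -> rounded to 208 bytes  (16-byte chunking),  zone 20
 *     rt_malloc(5000) -> rounded to 5120 bytes (512-byte chunking), zone 57
 *
 * Requests of zone_limit bytes or more bypass the zones entirely and are
 * served whole pages at a time by rt_page_alloc().
 */
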
/*
 * Chunk structure for free elements
 */
typedef struct slab_chunk
{
    struct slab_chunk *c_next;
} slab_chunk;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 */
typedef struct slab_zone
{
    rt_int32_t  z_magic;        /* magic number for sanity check */
    rt_int32_t  z_nfree;        /* total free chunks / ualloc space in zone */
    rt_int32_t  z_nmax;         /* maximum free chunks */

    struct slab_zone *z_next;   /* zoneary[] link if z_nfree non-zero */
    rt_uint8_t  *z_baseptr;     /* pointer to start of chunk array */

    rt_int32_t  z_uindex;       /* current initial allocation index */
    rt_int32_t  z_chunksize;    /* chunk size for validation */

    rt_int32_t  z_zoneindex;    /* zone index */
    slab_chunk  *z_freechunk;   /* free chunk list */
} slab_zone;

#define ZALLOC_SLAB_MAGIC       0x51ab51ab
#define ZALLOC_ZONE_LIMIT       (16 * 1024)     /* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE    (32 * 1024)     /* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE    (128 * 1024)    /* maximum zone size */
#define NZONES                  72              /* number of zones */
#define ZONE_RELEASE_THRESH     2               /* threshold number of zones */

static slab_zone *zone_array[NZONES];   /* linked list of zones NFree > 0 */
static slab_zone *zone_free;            /* whole zones that have become free */

static int zone_free_cnt;
static int zone_size;
static int zone_limit;
static int zone_page_cnt;

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * RT_MM_PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define MIN_CHUNK_SIZE      8       /* in bytes */
#define MIN_CHUNK_MASK      (MIN_CHUNK_SIZE - 1)

/*
 * Array of descriptors that describe the contents of each page
 */
#define PAGE_TYPE_FREE      0x00
#define PAGE_TYPE_SMALL     0x01
#define PAGE_TYPE_LARGE     0x02
struct memusage
{
    rt_uint32_t type: 2;        /* page type */
    rt_uint32_t size: 30;       /* pages allocated or offset from zone */
};
static struct memusage *memusage = RT_NULL;
#define btokup(addr)    \
    (&memusage[((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])

static rt_uint32_t heap_start, heap_end;

/* page allocator */
struct rt_page_head
{
    struct rt_page_head *next;      /* next valid page */
    rt_size_t page;                 /* number of pages */

    /* dummy */
    char dummy[RT_MM_PAGE_SIZE - (sizeof(struct rt_page_head *) + sizeof(rt_size_t))];
};
static struct rt_page_head *rt_page_list;
static struct rt_semaphore heap_sem;

void *rt_page_alloc(rt_size_t npages)
{
    struct rt_page_head *b, *n;
    struct rt_page_head **prev;

    if (npages == 0)
        return RT_NULL;

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        if (b->page > npages)
        {
            /* split pages */
            n       = b + npages;
            n->next = b->next;
            n->page = b->page - npages;
            *prev   = n;
            break;
        }

        if (b->page == npages)
        {
            /* this node fits exactly, remove this node */
            *prev = b->next;
            break;
        }
    }

    /* unlock heap */
    rt_sem_release(&heap_sem);

    return b;
}

void rt_page_free(void *addr, rt_size_t npages)
{
    struct rt_page_head *b, *n;
    struct rt_page_head **prev;

    RT_ASSERT(addr != RT_NULL);
    RT_ASSERT((rt_uint32_t)addr % RT_MM_PAGE_SIZE == 0);
    RT_ASSERT(npages != 0);

    n = (struct rt_page_head *)addr;

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        RT_ASSERT(b->page > 0);
        RT_ASSERT(b > n || b + b->page <= n);

        if (b + b->page == n)
        {
            if (b + (b->page += npages) == b->next)
            {
                b->page += b->next->page;
                b->next  = b->next->next;
            }

            goto _return;
        }

        if (b == n + npages)
        {
            n->page = b->page + npages;
            n->next = b->next;
            *prev   = n;

            goto _return;
        }

        if (b > n + npages)
            break;
    }

    n->page = npages;
    n->next = b;
    *prev   = n;

_return:
    /* unlock heap */
    rt_sem_release(&heap_sem);
}

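/*
 * Usage sketch (illustrative, not part of the original source): the page
 * allocator above hands out runs of RT_MM_PAGE_SIZE-sized pages from a
 * first-fit free list.  The caller must remember the page count and pass
 * the same value back to rt_page_free().
 *
 *     void *buf = rt_page_alloc(4);
 *     if (buf != RT_NULL)
 *     {
 *         rt_memset(buf, 0, 4 * RT_MM_PAGE_SIZE);
 *         rt_page_free(buf, 4);
 *     }
 */
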
/*
 * Initialize the page allocator
 */
static void rt_page_init(void *addr, rt_size_t npages)
{
    RT_ASSERT(addr != RT_NULL);
    RT_ASSERT(npages != 0);

    rt_page_list = RT_NULL;
    rt_page_free(addr, npages);
}

/**
 * @ingroup SystemInit
 *
 * This function will initialize the system heap.
 *
 * @param begin_addr the beginning address of the system heap
 * @param end_addr the end address of the system heap
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    rt_uint32_t limsize, npages;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* align begin and end addr to page */
    heap_start = RT_ALIGN((rt_uint32_t)begin_addr, RT_MM_PAGE_SIZE);
    heap_end   = RT_ALIGN_DOWN((rt_uint32_t)end_addr, RT_MM_PAGE_SIZE);

    if (heap_start >= heap_end)
    {
        rt_kprintf("rt_system_heap_init, wrong address[0x%x - 0x%x]\n",
                   (rt_uint32_t)begin_addr, (rt_uint32_t)end_addr);

        return;
    }

    limsize = heap_end - heap_start;
    npages  = limsize / RT_MM_PAGE_SIZE;

    /* initialize heap semaphore */
    rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_FIFO);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("heap[0x%x - 0x%x], size 0x%x, 0x%x pages\n",
                                 heap_start, heap_end, limsize, npages));

    /* init pages */
    rt_page_init((void *)heap_start, npages);

    /* calculate zone size */
    zone_size = ZALLOC_MIN_ZONE_SIZE;
    while (zone_size < ZALLOC_MAX_ZONE_SIZE && (zone_size << 1) < (limsize / 1024))
        zone_size <<= 1;

    zone_limit = zone_size / 4;
    if (zone_limit > ZALLOC_ZONE_LIMIT)
        zone_limit = ZALLOC_ZONE_LIMIT;

    zone_page_cnt = zone_size / RT_MM_PAGE_SIZE;

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("zone size 0x%x, zone page count 0x%x\n",
                                 zone_size, zone_page_cnt));

    /* allocate memusage array */
    limsize  = npages * sizeof(struct memusage);
    limsize  = RT_ALIGN(limsize, RT_MM_PAGE_SIZE);
    memusage = rt_page_alloc(limsize / RT_MM_PAGE_SIZE);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("memusage 0x%x, size 0x%x\n",
                                 (rt_uint32_t)memusage, limsize));
}

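/*
 * Usage sketch (illustrative, not part of the original source): board
 * start-up code typically calls rt_system_heap_init() once with the free
 * RAM range, before the scheduler starts.  HEAP_BEGIN and HEAP_END are
 * hypothetical board-specific symbols.
 *
 *     void rt_hw_board_init(void)
 *     {
 *         rt_system_heap_init((void *)HEAP_BEGIN, (void *)HEAP_END);
 *     }
 */
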
/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
rt_inline int zoneindex(rt_uint32_t *bytes)
{
    /* unsigned for shift opt */
    rt_uint32_t n = (rt_uint32_t)*bytes;

    if (n < 128)
    {
        *bytes = n = (n + 7) & ~7;

        /* 8 byte chunks, 16 zones */
        return (n / 8 - 1);
    }
    if (n < 256)
    {
        *bytes = n = (n + 15) & ~15;

        return (n / 16 + 7);
    }
    if (n < 8192)
    {
        if (n < 512)
        {
            *bytes = n = (n + 31) & ~31;

            return (n / 32 + 15);
        }
        if (n < 1024)
        {
            *bytes = n = (n + 63) & ~63;

            return (n / 64 + 23);
        }
        if (n < 2048)
        {
            *bytes = n = (n + 127) & ~127;

            return (n / 128 + 31);
        }
        if (n < 4096)
        {
            *bytes = n = (n + 255) & ~255;

            return (n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;

        return (n / 512 + 47);
    }
    if (n < 16384)
    {
        *bytes = n = (n + 1023) & ~1023;

        return (n / 1024 + 55);
    }

    rt_kprintf("Unexpected byte count %d", n);

    return 0;
}

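/*
 * Note (added for clarity, not part of the original source): zoneindex()
 * rounds *bytes up to the chunk size of the selected zone in place; for
 * example, a 100-byte request comes back as 104 bytes in zone 12.
 * rt_realloc() relies on this to compare the adjusted size against
 * z_chunksize directly.
 *
 *     rt_uint32_t sz = 100;
 *     int zi = zoneindex(&sz);
 *     RT_ASSERT(zi == 12 && sz == 104);
 */
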
/**
 * @addtogroup MM
 */

/**@{*/

/**
 * This function will allocate a block from system heap memory.
 * - If the size is zero, or
 * - if there is no memory block of the requested size available in the system,
 * RT_NULL is returned.
 *
 * @param size the size of memory to be allocated
 *
 * @return the allocated memory
 */
void *rt_malloc(rt_size_t size)
{
    slab_zone *z;
    rt_int32_t zi;
    slab_chunk *chunk;
    struct memusage *kup;

    /* zero size, return RT_NULL */
    if (size == 0)
        return RT_NULL;

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     */
    if (size >= zone_limit)
    {
        size = RT_ALIGN(size, RT_MM_PAGE_SIZE);

        chunk = rt_page_alloc(size >> RT_MM_PAGE_BITS);
        if (chunk == RT_NULL)
            return RT_NULL;

        /* set kup */
        kup = btokup(chunk);
        kup->type = PAGE_TYPE_LARGE;
        kup->size = size >> RT_MM_PAGE_BITS;

        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("malloc a large memory 0x%x, page cnt %d, kup %d\n",
                      size,
                      size >> RT_MM_PAGE_BITS,
                      ((rt_uint32_t)chunk - heap_start) >> RT_MM_PAGE_BITS));

        /* lock heap */
        rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

#ifdef RT_MEM_STATS
        used_mem += size;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif
        goto done;
    }

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    RT_ASSERT(zi < NZONES);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("try to malloc 0x%x on zone: %d\n", size, zi));

    if ((z = zone_array[zi]) != RT_NULL)
    {
        RT_ASSERT(z->z_nfree > 0);

        /* Remove us from the zone_array[] when we become empty */
        if (--z->z_nfree == 0)
        {
            zone_array[zi] = z->z_next;
            z->z_next = RT_NULL;
        }

        /*
         * No chunks are available but nfree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by uindex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit rt_kprintf rather than a KASSERT.
         */
        if (z->z_uindex + 1 != z->z_nmax)
        {
            z->z_uindex = z->z_uindex + 1;
            chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);
        }
        else
        {
            /* find on free chunk list */
            chunk = z->z_freechunk;

            /* remove this chunk from list */
            z->z_freechunk = z->z_freechunk->c_next;
        }

#ifdef RT_MEM_STATS
        used_mem += z->z_chunksize;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif

        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        rt_int32_t off;

        if ((z = zone_free) != RT_NULL)
        {
            /* remove zone from free zone list */
            zone_free = z->z_next;
            -- zone_free_cnt;
        }
        else
        {
            /* unlock heap, since the page allocator does its own locking */
            rt_sem_release(&heap_sem);

            /* allocate a zone from page */
            z = rt_page_alloc(zone_size / RT_MM_PAGE_SIZE);
            if (z == RT_NULL)
            {
                chunk = RT_NULL;
                goto __exit;
            }

            /* lock heap */
            rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

            RT_DEBUG_LOG(RT_DEBUG_SLAB, ("alloc a new zone: 0x%x\n",
                                         (rt_uint32_t)z));

            /* set memory usage for each page of the zone */
            for (off = 0, kup = btokup(z); off < zone_page_cnt; off ++)
            {
                kup->type = PAGE_TYPE_SMALL;
                kup->size = off;

                kup ++;
            }
        }

        /* clear to zero */
        rt_memset(z, 0, sizeof(slab_zone));

        /* offset of slab zone struct in zone */
        off = sizeof(slab_zone);

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (off + size - 1) & ~(size - 1);
        else
            off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;

        z->z_magic     = ZALLOC_SLAB_MAGIC;
        z->z_zoneindex = zi;
        z->z_nmax      = (zone_size - off) / size;
        z->z_nfree     = z->z_nmax - 1;
        z->z_baseptr   = (rt_uint8_t *)z + off;
        z->z_uindex    = 0;
        z->z_chunksize = size;

        chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);

        /* link to zone array */
        z->z_next = zone_array[zi];
        zone_array[zi] = z;

#ifdef RT_MEM_STATS
        used_mem += z->z_chunksize;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif
    }

done:
    rt_sem_release(&heap_sem);
    RT_OBJECT_HOOK_CALL(rt_malloc_hook, ((char *)chunk, size));

__exit:
    return chunk;
}
RTM_EXPORT(rt_malloc);

/**
 * This function will change the size of a previously allocated memory block.
 *
 * @param ptr the previously allocated memory block
 * @param size the new size of the memory block
 *
 * @return the allocated memory
 */
void *rt_realloc(void *ptr, rt_size_t size)
{
    void *nptr;
    slab_zone *z;
    struct memusage *kup;

    if (ptr == RT_NULL)
        return rt_malloc(size);
    if (size == 0)
    {
        rt_free(ptr);

        return RT_NULL;
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_size_t osize;

        osize = kup->size << RT_MM_PAGE_BITS;
        if ((nptr = rt_malloc(size)) == RT_NULL)
            return RT_NULL;
        rt_memcpy(nptr, ptr, size > osize ? osize : size);
        rt_free(ptr);

        return nptr;
    }
    else if (kup->type == PAGE_TYPE_SMALL)
    {
        z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) -
                          kup->size * RT_MM_PAGE_SIZE);
        RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

        zoneindex(&size);
        if (z->z_chunksize == size)
            return (ptr); /* same chunk */

        /*
         * Allocate memory for the new request size.  Note that zoneindex has
         * already adjusted the request size to the appropriate chunk size, which
         * should optimize our bcopy().  Then copy and return the new pointer.
         */
        if ((nptr = rt_malloc(size)) == RT_NULL)
            return RT_NULL;

        rt_memcpy(nptr, ptr, size > z->z_chunksize ? z->z_chunksize : size);
        rt_free(ptr);

        return nptr;
    }

    return RT_NULL;
}
RTM_EXPORT(rt_realloc);

/**
 * This function will contiguously allocate enough space for count objects
 * that are size bytes of memory each and return a pointer to the allocated
 * memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 *
 * @return pointer to allocated memory / RT_NULL pointer if there is an error
 */
void *rt_calloc(rt_size_t count, rt_size_t size)
{
    void *p;

    /* allocate 'count' objects of size 'size' */
    p = rt_malloc(count * size);

    /* zero the memory */
    if (p)
        rt_memset(p, 0, count * size);

    return p;
}
RTM_EXPORT(rt_calloc);

/**
 * This function will release a memory block previously allocated by rt_malloc.
 * The released memory block is returned to the system heap.
 *
 * @param ptr the address of the memory block which will be released
 */
void rt_free(void *ptr)
{
    slab_zone *z;
    slab_chunk *chunk;
    struct memusage *kup;

    /* free a RT_NULL pointer */
    if (ptr == RT_NULL)
        return;

    RT_OBJECT_HOOK_CALL(rt_free_hook, (ptr));

    /* get memory usage */
#if RT_DEBUG_SLAB
    {
        rt_uint32_t addr = ((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("free a memory 0x%x and align to 0x%x, kup index %d\n",
                      (rt_uint32_t)ptr,
                      (rt_uint32_t)addr,
                      ((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS));
    }
#endif

    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
    /* release large allocation */
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_uint32_t size;

        /* lock heap */
        rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
        /* clear page counter */
        size = kup->size;
        kup->size = 0;

#ifdef RT_MEM_STATS
        used_mem -= size * RT_MM_PAGE_SIZE;
#endif
        rt_sem_release(&heap_sem);

        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("free large memory block 0x%x, page count %d\n",
                      (rt_uint32_t)ptr, size));

        /* free this page */
        rt_page_free(ptr, size);

        return;
    }

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* small chunk case: get the zone this chunk belongs to */
    z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) -
                      kup->size * RT_MM_PAGE_SIZE);
    RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

    chunk          = (slab_chunk *)ptr;
    chunk->c_next  = z->z_freechunk;
    z->z_freechunk = chunk;

#ifdef RT_MEM_STATS
    used_mem -= z->z_chunksize;
#endif

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_nfree++ == 0)
    {
        z->z_next = zone_array[z->z_zoneindex];
        zone_array[z->z_zoneindex] = z;
    }

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_nfree == z->z_nmax &&
        (z->z_next || zone_array[z->z_zoneindex] != z))
    {
        slab_zone **pz;

        RT_DEBUG_LOG(RT_DEBUG_SLAB, ("free zone 0x%x, zoneindex %d\n",
                                     (rt_uint32_t)z, z->z_zoneindex));

        /* remove zone from zone array list */
        for (pz = &zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)
            ;
        *pz = z->z_next;

        /* reset zone */
        z->z_magic = -1;

        /* insert to free zone list */
        z->z_next = zone_free;
        zone_free = z;

        ++ zone_free_cnt;

        /* release zone to page allocator */
        if (zone_free_cnt > ZONE_RELEASE_THRESH)
        {
            register rt_base_t i;

            z         = zone_free;
            zone_free = z->z_next;
            -- zone_free_cnt;

            /* set memory usage for each page of the zone */
            for (i = 0, kup = btokup(z); i < zone_page_cnt; i ++)
            {
                kup->type = PAGE_TYPE_FREE;
                kup->size = 0;
                kup ++;
            }

            /* unlock heap */
            rt_sem_release(&heap_sem);

            /* release pages */
            rt_page_free(z, zone_size / RT_MM_PAGE_SIZE);

            return;
        }
    }
    /* unlock heap */
    rt_sem_release(&heap_sem);
}
RTM_EXPORT(rt_free);

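/*
 * Usage sketch (illustrative, not part of the original source): the public
 * allocator API behaves like the standard malloc family; small requests are
 * served from zones, requests of zone_limit bytes or more are served as
 * whole pages.
 *
 *     char *p = rt_malloc(64);
 *     char *q;
 *     if (p != RT_NULL)
 *     {
 *         rt_memset(p, 0, 64);
 *         q = rt_realloc(p, 200);
 *         if (q != RT_NULL)
 *             p = q;
 *         rt_free(p);
 *     }
 */
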
#ifdef RT_MEM_STATS
void rt_memory_info(rt_uint32_t *total,
                    rt_uint32_t *used,
                    rt_uint32_t *max_used)
{
    if (total != RT_NULL)
        *total = heap_end - heap_start;

    if (used  != RT_NULL)
        *used = used_mem;

    if (max_used != RT_NULL)
        *max_used = max_mem;
}

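/*
 * Usage sketch (illustrative, not part of the original source): querying the
 * statistics kept under RT_MEM_STATS.
 *
 *     rt_uint32_t total, used, max_used;
 *
 *     rt_memory_info(&total, &used, &max_used);
 *     rt_kprintf("heap: %d/%d bytes used, peak %d\n", used, total, max_used);
 */
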
#ifdef RT_USING_FINSH
#include <finsh.h>

void list_mem(void)
{
    rt_kprintf("total memory: %d\n", heap_end - heap_start);
    rt_kprintf("used memory : %d\n", used_mem);
    rt_kprintf("maximum allocated memory: %d\n", max_mem);
}
FINSH_FUNCTION_EXPORT(list_mem, list memory usage information)
#endif
#endif

/**@}*/

#endif