/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-08-25     armink       the first version
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

/**
 * ring block buffer object initialization
 *
 * @param rbb ring block buffer object
 * @param buf buffer
 * @param buf_size buffer size
 * @param block_set block set
 * @param blk_max_num max block number
 *
 * @note If your application needs aligned access, please make sure the buffer address is aligned.
 */
void rt_rbb_init(rt_rbb_t rbb, rt_uint8_t *buf, rt_size_t buf_size, rt_rbb_blk_t block_set, rt_size_t blk_max_num)
{
    rt_size_t i;

    RT_ASSERT(rbb);
    RT_ASSERT(buf);
    RT_ASSERT(block_set);

    rbb->buf = buf;
    rbb->buf_size = buf_size;
    rbb->blk_set = block_set;
    rbb->blk_max_num = blk_max_num;
    rt_slist_init(&rbb->blk_list);
    /* initialize block status */
    for (i = 0; i < blk_max_num; i++)
    {
        block_set[i].status = RT_RBB_BLK_UNUSED;
    }
}
RTM_EXPORT(rt_rbb_init);
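
/*
 * Usage sketch (illustrative only, not part of the driver): initializing a ring
 * block buffer with statically allocated storage. The names demo_rbb, demo_buf
 * and demo_blk_set, and the sizes, are hypothetical example values.
 *
 *     static struct rt_rbb demo_rbb;
 *     static rt_uint8_t demo_buf[1024];
 *     static struct rt_rbb_blk demo_blk_set[8];
 *
 *     rt_rbb_init(&demo_rbb, demo_buf, sizeof(demo_buf), demo_blk_set,
 *                 sizeof(demo_blk_set) / sizeof(demo_blk_set[0]));
 */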

/**
 * ring block buffer object create
 *
 * @param buf_size buffer size
 * @param blk_max_num max block number
 *
 * @return != NULL: ring block buffer object
 *            NULL: create failed
 */
rt_rbb_t rt_rbb_create(rt_size_t buf_size, rt_size_t blk_max_num)
{
    rt_rbb_t rbb = NULL;
    rt_uint8_t *buf;
    rt_rbb_blk_t blk_set;

    rbb = (rt_rbb_t)rt_malloc(sizeof(struct rt_rbb));
    if (!rbb)
    {
        return NULL;
    }

    buf = (rt_uint8_t *)rt_malloc(buf_size);
    if (!buf)
    {
        rt_free(rbb);
        return NULL;
    }

    blk_set = (rt_rbb_blk_t)rt_malloc(sizeof(struct rt_rbb_blk) * blk_max_num);
    if (!blk_set)
    {
        rt_free(buf);
        rt_free(rbb);
        return NULL;
    }

    rt_rbb_init(rbb, buf, buf_size, blk_set, blk_max_num);

    return rbb;
}
RTM_EXPORT(rt_rbb_create);
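
/*
 * Usage sketch (illustrative only, not part of the driver): dynamic creation
 * and destruction. The buffer size 1024 and block count 8 are arbitrary
 * example values.
 *
 *     rt_rbb_t rbb = rt_rbb_create(1024, 8);
 *     if (rbb == NULL)
 *         return;
 *
 *     ... use the ring block buffer ...
 *
 *     rt_rbb_destroy(rbb);
 */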

/**
 * ring block buffer object destroy
 *
 * @param rbb ring block buffer object
 */
void rt_rbb_destroy(rt_rbb_t rbb)
{
    RT_ASSERT(rbb);

    /* free the data buffer and block set before the object itself */
    rt_free(rbb->buf);
    rt_free(rbb->blk_set);
    rt_free(rbb);
}
RTM_EXPORT(rt_rbb_destroy);

static rt_rbb_blk_t find_empty_blk_in_set(rt_rbb_t rbb)
{
    rt_size_t i;

    RT_ASSERT(rbb);

    for (i = 0; i < rbb->blk_max_num; i++)
    {
        if (rbb->blk_set[i].status == RT_RBB_BLK_UNUSED)
        {
            return &rbb->blk_set[i];
        }
    }

    return NULL;
}

/**
 * Allocate a block of the given size. The block will be added to blk_list when allocation succeeds.
 *
 * @param rbb ring block buffer object
 * @param blk_size block size
 *
 * @note If your application needs aligned access, please make sure blk_size is aligned.
 *
 * @return != NULL: allocated block
 *            NULL: allocate failed
 */
rt_rbb_blk_t rt_rbb_blk_alloc(rt_rbb_t rbb, rt_size_t blk_size)
{
    rt_base_t level;
    rt_size_t empty1 = 0, empty2 = 0;
    rt_rbb_blk_t head, tail, new = NULL;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_size < (1L << 24));

    level = rt_hw_interrupt_disable();

    new = find_empty_blk_in_set(rbb);

    if (rt_slist_len(&rbb->blk_list) < rbb->blk_max_num && new)
    {
        if (rt_slist_len(&rbb->blk_list) > 0)
        {
            head = rt_slist_first_entry(&rbb->blk_list, struct rt_rbb_blk, list);
            tail = rt_slist_tail_entry(&rbb->blk_list, struct rt_rbb_blk, list);
            if (head->buf <= tail->buf)
            {
                /**
                 *                      head                     tail
                 * +--------------------------------------+-----------------+------------------+
                 * |      empty2     | block1 |   block2  |      block3     |       empty1     |
                 * +--------------------------------------+-----------------+------------------+
                 *                            rbb->buf
                 */
                empty1 = (rbb->buf + rbb->buf_size) - (tail->buf + tail->size);
                empty2 = head->buf - rbb->buf;

                if (empty1 >= blk_size)
                {
                    rt_slist_append(&rbb->blk_list, &new->list);
                    new->status = RT_RBB_BLK_INITED;
                    new->buf = tail->buf + tail->size;
                    new->size = blk_size;
                }
                else if (empty2 >= blk_size)
                {
                    rt_slist_append(&rbb->blk_list, &new->list);
                    new->status = RT_RBB_BLK_INITED;
                    new->buf = rbb->buf;
                    new->size = blk_size;
                }
                else
                {
                    /* no space */
                    new = NULL;
                }
            }
            else
            {
                /**
                 *        tail                                              head
                 * +----------------+-------------------------------------+--------+-----------+
                 * |     block3     |                empty1               | block1 |  block2   |
                 * +----------------+-------------------------------------+--------+-----------+
                 *                            rbb->buf
                 */
                empty1 = head->buf - (tail->buf + tail->size);

                if (empty1 >= blk_size)
                {
                    rt_slist_append(&rbb->blk_list, &new->list);
                    new->status = RT_RBB_BLK_INITED;
                    new->buf = tail->buf + tail->size;
                    new->size = blk_size;
                }
                else
                {
                    /* no space */
                    new = NULL;
                }
            }
        }
        else
        {
            /* the list is empty */
            rt_slist_append(&rbb->blk_list, &new->list);
            new->status = RT_RBB_BLK_INITED;
            new->buf = rbb->buf;
            new->size = blk_size;
        }
    }
    else
    {
        new = NULL;
    }

    rt_hw_interrupt_enable(level);

    return new;
}
RTM_EXPORT(rt_rbb_blk_alloc);
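
/*
 * Usage sketch (illustrative only, not part of the driver): typical producer
 * path, e.g. run from an ISR or a DMA completion callback. The 128-byte block
 * size and the data source are hypothetical.
 *
 *     rt_rbb_blk_t blk = rt_rbb_blk_alloc(rbb, 128);
 *     if (blk)
 *     {
 *         rt_memcpy(rt_rbb_blk_buf(blk), data, 128);
 *         rt_rbb_blk_put(blk);
 *     }
 */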

/**
 * put a block into the ring block buffer object
 *
 * @param block the block
 */
void rt_rbb_blk_put(rt_rbb_blk_t block)
{
    RT_ASSERT(block);
    RT_ASSERT(block->status == RT_RBB_BLK_INITED);

    block->status = RT_RBB_BLK_PUT;
}
RTM_EXPORT(rt_rbb_blk_put);

/**
 * get a block from the ring block buffer object
 *
 * @param rbb ring block buffer object
 *
 * @return != NULL: block
 *            NULL: get failed
 */
rt_rbb_blk_t rt_rbb_blk_get(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_rbb_blk_t block = NULL;
    rt_slist_t *node;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return NULL;

    level = rt_hw_interrupt_disable();

    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        block = rt_slist_entry(node, struct rt_rbb_blk, list);
        if (block->status == RT_RBB_BLK_PUT)
        {
            block->status = RT_RBB_BLK_GET;
            goto __exit;
        }
    }
    /* not found */
    block = NULL;

__exit:

    rt_hw_interrupt_enable(level);

    return block;
}
RTM_EXPORT(rt_rbb_blk_get);
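
/*
 * Usage sketch (illustrative only, not part of the driver): typical consumer
 * path, e.g. a worker thread draining the buffer. process() stands in for any
 * user-defined handler.
 *
 *     rt_rbb_blk_t blk = rt_rbb_blk_get(rbb);
 *     if (blk)
 *     {
 *         process(rt_rbb_blk_buf(blk), rt_rbb_blk_size(blk));
 *         rt_rbb_blk_free(rbb, blk);
 *     }
 */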

/**
 * return the block size
 *
 * @param block the block
 *
 * @return block size
 */
rt_size_t rt_rbb_blk_size(rt_rbb_blk_t block)
{
    RT_ASSERT(block);

    return block->size;
}
RTM_EXPORT(rt_rbb_blk_size);

/**
 * return the block buffer
 *
 * @param block the block
 *
 * @return block buffer
 */
rt_uint8_t *rt_rbb_blk_buf(rt_rbb_blk_t block)
{
    RT_ASSERT(block);

    return block->buf;
}
RTM_EXPORT(rt_rbb_blk_buf);

/**
 * free the block
 *
 * @param rbb ring block buffer object
 * @param block the block
 */
void rt_rbb_blk_free(rt_rbb_t rbb, rt_rbb_blk_t block)
{
    rt_base_t level;

    RT_ASSERT(rbb);
    RT_ASSERT(block);
    RT_ASSERT(block->status != RT_RBB_BLK_UNUSED);

    level = rt_hw_interrupt_disable();

    /* remove it from the rbb block list */
    rt_slist_remove(&rbb->blk_list, &block->list);

    block->status = RT_RBB_BLK_UNUSED;

    rt_hw_interrupt_enable(level);
}
RTM_EXPORT(rt_rbb_blk_free);

/**
 * get a queue of continuous blocks by the given size
 *
 *          tail                         head
 * +------------------+---------------+--------+----------+--------+
 * |      block3      |  empty1       | block1 |  block2  |fragment|
 * +------------------+------------------------+----------+--------+
 *                                    |<-- return_size -->|    |
 *                                    |<--- queue_data_len --->|
 *
 *         tail                          head
 * +------------------+---------------+--------+----------+--------+
 * |      block3      |  empty1       | block1 |  block2  |fragment|
 * +------------------+------------------------+----------+--------+
 *                                    |<-- return_size -->|              out of len(b1+b2+b3)    |
 *                                    |<-------------------- queue_data_len -------------------->|
 *
 * @param rbb ring block buffer object
 * @param queue_data_len the max queue data size, the returned size will be no more than it
 * @param blk_queue continuous block queue
 *
 * @return the block queue data total size
 */
rt_size_t rt_rbb_blk_queue_get(rt_rbb_t rbb, rt_size_t queue_data_len, rt_rbb_blk_queue_t blk_queue)
{
    rt_base_t level;
    rt_size_t data_total_size = 0;
    rt_slist_t *node;
    rt_rbb_blk_t last_block = NULL, block;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_queue);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_hw_interrupt_disable();

    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        if (!last_block)
        {
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status == RT_RBB_BLK_PUT)
            {
                /* save the first put status block to the queue */
                blk_queue->blocks = last_block;
                blk_queue->blk_num = 0;
            }
            else
            {
                /* the first block must be in put status */
                last_block = NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * the following conditions will break the loop:
             * 1. the current block is not in put status
             * 2. the last block and the current block are not continuous
             * 3. the data_total_size would go out of range
             */
            if (block->status != RT_RBB_BLK_PUT ||
                last_block->buf > block->buf ||
                data_total_size + block->size > queue_data_len)
            {
                break;
            }
            /* backup the last block */
            last_block = block;
        }
        /* remove the current block */
        rt_slist_remove(&rbb->blk_list, &last_block->list);
        data_total_size += last_block->size;
        last_block->status = RT_RBB_BLK_GET;
        blk_queue->blk_num++;
    }

    rt_hw_interrupt_enable(level);

    return data_total_size;
}
RTM_EXPORT(rt_rbb_blk_queue_get);
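
/*
 * Usage sketch (illustrative only, not part of the driver): draining several
 * continuous blocks in one pass, e.g. to feed a single DMA transfer. consume()
 * stands in for any user-defined handler; struct rt_rbb_blk_queue is the queue
 * descriptor declared alongside rt_rbb_blk_queue_t in the header.
 *
 *     struct rt_rbb_blk_queue blk_queue;
 *     rt_size_t len;
 *
 *     len = rt_rbb_blk_queue_get(rbb, rt_rbb_next_blk_queue_len(rbb), &blk_queue);
 *     if (len > 0)
 *     {
 *         consume(rt_rbb_blk_queue_buf(&blk_queue), len);
 *         rt_rbb_blk_queue_free(rbb, &blk_queue);
 *     }
 */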

/**
 * get the total data length of all blocks in the block queue
 *
 * @param blk_queue the block queue
 *
 * @return total data length
 */
rt_size_t rt_rbb_blk_queue_len(rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t i, data_total_size = 0;

    RT_ASSERT(blk_queue);

    for (i = 0; i < blk_queue->blk_num; i++)
    {
        data_total_size += blk_queue->blocks[i].size;
    }

    return data_total_size;
}
RTM_EXPORT(rt_rbb_blk_queue_len);

/**
 * return the block queue buffer
 *
 * @param blk_queue the block queue
 *
 * @return block queue buffer
 */
rt_uint8_t *rt_rbb_blk_queue_buf(rt_rbb_blk_queue_t blk_queue)
{
    RT_ASSERT(blk_queue);

    return blk_queue->blocks[0].buf;
}
RTM_EXPORT(rt_rbb_blk_queue_buf);

/**
 * free the block queue
 *
 * @param rbb ring block buffer object
 * @param blk_queue the block queue
 */
void rt_rbb_blk_queue_free(rt_rbb_t rbb, rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t i;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_queue);

    for (i = 0; i < blk_queue->blk_num; i++)
    {
        rt_rbb_blk_free(rbb, &blk_queue->blocks[i]);
    }
}
RTM_EXPORT(rt_rbb_blk_queue_free);

/**
 * Blocks in put status whose buffers are continuous can be made into a block queue.
 * This function returns the data length of the next block queue that could be made.
 *
 * @param rbb ring block buffer object
 *
 * @return the data length of the next block queue that could be made
 */
rt_size_t rt_rbb_next_blk_queue_len(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_size_t data_len = 0;
    rt_slist_t *node;
    rt_rbb_blk_t last_block = NULL, block;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_hw_interrupt_disable();

    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        if (!last_block)
        {
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status != RT_RBB_BLK_PUT)
            {
                /* the first block must be in put status */
                last_block = NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * the following conditions will break the loop:
             * 1. the current block is not in put status
             * 2. the last block and the current block are not continuous
             */
            if (block->status != RT_RBB_BLK_PUT || last_block->buf > block->buf)
            {
                break;
            }
            /* backup the last block */
            last_block = block;
        }
        data_len += last_block->size;
    }

    rt_hw_interrupt_enable(level);

    return data_len;
}
RTM_EXPORT(rt_rbb_next_blk_queue_len);

/**
 * get the ring block buffer object buffer size
 *
 * @param rbb ring block buffer object
 *
 * @return buffer size
 */
rt_size_t rt_rbb_get_buf_size(rt_rbb_t rbb)
{
    RT_ASSERT(rbb);

    return rbb->buf_size;
}
RTM_EXPORT(rt_rbb_get_buf_size);