Lines Matching full:rb (identifier search hits; all excerpts below come from libbpf's ringbuf.c)

61 static void ringbuf_free_ring(struct ring_buffer *rb, struct ring *r)  in ringbuf_free_ring()  argument
64 munmap(r->consumer_pos, rb->page_size); in ringbuf_free_ring()
68 munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1)); in ringbuf_free_ring()
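The size in the second munmap() above encodes the ring layout: mask + 1 is the ring's data size (max_entries, a power of two), and the kernel maps that data area twice back-to-back so a record that wraps past the end of the buffer can still be read as one contiguous chunk. A minimal sketch of the same size math, with illustrative variable names not taken from the source:

        size_t ring_size   = r->mask + 1;                   /* == max_entries, a power of two */
        size_t prod_map_sz = rb->page_size + 2 * ring_size; /* producer page + data area mapped twice */
        /* prod_map_sz is exactly the length passed to munmap() above */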
76 int ring_buffer__add(struct ring_buffer *rb, int map_fd, in ring_buffer__add() argument
103 tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings)); in ring_buffer__add()
106 rb->rings = tmp; in ring_buffer__add()
108 tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events)); in ring_buffer__add()
111 rb->events = tmp; in ring_buffer__add()
116 rb->rings[rb->ring_cnt] = r; in ring_buffer__add()
124 tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0); in ring_buffer__add()
137 mmap_sz = rb->page_size + 2 * (__u64)info.max_entries; in ring_buffer__add()
143 tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size); in ring_buffer__add()
151 r->data = tmp + rb->page_size; in ring_buffer__add()
153 e = &rb->events[rb->ring_cnt]; in ring_buffer__add()
157 e->data.fd = rb->ring_cnt; in ring_buffer__add()
158 if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) { in ring_buffer__add()
165 rb->ring_cnt++; in ring_buffer__add()
169 ringbuf_free_ring(rb, r); in ring_buffer__add()
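ring_buffer__add() registers one more BPF_MAP_TYPE_RINGBUF map with an existing manager; each map keeps its own sample callback while sharing the manager's epoll fd, and a negative callback return value stops the current drain and is propagated to the caller. A minimal usage sketch, assuming rb was returned earlier by ring_buffer__new() and map_fd_b is the fd of a second ringbuf map (both placeholders):

        static int handle_b(void *ctx, void *data, size_t size)
        {
                /* called once per sample drained from the second ring;
                 * data is only valid for the duration of the callback */
                return 0;
        }

        ...

        if (ring_buffer__add(rb, map_fd_b, handle_b, NULL) < 0)
                fprintf(stderr, "failed to add second ringbuf map\n");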
173 void ring_buffer__free(struct ring_buffer *rb) in ring_buffer__free() argument
177 if (!rb) in ring_buffer__free()
180 for (i = 0; i < rb->ring_cnt; ++i) in ring_buffer__free()
181 ringbuf_free_ring(rb, rb->rings[i]); in ring_buffer__free()
182 if (rb->epoll_fd >= 0) in ring_buffer__free()
183 close(rb->epoll_fd); in ring_buffer__free()
185 free(rb->events); in ring_buffer__free()
186 free(rb->rings); in ring_buffer__free()
187 free(rb); in ring_buffer__free()
194 struct ring_buffer *rb; in ring_buffer__new() local
200 rb = calloc(1, sizeof(*rb)); in ring_buffer__new()
201 if (!rb) in ring_buffer__new()
204 rb->page_size = getpagesize(); in ring_buffer__new()
206 rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); in ring_buffer__new()
207 if (rb->epoll_fd < 0) { in ring_buffer__new()
213 err = ring_buffer__add(rb, map_fd, sample_cb, ctx); in ring_buffer__new()
217 return rb; in ring_buffer__new()
220 ring_buffer__free(rb); in ring_buffer__new()
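ring_buffer__new(), ring_buffer__poll() and ring_buffer__free() together form the usual consumer loop. A minimal sketch, assuming map_fd is the fd of an already-loaded BPF_MAP_TYPE_RINGBUF map (obtained elsewhere, e.g. via bpf_map__fd()) and handle_event is a placeholder callback:

        #include <errno.h>
        #include <stdio.h>
        #include <bpf/libbpf.h>

        static int handle_event(void *ctx, void *data, size_t size)
        {
                /* one sample of 'size' bytes; copy it out if it must outlive the callback */
                return 0;
        }

        int consume_loop(int map_fd)
        {
                struct ring_buffer *rb;
                int err = 0;

                rb = ring_buffer__new(map_fd, handle_event, NULL, NULL /* opts */);
                if (!rb)
                        return -errno;

                for (;;) {                      /* a real program would also check an exit flag */
                        err = ring_buffer__poll(rb, 100 /* timeout, ms */);
                        if (err == -EINTR) {    /* interrupted by a signal: not an error */
                                err = 0;
                                continue;
                        }
                        if (err < 0)            /* real error from epoll_wait or a callback */
                                break;
                        /* err >= 0: number of samples consumed in this pass */
                }

                ring_buffer__free(rb);
                return err;
        }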
288 int ring_buffer__consume_n(struct ring_buffer *rb, size_t n) in ring_buffer__consume_n() argument
293 for (i = 0; i < rb->ring_cnt; i++) { in ring_buffer__consume_n()
294 struct ring *ring = rb->rings[i]; in ring_buffer__consume_n()
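ring_buffer__consume_n() drains at most n samples across all registered rings without waiting, which bounds how long a single drain pass can run. A short sketch, with an arbitrary batch size:

        int n = ring_buffer__consume_n(rb, 64); /* consume up to 64 pending samples, non-blocking */
        if (n < 0)
                fprintf(stderr, "consume_n failed: %d\n", n);
        /* otherwise n is how many samples were handed to the callbacks (at most 64) */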
313 int ring_buffer__consume(struct ring_buffer *rb) in ring_buffer__consume() argument
318 for (i = 0; i < rb->ring_cnt; i++) { in ring_buffer__consume()
319 struct ring *ring = rb->rings[i]; in ring_buffer__consume()
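ring_buffer__consume() processes whatever is already queued on every ring without touching epoll; one common use is flushing leftover samples after a poll loop has been told to stop. Sketch:

        /* after the main poll loop exits, flush anything still queued */
        int n = ring_buffer__consume(rb);
        if (n > 0)
                printf("flushed %d leftover sample(s)\n", n);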
337 int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms) in ring_buffer__poll() argument
342 cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms); in ring_buffer__poll()
347 __u32 ring_id = rb->events[i].data.fd; in ring_buffer__poll()
348 struct ring *ring = rb->rings[ring_id]; in ring_buffer__poll()
361 int ring_buffer__epoll_fd(const struct ring_buffer *rb) in ring_buffer__epoll_fd() argument
363 return rb->epoll_fd; in ring_buffer__epoll_fd()
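ring_buffer__epoll_fd() exposes the manager's epoll fd so ring buffer readiness can be folded into an application's own event loop (an epoll fd is itself pollable); when it becomes readable, a non-blocking drain does the rest. A sketch, where my_epfd is a placeholder for the application's existing epoll instance:

        struct epoll_event ev = {
                .events   = EPOLLIN,
                .data.ptr = rb,                 /* lets our loop recognize the ringbuf */
        };
        int rb_fd = ring_buffer__epoll_fd(rb);

        if (epoll_ctl(my_epfd, EPOLL_CTL_ADD, rb_fd, &ev) < 0)
                return -errno;

        /* later, when my_epfd reports rb_fd as readable: */
        ring_buffer__consume(rb);               /* or ring_buffer__poll(rb, 0) */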
366 struct ring *ring_buffer__ring(struct ring_buffer *rb, unsigned int idx) in ring_buffer__ring() argument
368 if (idx >= rb->ring_cnt) in ring_buffer__ring()
371 return rb->rings[idx]; in ring_buffer__ring()
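ring_buffer__ring() returns an opaque per-ring handle (NULL with errno set for an out-of-range index). Recent libbpf releases pair it with ring__* accessors for introspection; their availability depends on the libbpf version, so treat the accessor calls below as a hedged sketch:

        struct ring *r = ring_buffer__ring(rb, 0);      /* first registered map */
        if (r)
                /* ring__avail_data_size()/ring__size() are newer-libbpf accessors */
                printf("pending bytes: %zu of %zu\n",
                       ring__avail_data_size(r), ring__size(r));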
423 static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb) in user_ringbuf_unmap_ring() argument
425 if (rb->consumer_pos) { in user_ringbuf_unmap_ring()
426 munmap(rb->consumer_pos, rb->page_size); in user_ringbuf_unmap_ring()
427 rb->consumer_pos = NULL; in user_ringbuf_unmap_ring()
429 if (rb->producer_pos) { in user_ringbuf_unmap_ring()
430 munmap(rb->producer_pos, rb->page_size + 2 * (rb->mask + 1)); in user_ringbuf_unmap_ring()
431 rb->producer_pos = NULL; in user_ringbuf_unmap_ring()
435 void user_ring_buffer__free(struct user_ring_buffer *rb) in user_ring_buffer__free() argument
437 if (!rb) in user_ring_buffer__free()
440 user_ringbuf_unmap_ring(rb); in user_ring_buffer__free()
442 if (rb->epoll_fd >= 0) in user_ring_buffer__free()
443 close(rb->epoll_fd); in user_ring_buffer__free()
445 free(rb); in user_ring_buffer__free()
448 static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd) in user_ringbuf_map() argument
472 rb->map_fd = map_fd; in user_ringbuf_map()
473 rb->mask = info.max_entries - 1; in user_ringbuf_map()
476 tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0); in user_ringbuf_map()
483 rb->consumer_pos = tmp; in user_ringbuf_map()
490 mmap_sz = rb->page_size + 2 * (__u64)info.max_entries; in user_ringbuf_map()
496 map_fd, rb->page_size); in user_ringbuf_map()
504 rb->producer_pos = tmp; in user_ringbuf_map()
505 rb->data = tmp + rb->page_size; in user_ringbuf_map()
507 rb_epoll = &rb->event; in user_ringbuf_map()
509 if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) { in user_ringbuf_map()
521 struct user_ring_buffer *rb; in user_ring_buffer__new() local
527 rb = calloc(1, sizeof(*rb)); in user_ring_buffer__new()
528 if (!rb) in user_ring_buffer__new()
531 rb->page_size = getpagesize(); in user_ring_buffer__new()
533 rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); in user_ring_buffer__new()
534 if (rb->epoll_fd < 0) { in user_ring_buffer__new()
540 err = user_ringbuf_map(rb, map_fd); in user_ring_buffer__new()
544 return rb; in user_ring_buffer__new()
547 user_ring_buffer__free(rb); in user_ring_buffer__new()
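user_ring_buffer__new() is the producer-side counterpart for BPF_MAP_TYPE_USER_RINGBUF maps: user space reserves and submits samples, and a BPF program drains them with the bpf_user_ringbuf_drain() helper. A minimal setup/teardown sketch, where map_fd is a placeholder for the fd of such a map:

        struct user_ring_buffer *urb;

        urb = user_ring_buffer__new(map_fd, NULL /* opts */);
        if (!urb)
                return -errno;                  /* errno is set on failure */

        /* ... reserve, fill and submit samples here ... */

        user_ring_buffer__free(urb);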
551 static void user_ringbuf_commit(struct user_ring_buffer *rb, void *sample, bool discard) in user_ringbuf_commit() argument
557 hdr_offset = rb->mask + 1 + (sample - rb->data) - BPF_RINGBUF_HDR_SZ; in user_ringbuf_commit()
558 hdr = rb->data + (hdr_offset & rb->mask); in user_ringbuf_commit()
570 void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample) in user_ring_buffer__discard() argument
572 user_ringbuf_commit(rb, sample, true); in user_ring_buffer__discard()
575 void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample) in user_ring_buffer__submit() argument
577 user_ringbuf_commit(rb, sample, false); in user_ring_buffer__submit()
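user_ring_buffer__submit() publishes a previously reserved sample to the kernel consumer, while user_ring_buffer__discard() releases it unpublished; both return void and cannot fail. A two-branch sketch, assuming e was obtained from user_ring_buffer__reserve() (shown just below) and fill_event() is a hypothetical filler/validator:

        if (fill_event(e) == 0)
                user_ring_buffer__submit(urb, e);       /* hand the sample to the BPF consumer */
        else
                user_ring_buffer__discard(urb, e);      /* give the reserved space back unused */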
580 void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size) in user_ring_buffer__reserve() argument
594 cons_pos = smp_load_acquire(rb->consumer_pos); in user_ring_buffer__reserve()
596 prod_pos = smp_load_acquire(rb->producer_pos); in user_ring_buffer__reserve()
598 max_size = rb->mask + 1; in user_ring_buffer__reserve()
609 hdr = rb->data + (prod_pos & rb->mask); in user_ring_buffer__reserve()
616 smp_store_release(rb->producer_pos, prod_pos + total_size); in user_ring_buffer__reserve()
618 return (void *)rb->data + ((prod_pos + BPF_RINGBUF_HDR_SZ) & rb->mask); in user_ring_buffer__reserve()
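user_ring_buffer__reserve() carves size bytes out of the ring without blocking and returns a pointer for the caller to fill in; on failure it returns NULL with errno set (e.g. ENOSPC when the ring is currently full, E2BIG when the sample could never fit). A minimal sketch with a hypothetical struct event payload:

        struct event { __u32 pid; char comm[16]; };     /* hypothetical sample layout */
        struct event *e;

        e = user_ring_buffer__reserve(urb, sizeof(*e));
        if (!e)
                return -errno;                          /* ENOSPC, E2BIG, ... */

        e->pid = 42;    /* fill in the payload, then submit or discard as shown above */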
631 void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb, __u32 size, int timeout_ms) in user_ring_buffer__reserve_blocking() argument
651 sample = user_ring_buffer__reserve(rb, size); in user_ring_buffer__reserve_blocking()
668 cnt = epoll_wait(rb->epoll_fd, &rb->event, 1, ms_remaining); in user_ring_buffer__reserve_blocking()
684 return user_ring_buffer__reserve(rb, size); in user_ring_buffer__reserve_blocking()
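user_ring_buffer__reserve_blocking() behaves like the plain reserve, but when the ring is full it waits on the ring buffer's epoll fd for the kernel to drain samples, up to timeout_ms (-1 waits indefinitely). A sketch reusing the hypothetical struct event from the previous example:

        struct event *e;

        /* wait up to 500 ms for space if the ring is currently full */
        e = user_ring_buffer__reserve_blocking(urb, sizeof(*e), 500);
        if (!e)
                return -errno;                  /* errno is set on failure */

        e->pid = 42;
        user_ring_buffer__submit(urb, e);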