1 /*
2  * Copyright (c) 2015 PLUMgrid, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 /* eBPF mini library */
17 
18 #ifndef LIBBPF_H
19 #define LIBBPF_H
20 
21 #include "linux/bpf.h"
22 #include <stdbool.h>
23 #include <stdint.h>
24 #include <sys/types.h>
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
/*
 * Attributes describing a BPF map to create; passed to
 * bcc_create_map_xattr(). Mirrors the kernel's BPF_MAP_CREATE attribute
 * set -- zero-initialize and fill only the fields the chosen map_type
 * needs.
 */
struct bcc_create_map_attr {
	const char *name;	/* optional map name for introspection tools */
	enum bpf_map_type map_type;
	__u32 map_flags;	/* BPF_F_* creation flags */
	__u32 key_size;		/* size of a key, in bytes */
	__u32 value_size;	/* size of a value, in bytes */
	__u32 max_entries;	/* maximum number of entries in the map */
	__u32 numa_node;	/* NUMA node hint -- presumably only honored with the matching map_flags bit; TODO confirm */
	__u32 btf_fd;		/* fd of BTF object describing key/value types */
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 map_ifindex;	/* device ifindex -- presumably for offloaded maps; TODO confirm */
	union {			/* interpretation depends on map_type */
		__u32 inner_map_fd;		/* inner map fd for map-in-map types */
		__u32 btf_vmlinux_value_type_id;
	};
};
47 
/* Opaque load options; only used by pointer here, so a forward
 * declaration suffices (full definition lives in libbpf). */
struct bpf_prog_load_opts;

/* Selects whether a probe fires on function entry or on function return
 * (kprobe vs. kretprobe, uprobe vs. uretprobe). */
enum bpf_probe_attach_type {
	BPF_PROBE_ENTRY,
	BPF_PROBE_RETURN
};
54 
/*
 * Optional settings for bpf_open_perf_buffer_opts(); carries the
 * per-event parameters that bpf_open_perf_buffer() takes inline.
 */
struct bcc_perf_buffer_opts {
	int pid;		/* target pid -- presumably -1 means any process; TODO confirm */
	int cpu;		/* target cpu */
	int wakeup_events;	/* perf wakeup_events threshold -- presumably; TODO confirm */
};
60 
/* Create a BPF map and return its fd -- presumably negative on error. */
int bcc_create_map(enum bpf_map_type map_type, const char *name,
                   int key_size, int value_size, int max_entries,
                   int map_flags);
/* Like bcc_create_map() but driven by a full attribute struct; when
 * allow_rlimit is true the implementation may raise RLIMIT_MEMLOCK on
 * failure -- TODO confirm against implementation. */
int bcc_create_map_xattr(struct bcc_create_map_attr *attr, bool allow_rlimit);
/* Thin wrappers over the BPF map element syscalls. */
int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags);
int bpf_lookup_elem(int fd, void *key, void *value);
int bpf_delete_elem(int fd, void *key);
/* Fetch the map's first key into *key (key_size bytes). */
int bpf_get_first_key(int fd, void *key, size_t key_size);
int bpf_get_next_key(int fd, void *key, void *next_key);
int bpf_lookup_and_delete(int fd, void *key, void *value);
71 
72 /*
73  * Load a BPF program, and return the FD of the loaded program.
74  *
75  * On newer Kernels, the parameter name is used to identify the loaded program
76  * for inspection and debugging. It could be different from the function name.
77  *
78  * If log_level has value greater than 0, or the load failed, it will enable
79  * extra logging of loaded BPF bytecode and register status, and will print the
80  * logging message to stderr. In such cases:
81  *   - If log_buf and log_buf_size are provided, it will use and also write the
82  *     log messages to the provided log_buf. If log_buf is insufficient in size,
83  *     it will not to any additional memory allocation.
84  *   - Otherwise, it will allocate an internal temporary buffer for log message
85  *     printing, and continue to attempt increase that allocated buffer size if
86  *     initial attempt was insufficient in size.
87  */
88 int bcc_prog_load(enum bpf_prog_type prog_type, const char *name,
89                   const struct bpf_insn *insns, int prog_len,
90                   const char *license, unsigned kern_version,
91                   int log_level, char *log_buf, unsigned log_buf_size);
92 int bcc_prog_load_xattr(enum bpf_prog_type prog_type, const char *prog_name,
93 						const char *license, const struct bpf_insn *insns,
94 						struct bpf_prog_load_opts *opts,
95                         int prog_len, char *log_buf,
96                         unsigned log_buf_size, bool allow_rlimit);
/* Attach progfd to sockfd -- presumably a socket filter program via
 * SO_ATTACH_BPF; TODO confirm. */
int bpf_attach_socket(int sockfd, int progfd);

/* Create a RAW socket. If name is a non-empty, null-terminated string,
 * bind the raw socket to the interface named 'name'. */
int bpf_open_raw_sock(const char *name);

/* Callback invoked for each raw sample delivered by a perf reader. */
typedef void (*perf_reader_raw_cb)(void *cb_cookie, void *raw, int raw_size);
/* Callback reporting the count of lost (dropped) events. */
typedef void (*perf_reader_lost_cb)(void *cb_cookie, uint64_t lost);
105 
/* Attach progfd to a kernel function probe. ev_name names the probe
 * event to create; fn_offset is the offset into fn_name; maxactive
 * bounds concurrent return-probe instances -- presumably only
 * meaningful for BPF_PROBE_RETURN; TODO confirm. */
int bpf_attach_kprobe(int progfd, enum bpf_probe_attach_type attach_type,
                      const char *ev_name, const char *fn_name, uint64_t fn_offset,
                      int maxactive);
int bpf_detach_kprobe(const char *ev_name);

/* Attach progfd to a user-space probe at offset within binary_path;
 * pid scopes the probe -- presumably -1 means all processes; TODO
 * confirm. ref_ctr_offset is the USDT semaphore offset, if any. */
int bpf_attach_uprobe(int progfd, enum bpf_probe_attach_type attach_type,
                      const char *ev_name, const char *binary_path,
                      uint64_t offset, pid_t pid, uint32_t ref_ctr_offset);
int bpf_detach_uprobe(const char *ev_name);

/* Attach/detach a tracepoint program, identified by its category and
 * name (e.g. "sched" / "sched_switch"). */
int bpf_attach_tracepoint(int progfd, const char *tp_category,
                          const char *tp_name);
int bpf_detach_tracepoint(const char *tp_category, const char *tp_name);

int bpf_attach_raw_tracepoint(int progfd, const char *tp_name);

/* Attach a BTF-typed program by its prog fd -- presumably
 * fentry/fexit-style; TODO confirm. */
int bpf_attach_kfunc(int prog_fd);

/* Attach an LSM BPF program by its prog fd. */
int bpf_attach_lsm(int prog_fd);

/* True if the running kernel exposes BTF type information. */
bool bpf_has_kernel_btf(void);

/* Query kernel BTF for whether struct_name has field_name -- return
 * convention not visible here; TODO confirm. */
int kernel_struct_has_field(const char *struct_name, const char *field_name);
129 
/* Open a perf ring buffer of page_cnt pages for (pid, cpu), delivering
 * samples to raw_cb and drop counts to lost_cb with cb_cookie. Returns
 * an opaque reader handle -- presumably NULL on failure. */
void * bpf_open_perf_buffer(perf_reader_raw_cb raw_cb,
                            perf_reader_lost_cb lost_cb, void *cb_cookie,
                            int pid, int cpu, int page_cnt);

/* Like bpf_open_perf_buffer(), with pid/cpu/wakeup_events supplied via
 * struct bcc_perf_buffer_opts. */
void * bpf_open_perf_buffer_opts(perf_reader_raw_cb raw_cb,
                            perf_reader_lost_cb lost_cb, void *cb_cookie,
                            int page_cnt, struct bcc_perf_buffer_opts *opts);
137 
/* Attach a prog expressed by progfd to the device specified in dev_name. */
int bpf_attach_xdp(const char *dev_name, int progfd, uint32_t flags);

// attach a prog expressed by progfd to run on a specific perf event. The perf
// event will be created using the perf_event_attr pointer provided.
int bpf_attach_perf_event_raw(int progfd, void *perf_event_attr, pid_t pid,
                              int cpu, int group_fd, unsigned long extra_flags);
// attach a prog expressed by progfd to run on a specific perf event, with
// certain sample period or sample frequency
int bpf_attach_perf_event(int progfd, uint32_t ev_type, uint32_t ev_config,
                          uint64_t sample_period, uint64_t sample_freq,
                          pid_t pid, int cpu, int group_fd);

/* Open a perf event of the given type/config without attaching a BPF
 * program -- presumably returns the event fd. */
int bpf_open_perf_event(uint32_t type, uint64_t config, int pid, int cpu);

/* Disable and close a perf event fd obtained from this library. */
int bpf_close_perf_event_fd(int fd);
154 
/* Callback invoked once per BPF ring buffer record; matches libbpf's
 * ring_buffer_sample_fn signature. */
typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);

struct ring_buffer;

/* Create a ring buffer manager for map_fd; returns a struct ring_buffer*
 * as void* -- presumably NULL on failure. */
void * bpf_new_ringbuf(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx);
void bpf_free_ringbuf(struct ring_buffer *rb);
/* Register an additional ring buffer map with an existing manager. */
int bpf_add_ringbuf(struct ring_buffer *rb, int map_fd,
                    ring_buffer_sample_fn sample_cb, void *ctx);
/* Wait up to timeout_ms for data and consume available records. */
int bpf_poll_ringbuf(struct ring_buffer *rb, int timeout_ms);
/* Consume already-available records without waiting. */
int bpf_consume_ringbuf(struct ring_buffer *rb);
165 
/* Pin an object fd at a bpffs pathname / reopen a pinned object. */
int bpf_obj_pin(int fd, const char *pathname);
int bpf_obj_get(const char *pathname);
/* Fetch kernel info for a prog or map fd; *info_len is presumably
 * in/out (capacity in, bytes written out) -- TODO confirm. */
int bpf_obj_get_info(int prog_map_fd, void *info, uint32_t *info_len);
/* Compute the tag of an instruction stream into *tag. */
int bpf_prog_compute_tag(const struct bpf_insn *insns, int prog_len,
                         unsigned long long *tag);
/* Read the kernel-reported tag of a loaded program. */
int bpf_prog_get_tag(int fd, unsigned long long *tag);
/* Iterate over loaded program ids, and convert ids to fds. */
int bpf_prog_get_next_id(uint32_t start_id, uint32_t *next_id);
int bpf_prog_get_fd_by_id(uint32_t id);
int bpf_map_get_fd_by_id(uint32_t id);
int bpf_obj_get_info_by_fd(int prog_fd, void *info, uint32_t *info_len);

/* Create a BPF iterator link from an iterator program, and instantiate
 * an iterator fd from such a link. */
int bcc_iter_attach(int prog_fd, union bpf_iter_link_info *link_info,
                    uint32_t link_info_len);
int bcc_iter_create(int link_fd);
/* Create missing parent directories of path -- presumably for pinning. */
int bcc_make_parent_dir(const char *path);
/* Validate that path is usable as a bpffs pin location -- TODO confirm
 * exact checks performed. */
int bcc_check_bpffs_path(const char *path);
/* Batched map operations; *count is presumably in/out (requested vs.
 * processed), mirroring the kernel BPF_MAP_*_BATCH commands. */
int bpf_lookup_batch(int fd, __u32 *in_batch, __u32 *out_batch, void *keys,
                     void *values, __u32 *count);
int bpf_delete_batch(int fd,  void *keys, __u32 *count);
int bpf_update_batch(int fd, void *keys, void *values, __u32 *count);
int bpf_lookup_and_delete_batch(int fd, __u32 *in_batch, __u32 *out_batch,
                                void *keys, void *values, __u32 *count);
188 
/* Default size of the verifier log buffer, in bytes. */
#define LOG_BUF_SIZE 65536

// Put non-static/inline functions in their own section with this prefix +
// fn_name to enable discovery by the bcc library.
#define BPF_FN_PREFIX ".bpf.fn."
194 
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg.
 * OP is one of the BPF_ADD/BPF_SUB/... opcode constants; these macros
 * deliberately mirror the kernel's insn-encoding macros. */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* 32-bit variant: operates on the low 32 bits of the registers. */
#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* 32-bit variant of the immediate form. */
#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })
230 
/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn.
 * NOTE: it expands to TWO comma-separated struct bpf_insn initializers,
 * so it occupies two slots in an instruction array. */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

/* Two-part encoding: the first insn carries the low 32 bits of IMM (and
 * the pseudo-source selector in src_reg); the second carries the high
 * 32 bits under the reserved opcode 0. */
#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

#define BPF_PSEUDO_MAP_FD	1

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
274 
275 
/* Direct packet access, R0 = *(uint *) (skb->data + imm32).
 * SIZE is one of BPF_B/BPF_H/BPF_W/BPF_DW throughout these macros. */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })
335 
/* Raw code statement block: escape hatch for encodings the other
 * macros do not cover -- all five insn fields supplied explicitly. */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit: returns R0 to the caller. */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })
355 
356 #ifdef __cplusplus
357 }
358 #endif
359 
360 #endif
361