#!/usr/bin/env python3
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")

from bcc import BPF, BPFAttachType, BPFProgType
from bcc.libbcc import lib
import ctypes as ct
from unittest import main, skipUnless, TestCase
from utils import kernel_version_ge
import os
import sys
import socket
import struct
from contextlib import contextmanager

@contextmanager
def redirect_stderr(to):
    stderr_fd = sys.stderr.fileno()
    with os.fdopen(os.dup(stderr_fd), 'wb') as copied, os.fdopen(to, 'w') as to:
        sys.stderr.flush()
        os.dup2(to.fileno(), stderr_fd)
        try:
            yield sys.stderr
        finally:
            sys.stderr.flush()
            os.dup2(copied.fileno(), stderr_fd)
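
# Usage sketch (see test_printk_f and friends below): route the C frontend's
# warnings, which go to stderr, into a pipe so a test can assert on them.
#
#     r, w = os.pipe()
#     with redirect_stderr(to=w):
#         BPF(text=...)          # compile; warnings land in the pipe
#     output = os.fdopen(r).read()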

class TestClang(TestCase):
    def test_complex(self):
        b = BPF(src_file=b"test_clang_complex.c", debug=0)
        fn = b.load_func(b"handle_packet", BPF.SCHED_CLS)

    def test_printk(self):
        text = b"""
#include <bcc/proto.h>
int handle_packet(void *ctx) {
    u8 *cursor = 0;
    struct ethernet_t *ethernet = cursor_advance(cursor, sizeof(*ethernet));
    bpf_trace_printk("ethernet->dst = %llx, ethernet->src = %llx\\n",
                     ethernet->dst, ethernet->src);
    return 0;
}
"""
        b = BPF(text=text, debug=0)
        fn = b.load_func(b"handle_packet", BPF.SCHED_CLS)

    def test_probe_read1(self):
        text = b"""
#include <linux/sched.h>
#include <uapi/linux/ptrace.h>
int count_sched(struct pt_regs *ctx, struct task_struct *prev) {
    pid_t p = prev->pid;
    return (p != -1);
}
"""
        b = BPF(text=text, debug=0)
        fn = b.load_func(b"count_sched", BPF.KPROBE)

    def test_load_cgroup_sockopt_prog(self):
        text = b"""
int sockopt(struct bpf_sockopt *ctx) {
    return 0;
}
"""
        b = BPF(text=text, debug=0)
        fn = b.load_func(b"sockopt", BPFProgType.CGROUP_SOCKOPT, device=None,
                         attach_type=BPFAttachType.CGROUP_SETSOCKOPT)

    def test_probe_read2(self):
        text = b"""
#include <linux/sched.h>
#include <uapi/linux/ptrace.h>
int count_foo(struct pt_regs *ctx, unsigned long a, unsigned long b) {
    return (a != b);
}
"""
        b = BPF(text=text, debug=0)
        fn = b.load_func(b"count_foo", BPF.KPROBE)

    def test_probe_read3(self):
        text = b"""
#include <net/tcp.h>
#define _(P) ({typeof(P) val = 0; bpf_probe_read_kernel(&val, sizeof(val), &P); val;})
int count_tcp(struct pt_regs *ctx, struct sk_buff *skb) {
    return _(TCP_SKB_CB(skb)->tcp_gso_size);
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"count_tcp", BPF.KPROBE)

    def test_probe_read4(self):
        text = b"""
#include <net/tcp.h>
#define _(P) ({typeof(P) val = 0; bpf_probe_read_kernel(&val, sizeof(val), &P); val;})
int test(struct pt_regs *ctx, struct sk_buff *skb) {
    return _(TCP_SKB_CB(skb)->tcp_gso_size) + skb->protocol;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_whitelist1(self):
        text = b"""
#include <net/tcp.h>
int count_tcp(struct pt_regs *ctx, struct sk_buff *skb) {
    // The define below is from net/tcp.h:
    //   #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
    // Note the AddrOf inside the macro, which the current rewriter cannot
    // handle, so the following statement would fail to compile:
    //   return TCP_SKB_CB(skb)->tcp_gso_size;
    u16 val = 0;
    bpf_probe_read_kernel(&val, sizeof(val), &(TCP_SKB_CB(skb)->tcp_gso_size));
    return val;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"count_tcp", BPF.KPROBE)

    def test_probe_read_whitelist2(self):
        text = b"""
#include <net/tcp.h>
int count_tcp(struct pt_regs *ctx, struct sk_buff *skb) {
    // The define below is from net/tcp.h:
    //   #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
    // Note the AddrOf inside the macro, which the current rewriter cannot
    // handle, so the following statement would fail to compile:
    //   return TCP_SKB_CB(skb)->tcp_gso_size;
    u16 val = 0;
    bpf_probe_read_kernel(&val, sizeof(val), &(TCP_SKB_CB(skb)->tcp_gso_size));
    return val + skb->protocol;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"count_tcp", BPF.KPROBE)

    def test_probe_read_keys(self):
        text = b"""
#include <uapi/linux/ptrace.h>
#include <linux/blkdev.h>
BPF_HASH(start, struct request *);
int do_request(struct pt_regs *ctx, struct request *req) {
    u64 ts = bpf_ktime_get_ns();
    start.update(&req, &ts);
    return 0;
}

int do_completion(struct pt_regs *ctx, struct request *req) {
    u64 *tsp = start.lookup(&req);
    if (tsp != 0) {
        start.delete(&req);
    }
    return 0;
}
"""
        b = BPF(text=text, debug=0)
        fns = b.load_funcs(BPF.KPROBE)

    @skipUnless(lib.bpf_module_rw_engine_enabled(), "requires enabled rwengine")
    def test_sscanf(self):
        text = b"""
BPF_HASH(stats, int, struct { u64 a; u64 b; u64 c:36; u64 d:28; struct { u32 a; u32 b; } s; }, 10);
int foo(void *ctx) {
    return 0;
}
"""
        b = BPF(text=text, debug=0)
        fn = b.load_func(b"foo", BPF.KPROBE)
        t = b.get_table(b"stats")
        s1 = t.key_sprintf(t.Key(2))
        self.assertEqual(s1, b"0x2")
        s2 = t.leaf_sprintf(t.Leaf(2, 3, 4, 1, (5, 6)))
        l = t.leaf_scanf(s2)
        self.assertEqual(l.a, 2)
        self.assertEqual(l.b, 3)
        self.assertEqual(l.c, 4)
        self.assertEqual(l.d, 1)
        self.assertEqual(l.s.a, 5)
        self.assertEqual(l.s.b, 6)

    @skipUnless(lib.bpf_module_rw_engine_enabled(), "requires enabled rwengine")
    def test_sscanf_array(self):
        text = b"""
BPF_HASH(stats, int, struct { u32 a[3]; u32 b; }, 10);
"""
        b = BPF(text=text, debug=0)
        t = b.get_table(b"stats")
        s1 = t.key_sprintf(t.Key(2))
        self.assertEqual(s1, b"0x2")
        s2 = t.leaf_sprintf(t.Leaf((ct.c_uint * 3)(1, 2, 3), 4))
        self.assertEqual(s2, b"{ [ 0x1 0x2 0x3 ] 0x4 }")
        l = t.leaf_scanf(s2)
        self.assertEqual(l.a[0], 1)
        self.assertEqual(l.a[1], 2)
        self.assertEqual(l.a[2], 3)
        self.assertEqual(l.b, 4)

    @skipUnless(lib.bpf_module_rw_engine_enabled(), "requires enabled rwengine")
    def test_sscanf_string(self):
        text = b"""
struct Symbol {
    char name[128];
    char path[128];
};
struct Event {
    uint32_t pid;
    uint32_t tid;
    struct Symbol stack[64];
};
BPF_TABLE("array", int, struct Event, comms, 1);
"""
        b = BPF(text=text)
        t = b.get_table(b"comms")
        s1 = t.leaf_sprintf(t[0])
        fill = b' { "" "" }' * 63
        self.assertEqual(s1, b'{ 0x0 0x0 [ { "" "" }%s ] }' % fill)
        l = t.Leaf(1, 2)
        name = b"libxyz"
        path = b"/usr/lib/libxyz.so"
        l.stack[0].name = name
        l.stack[0].path = path
        s2 = t.leaf_sprintf(l)
        self.assertEqual(s2,
                         b'{ 0x1 0x2 [ { "%s" "%s" }%s ] }' % (name, path, fill))
        l = t.leaf_scanf(s2)
        self.assertEqual(l.pid, 1)
        self.assertEqual(l.tid, 2)
        self.assertEqual(l.stack[0].name, name)
        self.assertEqual(l.stack[0].path, path)
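
    # The three sscanf/sprintf tests above depend on bcc's read/write engine
    # (rwengine), which may be compiled out of libbcc; the skipUnless guard
    # asks the loaded library at runtime via bpf_module_rw_engine_enabled().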

    def test_iosnoop(self):
        text = b"""
#include <linux/blkdev.h>
#include <uapi/linux/ptrace.h>

struct key_t {
    struct request *req;
};

BPF_HASH(start, struct key_t, u64, 1024);
int do_request(struct pt_regs *ctx, struct request *req) {
    struct key_t key = {};

    bpf_trace_printk("traced start %d\\n", req->__data_len);

    return 0;
}
"""
        b = BPF(text=text, debug=0)
        fn = b.load_func(b"do_request", BPF.KPROBE)

    def test_blk_start_request(self):
        text = b"""
#include <linux/blkdev.h>
#include <uapi/linux/ptrace.h>
int do_request(struct pt_regs *ctx, int req) {
    bpf_trace_printk("req ptr: 0x%x\\n", req);
    return 0;
}
"""
        b = BPF(text=text, debug=0)
        fn = b.load_func(b"do_request", BPF.KPROBE)

    def test_bpf_hash(self):
        text = b"""
BPF_HASH(table1);
BPF_HASH(table2, u32);
BPF_HASH(table3, u32, int);
"""
        b = BPF(text=text, debug=0)

    def test_consecutive_probe_read(self):
        text = b"""
#include <linux/fs.h>
#include <linux/mount.h>
BPF_HASH(table1, struct super_block *);
int trace_entry(struct pt_regs *ctx, struct file *file) {
    if (!file) return 0;
    struct vfsmount *mnt = file->f_path.mnt;
    if (mnt) {
        struct super_block *k = mnt->mnt_sb;
        u64 zero = 0;
        table1.update(&k, &zero);
        k = mnt->mnt_sb;
        table1.update(&k, &zero);
    }

    return 0;
}
"""
        b = BPF(text=text, debug=0)
        fn = b.load_func(b"trace_entry", BPF.KPROBE)

    def test_nested_probe_read(self):
        text = b"""
#include <linux/fs.h>
int trace_entry(struct pt_regs *ctx, struct file *file) {
    if (!file) return 0;
    const char *name = file->f_path.dentry->d_name.name;
    bpf_trace_printk("%s\\n", name);
    return 0;
}
"""
        b = BPF(text=text, debug=0)
        fn = b.load_func(b"trace_entry", BPF.KPROBE)

    def test_nested_probe_read_deref(self):
        text = b"""
#include <uapi/linux/ptrace.h>
struct sock {
    u32 *sk_daddr;
};
int test(struct pt_regs *ctx, struct sock *skp) {
    return *(skp->sk_daddr);
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_char_array_probe(self):
        BPF(text=b"""#include <linux/blkdev.h>
int kprobe__blk_update_request(struct pt_regs *ctx, struct request *req) {
    bpf_trace_printk("%s\\n", req->rq_disk->disk_name);
    return 0;
}""")

    @skipUnless(kernel_version_ge(5,7), "requires kernel >= 5.7")
    def test_lsm_probe(self):
        # Skip if the kernel is not compiled with CONFIG_BPF_LSM
        if not BPF.support_lsm():
            return
        b = BPF(text=b"""
LSM_PROBE(bpf, int cmd, union bpf_attr *uattr, unsigned int size) {
    return 0;
}""")

    def test_probe_read_helper(self):
        b = BPF(text=b"""
#include <linux/fs.h>
static void print_file_name(struct file *file) {
    if (!file) return;
    const char *name = file->f_path.dentry->d_name.name;
    bpf_trace_printk("%s\\n", name);
}
static void print_file_name2(int unused, struct file *file) {
    print_file_name(file);
}
int trace_entry1(struct pt_regs *ctx, struct file *file) {
    print_file_name(file);
    return 0;
}
int trace_entry2(struct pt_regs *ctx, int unused, struct file *file) {
    print_file_name2(unused, file);
    return 0;
}
""")
        fn = b.load_func(b"trace_entry1", BPF.KPROBE)
        fn = b.load_func(b"trace_entry2", BPF.KPROBE)
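
    # test_probe_read_helper exercises the rewriter across static functions:
    # dereferences of a kernel pointer (file->f_path.dentry->d_name.name) are
    # still converted to bpf_probe_read_kernel() calls when the pointer first
    # passes through one or two levels of static helpers.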

    def test_probe_unnamed_union_deref(self):
        text = b"""
#include <linux/mm_types.h>
int trace(struct pt_regs *ctx, struct page *page) {
    void *p = page->mapping;
    return p != NULL;
}
"""
        # depending on llvm, compile may pass/fail, but at least shouldn't crash
        try:
            b = BPF(text=text)
        except Exception:
            pass

    def test_probe_struct_assign(self):
        b = BPF(text=b"""
#include <uapi/linux/ptrace.h>
struct args_t {
    const char *filename;
    int flags;
    int mode;
};
int do_sys_open(struct pt_regs *ctx, const char *filename,
                int flags, int mode) {
    struct args_t args = {};
    args.filename = filename;
    args.flags = flags;
    args.mode = mode;
    bpf_trace_printk("%s\\n", args.filename);
    return 0;
};
""")
        b.attach_kprobe(event=b.get_syscall_fnname(b"open"),
                        fn_name=b"do_sys_open")

    def test_task_switch(self):
        b = BPF(text=b"""
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
struct key_t {
    u32 prev_pid;
    u32 curr_pid;
};
BPF_HASH(stats, struct key_t, u64, 1024);
int kprobe__finish_task_switch(struct pt_regs *ctx, struct task_struct *prev) {
    struct key_t key = {};
    u64 zero = 0, *val;
    key.curr_pid = bpf_get_current_pid_tgid();
    key.prev_pid = prev->pid;

    val = stats.lookup_or_try_init(&key, &zero);
    if (val) {
        (*val)++;
    }
    return 0;
}
""")

    def test_probe_simple_assign(self):
        b = BPF(text=b"""
#include <uapi/linux/ptrace.h>
#include <linux/gfp.h>
struct leaf { size_t size; };
BPF_HASH(simple_map, u32, struct leaf);
int kprobe____kmalloc(struct pt_regs *ctx, size_t size) {
    u32 pid = bpf_get_current_pid_tgid();
    struct leaf *leaf = simple_map.lookup(&pid);
    if (leaf)
        leaf->size += size;
    return 0;
}""")

    def test_probe_simple_member_assign(self):
        b = BPF(text=b"""
#include <uapi/linux/ptrace.h>
#include <linux/netdevice.h>
struct leaf { void *ptr; };
int test(struct pt_regs *ctx, struct sk_buff *skb) {
    struct leaf l = {};
    struct leaf *lp = &l;
    lp->ptr = skb;
    return 0;
}""")
        b.load_func(b"test", BPF.KPROBE)

    def test_probe_member_expr_deref(self):
        b = BPF(text=b"""
#include <uapi/linux/ptrace.h>
#include <linux/netdevice.h>
struct leaf { struct sk_buff *ptr; };
int test(struct pt_regs *ctx, struct sk_buff *skb) {
    struct leaf l = {};
    struct leaf *lp = &l;
    lp->ptr = skb;
    return lp->ptr->priority;
}""")
        b.load_func(b"test", BPF.KPROBE)

    def test_probe_member_expr(self):
        b = BPF(text=b"""
#include <uapi/linux/ptrace.h>
#include <linux/netdevice.h>
struct leaf { struct sk_buff *ptr; };
int test(struct pt_regs *ctx, struct sk_buff *skb) {
    struct leaf l = {};
    struct leaf *lp = &l;
    lp->ptr = skb;
    return l.ptr->priority;
}""")
        b.load_func(b"test", BPF.KPROBE)

    def test_unop_probe_read(self):
        text = b"""
#include <linux/blkdev.h>
int trace_entry(struct pt_regs *ctx, struct request *req) {
    if (!(req->bio->bi_flags & 1))
        return 1;
    if (((req->bio->bi_flags)))
        return 1;
    return 0;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"trace_entry", BPF.KPROBE)

    def test_probe_read_nested_deref(self):
        text = b"""
#include <net/inet_sock.h>
int test(struct pt_regs *ctx, struct sock *sk) {
    struct sock *ptr1;
    struct sock **ptr2 = &ptr1;
    *ptr2 = sk;
    return ((struct sock *)(*ptr2))->sk_daddr;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)
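
    # This and the following nested-deref tests check that a kernel pointer
    # copied into a local, through several levels of indirection or via a
    # static helper, stays marked as external, so every dereference level is
    # lowered to its own probe read rather than a direct memory access.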

    def test_probe_read_nested_deref2(self):
        text = b"""
#include <net/inet_sock.h>
int test(struct pt_regs *ctx, struct sock *sk) {
    struct sock *ptr1;
    struct sock **ptr2 = &ptr1;
    struct sock ***ptr3 = &ptr2;
    *ptr2 = sk;
    *ptr3 = ptr2;
    return ((struct sock *)(**ptr3))->sk_daddr;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_nested_deref3(self):
        text = b"""
#include <net/inet_sock.h>
int test(struct pt_regs *ctx, struct sock *sk) {
    struct sock **ptr1, **ptr2 = &sk;
    ptr1 = &sk;
    return (*ptr1)->sk_daddr + (*ptr2)->sk_daddr;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_nested_deref_func1(self):
        text = b"""
#include <net/inet_sock.h>
static struct sock **subtest(struct sock **sk) {
    return sk;
}
int test(struct pt_regs *ctx, struct sock *sk) {
    struct sock **ptr1, **ptr2 = subtest(&sk);
    ptr1 = subtest(&sk);
    return (*ptr1)->sk_daddr + (*ptr2)->sk_daddr;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_nested_deref_func2(self):
        text = b"""
#include <net/inet_sock.h>
static int subtest(struct sock ***skp) {
    return ((struct sock *)(**skp))->sk_daddr;
}
int test(struct pt_regs *ctx, struct sock *sk) {
    struct sock *ptr1;
    struct sock **ptr2 = &ptr1;
    struct sock ***ptr3 = &ptr2;
    *ptr2 = sk;
    *ptr3 = ptr2;
    return subtest(ptr3);
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_nested_member1(self):
        text = b"""
#include <net/inet_sock.h>
int test(struct pt_regs *ctx, struct sock *skp) {
    u32 *daddr = &skp->sk_daddr;
    return *daddr;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_nested_member2(self):
        text = b"""
#include <uapi/linux/ptrace.h>
struct sock {
    u32 **sk_daddr;
};
int test(struct pt_regs *ctx, struct sock *skp) {
    u32 *daddr = *(skp->sk_daddr);
    return *daddr;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_nested_member3(self):
        text = b"""
#include <uapi/linux/ptrace.h>
struct sock {
    u32 *sk_daddr;
};
int test(struct pt_regs *ctx, struct sock *skp) {
    return *(&skp->sk_daddr);
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_paren_probe_read(self):
        text = b"""
#include <net/inet_sock.h>
int trace_entry(struct pt_regs *ctx, struct sock *sk) {
    u16 sport = ((struct inet_sock *)sk)->inet_sport;
    return sport;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"trace_entry", BPF.KPROBE)

    def test_complex_leaf_types(self):
        text = b"""
struct list;
struct list {
    struct list *selfp;
    struct list *another_selfp;
    struct list *selfp_array[2];
};
struct empty {
};
union emptyu {
    struct empty *em1;
    struct empty em2;
    struct empty em3;
    struct empty em4;
};
BPF_ARRAY(t1, struct list, 1);
BPF_ARRAY(t2, struct list *, 1);
BPF_ARRAY(t3, union emptyu, 1);
"""
        b = BPF(text=text)
        self.assertEqual(ct.sizeof(b[b"t3"].Leaf), 8)

    def test_cflags(self):
        text = b"""
#ifndef MYFLAG
#error "MYFLAG not set as expected"
#endif
"""
        b = BPF(text=text, cflags=["-DMYFLAG"])

    def test_exported_maps(self):
        b1 = BPF(text=b"""BPF_TABLE_PUBLIC("hash", int, int, table1, 10);""")
        b2 = BPF(text=b"""BPF_TABLE("extern", int, int, table1, 10);""")
        t = b2[b"table1"]
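
    # A map declared BPF_TABLE_PUBLIC in one module is registered globally by
    # name, so a second module can bind to the same underlying map by
    # declaring it with the "extern" table type, as test_exported_maps does
    # above.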

    def test_syntax_error(self):
        with self.assertRaises(Exception):
            b = BPF(text=b"""int failure(void *ctx) { if (); return 0; }""")

    def test_nested_union(self):
        text = b"""
BPF_HASH(t1, struct bpf_tunnel_key, int, 1);
"""
        b = BPF(text=text)
        t1 = b[b"t1"]
        print(t1.Key().remote_ipv4)

    def test_too_many_args(self):
        text = b"""
#include <uapi/linux/ptrace.h>
int many(struct pt_regs *ctx, int a, int b, int c, int d, int e, int f, int g) {
    return 0;
}
"""
        with self.assertRaises(Exception):
            b = BPF(text=text)

    def test_call_macro_arg(self):
        text = b"""
BPF_PROG_ARRAY(jmp, 32);

#define JMP_IDX_PIPE (1U << 1)

enum action {
    ACTION_PASS
};

int process(struct xdp_md *ctx) {
    jmp.call((void *)ctx, ACTION_PASS);
    jmp.call((void *)ctx, JMP_IDX_PIPE);
    return XDP_PASS;
}
"""
        b = BPF(text=text)
        t = b[b"jmp"]
        self.assertEqual(len(t), 32)

    def test_update_macro_arg(self):
        text = b"""
BPF_ARRAY(act, u32, 32);

#define JMP_IDX_PIPE (1U << 1)

enum action {
    ACTION_PASS
};

int process(struct xdp_md *ctx) {
    act.increment(ACTION_PASS);
    act.increment(JMP_IDX_PIPE);
    return XDP_PASS;
}
"""
        b = BPF(text=text)
        t = b[b"act"]
        self.assertEqual(len(t), 32)

    def test_ext_ptr_maps1(self):
        bpf_text = b"""
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>

BPF_HASH(currsock, u32, struct sock *);

int trace_entry(struct pt_regs *ctx, struct sock *sk,
                struct sockaddr *uaddr, int addr_len) {
    u32 pid = bpf_get_current_pid_tgid();
    currsock.update(&pid, &sk);
    return 0;
};

int trace_exit(struct pt_regs *ctx) {
    u32 pid = bpf_get_current_pid_tgid();
    struct sock **skpp;
    skpp = currsock.lookup(&pid);
    if (skpp) {
        struct sock *skp = *skpp;
        return skp->__sk_common.skc_dport;
    }
    return 0;
}
"""
        b = BPF(text=bpf_text)
        b.load_func(b"trace_entry", BPF.KPROBE)
        b.load_func(b"trace_exit", BPF.KPROBE)

    def test_ext_ptr_maps2(self):
        bpf_text = b"""
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>

BPF_HASH(currsock, u32, struct sock *);

int trace_entry(struct pt_regs *ctx, struct sock *sk,
                struct sockaddr *uaddr, int addr_len) {
    u32 pid = bpf_get_current_pid_tgid();
    currsock.update(&pid, &sk);
    return 0;
};

int trace_exit(struct pt_regs *ctx) {
    u32 pid = bpf_get_current_pid_tgid();
    struct sock **skpp = currsock.lookup(&pid);
    if (skpp) {
        struct sock *skp = *skpp;
        return skp->__sk_common.skc_dport;
    }
    return 0;
}
"""
        b = BPF(text=bpf_text)
        b.load_func(b"trace_entry", BPF.KPROBE)
        b.load_func(b"trace_exit", BPF.KPROBE)

    def test_ext_ptr_maps_reverse(self):
        bpf_text = b"""
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>

BPF_HASH(currsock, u32, struct sock *);

int trace_exit(struct pt_regs *ctx) {
    u32 pid = bpf_get_current_pid_tgid();
    struct sock **skpp;
    skpp = currsock.lookup(&pid);
    if (skpp) {
        struct sock *skp = *skpp;
        return skp->__sk_common.skc_dport;
    }
    return 0;
}

int trace_entry(struct pt_regs *ctx, struct sock *sk) {
    u32 pid = bpf_get_current_pid_tgid();
    currsock.update(&pid, &sk);
    return 0;
};
"""
        b = BPF(text=bpf_text)
        b.load_func(b"trace_entry", BPF.KPROBE)
        b.load_func(b"trace_exit", BPF.KPROBE)
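
    # The ext_ptr_maps tests store a struct sock * in a map and verify that
    # the value returned by currsock.lookup() is still treated as a kernel
    # pointer, so skp->__sk_common.skc_dport becomes a probe read; the
    # _reverse variant checks that declaration order does not matter.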

    def test_ext_ptr_maps_indirect(self):
        bpf_text = b"""
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>

BPF_HASH(currsock, u32, struct sock *);

int trace_entry(struct pt_regs *ctx, struct sock *sk) {
    u32 pid = bpf_get_current_pid_tgid();
    struct sock **skp = &sk;
    currsock.update(&pid, skp);
    return 0;
};

int trace_exit(struct pt_regs *ctx) {
    u32 pid = bpf_get_current_pid_tgid();
    struct sock **skpp;
    skpp = currsock.lookup(&pid);
    if (skpp) {
        struct sock *skp = *skpp;
        return skp->__sk_common.skc_dport;
    }
    return 0;
}
"""
        b = BPF(text=bpf_text)
        b.load_func(b"trace_entry", BPF.KPROBE)
        b.load_func(b"trace_exit", BPF.KPROBE)

    def test_bpf_dins_pkt_rewrite(self):
        text = b"""
#include <bcc/proto.h>
int dns_test(struct __sk_buff *skb) {
    u8 *cursor = 0;
    struct ethernet_t *ethernet = cursor_advance(cursor, sizeof(*ethernet));
    if (ethernet->type == ETH_P_IP) {
        struct ip_t *ip = cursor_advance(cursor, sizeof(*ip));
        ip->src = ip->dst;
        return 0;
    }
    return -1;
}
"""
        b = BPF(text=text)

    @skipUnless(kernel_version_ge(4,8), "requires kernel >= 4.8")
    def test_ext_ptr_from_helper(self):
        text = b"""
#include <linux/sched.h>
int test(struct pt_regs *ctx) {
    struct task_struct *task = (struct task_struct *)bpf_get_current_task();
    return task->prio;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_unary_operator(self):
        text = b"""
#include <linux/fs.h>
#include <uapi/linux/ptrace.h>
int trace_read_entry(struct pt_regs *ctx, struct file *file) {
    return !file->f_op->read_iter;
}
"""
        b = BPF(text=text)
        try:
            b.attach_kprobe(event=b"__vfs_read", fn_name=b"trace_read_entry")
        except Exception:
            print('Current kernel does not have __vfs_read, try vfs_read instead')
            b.attach_kprobe(event=b"vfs_read", fn_name=b"trace_read_entry")

    def test_printk_f(self):
        text = b"""
#include <uapi/linux/ptrace.h>
int trace_entry(struct pt_regs *ctx) {
    bpf_trace_printk("%0.2f\\n", 1);
    return 0;
}
"""
        r, w = os.pipe()
        with redirect_stderr(to=w):
            BPF(text=text)
        r = os.fdopen(r)
        output = r.read()
        expectedWarn = "warning: only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed"
        self.assertIn(expectedWarn, output)
        r.close()

    def test_printk_lf(self):
        text = b"""
#include <uapi/linux/ptrace.h>
int trace_entry(struct pt_regs *ctx) {
    bpf_trace_printk("%lf\\n", 1);
    return 0;
}
"""
        r, w = os.pipe()
        with redirect_stderr(to=w):
            BPF(text=text)
        r = os.fdopen(r)
        output = r.read()
        expectedWarn = "warning: only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed"
        self.assertIn(expectedWarn, output)
        r.close()

    def test_printk_2s(self):
        text = b"""
#include <uapi/linux/ptrace.h>
int trace_entry(struct pt_regs *ctx) {
    char s1[] = "hello", s2[] = "world";
    bpf_trace_printk("%s %s\\n", s1, s2);
    return 0;
}
"""
        r, w = os.pipe()
        with redirect_stderr(to=w):
            BPF(text=text)
        r = os.fdopen(r)
        output = r.read()
        expectedWarn = "warning: cannot use several %s conversion specifiers"
        self.assertIn(expectedWarn, output)
        r.close()
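
    # bcc validates bpf_trace_printk() format strings at compile time; the
    # three printk tests above capture the resulting warnings through
    # redirect_stderr and assert on their text (%f and %lf are rejected, and
    # at most one %s specifier is accepted).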

    def test_map_insert(self):
        text = b"""
BPF_HASH(dummy);
void do_trace(struct pt_regs *ctx) {
    u64 key = 0, val = 2;
    dummy.insert(&key, &val);
    key = 1;
    dummy.update(&key, &val);
}
"""
        b = BPF(text=text)
        c_val = ct.c_ulong(1)
        b[b"dummy"][ct.c_ulong(0)] = c_val
        b[b"dummy"][ct.c_ulong(1)] = c_val
        b.attach_kprobe(event=b.get_syscall_fnname(b"sync"), fn_name=b"do_trace")
        libc = ct.CDLL("libc.so.6")
        libc.sync()
        self.assertEqual(1, b[b"dummy"][ct.c_ulong(0)].value)
        self.assertEqual(2, b[b"dummy"][ct.c_ulong(1)].value)

    def test_prog_array_delete(self):
        text = b"""
BPF_PROG_ARRAY(dummy, 256);
"""
        b1 = BPF(text=text)
        text = b"""
int do_next(struct pt_regs *ctx) {
    return 0;
}
"""
        b2 = BPF(text=text)
        fn = b2.load_func(b"do_next", BPF.KPROBE)
        c_key = ct.c_int(0)
        b1[b"dummy"][c_key] = ct.c_int(fn.fd)
        del b1[b"dummy"][c_key]
        with self.assertRaises(KeyError):
            b1[b"dummy"][c_key]

    def test_invalid_noninline_call(self):
        text = b"""
int bar(void) {
    return 0;
}
int foo(struct pt_regs *ctx) {
    return bar();
}
"""
        with self.assertRaises(Exception):
            b = BPF(text=text)

    def test_incomplete_type(self):
        text = b"""
BPF_HASH(drops, struct key_t);
struct key_t {
    u64 location;
};
"""
        with self.assertRaises(Exception):
            b = BPF(text=text)

    def test_enumerations(self):
        text = b"""
enum b {
    CHOICE_A,
};
struct a {
    enum b test;
};
BPF_HASH(drops, struct a);
"""
        b = BPF(text=text)
        t = b[b'drops']

    def test_int128_types(self):
        text = b"""
BPF_HASH(table1, unsigned __int128, __int128);
"""
        b = BPF(text=text)
        table = b[b'table1']
        self.assertEqual(ct.sizeof(table.Key), 16)
        self.assertEqual(ct.sizeof(table.Leaf), 16)
        table[
            table.Key.from_buffer_copy(
                socket.inet_pton(socket.AF_INET6, "2001:db8::"))
        ] = table.Leaf.from_buffer_copy(struct.pack('LL', 42, 123456789))
        for k, v in table.items():
            self.assertEqual(v[0], 42)
            self.assertEqual(v[1], 123456789)
            self.assertEqual(socket.inet_ntop(socket.AF_INET6,
                                              struct.pack('LL', k[0], k[1])),
                             "2001:db8::")

    def test_padding_types(self):
        text = b"""
struct key_t {
    u32 f1_1;               /* offset 0 */
    struct {
        char f2_1;          /* offset 16 */
        __int128 f2_2;      /* offset 32 */
    };
    u8 f1_3;                /* offset 48 */
    unsigned __int128 f1_4; /* offset 64 */
    char f1_5;              /* offset 80 */
};
struct value_t {
    u8 src[4] __attribute__ ((aligned (8))); /* offset 0 */
    u8 dst[4] __attribute__ ((aligned (8))); /* offset 8 */
};
BPF_HASH(table1, struct key_t, struct value_t);
"""
        b = BPF(text=text)
        table = b[b'table1']
        self.assertEqual(ct.sizeof(table.Key), 96)
        self.assertEqual(ct.sizeof(table.Leaf), 16)

    @skipUnless(kernel_version_ge(4,7), "requires kernel >= 4.7")
    def test_probe_read_tracepoint_context(self):
        text = b"""
#include <linux/netdevice.h>
TRACEPOINT_PROBE(skb, kfree_skb) {
    struct sk_buff *skb = (struct sk_buff *)args->skbaddr;
    return skb->protocol;
}
"""
        b = BPF(text=text)

    def test_probe_read_kprobe_ctx(self):
        text = b"""
#include <linux/sched.h>
#include <net/inet_sock.h>
int test(struct pt_regs *ctx) {
    struct sock *sk;
    sk = (struct sock *)PT_REGS_PARM1(ctx);
    return sk->sk_dport;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)
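
    # PT_REGS_PARM1()/PT_REGS_RC() expand to architecture-specific register
    # fields of struct pt_regs; casting the raw register value to a struct
    # pointer marks it as external, so the member reads in the test above and
    # the one below are rewritten into probe reads.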

    def test_probe_read_ctx_array(self):
        text = b"""
#include <linux/sched.h>
#include <net/inet_sock.h>
int test(struct pt_regs *ctx) {
    struct sock *newsk = (struct sock *)PT_REGS_RC(ctx);
    return newsk->__sk_common.skc_rcv_saddr;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    @skipUnless(kernel_version_ge(4,7), "requires kernel >= 4.7")
    def test_probe_read_tc_ctx(self):
        text = b"""
#include <uapi/linux/pkt_cls.h>
#include <linux/if_ether.h>
int test(struct __sk_buff *ctx) {
    void *data_end = (void *)(long)ctx->data_end;
    void *data = (void *)(long)ctx->data;
    if (data + sizeof(struct ethhdr) > data_end)
        return TC_ACT_SHOT;
    struct ethhdr *eh = (struct ethhdr *)data;
    if (eh->h_proto == 0x1)
        return TC_ACT_SHOT;
    return TC_ACT_OK;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.SCHED_CLS)

    def test_probe_read_return(self):
        text = b"""
#include <uapi/linux/ptrace.h>
#include <linux/tcp.h>
static inline unsigned char *my_skb_transport_header(struct sk_buff *skb) {
    return skb->head + skb->transport_header;
}
int test(struct pt_regs *ctx, struct sock *sk, struct sk_buff *skb) {
    struct tcphdr *th = (struct tcphdr *)my_skb_transport_header(skb);
    return th->seq;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_multiple_return(self):
        text = b"""
#include <uapi/linux/ptrace.h>
#include <linux/tcp.h>
static inline u64 error_function() {
    return 0;
}
static inline unsigned char *my_skb_transport_header(struct sk_buff *skb) {
    if (skb)
        return skb->head + skb->transport_header;
    return (unsigned char *)error_function();
}
int test(struct pt_regs *ctx, struct sock *sk, struct sk_buff *skb) {
    struct tcphdr *th = (struct tcphdr *)my_skb_transport_header(skb);
    return th->seq;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_return_expr(self):
        text = b"""
#include <uapi/linux/ptrace.h>
#include <linux/tcp.h>
static inline unsigned char *my_skb_transport_header(struct sk_buff *skb) {
    return skb->head + skb->transport_header;
}
int test(struct pt_regs *ctx, struct sock *sk, struct sk_buff *skb) {
    u32 *seq = (u32 *)my_skb_transport_header(skb) + offsetof(struct tcphdr, seq);
    return *seq;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_return_call(self):
        text = b"""
#include <uapi/linux/ptrace.h>
#include <linux/tcp.h>
static inline struct tcphdr *my_skb_transport_header(struct sk_buff *skb) {
    return (struct tcphdr *)skb->head + skb->transport_header;
}
int test(struct pt_regs *ctx, struct sock *sk, struct sk_buff *skb) {
    return my_skb_transport_header(skb)->seq;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_no_probe_read_addrof(self):
        text = b"""
#include <linux/sched.h>
#include <net/inet_sock.h>
static inline int test_help(__be16 *addr) {
    __be16 val = 0;
    bpf_probe_read_kernel(&val, sizeof(val), addr);
    return val;
}
int test(struct pt_regs *ctx) {
    struct sock *sk;
    sk = (struct sock *)PT_REGS_PARM1(ctx);
    return test_help(&sk->sk_dport);
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)
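
    # In test_no_probe_read_addrof the helper performs its own
    # bpf_probe_read_kernel() on the &sk->sk_dport argument, so the rewriter
    # must leave the address-of expression alone instead of inserting a
    # second, redundant probe read.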

    def test_probe_read_array_accesses1(self):
        text = b"""
#include <linux/ptrace.h>
#include <linux/dcache.h>
int test(struct pt_regs *ctx, const struct qstr *name) {
    return name->name[1];
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_array_accesses2(self):
        text = b"""
#include <linux/ptrace.h>
#include <linux/dcache.h>
int test(struct pt_regs *ctx, const struct qstr *name) {
    return name->name [ 1];
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_array_accesses3(self):
        text = b"""
#include <linux/ptrace.h>
#include <linux/dcache.h>
int test(struct pt_regs *ctx, const struct qstr *name) {
    return (name->name)[1];
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_array_accesses4(self):
        text = b"""
#include <linux/ptrace.h>
int test(struct pt_regs *ctx, char *name) {
    return name[1];
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_array_accesses5(self):
        text = b"""
#include <linux/ptrace.h>
int test(struct pt_regs *ctx, char **name) {
    return (*name)[1];
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_array_accesses6(self):
        text = b"""
#include <linux/ptrace.h>
struct test_t {
    int tab[5];
};
int test(struct pt_regs *ctx, struct test_t *t) {
    return *(&t->tab[1]);
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_array_accesses7(self):
        text = b"""
#include <net/inet_sock.h>
int test(struct pt_regs *ctx, struct sock *sk) {
    return sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32[0];
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_probe_read_array_accesses8(self):
        text = b"""
#include <linux/mm_types.h>
int test(struct pt_regs *ctx, struct mm_struct *mm) {
    return mm->rss_stat.count[MM_ANONPAGES].counter;
}
"""
        b = BPF(text=text)
        fn = b.load_func(b"test", BPF.KPROBE)

    def test_arbitrary_increment_simple(self):
        b = BPF(text=b"""
#include <uapi/linux/ptrace.h>
struct bpf_map;
BPF_HASH(map);
int map_delete(struct pt_regs *ctx, struct bpf_map *bpfmap, u64 *k) {
    map.increment(42, 5);
    map.atomic_increment(42, 5);
    return 0;
}
""")
        b.attach_kprobe(event=b"htab_map_delete_elem", fn_name=b"map_delete")
        b.cleanup()

    @skipUnless(kernel_version_ge(4,7), "requires kernel >= 4.7")
    def test_packed_structure(self):
        b = BPF(text=b"""
struct test {
    u16 a;
    u32 b;
} __packed;
BPF_TABLE("hash", u32, struct test, testing, 2);
TRACEPOINT_PROBE(kmem, kmalloc) {
    u32 key = 0;
    struct test info, *entry;
    entry = testing.lookup(&key);
    if (entry == NULL) {
        info.a = 10;
        info.b = 20;
        testing.update(&key, &info);
    }
    return 0;
}
""")
        if len(b[b"testing"].items()):
            st = b[b"testing"][ct.c_uint(0)]
            self.assertEqual(st.a, 10)
            self.assertEqual(st.b, 20)
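
    # __packed removes the 2 bytes of padding clang would otherwise insert
    # between a (u16) and b (u32), so struct test is 6 bytes; the values can
    # only read back as 10/20 if the generated ctypes Leaf uses the same
    # packed layout.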

    @skipUnless(kernel_version_ge(4,14), "requires kernel >= 4.14")
    def test_jump_table(self):
        text = b"""
#include <linux/blk_types.h>
#include <linux/blkdev.h>
#include <linux/time64.h>

BPF_PERCPU_ARRAY(rwdf_100ms, u64, 400);

int do_request(struct pt_regs *ctx, struct request *rq) {
    u32 cmd_flags;
    u64 base, dur, slot, now = 100000;

    if (!rq->start_time_ns)
        return 0;

    if (!rq->rq_disk || rq->rq_disk->major != 5 ||
        rq->rq_disk->first_minor != 6)
        return 0;

    cmd_flags = rq->cmd_flags;
    switch (cmd_flags & REQ_OP_MASK) {
    case REQ_OP_READ:
        base = 0;
        break;
    case REQ_OP_WRITE:
        base = 100;
        break;
    case REQ_OP_DISCARD:
        base = 200;
        break;
    case REQ_OP_FLUSH:
        base = 300;
        break;
    default:
        return 0;
    }

    dur = now - rq->start_time_ns;
    slot = min_t(size_t, div_u64(dur, 100 * NSEC_PER_MSEC), 99);
    rwdf_100ms.increment(base + slot);

    return 0;
}
"""
        b = BPF(text=text)
        fns = b.load_funcs(BPF.KPROBE)

if __name__ == "__main__":
    main()