1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 // Based on capable(8) from BCC by Brendan Gregg.
3 //
4 // Copyright 2022 Sony Group Corporation
5
6 #include <argp.h>
7 #include <signal.h>
8 #include <unistd.h>
9 #include <fcntl.h>
10 #include <time.h>
11 #include <bpf/bpf.h>
12 #include "capable.h"
13 #include "capable.skel.h"
14 #include "trace_helpers.h"
15
16 #define PERF_BUFFER_PAGES 16
17 #define PERF_POLL_TIMEOUT_MS 100
18
/* Runtime options, populated by parse_arg() from the command line. */
static struct env {
	bool verbose;			/* -v: pass libbpf debug output through */
	char *cgroupspath;		/* -c: cgroup path to filter on */
	bool cg;			/* true when -c was given */
	bool extra_fields;		/* -x: also show TID and INSETID columns */
	bool user_stack;		/* -U: print user-space stack traces */
	bool kernel_stack;		/* -K: print kernel stack traces */
	bool unique;			/* -u: deduplicate repeated checks */
	char *unique_type;		/* -u argument: "pid" or "cgroup" */
	int stack_storage_size;		/* max distinct stacks kept in stackmap */
	int perf_max_stack_depth;	/* max frames captured per stack */
	pid_t pid;			/* -p: trace only this PID (-1 = all) */
} env = {
	.pid = -1,
	.stack_storage_size = 1024,
	.perf_max_stack_depth = 127,
	.unique = false,
};
37
/*
 * Capability number -> name, mirroring include/uapi/linux/capability.h.
 * NOTE(review): a kernel newer than this table may report capability
 * numbers past CAP_CHECKPOINT_RESTORE (40); indexing must be bounds-checked
 * by the caller.
 */
const char *cap_name[] = {
	[0] = "CAP_CHOWN",
	[1] = "CAP_DAC_OVERRIDE",
	[2] = "CAP_DAC_READ_SEARCH",
	[3] = "CAP_FOWNER",
	[4] = "CAP_FSETID",
	[5] = "CAP_KILL",
	[6] = "CAP_SETGID",
	[7] = "CAP_SETUID",
	[8] = "CAP_SETPCAP",
	[9] = "CAP_LINUX_IMMUTABLE",
	[10] = "CAP_NET_BIND_SERVICE",
	[11] = "CAP_NET_BROADCAST",
	[12] = "CAP_NET_ADMIN",
	[13] = "CAP_NET_RAW",
	[14] = "CAP_IPC_LOCK",
	[15] = "CAP_IPC_OWNER",
	[16] = "CAP_SYS_MODULE",
	[17] = "CAP_SYS_RAWIO",
	[18] = "CAP_SYS_CHROOT",
	[19] = "CAP_SYS_PTRACE",
	[20] = "CAP_SYS_PACCT",
	[21] = "CAP_SYS_ADMIN",
	[22] = "CAP_SYS_BOOT",
	[23] = "CAP_SYS_NICE",
	[24] = "CAP_SYS_RESOURCE",
	[25] = "CAP_SYS_TIME",
	[26] = "CAP_SYS_TTY_CONFIG",
	[27] = "CAP_MKNOD",
	[28] = "CAP_LEASE",
	[29] = "CAP_AUDIT_WRITE",
	[30] = "CAP_AUDIT_CONTROL",
	[31] = "CAP_SETFCAP",
	[32] = "CAP_MAC_OVERRIDE",
	[33] = "CAP_MAC_ADMIN",
	[34] = "CAP_SYSLOG",
	[35] = "CAP_WAKE_ALARM",
	[36] = "CAP_BLOCK_SUSPEND",
	[37] = "CAP_AUDIT_READ",
	[38] = "CAP_PERFMON",
	[39] = "CAP_BPF",
	[40] = "CAP_CHECKPOINT_RESTORE"
};
81
static volatile sig_atomic_t exiting = 0;	/* set to 1 by the SIGINT handler */
struct syms_cache *syms_cache = NULL;		/* per-process user symbol cache */
struct ksyms *ksyms = NULL;			/* kernel symbol table (kallsyms) */
int ifd, sfd;					/* BPF map fds: info map, stack-trace map */
86
/* argp metadata: version/bug-report strings and the --help usage text. */
const char *argp_program_version = "capable 0.1";
const char *argp_program_bug_address =
	"https://github.com/iovisor/bcc/tree/master/libbpf-tools";
const char argp_program_doc[] =
"Trace security capability checks (cap_capable()).\n"
"\n"
"USAGE: capable [--help] [-p PID | -c CG | -K | -U | -x] [-u TYPE]\n"
"[--perf-max-stack-depth] [--stack-storage-size]\n"
"\n"
"EXAMPLES:\n"
"    capable                  # Trace capability checks\n"
"    capable -p 185           # Trace this PID only\n"
"    capable -c CG            # Trace process under cgroupsPath CG\n"
"    capable -K               # Add kernel stacks to trace\n"
"    capable -x               # Extra fields: show TID and INSETID columns\n"
"    capable -U               # Add user-space stacks to trace\n"
"    capable -u TYPE          # Print unique output for TYPE=[pid | cgroup] (default:off)\n";
104
/* Key values for long-only options (no short-flag equivalent). */
#define OPT_PERF_MAX_STACK_DEPTH	1	/* --perf-max-stack-depth */
#define OPT_STACK_STORAGE_SIZE		2	/* --stack-storage-size */

/* Command-line option table consumed by argp_parse(). */
static const struct argp_option opts[] = {
	{ "verbose", 'v', NULL, 0, "Verbose debug output" },
	{ "pid", 'p', "PID", 0, "Trace this PID only" },
	{ "cgroup", 'c', "/sys/fs/cgroup/unified", 0, "Trace process in cgroup path" },
	{ "kernel-stack", 'K', NULL, 0, "output kernel stack trace" },
	{ "user-stack", 'U', NULL, 0, "output user stack trace" },
	{ "extra-fields", 'x', NULL, 0, "extra fields: show TID and INSETID columns" },
	{ "unique", 'u', "off", 0, "Print unique output for <pid> or <cgroup> (default:off)" },
	{ "perf-max-stack-depth", OPT_PERF_MAX_STACK_DEPTH,
	  "PERF-MAX-STACK-DEPTH", 0, "the limit for both kernel and user stack traces (default 127)" },
	{ "stack-storage-size", OPT_STACK_STORAGE_SIZE, "STACK-STORAGE-SIZE", 0,
	  "the number of unique stack traces that can be stored and displayed (default 1024)" },
	{ NULL, 'h', NULL, OPTION_HIDDEN, "Show the full help" },
	{},
};
123
parse_arg(int key,char * arg,struct argp_state * state)124 static error_t parse_arg(int key, char *arg, struct argp_state *state)
125 {
126 switch (key) {
127 case 'h':
128 argp_state_help(state, stderr, ARGP_HELP_STD_HELP);
129 break;
130 case 'v':
131 env.verbose = true;
132 break;
133 case 'p':
134 errno = 0;
135 env.pid = strtol(arg, NULL, 10);
136 if (errno || env.pid == 0) {
137 fprintf(stderr, "invalid PID: %s\n", arg);
138 argp_usage(state);
139 }
140 break;
141 case 'c':
142 env.cgroupspath = arg;
143 env.cg = true;
144 break;
145 case 'U':
146 env.user_stack = true;
147 break;
148 case 'K':
149 env.kernel_stack = true;
150 break;
151 case 'x':
152 env.extra_fields = true;
153 break;
154 case 'u':
155 env.unique_type = arg;
156 env.unique = true;
157 break;
158 case OPT_PERF_MAX_STACK_DEPTH:
159 errno = 0;
160 env.perf_max_stack_depth = strtol(arg, NULL, 10);
161 if (errno || env.perf_max_stack_depth == 0) {
162 fprintf(stderr, "invalid perf max stack depth: %s\n", arg);
163 argp_usage(state);
164 }
165 break;
166 case OPT_STACK_STORAGE_SIZE:
167 errno = 0;
168 env.stack_storage_size = strtol(arg, NULL, 10);
169 if (errno || env.stack_storage_size == 0) {
170 fprintf(stderr, "invalid stack storage size: %s\n", arg);
171 argp_usage(state);
172 }
173 break;
174 default:
175 return ARGP_ERR_UNKNOWN;
176 }
177 return 0;
178 }
179
libbpf_print_fn(enum libbpf_print_level level,const char * format,va_list args)180 static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
181 {
182 if (level == LIBBPF_DEBUG && !env.verbose)
183 return 0;
184 return vfprintf(stderr, format, args);
185 }
186
sig_int(int signo)187 static void sig_int(int signo)
188 {
189 exiting = 1;
190 }
191
print_map(struct ksyms * ksyms,struct syms_cache * syms_cache)192 static void print_map(struct ksyms *ksyms, struct syms_cache *syms_cache)
193 {
194 struct key_t lookup_key = {}, next_key;
195 const struct ksym *ksym;
196 const struct syms *syms;
197 const struct sym *sym;
198 int err, i;
199 unsigned long *ip;
200 struct cap_event val;
201
202 ip = calloc(env.perf_max_stack_depth, sizeof(*ip));
203 if (!ip) {
204 fprintf(stderr, "failed to alloc ip\n");
205 return;
206 }
207
208 while (!bpf_map_get_next_key(ifd, &lookup_key, &next_key)) {
209 err = bpf_map_lookup_elem(ifd, &next_key, &val);
210 if (err < 0) {
211 fprintf(stderr, "failed to lookup info: %d\n", err);
212 goto cleanup;
213 }
214 lookup_key = next_key;
215
216 if (env.kernel_stack) {
217 if (bpf_map_lookup_elem(sfd, &next_key.kern_stack_id, ip) != 0)
218 fprintf(stderr, " [Missed Kernel Stack]\n");
219 for (i = 0; i < env.perf_max_stack_depth && ip[i]; i++) {
220 ksym = ksyms__map_addr(ksyms, ip[i]);
221 printf(" %s\n", ksym ? ksym->name : "Unknown");
222 }
223 }
224
225 if (env.user_stack) {
226 if (next_key.user_stack_id == -1)
227 goto skip_ustack;
228
229 if (bpf_map_lookup_elem(sfd, &next_key.user_stack_id, ip) != 0) {
230 fprintf(stderr, " [Missed User Stack]\n");
231 continue;
232 }
233
234 syms = syms_cache__get_syms(syms_cache, next_key.tgid);
235 if (!syms) {
236 fprintf(stderr, "failed to get syms\n");
237 goto skip_ustack;
238 }
239 for (i = 0; i < env.perf_max_stack_depth && ip[i]; i++) {
240 sym = syms__map_addr(syms, ip[i]);
241 if (sym)
242 printf(" %s\n", sym->name);
243 else
244 printf(" [unknown]\n");
245 }
246 }
247
248 skip_ustack:
249 printf(" %-16s %s (%d)\n", "-", val.task, next_key.pid);
250 }
251
252 cleanup:
253 free(ip);
254 }
255
/*
 * Perf-buffer callback: format one cap_event as a table row (wide form with
 * TID/INSETID when -x was given), then print the associated stack traces.
 */
static void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
{
	const struct cap_event *e = data;
	const char *name = "?";
	char *verdict = "deny";
	struct tm *tm;
	char ts[32] = "";
	time_t t;

	time(&t);
	tm = localtime(&t);
	/* localtime() can return NULL; passing NULL to strftime is UB. */
	if (tm)
		strftime(ts, sizeof(ts), "%H:%M:%S", tm);

	if (!e->ret)
		verdict = "allow";

	/* The kernel may check capabilities added after cap_name[] was
	 * built; an unchecked cap_name[e->cap] would read out of bounds. */
	if (e->cap >= 0 && e->cap < (int)(sizeof(cap_name) / sizeof(cap_name[0])))
		name = cap_name[e->cap];

	if (env.extra_fields)
		printf("%-8s %-5d %-7d %-7d %-16s %-7d %-20s %-7d %-7s %-7d\n", ts, e->uid, e->pid, e->tgid, e->task, e->cap, name, e->audit, verdict, e->insetid);
	else
		printf("%-8s %-5d %-7d %-16s %-7d %-20s %-7d %-7s\n", ts, e->uid, e->pid, e->task, e->cap, name, e->audit, verdict);

	print_map(ksyms, syms_cache);
}
278
/* Perf-buffer overflow callback: report how many events were dropped on
 * which CPU (userspace fell behind the BPF producer). */
static void handle_lost_events(void *ctx, int cpu, __u64 lost_cnt)
{
	fprintf(stderr, "lost %llu events on CPU #%d\n", lost_cnt, cpu);
}
283
main(int argc,char ** argv)284 int main(int argc, char **argv)
285 {
286 static const struct argp argp = {
287 .options = opts,
288 .parser = parse_arg,
289 .doc = argp_program_doc,
290 };
291
292 struct capable_bpf *obj;
293 struct perf_buffer *pb = NULL;
294 int err;
295 int idx, cg_map_fd;
296 int cgfd = -1;
297 enum uniqueness uniqueness_type = UNQ_OFF;
298 pid_t my_pid = -1;
299
300 err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
301 if (err)
302 return err;
303
304 if (env.unique) {
305 if (strcmp(env.unique_type, "pid") == 0) {
306 uniqueness_type = UNQ_PID;
307 } else if (strcmp(env.unique_type, "cgroup") == 0) {
308 uniqueness_type = UNQ_CGROUP;
309 } else {
310 fprintf(stderr, "Unknown unique type %s\n", env.unique_type);
311 return -1;
312 }
313 }
314
315 libbpf_set_print(libbpf_print_fn);
316
317 obj = capable_bpf__open();
318 if (!obj) {
319 fprintf(stderr, "failed to open BPF object\n");
320 return 1;
321 }
322
323 obj->rodata->targ_pid = env.pid;
324 obj->rodata->filter_cg = env.cg;
325 obj->rodata->user_stack = env.user_stack;
326 obj->rodata->kernel_stack = env.kernel_stack;
327 obj->rodata->unique_type = uniqueness_type;
328
329 my_pid = getpid();
330 obj->rodata->my_pid = my_pid;
331
332 bpf_map__set_value_size(obj->maps.stackmap, env.perf_max_stack_depth * sizeof(unsigned long));
333 bpf_map__set_max_entries(obj->maps.stackmap, env.stack_storage_size);
334
335 err = capable_bpf__load(obj);
336 if (err) {
337 fprintf(stderr, "failed to load BPF object: %d\n", err);
338 goto cleanup;
339 }
340
341 /* update cgroup path fd to map */
342 if (env.cg) {
343 idx = 0;
344 cg_map_fd = bpf_map__fd(obj->maps.cgroup_map);
345 cgfd = open(env.cgroupspath, O_RDONLY);
346 if (cgfd < 0) {
347 fprintf(stderr, "Failed opening Cgroup path: %s", env.cgroupspath);
348 goto cleanup;
349 }
350 if (bpf_map_update_elem(cg_map_fd, &idx, &cgfd, BPF_ANY)) {
351 fprintf(stderr, "Failed adding target cgroup to map");
352 goto cleanup;
353 }
354 }
355
356 ksyms = ksyms__load();
357 if (!ksyms) {
358 fprintf(stderr, "failed to load kallsyms\n");
359 goto cleanup;
360 }
361 syms_cache = syms_cache__new(0);
362 if (!syms_cache) {
363 fprintf(stderr, "failed to create syms_cache\n");
364 goto cleanup;
365 }
366
367 ifd = bpf_map__fd(obj->maps.info);
368 sfd = bpf_map__fd(obj->maps.stackmap);
369
370 err = capable_bpf__attach(obj);
371 if (err) {
372 fprintf(stderr, "failed to attach BPF programs: %d\n", err);
373 goto cleanup;
374 }
375
376 pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES,
377 handle_event, handle_lost_events, NULL, NULL);
378 if (!pb) {
379 err = -errno;
380 fprintf(stderr, "failed to open perf buffer: %d\n", err);
381 goto cleanup;
382 }
383
384 if (signal(SIGINT, sig_int) == SIG_ERR) {
385 fprintf(stderr, "can't set signal handler: %s\n", strerror(errno));
386 err = 1;
387 goto cleanup;
388 }
389
390 if (env.extra_fields)
391 printf("%-8s %-5s %-7s %-7s %-16s %-7s %-20s %-7s %-7s %-7s\n", "TIME", "UID", "PID", "TID", "COMM", "CAP", "NAME", "AUDIT", "VERDICT", "INSETID");
392 else
393 printf("%-8s %-5s %-7s %-16s %-7s %-20s %-7s %-7s\n", "TIME", "UID", "PID", "COMM", "CAP", "NAME", "AUDIT", "VERDICT");
394
395 /* main: poll */
396 while (!exiting) {
397 err = perf_buffer__poll(pb, PERF_POLL_TIMEOUT_MS);
398 if (err < 0 && err != -EINTR) {
399 fprintf(stderr, "error polling perf buffer: %s\n", strerror(-err));
400 goto cleanup;
401 }
402 /* reset err to return 0 if exiting */
403 err = 0;
404 }
405
406 cleanup:
407 perf_buffer__free(pb);
408 capable_bpf__destroy(obj);
409 syms_cache__free(syms_cache);
410 ksyms__free(ksyms);
411 if (cgfd > 0)
412 close(cgfd);
413
414 return err != 0;
415 }
416