Lines Matching +full:enum +full:- +full:cnt +full:- +full:name (kernel/trace/bpf_trace.c)

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
18 #include <linux/error-injection.h>
56 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) in bpf_get_raw_tracepoint_module() argument
64 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) { in bpf_get_raw_tracepoint_module()
65 btp = &btm->module->bpf_raw_events[i]; in bpf_get_raw_tracepoint_module()
66 if (!strcmp(btp->tp->name, name)) { in bpf_get_raw_tracepoint_module()
67 if (try_module_get(btm->module)) in bpf_get_raw_tracepoint_module()
78 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) in bpf_get_raw_tracepoint_module() argument
97 * trace_call_bpf - invoke BPF program
106 * 0 - return from kprobe (event is filtered out)
107 * 1 - store kprobe event into ring buffer
120 * and don't send kprobe event into ring-buffer, in trace_call_bpf()
124 bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array)); in trace_call_bpf()
133 * whether call->prog_array is empty or not, which is in trace_call_bpf()
137 * non-NULL, we go into trace_call_bpf() and do the actual in trace_call_bpf()
146 ret = bpf_prog_run_array(rcu_dereference(call->prog_array), in trace_call_bpf()
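
A minimal sketch of the 0/1 contract documented above, as seen from a perf-attached kprobe program (a libbpf-style build with vmlinux.h is assumed; the probe target and UID check are illustrative):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    SEC("kprobe/do_sys_openat2")
    int filter_opens(struct pt_regs *ctx)
    {
        u32 uid = (u32)bpf_get_current_uid_gid();   /* low 32 bits are the uid */

        if (uid != 0)
            return 0;   /* event is filtered out */
        return 1;       /* event is stored into the ring buffer */
    }

    char LICENSE[] SEC("license") = "GPL";
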
209 * strncpy_from_user() does long-sized strides in the fast path. If the in bpf_probe_read_user_str_common()
333 * access_ok() should prevent writing to non-user memory, but in in BPF_CALL_3()
343 current->flags & (PF_KTHREAD | PF_EXITING))) in BPF_CALL_3()
344 return -EPERM; in BPF_CALL_3()
346 return -EPERM; in BPF_CALL_3()
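
The -EPERM exits above guard bpf_probe_write_user(); a hedged sketch of a caller (the destination pointer is purely illustrative and must point into user memory; PT_REGS_PARM2 assumes a __TARGET_ARCH_* define for bpf_tracing.h):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("kprobe/do_sys_openat2")
    int poke_user(struct pt_regs *ctx)
    {
        long val = 1;
        void *uptr = (void *)PT_REGS_PARM2(ctx);    /* user pointer, illustrative */

        /* fails with -EPERM for kthreads, exiting tasks, or when uaccess
         * is unavailable, matching the checks above */
        bpf_probe_write_user(uptr, &val, sizeof(val));
        return 0;
    }
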
426 return -EINVAL; in BPF_CALL_4()
468 return -EINVAL; in BPF_CALL_5()
479 return seq_has_overflowed(m) ? -EOVERFLOW : 0; in BPF_CALL_5()
498 return seq_write(m, data, len) ? -EOVERFLOW : 0; in BPF_CALL_3()
522 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags); in BPF_CALL_4()
546 return -EINVAL; in get_map_perf_counter()
549 if (unlikely(index >= array->map.max_entries)) in get_map_perf_counter()
550 return -E2BIG; in get_map_perf_counter()
552 ee = READ_ONCE(array->ptrs[index]); in get_map_perf_counter()
554 return -ENOENT; in get_map_perf_counter()
556 return perf_event_read_local(ee->event, value, enabled, running); in get_map_perf_counter()
566 * this api is ugly since we miss [-22..-2] range of valid in BPF_CALL_2()
585 int err = -EINVAL; in BPF_CALL_4()
589 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, in BPF_CALL_4()
590 &buf->running); in BPF_CALL_4()
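
get_map_perf_counter() backs both bpf_perf_event_read() and the bpf_perf_event_read_value() call shown just above; a minimal sketch of the BPF side, assuming user space has populated the map slots with perf event FDs:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(max_entries, 64);    /* one slot per possible CPU */
        __uint(key_size, sizeof(u32));
        __uint(value_size, sizeof(u32));
    } counters SEC(".maps");

    SEC("kprobe/do_sys_openat2")
    int read_counter(struct pt_regs *ctx)
    {
        struct bpf_perf_event_value val;

        /* BPF_F_CURRENT_CPU selects this CPU's slot; failures map to the
         * -EINVAL/-E2BIG/-ENOENT paths in get_map_perf_counter() */
        if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
                                       &val, sizeof(val)))
            bpf_printk("counter=%llu", val.counter);
        return 0;
    }
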
622 if (unlikely(index >= array->map.max_entries)) in __bpf_perf_event_output()
623 return -E2BIG; in __bpf_perf_event_output()
625 ee = READ_ONCE(array->ptrs[index]); in __bpf_perf_event_output()
627 return -ENOENT; in __bpf_perf_event_output()
629 event = ee->event; in __bpf_perf_event_output()
630 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || in __bpf_perf_event_output()
631 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) in __bpf_perf_event_output()
632 return -EINVAL; in __bpf_perf_event_output()
634 if (unlikely(event->oncpu != cpu)) in __bpf_perf_event_output()
635 return -EOPNOTSUPP; in __bpf_perf_event_output()
669 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { in BPF_CALL_5()
670 err = -EBUSY; in BPF_CALL_5()
674 sd = &sds->sds[nest_level - 1]; in BPF_CALL_5()
677 err = -EINVAL; in BPF_CALL_5()
734 ret = -EBUSY; in bpf_event_output()
737 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); in bpf_event_output()
738 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); in bpf_event_output()
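
__bpf_perf_event_output() requires the map slot to hold a PERF_TYPE_SOFTWARE / PERF_COUNT_SW_BPF_OUTPUT event opened on the current CPU, per the -EINVAL/-EOPNOTSUPP checks above; a minimal emitting sketch (event struct and probe target are illustrative):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(key_size, sizeof(u32));
        __uint(value_size, sizeof(u32));
    } events SEC(".maps");

    struct event { u32 pid; };

    SEC("kprobe/do_sys_openat2")
    int emit(struct pt_regs *ctx)
    {
        struct event e = { .pid = bpf_get_current_pid_tgid() >> 32 };

        /* BPF_F_CURRENT_CPU picks the slot for the CPU the probe runs on,
         * satisfying the event->oncpu == cpu check above */
        bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
        return 0;
    }
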
794 enum pid_type type;
807 siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV; in do_bpf_send_signal()
809 group_send_sig_info(work->sig, siginfo, work->task, work->type); in do_bpf_send_signal()
810 put_task_struct(work->task); in do_bpf_send_signal()
813 static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value) in bpf_send_signal_common()
838 if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING))) in bpf_send_signal_common()
839 return -EPERM; in bpf_send_signal_common()
841 return -EPERM; in bpf_send_signal_common()
844 return -EPERM; in bpf_send_signal_common()
851 return -EINVAL; in bpf_send_signal_common()
854 if (irq_work_is_busy(&work->irq_work)) in bpf_send_signal_common()
855 return -EBUSY; in bpf_send_signal_common()
861 work->task = get_task_struct(task); in bpf_send_signal_common()
862 work->has_siginfo = siginfo == &info; in bpf_send_signal_common()
863 if (work->has_siginfo) in bpf_send_signal_common()
864 copy_siginfo(&work->info, &info); in bpf_send_signal_common()
865 work->sig = sig; in bpf_send_signal_common()
866 work->type = type; in bpf_send_signal_common()
867 irq_work_queue(&work->irq_work); in bpf_send_signal_common()
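
bpf_send_signal_common() defers delivery through the irq_work above when the probe fires in a context that cannot send directly; a minimal caller sketch (the signal number is the asm-generic value, normally taken from signal.h):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    #define SIGUSR1 10  /* asm-generic value; vmlinux.h carries no macros */

    SEC("kprobe/do_sys_openat2")
    int signal_current(struct pt_regs *ctx)
    {
        /* -EPERM for kthreads/exiting tasks, -EBUSY while the previous
         * irq_work on this CPU is still pending, per the checks above */
        long err = bpf_send_signal(SIGUSR1);

        if (err)
            bpf_printk("bpf_send_signal: %ld", err);
        return 0;
    }
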
920 len = buf + sz - p; in BPF_CALL_3()
945 if (prog->type == BPF_PROG_TYPE_TRACING && in BTF_ID()
946 prog->expected_attach_type == BPF_TRACE_ITER) in BTF_ID()
949 if (prog->type == BPF_PROG_TYPE_LSM) in BTF_ID()
950 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id); in BTF_ID()
953 prog->aux->attach_btf_id); in BTF_ID()
979 return -EINVAL; in bpf_btf_printf_prepare()
982 return -EINVAL; in bpf_btf_printf_prepare()
987 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL; in bpf_btf_printf_prepare()
989 if (ptr->type_id > 0) in bpf_btf_printf_prepare()
990 *btf_id = ptr->type_id; in bpf_btf_printf_prepare()
992 return -EINVAL; in bpf_btf_printf_prepare()
997 return -ENOENT; in bpf_btf_printf_prepare()
1013 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size, in BPF_CALL_5()
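
bpf_btf_printf_prepare() validates the struct btf_ptr consumed by bpf_seq_printf_btf() and bpf_snprintf_btf(); a minimal iterator sketch, assuming CO-RE headers for bpf_core_type_id_kernel():

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>

    SEC("iter/task")
    int dump_task(struct bpf_iter__task *ctx)
    {
        struct task_struct *task = ctx->task;
        struct btf_ptr p = {};

        if (!task)
            return 0;
        p.ptr = task;
        p.type_id = bpf_core_type_id_kernel(struct task_struct);
        /* an invalid type_id or ptr size takes the -EINVAL paths above */
        bpf_seq_printf_btf(ctx->meta->seq, &p, sizeof(p), 0);
        return 0;
    }
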
1031 return ((u64 *)ctx)[-2]; in BPF_CALL_1()
1050 if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE))) in get_entry_ip()
1053 instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE); in get_entry_ip()
1056 fentry_ip -= ENDBR_INSN_SIZE; in get_entry_ip()
1069 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); in BPF_CALL_1()
1070 if (run_ctx->is_uprobe) in BPF_CALL_1()
1071 return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr; in BPF_CALL_1()
1076 if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY)) in BPF_CALL_1()
1079 return get_entry_ip((uintptr_t)kp->addr); in BPF_CALL_1()
1091 return bpf_kprobe_multi_entry_ip(current->bpf_ctx); in BPF_CALL_1()
1103 return bpf_kprobe_multi_cookie(current->bpf_ctx); in BPF_CALL_1()
1115 return bpf_uprobe_multi_entry_ip(current->bpf_ctx); in BPF_CALL_1()
1127 return bpf_uprobe_multi_cookie(current->bpf_ctx); in BPF_CALL_1()
1141 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); in BPF_CALL_1()
1142 return run_ctx->bpf_cookie; in BPF_CALL_1()
1154 return ctx->event->bpf_cookie; in BPF_CALL_1()
1168 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); in BPF_CALL_1()
1169 return run_ctx->bpf_cookie; in BPF_CALL_1()
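
Each of these getters resolves the cookie stored at attach time from its program type's run context; from BPF code they all sit behind one helper:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    SEC("kprobe/do_sys_openat2")
    int with_cookie(struct pt_regs *ctx)
    {
        /* the u64 supplied via bpf_cookie at link/perf attach time */
        u64 cookie = bpf_get_attach_cookie(ctx);

        bpf_printk("cookie=%llu", cookie);
        return 0;
    }
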
1187 return -EINVAL; in BPF_CALL_3()
1190 return -ENOENT; in BPF_CALL_3()
1206 u64 nr_args = ((u64 *)ctx)[-1]; in BPF_CALL_3()
1209 return -EINVAL; in BPF_CALL_3()
1226 u64 nr_args = ((u64 *)ctx)[-1]; in BPF_CALL_2()
1243 return ((u64 *)ctx)[-1]; in BPF_CALL_1()
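
The ((u64 *)ctx)[-1] slot read above holds the typed argument count for trampoline-based (fentry/fexit) programs; a minimal sketch of the helpers built on it:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("fexit/do_unlinkat")
    int BPF_PROG(unlink_exit)
    {
        u64 nr_args = bpf_get_func_arg_cnt(ctx);    /* ((u64 *)ctx)[-1] */
        u64 arg0 = 0, ret = 0;

        bpf_get_func_arg(ctx, 0, &arg0);    /* -EINVAL once n >= nr_args */
        bpf_get_func_ret(ctx, &ret);        /* fexit/fmod_ret only */
        bpf_printk("nr_args=%llu ret=%llu", nr_args, ret);
        return 0;
    }
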
1256 * bpf_lookup_user_key - lookup a key by its serial
1258 * @flags: lookup-specific flags
1269 * one of the available key-specific kfuncs.
1302 bkey->key = key_ref_to_ptr(key_ref); in bpf_lookup_user_key()
1303 bkey->has_ref = true; in bpf_lookup_user_key()
1309 * bpf_lookup_system_key - lookup a key by a system-defined ID
1327 * pre-determined ID on success, a NULL pointer otherwise
1340 bkey->key = (struct key *)(unsigned long)id; in bpf_lookup_system_key()
1341 bkey->has_ref = false; in bpf_lookup_system_key()
1347 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1355 if (bkey->has_ref) in bpf_key_put()
1356 key_put(bkey->key); in bpf_key_put()
1363 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1383 if (trusted_keyring->has_ref) { in bpf_verify_pkcs7_signature()
1392 ret = key_validate(trusted_keyring->key); in bpf_verify_pkcs7_signature()
1403 trusted_keyring->key, in bpf_verify_pkcs7_signature()
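
These key/signature kfuncs are only callable from sleepable programs; a hedged sketch of looking up and releasing a keyring from a sleepable LSM hook (the keyring ID macro comes from uapi keyctl.h and is redefined here because vmlinux.h carries no macros):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    #define KEY_SPEC_SESSION_KEYRING -3     /* from uapi/linux/keyctl.h */

    extern struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags) __ksym;
    extern void bpf_key_put(struct bpf_key *bkey) __ksym;

    SEC("lsm.s/bpf")    /* sleepable, as bpf_lookup_user_key() requires */
    int BPF_PROG(check, int cmd, union bpf_attr *attr, unsigned int size)
    {
        struct bpf_key *bkey;

        bkey = bpf_lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0);
        if (!bkey)
            return 0;
        /* ... e.g. hand bkey to bpf_verify_pkcs7_signature() with dynptrs ... */
        bpf_key_put(bkey);
        return 0;
    }
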
1435 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) in bpf_tracing_func_proto()
1525 return prog->sleepable ? &bpf_get_task_stack_sleepable_proto in bpf_tracing_func_proto()
1565 if (!bpf_token_capable(prog->aux->token, CAP_SYS_ADMIN)) in bpf_tracing_func_proto()
1579 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI || in is_kprobe_multi()
1580 prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION; in is_kprobe_multi()
1585 return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION; in is_kprobe_session()
1590 return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI || in is_uprobe_multi()
1591 prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION; in is_uprobe_multi()
1596 return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION; in is_uprobe_session()
1600 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) in kprobe_prog_func_proto()
1608 return prog->sleepable ? &bpf_get_stack_sleepable_proto : &bpf_get_stack_proto; in kprobe_prog_func_proto()
1631 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, in kprobe_prog_is_valid_access()
1726 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) in tp_prog_func_proto()
1742 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, in tp_prog_is_valid_access()
1768 int err = -EINVAL; in BPF_CALL_3()
1772 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, in BPF_CALL_3()
1773 &buf->running); in BPF_CALL_3()
1795 struct perf_branch_stack *br_stack = ctx->data->br_stack; in BPF_CALL_4()
1799 return -EINVAL; in BPF_CALL_4()
1801 if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK))) in BPF_CALL_4()
1802 return -ENOENT; in BPF_CALL_4()
1805 return -ENOENT; in BPF_CALL_4()
1808 return br_stack->nr * br_entry_size; in BPF_CALL_4()
1811 return -EINVAL; in BPF_CALL_4()
1813 to_copy = min_t(u32, br_stack->nr * br_entry_size, size); in BPF_CALL_4()
1814 memcpy(buf, br_stack->entries, to_copy); in BPF_CALL_4()
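
bpf_read_branch_records() has a size-query mode and a copy mode, both visible above; a minimal perf_event sketch (the event must sample with PERF_SAMPLE_BRANCH_STACK, otherwise -ENOENT):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    SEC("perf_event")
    int lbr_sample(struct bpf_perf_event_data *ctx)
    {
        struct perf_branch_entry entries[16] = {};
        long sz, copied;

        /* flags=BPF_F_GET_BRANCH_RECORDS_SIZE returns nr * entry_size */
        sz = bpf_read_branch_records(ctx, NULL, 0,
                                     BPF_F_GET_BRANCH_RECORDS_SIZE);
        /* copy mode: size must be a multiple of the entry size */
        copied = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
        if (copied > 0)
            bpf_printk("lbr: %ld of %ld bytes", copied, sz);
        return 0;
    }
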
1830 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) in pe_prog_func_proto()
1868 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { in get_bpf_raw_tp_regs()
1870 return ERR_PTR(-EBUSY); in get_bpf_raw_tp_regs()
1873 return &tp_regs->regs[nest_level - 1]; in get_bpf_raw_tp_regs()
1965 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) in raw_tp_prog_func_proto()
1982 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) in tracing_prog_func_proto()
2018 return prog->expected_attach_type == BPF_TRACE_ITER ? in tracing_prog_func_proto()
2022 return prog->expected_attach_type == BPF_TRACE_ITER ? in tracing_prog_func_proto()
2026 return prog->expected_attach_type == BPF_TRACE_ITER ? in tracing_prog_func_proto()
2038 if (prog->type == BPF_PROG_TYPE_TRACING && in tracing_prog_func_proto()
2039 prog->expected_attach_type == BPF_TRACE_RAW_TP) in tracing_prog_func_proto()
2044 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER) in tracing_prog_func_proto()
2051 enum bpf_access_type type, in raw_tp_prog_is_valid_access()
2059 enum bpf_access_type type, in tracing_prog_is_valid_access()
2070 return -ENOTSUPP; in bpf_prog_test_run_tracing()
2094 enum bpf_access_type type, in raw_tp_writable_prog_is_valid_access()
2101 info->reg_type = PTR_TO_TP_BUFFER; in raw_tp_writable_prog_is_valid_access()
2114 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, in pe_prog_is_valid_access()
2152 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, in pe_prog_convert_ctx_access()
2159 switch (si->off) { in pe_prog_convert_ctx_access()
2162 data), si->dst_reg, si->src_reg, in pe_prog_convert_ctx_access()
2164 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, in pe_prog_convert_ctx_access()
2170 data), si->dst_reg, si->src_reg, in pe_prog_convert_ctx_access()
2172 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, in pe_prog_convert_ctx_access()
2178 regs), si->dst_reg, si->src_reg, in pe_prog_convert_ctx_access()
2180 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, in pe_prog_convert_ctx_access()
2181 si->off); in pe_prog_convert_ctx_access()
2185 return insn - insn_buf; in pe_prog_convert_ctx_access()
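
The rewrites above let a perf_event program dereference struct bpf_perf_event_data fields directly while the verifier emits the kernel-side loads; a minimal sketch:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    SEC("perf_event")
    int on_sample(struct bpf_perf_event_data *ctx)
    {
        /* both reads are converted by pe_prog_convert_ctx_access() into
         * two-step loads through bpf_perf_event_data_kern */
        u64 period = ctx->sample_period;
        u64 addr = ctx->addr;

        bpf_printk("period=%llu addr=0x%llx", period, addr);
        return 0;
    }
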
2207 int ret = -EEXIST; in perf_event_attach_bpf_prog()
2211 * and only if they are on the opt-in list. in perf_event_attach_bpf_prog()
2213 if (prog->kprobe_override && in perf_event_attach_bpf_prog()
2214 (!trace_kprobe_on_func_entry(event->tp_event) || in perf_event_attach_bpf_prog()
2215 !trace_kprobe_error_injectable(event->tp_event))) in perf_event_attach_bpf_prog()
2216 return -EINVAL; in perf_event_attach_bpf_prog()
2220 if (event->prog) in perf_event_attach_bpf_prog()
2223 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); in perf_event_attach_bpf_prog()
2226 ret = -E2BIG; in perf_event_attach_bpf_prog()
2234 /* set the new array to event->tp_event and set event->prog */ in perf_event_attach_bpf_prog()
2235 event->prog = prog; in perf_event_attach_bpf_prog()
2236 event->bpf_cookie = bpf_cookie; in perf_event_attach_bpf_prog()
2237 rcu_assign_pointer(event->tp_event->prog_array, new_array); in perf_event_attach_bpf_prog()
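
perf_event_attach_bpf_prog() is reached from user space via the perf ioctl (cookie 0) or bpf_link_create() (which can pass a bpf_cookie); a minimal ioctl sketch with error handling elided (tp_id is the tracepoint id from tracefs, prog_fd a loaded BPF_PROG_TYPE_TRACEPOINT program):

    /* user-space side */
    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int attach_tp_prog(int tp_id, int prog_fd)
    {
        struct perf_event_attr attr;
        int pfd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.size = sizeof(attr);
        attr.config = tp_id;

        pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
                      -1 /* group_fd */, 0);
        ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd);    /* -EEXIST if already set */
        ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0);
        return pfd;
    }
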
2254 if (!event->prog) in perf_event_detach_bpf_prog()
2257 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); in perf_event_detach_bpf_prog()
2261 ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array); in perf_event_detach_bpf_prog()
2263 bpf_prog_array_delete_safe(old_array, event->prog); in perf_event_detach_bpf_prog()
2265 rcu_assign_pointer(event->tp_event->prog_array, new_array); in perf_event_detach_bpf_prog()
2270 prog = event->prog; in perf_event_detach_bpf_prog()
2271 event->prog = NULL; in perf_event_detach_bpf_prog()
2280 * programs and uses tasks-trace-RCU. in perf_event_detach_bpf_prog()
2297 return -EPERM; in perf_event_query_prog_array()
2298 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_event_query_prog_array()
2299 return -EINVAL; in perf_event_query_prog_array()
2301 return -EFAULT; in perf_event_query_prog_array()
2305 return -E2BIG; in perf_event_query_prog_array()
2308 return -ENOMEM; in perf_event_query_prog_array()
2311 * is required when user only wants to check for uquery->prog_cnt. in perf_event_query_prog_array()
2317 progs = bpf_event_rcu_dereference(event->tp_event->prog_array); in perf_event_query_prog_array()
2321 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || in perf_event_query_prog_array()
2322 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) in perf_event_query_prog_array()
2323 ret = -EFAULT; in perf_event_query_prog_array()
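
The query side is reachable with the matching ioctl; per the comment above, ids_len == 0 is allowed when the caller only wants prog_cnt:

    /* user-space side */
    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    void list_tp_progs(int pfd)
    {
        __u32 i, len = 16;
        struct perf_event_query_bpf *q;

        q = calloc(1, sizeof(*q) + len * sizeof(__u32));
        if (!q)
            return;
        q->ids_len = len;
        if (!ioctl(pfd, PERF_EVENT_IOC_QUERY_BPF, q))
            for (i = 0; i < q->prog_cnt; i++)
                printf("attached prog id %u\n", q->ids[i]);
        free(q);
    }
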
2332 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) in bpf_get_raw_tracepoint() argument
2337 if (!strcmp(btp->tp->name, name)) in bpf_get_raw_tracepoint()
2341 return bpf_get_raw_tracepoint_module(name); in bpf_get_raw_tracepoint()
2357 struct bpf_prog *prog = link->link.prog; in __bpf_trace_run()
2362 if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { in __bpf_trace_run()
2367 run_ctx.bpf_cookie = link->cookie; in __bpf_trace_run()
2376 this_cpu_dec(*(prog->active)); in __bpf_trace_run()
2426 struct tracepoint *tp = btp->tp; in bpf_probe_register()
2427 struct bpf_prog *prog = link->link.prog; in bpf_probe_register()
2433 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) in bpf_probe_register()
2434 return -EINVAL; in bpf_probe_register()
2436 if (prog->aux->max_tp_access > btp->writable_size) in bpf_probe_register()
2437 return -EINVAL; in bpf_probe_register()
2439 return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link); in bpf_probe_register()
2444 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link); in bpf_probe_unregister()
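
bpf_probe_register() bounds ctx accesses by num_args * sizeof(u64), which is what a raw tracepoint program indexes through ctx->args[]; a minimal sketch (sched_switch's args are preempt, prev, next):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>

    SEC("raw_tp/sched_switch")
    int on_switch(struct bpf_raw_tracepoint_args *ctx)
    {
        struct task_struct *next = (struct task_struct *)ctx->args[2];

        /* reading args[2] passes because max_ctx_offset stays within
         * num_args * sizeof(u64) for this tracepoint */
        bpf_printk("next pid=%d", BPF_CORE_READ(next, pid));
        return 0;
    }
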
2456 prog = event->prog; in bpf_get_perf_event_info()
2458 return -ENOENT; in bpf_get_perf_event_info()
2461 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) in bpf_get_perf_event_info()
2462 return -EOPNOTSUPP; in bpf_get_perf_event_info()
2464 *prog_id = prog->aux->id; in bpf_get_perf_event_info()
2465 flags = event->tp_event->flags; in bpf_get_perf_event_info()
2467 is_syscall_tp = is_syscall_trace_event(event->tp_event); in bpf_get_perf_event_info()
2470 *buf = is_tracepoint ? event->tp_event->tp->name in bpf_get_perf_event_info()
2471 : event->tp_event->name; in bpf_get_perf_event_info()
2481 err = -EOPNOTSUPP; in bpf_get_perf_event_info()
2486 event->attr.type == PERF_TYPE_TRACEPOINT); in bpf_get_perf_event_info()
2492 event->attr.type == PERF_TYPE_TRACEPOINT); in bpf_get_perf_event_info()
2506 init_irq_work(&work->irq_work, do_bpf_send_signal); in send_signal_irq_work_init()
2521 if (mod->num_bpf_raw_events == 0 || in bpf_event_notify()
2531 btm->module = module; in bpf_event_notify()
2532 list_add(&btm->list, &bpf_trace_modules); in bpf_event_notify()
2534 ret = -ENOMEM; in bpf_event_notify()
2539 if (btm->module == module) { in bpf_event_notify()
2540 list_del(&btm->list); in bpf_event_notify()
2579 u32 cnt; member
2610 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt) in copy_user_syms() argument
2615 int err = -ENOMEM; in copy_user_syms()
2618 syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL); in copy_user_syms()
2622 buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL); in copy_user_syms()
2626 for (p = buf, i = 0; i < cnt; i++) { in copy_user_syms()
2628 err = -EFAULT; in copy_user_syms()
2633 err = -E2BIG; in copy_user_syms()
2640 us->syms = syms; in copy_user_syms()
2641 us->buf = buf; in copy_user_syms()
2652 static void kprobe_multi_put_modules(struct module **mods, u32 cnt) in kprobe_multi_put_modules() argument
2656 for (i = 0; i < cnt; i++) in kprobe_multi_put_modules()
2662 kvfree(us->syms); in free_user_syms()
2663 kvfree(us->buf); in free_user_syms()
2671 unregister_fprobe(&kmulti_link->fp); in bpf_kprobe_multi_link_release()
2672 kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt); in bpf_kprobe_multi_link_release()
2680 kvfree(kmulti_link->addrs); in bpf_kprobe_multi_link_dealloc()
2681 kvfree(kmulti_link->cookies); in bpf_kprobe_multi_link_dealloc()
2682 kfree(kmulti_link->mods); in bpf_kprobe_multi_link_dealloc()
2689 u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies); in bpf_kprobe_multi_link_fill_link_info()
2690 u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs); in bpf_kprobe_multi_link_fill_link_info()
2692 u32 ucount = info->kprobe_multi.count; in bpf_kprobe_multi_link_fill_link_info()
2696 return -EINVAL; in bpf_kprobe_multi_link_fill_link_info()
2698 return -EINVAL; in bpf_kprobe_multi_link_fill_link_info()
2701 info->kprobe_multi.count = kmulti_link->cnt; in bpf_kprobe_multi_link_fill_link_info()
2702 info->kprobe_multi.flags = kmulti_link->flags; in bpf_kprobe_multi_link_fill_link_info()
2703 info->kprobe_multi.missed = kmulti_link->fp.nmissed; in bpf_kprobe_multi_link_fill_link_info()
2707 if (ucount < kmulti_link->cnt) in bpf_kprobe_multi_link_fill_link_info()
2708 err = -ENOSPC; in bpf_kprobe_multi_link_fill_link_info()
2710 ucount = kmulti_link->cnt; in bpf_kprobe_multi_link_fill_link_info()
2713 if (kmulti_link->cookies) { in bpf_kprobe_multi_link_fill_link_info()
2714 if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64))) in bpf_kprobe_multi_link_fill_link_info()
2715 return -EFAULT; in bpf_kprobe_multi_link_fill_link_info()
2719 return -EFAULT; in bpf_kprobe_multi_link_fill_link_info()
2725 if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64))) in bpf_kprobe_multi_link_fill_link_info()
2726 return -EFAULT; in bpf_kprobe_multi_link_fill_link_info()
2730 return -EFAULT; in bpf_kprobe_multi_link_fill_link_info()
2748 cookie_a = link->cookies + (addr_a - link->addrs); in bpf_kprobe_multi_cookie_swap()
2749 cookie_b = link->cookies + (addr_b - link->addrs); in bpf_kprobe_multi_cookie_swap()
2762 return *addr_a < *addr_b ? -1 : 1; in bpf_kprobe_multi_addrs_cmp()
2779 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, in bpf_kprobe_multi_cookie()
2781 link = run_ctx->link; in bpf_kprobe_multi_cookie()
2782 if (!link->cookies) in bpf_kprobe_multi_cookie()
2784 entry_ip = run_ctx->entry_ip; in bpf_kprobe_multi_cookie()
2785 addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip), in bpf_kprobe_multi_cookie()
2789 cookie = link->cookies + (addr - link->addrs); in bpf_kprobe_multi_cookie()
2797 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, in bpf_kprobe_multi_entry_ip()
2799 return run_ctx->entry_ip; in bpf_kprobe_multi_entry_ip()
2820 bpf_prog_inc_misses_counter(link->link.prog); in kprobe_multi_link_prog_run()
2829 err = bpf_prog_run(link->link.prog, regs); in kprobe_multi_link_prog_run()
2850 return is_kprobe_session(link->link.prog) ? err : 0; in kprobe_multi_link_handler()
2886 if (data->cookies) { in symbols_swap_r()
2889 cookie_a = data->cookies + (name_a - data->funcs); in symbols_swap_r()
2890 cookie_b = data->cookies + (name_b - data->funcs); in symbols_swap_r()
2905 if (arr->mods_cnt == arr->mods_cap) { in add_module()
2906 arr->mods_cap = max(16, arr->mods_cap * 3 / 2); in add_module()
2907 mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL); in add_module()
2909 return -ENOMEM; in add_module()
2910 arr->mods = mods; in add_module()
2913 arr->mods[arr->mods_cnt] = mod; in add_module()
2914 arr->mods_cnt++; in add_module()
2922 for (i = arr->mods_cnt - 1; i >= 0; i--) { in has_module()
2923 if (arr->mods[i] == mod) in has_module()
2945 err = -EINVAL; in get_modules_for_addrs()
2968 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt) in addrs_check_error_injection_list() argument
2972 for (i = 0; i < cnt; i++) { in addrs_check_error_injection_list()
2974 return -EINVAL; in addrs_check_error_injection_list()
2985 u32 flags, cnt, size; in bpf_kprobe_multi_link_attach() local
2993 return -EOPNOTSUPP; in bpf_kprobe_multi_link_attach()
2996 return -EINVAL; in bpf_kprobe_multi_link_attach()
2998 flags = attr->link_create.kprobe_multi.flags; in bpf_kprobe_multi_link_attach()
3000 return -EINVAL; in bpf_kprobe_multi_link_attach()
3002 uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); in bpf_kprobe_multi_link_attach()
3003 usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); in bpf_kprobe_multi_link_attach()
3005 return -EINVAL; in bpf_kprobe_multi_link_attach()
3007 cnt = attr->link_create.kprobe_multi.cnt; in bpf_kprobe_multi_link_attach()
3008 if (!cnt) in bpf_kprobe_multi_link_attach()
3009 return -EINVAL; in bpf_kprobe_multi_link_attach()
3010 if (cnt > MAX_KPROBE_MULTI_CNT) in bpf_kprobe_multi_link_attach()
3011 return -E2BIG; in bpf_kprobe_multi_link_attach()
3013 size = cnt * sizeof(*addrs); in bpf_kprobe_multi_link_attach()
3014 addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); in bpf_kprobe_multi_link_attach()
3016 return -ENOMEM; in bpf_kprobe_multi_link_attach()
3018 ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); in bpf_kprobe_multi_link_attach()
3020 cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); in bpf_kprobe_multi_link_attach()
3022 err = -ENOMEM; in bpf_kprobe_multi_link_attach()
3026 err = -EFAULT; in bpf_kprobe_multi_link_attach()
3033 err = -EFAULT; in bpf_kprobe_multi_link_attach()
3042 err = copy_user_syms(&us, usyms, cnt); in bpf_kprobe_multi_link_attach()
3049 sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r, in bpf_kprobe_multi_link_attach()
3052 err = ftrace_lookup_symbols(us.syms, cnt, addrs); in bpf_kprobe_multi_link_attach()
3058 if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) { in bpf_kprobe_multi_link_attach()
3059 err = -EINVAL; in bpf_kprobe_multi_link_attach()
3065 err = -ENOMEM; in bpf_kprobe_multi_link_attach()
3069 bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, in bpf_kprobe_multi_link_attach()
3072 err = bpf_link_prime(&link->link, &link_primer); in bpf_kprobe_multi_link_attach()
3077 link->fp.entry_handler = kprobe_multi_link_handler; in bpf_kprobe_multi_link_attach()
3079 link->fp.exit_handler = kprobe_multi_link_exit_handler; in bpf_kprobe_multi_link_attach()
3081 link->fp.entry_data_size = sizeof(u64); in bpf_kprobe_multi_link_attach()
3083 link->addrs = addrs; in bpf_kprobe_multi_link_attach()
3084 link->cookies = cookies; in bpf_kprobe_multi_link_attach()
3085 link->cnt = cnt; in bpf_kprobe_multi_link_attach()
3086 link->flags = flags; in bpf_kprobe_multi_link_attach()
3095 sort_r(addrs, cnt, sizeof(*addrs), in bpf_kprobe_multi_link_attach()
3101 err = get_modules_for_addrs(&link->mods, addrs, cnt); in bpf_kprobe_multi_link_attach()
3106 link->mods_cnt = err; in bpf_kprobe_multi_link_attach()
3108 err = register_fprobe_ips(&link->fp, addrs, cnt); in bpf_kprobe_multi_link_attach()
3110 kprobe_multi_put_modules(link->mods, link->mods_cnt); in bpf_kprobe_multi_link_attach()
3126 return -EOPNOTSUPP; in bpf_kprobe_multi_link_attach()
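
From user space, the whole attach path above is normally driven through libbpf; a minimal sketch, assuming prog is a loaded SEC("kprobe.multi") program (symbol names and cookies are illustrative):

    /* user-space side, libbpf */
    #include <bpf/libbpf.h>

    static struct bpf_link *attach_multi(struct bpf_program *prog)
    {
        const char *syms[] = { "vfs_read", "vfs_write" };
        __u64 cookies[] = { 1, 2 };
        LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
            .syms = syms,
            .cookies = cookies,
            .cnt = 2,
        );

        /* symbols resolve through ftrace_lookup_symbols(); cookies are
         * sorted together with addrs in bpf_kprobe_multi_link_attach() */
        return bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
    }
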
3154 u32 cnt; member
3166 static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt) in bpf_uprobe_unregister() argument
3170 for (i = 0; i < cnt; i++) in bpf_uprobe_unregister()
3173 if (cnt) in bpf_uprobe_unregister()
3182 bpf_uprobe_unregister(umulti_link->uprobes, umulti_link->cnt); in bpf_uprobe_multi_link_release()
3183 if (umulti_link->task) in bpf_uprobe_multi_link_release()
3184 put_task_struct(umulti_link->task); in bpf_uprobe_multi_link_release()
3185 path_put(&umulti_link->path); in bpf_uprobe_multi_link_release()
3193 kvfree(umulti_link->uprobes); in bpf_uprobe_multi_link_dealloc()
3200 u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets); in bpf_uprobe_multi_link_fill_link_info()
3201 u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies); in bpf_uprobe_multi_link_fill_link_info()
3202 u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets); in bpf_uprobe_multi_link_fill_link_info()
3203 u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path); in bpf_uprobe_multi_link_fill_link_info()
3204 u32 upath_size = info->uprobe_multi.path_size; in bpf_uprobe_multi_link_fill_link_info()
3206 u32 ucount = info->uprobe_multi.count; in bpf_uprobe_multi_link_fill_link_info()
3212 return -EINVAL; in bpf_uprobe_multi_link_fill_link_info()
3215 return -EINVAL; in bpf_uprobe_multi_link_fill_link_info()
3218 info->uprobe_multi.count = umulti_link->cnt; in bpf_uprobe_multi_link_fill_link_info()
3219 info->uprobe_multi.flags = umulti_link->flags; in bpf_uprobe_multi_link_fill_link_info()
3220 info->uprobe_multi.pid = umulti_link->task ? in bpf_uprobe_multi_link_fill_link_info()
3221 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0; in bpf_uprobe_multi_link_fill_link_info()
3226 return -ENOMEM; in bpf_uprobe_multi_link_fill_link_info()
3227 p = d_path(&umulti_link->path, buf, upath_size); in bpf_uprobe_multi_link_fill_link_info()
3232 upath_size = buf + upath_size - p; in bpf_uprobe_multi_link_fill_link_info()
3238 return -EFAULT; in bpf_uprobe_multi_link_fill_link_info()
3239 info->uprobe_multi.path_size = upath_size; in bpf_uprobe_multi_link_fill_link_info()
3244 if (ucount < umulti_link->cnt) in bpf_uprobe_multi_link_fill_link_info()
3245 err = -ENOSPC; in bpf_uprobe_multi_link_fill_link_info()
3247 ucount = umulti_link->cnt; in bpf_uprobe_multi_link_fill_link_info()
3251 put_user(umulti_link->uprobes[i].offset, uoffsets + i)) in bpf_uprobe_multi_link_fill_link_info()
3252 return -EFAULT; in bpf_uprobe_multi_link_fill_link_info()
3254 put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) in bpf_uprobe_multi_link_fill_link_info()
3255 return -EFAULT; in bpf_uprobe_multi_link_fill_link_info()
3257 put_user(umulti_link->uprobes[i].cookie, ucookies + i)) in bpf_uprobe_multi_link_fill_link_info()
3258 return -EFAULT; in bpf_uprobe_multi_link_fill_link_info()
3275 struct bpf_uprobe_multi_link *link = uprobe->link; in uprobe_prog_run()
3284 struct bpf_prog *prog = link->link.prog; in uprobe_prog_run()
3285 bool sleepable = prog->sleepable; in uprobe_prog_run()
3289 if (link->task && !same_thread_group(current, link->task)) in uprobe_prog_run()
3300 err = bpf_prog_run(link->link.prog, regs); in uprobe_prog_run()
3318 return uprobe->link->task->mm == mm; in uprobe_multi_link_filter()
3330 if (uprobe->session) in uprobe_multi_link_handler()
3350 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, in bpf_uprobe_multi_entry_ip()
3352 return run_ctx->entry_ip; in bpf_uprobe_multi_entry_ip()
3359 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, in bpf_uprobe_multi_cookie()
3361 return run_ctx->uprobe->cookie; in bpf_uprobe_multi_cookie()
3374 u32 flags, cnt, i; in bpf_uprobe_multi_link_attach() local
3376 char *name; in bpf_uprobe_multi_link_attach() local
3382 return -EOPNOTSUPP; in bpf_uprobe_multi_link_attach()
3385 return -EINVAL; in bpf_uprobe_multi_link_attach()
3387 flags = attr->link_create.uprobe_multi.flags; in bpf_uprobe_multi_link_attach()
3389 return -EINVAL; in bpf_uprobe_multi_link_attach()
3392 * path, offsets and cnt are mandatory, in bpf_uprobe_multi_link_attach()
3395 upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path); in bpf_uprobe_multi_link_attach()
3396 uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets); in bpf_uprobe_multi_link_attach()
3397 cnt = attr->link_create.uprobe_multi.cnt; in bpf_uprobe_multi_link_attach()
3398 pid = attr->link_create.uprobe_multi.pid; in bpf_uprobe_multi_link_attach()
3400 if (!upath || !uoffsets || !cnt || pid < 0) in bpf_uprobe_multi_link_attach()
3401 return -EINVAL; in bpf_uprobe_multi_link_attach()
3402 if (cnt > MAX_UPROBE_MULTI_CNT) in bpf_uprobe_multi_link_attach()
3403 return -E2BIG; in bpf_uprobe_multi_link_attach()
3405 uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets); in bpf_uprobe_multi_link_attach()
3406 ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies); in bpf_uprobe_multi_link_attach()
3408 name = strndup_user(upath, PATH_MAX); in bpf_uprobe_multi_link_attach()
3409 if (IS_ERR(name)) { in bpf_uprobe_multi_link_attach()
3410 err = PTR_ERR(name); in bpf_uprobe_multi_link_attach()
3414 err = kern_path(name, LOOKUP_FOLLOW, &path); in bpf_uprobe_multi_link_attach()
3415 kfree(name); in bpf_uprobe_multi_link_attach()
3420 err = -EBADF; in bpf_uprobe_multi_link_attach()
3427 err = -ESRCH; in bpf_uprobe_multi_link_attach()
3432 err = -ENOMEM; in bpf_uprobe_multi_link_attach()
3435 uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL); in bpf_uprobe_multi_link_attach()
3440 for (i = 0; i < cnt; i++) { in bpf_uprobe_multi_link_attach()
3442 err = -EFAULT; in bpf_uprobe_multi_link_attach()
3446 err = -EINVAL; in bpf_uprobe_multi_link_attach()
3450 err = -EFAULT; in bpf_uprobe_multi_link_attach()
3454 err = -EFAULT; in bpf_uprobe_multi_link_attach()
3470 link->cnt = cnt; in bpf_uprobe_multi_link_attach()
3471 link->uprobes = uprobes; in bpf_uprobe_multi_link_attach()
3472 link->path = path; in bpf_uprobe_multi_link_attach()
3473 link->task = task; in bpf_uprobe_multi_link_attach()
3474 link->flags = flags; in bpf_uprobe_multi_link_attach()
3476 bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI, in bpf_uprobe_multi_link_attach()
3479 for (i = 0; i < cnt; i++) { in bpf_uprobe_multi_link_attach()
3480 uprobes[i].uprobe = uprobe_register(d_real_inode(link->path.dentry), in bpf_uprobe_multi_link_attach()
3486 link->cnt = i; in bpf_uprobe_multi_link_attach()
3491 err = bpf_link_prime(&link->link, &link_primer); in bpf_uprobe_multi_link_attach()
3498 bpf_uprobe_unregister(uprobes, link->cnt); in bpf_uprobe_multi_link_attach()
3512 return -EOPNOTSUPP; in bpf_uprobe_multi_link_attach()
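
The uprobe-multi counterpart is likewise reachable through libbpf; a hedged sketch, assuming a loaded SEC("uprobe.multi") program (library path and pattern are illustrative; pid 0 means no PID filter, and negative pids are rejected above):

    /* user-space side, libbpf */
    #include <bpf/libbpf.h>

    static struct bpf_link *attach_umulti(struct bpf_program *prog)
    {
        /* resolves "malloc" in the ELF, then drives
         * bpf_uprobe_multi_link_attach() above */
        return bpf_program__attach_uprobe_multi(prog, 0,
                        "/usr/lib/x86_64-linux-gnu/libc.so.6",
                        "malloc", NULL);
    }
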
3530 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx); in bpf_session_is_return()
3531 return session_ctx->is_return; in bpf_session_is_return()
3538 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx); in bpf_session_cookie()
3539 return session_ctx->data; in bpf_session_cookie()
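
Session programs run on both entry and return and share a per-session u64 slot (the entry_data_size above); a minimal sketch using the two kfuncs:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    extern bool bpf_session_is_return(void) __ksym;
    extern __u64 *bpf_session_cookie(void) __ksym;

    SEC("kprobe.session/do_unlinkat")
    int unlink_session(struct pt_regs *ctx)
    {
        __u64 *stamp = bpf_session_cookie();    /* per-session u64 slot */

        if (bpf_session_is_return()) {
            bpf_printk("took %llu ns", bpf_ktime_get_ns() - *stamp);
            return 0;
        }
        *stamp = bpf_ktime_get_ns();
        return 0;   /* non-zero on entry would suppress the return probe */
    }
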
3555 return -EACCES; in BTF_ID_FLAGS()
3575 __bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type, in bpf_send_signal_task()
3579 return -EINVAL; in bpf_send_signal_task()
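
Unlike bpf_send_signal(), this kfunc targets an explicit task and carries a value delivered with the signal; a hedged sketch from a tp_btf hook whose task pointer is trusted (attach point, signal number, and value are illustrative):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    #define SIGUSR2 12  /* asm-generic value; normally from signal.h */

    extern int bpf_send_signal_task(struct task_struct *task, int sig,
                                    enum pid_type type, u64 value) __ksym;

    SEC("tp_btf/sched_process_exec")
    int BPF_PROG(on_exec, struct task_struct *p, pid_t old_pid,
                 struct linux_binprm *bprm)
    {
        /* the value rides in the queued siginfo, per the has_siginfo
         * path in do_bpf_send_signal() above */
        bpf_send_signal_task(p, SIGUSR2, PIDTYPE_TGID, 0);
        return 0;
    }
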