Lines Matching +full:pre +full:- +full:fetchable (in kernel/bpf/syscall.c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
5 #include <linux/bpf-cgroup.h>
34 #include <linux/bpf-netns.h>
44 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
45 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
46 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
47 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
48 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
78 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
83 * meant to be a future-proofing of bits.
92 return -E2BIG; in bpf_check_uarg_tail_zero()
99 actual_size - expected_size) == NULL; in bpf_check_uarg_tail_zero()
102 actual_size - expected_size); in bpf_check_uarg_tail_zero()
105 return res ? 0 : -E2BIG; in bpf_check_uarg_tail_zero()
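This check implements BPF's compatibility rule: a newer userspace may hand in a larger union bpf_attr than the kernel knows about, as long as every byte past the kernel's expected size is zero. A minimal userspace analogue of the in-memory half of that check (the real function also handles user pointers via copy_from_user(); memchr_inv() is kernel-internal, so a plain loop stands in for it here):

#include <stddef.h>

/* Return 0 if all bytes past expected_size are zero, -1 otherwise. */
static int check_tail_zero(const unsigned char *addr,
                           size_t expected_size, size_t actual_size)
{
	size_t i;

	if (actual_size <= expected_size)
		return 0;
	for (i = expected_size; i < actual_size; i++)
		if (addr[i] != 0)
			return -1;
	return 0;
}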
118 atomic64_inc(&map->writecnt); in bpf_map_write_active_inc()
123 atomic64_dec(&map->writecnt); in bpf_map_write_active_dec()
128 return atomic64_read(&map->writecnt) != 0; in bpf_map_write_active()
133 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_value_size()
134 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || in bpf_map_value_size()
135 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || in bpf_map_value_size()
136 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in bpf_map_value_size()
137 return round_up(map->value_size, 8) * num_possible_cpus(); in bpf_map_value_size()
141 return map->value_size; in bpf_map_value_size()
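Per-CPU map types copy round_up(value_size, 8) bytes for every possible CPU, so userspace lookup and update buffers must be sized the same way. A sketch using libbpf's real libbpf_num_possible_cpus() helper (the helper name alloc_percpu_value is illustrative):

#include <stdlib.h>
#include <bpf/libbpf.h>		/* libbpf_num_possible_cpus() */

/* Allocate a value buffer big enough for a per-CPU map lookup. */
static void *alloc_percpu_value(unsigned int value_size)
{
	int ncpus = libbpf_num_possible_cpus();
	size_t per_cpu = (value_size + 7) & ~7u;	/* round_up(value_size, 8) */

	if (ncpus < 0)
		return NULL;
	return calloc(ncpus, per_cpu);
}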
146 /* Wait for any running non-sleepable BPF programs to complete so that in maybe_wait_bpf_programs()
147 * userspace, when we return to it, knows that all non-sleepable in maybe_wait_bpf_programs()
154 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || in maybe_wait_bpf_programs()
155 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) in maybe_wait_bpf_programs()
171 for (i = 0, field = rec->fields; i < cnt; i++, field++) { in __bpf_obj_unpin_uptrs()
172 if (field->type != BPF_UPTR) in __bpf_obj_unpin_uptrs()
175 uptr_addr = obj + field->offset; in __bpf_obj_unpin_uptrs()
185 __bpf_obj_unpin_uptrs(rec, rec->cnt, obj); in bpf_obj_unpin_uptrs()
200 for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) { in bpf_obj_pin_uptrs()
201 if (field->type != BPF_UPTR) in bpf_obj_pin_uptrs()
204 uptr_addr = obj + field->offset; in bpf_obj_pin_uptrs()
209 t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id); in bpf_obj_pin_uptrs()
210 /* t->size was checked for zero before */ in bpf_obj_pin_uptrs()
211 if (check_add_overflow(start, t->size - 1, &end)) { in bpf_obj_pin_uptrs()
212 err = -EFAULT; in bpf_obj_pin_uptrs()
218 err = -EOPNOTSUPP; in bpf_obj_pin_uptrs()
227 err = -EOPNOTSUPP; in bpf_obj_pin_uptrs()
250 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || in bpf_map_update_value()
251 map->map_type == BPF_MAP_TYPE_ARENA || in bpf_map_update_value()
252 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_update_value()
253 return map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
254 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || in bpf_map_update_value()
255 map->map_type == BPF_MAP_TYPE_SOCKMAP) { in bpf_map_update_value()
263 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_update_value()
264 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_update_value()
266 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_update_value()
268 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_update_value()
274 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { in bpf_map_update_value()
277 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_update_value()
281 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_update_value()
282 map->map_type == BPF_MAP_TYPE_STACK || in bpf_map_update_value()
283 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in bpf_map_update_value()
284 err = map->ops->map_push_elem(map, value, flags); in bpf_map_update_value()
286 err = bpf_obj_pin_uptrs(map->record, value); in bpf_map_update_value()
289 err = map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
292 bpf_obj_unpin_uptrs(map->record, value); in bpf_map_update_value()
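Note the dispatch above: for queue, stack and bloom-filter maps, BPF_MAP_UPDATE_ELEM is routed to map_push_elem(), so from userspace an "update" with no key is a push. A minimal sketch, assuming map_fd refers to an existing BPF_MAP_TYPE_QUEUE holding u64 values:

#include <bpf/bpf.h>

/* Push one value onto a BPF queue map; the key is unused, so NULL
 * is passed. BPF_ANY pushes unconditionally. */
static int queue_push(int map_fd, __u64 value)
{
	return bpf_map_update_elem(map_fd, /*key=*/NULL, &value, BPF_ANY);
}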
310 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_copy_value()
311 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_copy_value()
313 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_copy_value()
315 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_copy_value()
317 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { in bpf_map_copy_value()
323 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_copy_value()
325 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_copy_value()
326 map->map_type == BPF_MAP_TYPE_STACK || in bpf_map_copy_value()
327 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in bpf_map_copy_value()
328 err = map->ops->map_peek_elem(map, value); in bpf_map_copy_value()
329 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_copy_value()
334 if (map->ops->map_lookup_elem_sys_only) in bpf_map_copy_value()
335 ptr = map->ops->map_lookup_elem_sys_only(map, key); in bpf_map_copy_value()
337 ptr = map->ops->map_lookup_elem(map, key); in bpf_map_copy_value()
341 err = -ENOENT; in bpf_map_copy_value()
430 map->map_type = attr->map_type; in bpf_map_init_from_attr()
431 map->key_size = attr->key_size; in bpf_map_init_from_attr()
432 map->value_size = attr->value_size; in bpf_map_init_from_attr()
433 map->max_entries = attr->max_entries; in bpf_map_init_from_attr()
434 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); in bpf_map_init_from_attr()
435 map->numa_node = bpf_map_attr_numa_node(attr); in bpf_map_init_from_attr()
436 map->map_extra = attr->map_extra; in bpf_map_init_from_attr()
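These assignments mirror the fields userspace fills in union bpf_attr for BPF_MAP_CREATE. A sketch of the raw syscall (libbpf's bpf_map_create() wraps the same thing; the helper name is illustrative):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int map_create_raw(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));		/* unused fields must be zero */
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = sizeof(__u32);
	attr.value_size  = sizeof(__u64);
	attr.max_entries = 64;

	return syscall(SYS_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}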
447 map->id = id; in bpf_map_alloc_id()
452 return -ENOSPC; in bpf_map_alloc_id()
462 * disappears - even if someone holds an fd to them they are unusable, in bpf_map_free_id()
466 if (!map->id) in bpf_map_free_id()
471 idr_remove(&map_idr, map->id); in bpf_map_free_id()
472 map->id = 0; in bpf_map_free_id()
482 * So we have to check map->objcg for being NULL each time it's in bpf_map_save_memcg()
486 map->objcg = get_obj_cgroup_from_current(); in bpf_map_save_memcg()
491 if (map->objcg) in bpf_map_release_memcg()
492 obj_cgroup_put(map->objcg); in bpf_map_release_memcg()
497 if (map->objcg) in bpf_map_get_memcg()
498 return get_mem_cgroup_from_objcg(map->objcg); in bpf_map_get_memcg()
593 ret = -ENOMEM; in bpf_map_alloc_pages()
609 if (f1->offset < f2->offset) in btf_field_cmp()
610 return -1; in btf_field_cmp()
611 else if (f1->offset > f2->offset) in btf_field_cmp()
621 if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask)) in btf_record_find()
623 field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp); in btf_record_find()
624 if (!field || !(field->type & field_mask)) in btf_record_find()
635 for (i = 0; i < rec->cnt; i++) { in btf_record_free()
636 switch (rec->fields[i].type) { in btf_record_free()
641 if (rec->fields[i].kptr.module) in btf_record_free()
642 module_put(rec->fields[i].kptr.module); in btf_record_free()
643 if (btf_is_kernel(rec->fields[i].kptr.btf)) in btf_record_free()
644 btf_put(rec->fields[i].kptr.btf); in btf_record_free()
666 btf_record_free(map->record); in bpf_map_free_record()
667 map->record = NULL; in bpf_map_free_record()
678 size = offsetof(struct btf_record, fields[rec->cnt]); in btf_record_dup()
681 return ERR_PTR(-ENOMEM); in btf_record_dup()
683 fields = rec->fields; in btf_record_dup()
684 new_rec->cnt = 0; in btf_record_dup()
685 for (i = 0; i < rec->cnt; i++) { in btf_record_dup()
694 ret = -ENXIO; in btf_record_dup()
709 ret = -EFAULT; in btf_record_dup()
713 new_rec->cnt++; in btf_record_dup()
730 if (rec_a->cnt != rec_b->cnt) in btf_record_equal()
732 size = offsetof(struct btf_record, fields[rec_a->cnt]); in btf_record_equal()
754 bpf_timer_cancel_and_free(obj + rec->timer_off); in bpf_obj_free_timer()
761 bpf_wq_cancel_and_free(obj + rec->wq_off); in bpf_obj_free_workqueue()
771 fields = rec->fields; in bpf_obj_free_fields()
772 for (i = 0; i < rec->cnt; i++) { in bpf_obj_free_fields()
775 void *field_ptr = obj + field->offset; in bpf_obj_free_fields()
796 if (!btf_is_kernel(field->kptr.btf)) { in bpf_obj_free_fields()
797 pointee_struct_meta = btf_find_struct_meta(field->kptr.btf, in bpf_obj_free_fields()
798 field->kptr.btf_id); in bpf_obj_free_fields()
800 pointee_struct_meta->record : NULL, in bpf_obj_free_fields()
803 field->kptr.dtor(xchgd_field); in bpf_obj_free_fields()
811 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) in bpf_obj_free_fields()
813 bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off); in bpf_obj_free_fields()
816 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) in bpf_obj_free_fields()
818 bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off); in bpf_obj_free_fields()
833 struct btf_record *rec = map->record; in bpf_map_free()
834 struct btf *btf = map->btf; in bpf_map_free()
841 map->ops->map_free(map); in bpf_map_free()
848 * Note that the btf_record stashed in map->inner_map_meta->record was in bpf_map_free()
872 if (atomic64_dec_and_test(&map->usercnt)) { in bpf_map_put_uref()
873 if (map->ops->map_release_uref) in bpf_map_put_uref()
874 map->ops->map_release_uref(map); in bpf_map_put_uref()
880 INIT_WORK(&map->work, bpf_map_free_deferred); in bpf_map_free_in_work()
884 queue_work(system_unbound_wq, &map->work); in bpf_map_free_in_work()
901 * (underlying map implementation ops->map_free() might sleep)
905 if (atomic64_dec_and_test(&map->refcnt)) { in bpf_map_put()
909 WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt)); in bpf_map_put()
910 if (READ_ONCE(map->free_after_mult_rcu_gp)) in bpf_map_put()
911 call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp); in bpf_map_put()
912 else if (READ_ONCE(map->free_after_rcu_gp)) in bpf_map_put()
913 call_rcu(&map->rcu, bpf_map_free_rcu_gp); in bpf_map_put()
928 struct bpf_map *map = filp->private_data; in bpf_map_release()
930 if (map->ops->map_release) in bpf_map_release()
931 map->ops->map_release(map, filp); in bpf_map_release()
939 fmode_t mode = fd_file(f)->f_mode; in map_get_sys_perms()
944 if (READ_ONCE(map->frozen)) in map_get_sys_perms()
953 return map->ops->map_mem_usage(map); in bpf_map_memory_usage()
958 struct bpf_map *map = filp->private_data; in bpf_map_show_fdinfo()
962 spin_lock(&map->owner.lock); in bpf_map_show_fdinfo()
963 type = map->owner.type; in bpf_map_show_fdinfo()
964 jited = map->owner.jited; in bpf_map_show_fdinfo()
965 spin_unlock(&map->owner.lock); in bpf_map_show_fdinfo()
978 map->map_type, in bpf_map_show_fdinfo()
979 map->key_size, in bpf_map_show_fdinfo()
980 map->value_size, in bpf_map_show_fdinfo()
981 map->max_entries, in bpf_map_show_fdinfo()
982 map->map_flags, in bpf_map_show_fdinfo()
983 (unsigned long long)map->map_extra, in bpf_map_show_fdinfo()
985 map->id, in bpf_map_show_fdinfo()
986 READ_ONCE(map->frozen)); in bpf_map_show_fdinfo()
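The seq_printf() calls above are what appears in /proc/<pid>/fdinfo/<fd> for a map fd, which makes fdinfo the simplest way to inspect a live map (map_type, key_size, value_size, max_entries, frozen, ...). A small reader sketch (hypothetical helper):

#include <stdio.h>

/* Dump the fdinfo text for one of our own map fds. */
static void dump_map_fdinfo(int map_fd)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", map_fd);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}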
1000 return -EINVAL; in bpf_dummy_read()
1009 return -EINVAL; in bpf_dummy_write()
1012 /* called for any extra memory-mapped regions (except initial) */
1015 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_open()
1017 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap_open()
1024 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_close()
1026 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap_close()
1037 struct bpf_map *map = filp->private_data; in bpf_map_mmap()
1040 if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record)) in bpf_map_mmap()
1041 return -ENOTSUPP; in bpf_map_mmap()
1043 if (!(vma->vm_flags & VM_SHARED)) in bpf_map_mmap()
1044 return -EINVAL; in bpf_map_mmap()
1046 mutex_lock(&map->freeze_mutex); in bpf_map_mmap()
1048 if (vma->vm_flags & VM_WRITE) { in bpf_map_mmap()
1049 if (map->frozen) { in bpf_map_mmap()
1050 err = -EPERM; in bpf_map_mmap()
1053 /* map is meant to be read-only, so do not allow mapping as in bpf_map_mmap()
1055 * reference and allows user-space to still modify it after in bpf_map_mmap()
1058 if (map->map_flags & BPF_F_RDONLY_PROG) { in bpf_map_mmap()
1059 err = -EACCES; in bpf_map_mmap()
1065 mutex_unlock(&map->freeze_mutex); in bpf_map_mmap()
1070 vma->vm_ops = &bpf_map_default_vmops; in bpf_map_mmap()
1071 vma->vm_private_data = map; in bpf_map_mmap()
1073 /* If mapping is read-only, then disallow potentially re-mapping with in bpf_map_mmap()
1075 * means that as far as BPF map's memory-mapped VMAs are concerned, in bpf_map_mmap()
1080 if (!(vma->vm_flags & VM_WRITE)) in bpf_map_mmap()
1083 err = map->ops->map_mmap(map, vma); in bpf_map_mmap()
1085 if (vma->vm_flags & VM_WRITE) in bpf_map_mmap()
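bpf_map_mmap() only succeeds for map types that implement map_mmap (e.g. arrays created with BPF_F_MMAPABLE), insists on MAP_SHARED, and counts writable mappings so a map mapped writably can no longer be frozen. A sketch, assuming such a map and a page-aligned length covering its values:

#include <sys/mman.h>

/* Map the values of a BPF_F_MMAPABLE array directly into our address
 * space; len must cover value_size * max_entries, rounded up to a
 * page. MAP_SHARED is mandatory per bpf_map_mmap(). */
static void *map_values_mmap(int map_fd, size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
}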
1094 struct bpf_map *map = filp->private_data; in bpf_map_poll()
1096 if (map->ops->map_poll) in bpf_map_poll()
1097 return map->ops->map_poll(map, filp, pts); in bpf_map_poll()
1106 struct bpf_map *map = filp->private_data; in bpf_get_unmapped_area()
1108 if (map->ops->map_get_unmapped_area) in bpf_get_unmapped_area()
1109 return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags); in bpf_get_unmapped_area()
1111 return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); in bpf_get_unmapped_area()
1137 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, in bpf_map_new_fd()
1144 return -EINVAL; in bpf_get_file_flag()
1154 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
1155 sizeof(attr->CMD##_LAST_FIELD), 0, \
1156 sizeof(*attr) - \
1157 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
1158 sizeof(attr->CMD##_LAST_FIELD)) != NULL
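This is the body of the CHECK_ATTR() macro: for each command it rejects the syscall if any attr byte past that command's last known field is nonzero, which is what lets a single union serve every command. Hand-expanded for BPF_MAP_FREEZE, whose last field is map_fd, it reads roughly (illustrative expansion, kernel context):

/* CHECK_ATTR(BPF_MAP_FREEZE), hand-expanded: BPF_MAP_FREEZE_LAST_FIELD
 * is map_fd, so every attr byte after map_fd must still be zero. */
bool reject = memchr_inv((void *)&attr->map_fd + sizeof(attr->map_fd), 0,
			 sizeof(*attr) -
			 offsetof(union bpf_attr, map_fd) -
			 sizeof(attr->map_fd)) != NULL;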
1173 return -EINVAL; in bpf_obj_name_cpy()
1179 return -EINVAL; in bpf_obj_name_cpy()
1181 return src - orig_src; in bpf_obj_name_cpy()
1189 return -ENOTSUPP; in map_check_no_btf()
1202 if (!key_type || key_size != map->key_size) in map_check_btf()
1203 return -EINVAL; in map_check_btf()
1206 if (!map->ops->map_check_btf) in map_check_btf()
1207 return -EINVAL; in map_check_btf()
1211 if (!value_type || value_size != map->value_size) in map_check_btf()
1212 return -EINVAL; in map_check_btf()
1214 map->record = btf_parse_fields(btf, value_type, in map_check_btf()
1217 map->value_size); in map_check_btf()
1218 if (!IS_ERR_OR_NULL(map->record)) { in map_check_btf()
1222 ret = -EPERM; in map_check_btf()
1225 if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) { in map_check_btf()
1226 ret = -EACCES; in map_check_btf()
1229 for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) { in map_check_btf()
1230 switch (map->record->field_mask & (1 << i)) { in map_check_btf()
1234 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1235 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
1236 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in map_check_btf()
1237 map->map_type != BPF_MAP_TYPE_SK_STORAGE && in map_check_btf()
1238 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && in map_check_btf()
1239 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && in map_check_btf()
1240 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { in map_check_btf()
1241 ret = -EOPNOTSUPP; in map_check_btf()
1247 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1248 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1249 map->map_type != BPF_MAP_TYPE_ARRAY) { in map_check_btf()
1250 ret = -EOPNOTSUPP; in map_check_btf()
1258 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1259 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && in map_check_btf()
1260 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1261 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH && in map_check_btf()
1262 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
1263 map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && in map_check_btf()
1264 map->map_type != BPF_MAP_TYPE_SK_STORAGE && in map_check_btf()
1265 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && in map_check_btf()
1266 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && in map_check_btf()
1267 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { in map_check_btf()
1268 ret = -EOPNOTSUPP; in map_check_btf()
1273 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) { in map_check_btf()
1274 ret = -EOPNOTSUPP; in map_check_btf()
1280 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1281 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1282 map->map_type != BPF_MAP_TYPE_ARRAY) { in map_check_btf()
1283 ret = -EOPNOTSUPP; in map_check_btf()
1289 ret = -EOPNOTSUPP; in map_check_btf()
1295 ret = btf_check_and_fixup_fields(btf, map->record); in map_check_btf()
1299 if (map->ops->map_check_btf) { in map_check_btf()
1300 ret = map->ops->map_check_btf(map, btf, key_type, value_type); in map_check_btf()
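The switch above is what rejects, for example, a bpf_spin_lock embedded in a map type that cannot support it. A BPF-side value layout that passes the spin-lock arm of these checks, in libbpf map-definition syntax (names illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct locked_val {
	struct bpf_spin_lock lock;	/* becomes a BPF_SPIN_LOCK field in the btf_record */
	__u64 counter;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);	/* one of the permitted types above */
	__uint(max_entries, 128);
	__type(key, __u32);
	__type(value, struct locked_val);
} locked_map SEC(".maps");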
1323 u32 map_type = attr->map_type; in map_create()
1331 return -EINVAL; in map_create()
1334 * to avoid per-map type checks tripping on unknown flag in map_create()
1336 token_flag = attr->map_flags & BPF_F_TOKEN_FD; in map_create()
1337 attr->map_flags &= ~BPF_F_TOKEN_FD; in map_create()
1339 if (attr->btf_vmlinux_value_type_id) { in map_create()
1340 if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS || in map_create()
1341 attr->btf_key_type_id || attr->btf_value_type_id) in map_create()
1342 return -EINVAL; in map_create()
1343 } else if (attr->btf_key_type_id && !attr->btf_value_type_id) { in map_create()
1344 return -EINVAL; in map_create()
1347 if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER && in map_create()
1348 attr->map_type != BPF_MAP_TYPE_ARENA && in map_create()
1349 attr->map_extra != 0) in map_create()
1350 return -EINVAL; in map_create()
1352 f_flags = bpf_get_file_flag(attr->map_flags); in map_create()
1359 return -EINVAL; in map_create()
1362 map_type = attr->map_type; in map_create()
1364 return -EINVAL; in map_create()
1368 return -EINVAL; in map_create()
1370 if (ops->map_alloc_check) { in map_create()
1371 err = ops->map_alloc_check(attr); in map_create()
1375 if (attr->map_ifindex) in map_create()
1377 if (!ops->map_mem_usage) in map_create()
1378 return -EINVAL; in map_create()
1381 token = bpf_token_get_from_fd(attr->map_token_fd); in map_create()
1387 * system-wide capabilities checks in map_create()
1390 !bpf_token_allow_map_type(token, attr->map_type)) { in map_create()
1396 err = -EPERM; in map_create()
1455 map = ops->map_alloc(attr); in map_create()
1460 map->ops = ops; in map_create()
1461 map->map_type = map_type; in map_create()
1463 err = bpf_obj_name_cpy(map->name, attr->map_name, in map_create()
1464 sizeof(attr->map_name)); in map_create()
1468 atomic64_set(&map->refcnt, 1); in map_create()
1469 atomic64_set(&map->usercnt, 1); in map_create()
1470 mutex_init(&map->freeze_mutex); in map_create()
1471 spin_lock_init(&map->owner.lock); in map_create()
1473 if (attr->btf_key_type_id || attr->btf_value_type_id || in map_create()
1477 * counter part. Thus, attr->btf_fd has in map_create()
1480 attr->btf_vmlinux_value_type_id) { in map_create()
1483 btf = btf_get_by_fd(attr->btf_fd); in map_create()
1490 err = -EACCES; in map_create()
1493 map->btf = btf; in map_create()
1495 if (attr->btf_value_type_id) { in map_create()
1496 err = map_check_btf(map, token, btf, attr->btf_key_type_id, in map_create()
1497 attr->btf_value_type_id); in map_create()
1502 map->btf_key_type_id = attr->btf_key_type_id; in map_create()
1503 map->btf_value_type_id = attr->btf_value_type_id; in map_create()
1504 map->btf_vmlinux_value_type_id = in map_create()
1505 attr->btf_vmlinux_value_type_id; in map_create()
1525 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. in map_create()
1544 atomic64_inc(&map->refcnt); in bpf_map_inc()
1550 atomic64_inc(&map->refcnt); in bpf_map_inc_with_uref()
1551 atomic64_inc(&map->usercnt); in bpf_map_inc_with_uref()
1585 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); in __bpf_map_inc_not_zero()
1587 return ERR_PTR(-ENOENT); in __bpf_map_inc_not_zero()
1589 atomic64_inc(&map->usercnt); in __bpf_map_inc_not_zero()
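atomic64_fetch_add_unless(&map->refcnt, 1, 0) is the increment-unless-zero idiom: it safely takes a reference to an object found through the IDR even though the last reference may be dropped concurrently. A C11 analogue of the idiom (sketch, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only while the count is nonzero; returns false if
 * the object already hit zero and may be getting freed. */
static bool ref_get_unless_zero(_Atomic long *refcnt)
{
	long old = atomic_load(refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;
		/* old was reloaded by the failed CAS; loop re-checks it */
	}
	return false;
}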
1606 return -ENOTSUPP; in bpf_stackmap_copy()
1615 return ERR_PTR(-EINVAL); in __bpf_copy_key()
1626 return ERR_PTR(-EINVAL); in ___bpf_copy_key()
1636 void __user *ukey = u64_to_user_ptr(attr->key); in map_lookup_elem()
1637 void __user *uvalue = u64_to_user_ptr(attr->value); in map_lookup_elem()
1644 return -EINVAL; in map_lookup_elem()
1646 if (attr->flags & ~BPF_F_LOCK) in map_lookup_elem()
1647 return -EINVAL; in map_lookup_elem()
1649 CLASS(fd, f)(attr->map_fd); in map_lookup_elem()
1654 return -EPERM; in map_lookup_elem()
1656 if ((attr->flags & BPF_F_LOCK) && in map_lookup_elem()
1657 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) in map_lookup_elem()
1658 return -EINVAL; in map_lookup_elem()
1660 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_elem()
1666 err = -ENOMEM; in map_lookup_elem()
1671 if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in map_lookup_elem()
1673 err = -EFAULT; in map_lookup_elem()
1675 err = bpf_map_copy_value(map, key, value, attr->flags); in map_lookup_elem()
1679 err = bpf_map_copy_value(map, key, value, attr->flags); in map_lookup_elem()
1683 err = -EFAULT; in map_lookup_elem()
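BPF_F_LOCK is accepted only when the value actually contains a bpf_spin_lock (the btf_record check above). libbpf exposes the flags-taking lookup as bpf_map_lookup_elem_flags(); a sketch:

#include <bpf/bpf.h>

/* Copy out a value while the kernel holds its bpf_spin_lock, so the
 * read is consistent with concurrent BPF-side locked updates. */
static int lookup_locked(int map_fd, const void *key, void *value)
{
	return bpf_map_lookup_elem_flags(map_fd, key, value, BPF_F_LOCK);
}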
1701 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); in map_update_elem()
1702 bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel); in map_update_elem()
1709 return -EINVAL; in map_update_elem()
1711 CLASS(fd, f)(attr->map_fd); in map_update_elem()
1717 err = -EPERM; in map_update_elem()
1721 if ((attr->flags & BPF_F_LOCK) && in map_update_elem()
1722 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in map_update_elem()
1723 err = -EINVAL; in map_update_elem()
1727 key = ___bpf_copy_key(ukey, map->key_size); in map_update_elem()
1740 err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags); in map_update_elem()
1756 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); in map_delete_elem()
1762 return -EINVAL; in map_delete_elem()
1764 CLASS(fd, f)(attr->map_fd); in map_delete_elem()
1770 err = -EPERM; in map_delete_elem()
1774 key = ___bpf_copy_key(ukey, map->key_size); in map_delete_elem()
1784 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in map_delete_elem()
1786 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1792 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1809 void __user *ukey = u64_to_user_ptr(attr->key); in map_get_next_key()
1810 void __user *unext_key = u64_to_user_ptr(attr->next_key); in map_get_next_key()
1816 return -EINVAL; in map_get_next_key()
1818 CLASS(fd, f)(attr->map_fd); in map_get_next_key()
1823 return -EPERM; in map_get_next_key()
1826 key = __bpf_copy_key(ukey, map->key_size); in map_get_next_key()
1833 err = -ENOMEM; in map_get_next_key()
1834 next_key = kvmalloc(map->key_size, GFP_USER); in map_get_next_key()
1844 err = map->ops->map_get_next_key(map, key, next_key); in map_get_next_key()
1850 err = -EFAULT; in map_get_next_key()
1851 if (copy_to_user(unext_key, next_key, map->key_size) != 0) in map_get_next_key()
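map_get_next_key() returns the first key when given a NULL (or no longer existing) key and -ENOENT at the end, which gives the canonical iteration loop. A sketch for a map with u32 keys:

#include <bpf/bpf.h>

/* Visit every key of a map. Keys inserted or deleted concurrently
 * may be skipped or seen twice; that is inherent to this interface. */
static void for_each_key(int map_fd, void (*cb)(__u32 key))
{
	__u32 key, next;
	int err;

	err = bpf_map_get_next_key(map_fd, NULL, &next);	/* first key */
	while (!err) {
		key = next;
		cb(key);
		err = bpf_map_get_next_key(map_fd, &key, &next);
	}
	/* err == -ENOENT here means clean end of iteration */
}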
1867 void __user *keys = u64_to_user_ptr(attr->batch.keys); in generic_map_delete_batch()
1872 if (attr->batch.elem_flags & ~BPF_F_LOCK) in generic_map_delete_batch()
1873 return -EINVAL; in generic_map_delete_batch()
1875 if ((attr->batch.elem_flags & BPF_F_LOCK) && in generic_map_delete_batch()
1876 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in generic_map_delete_batch()
1877 return -EINVAL; in generic_map_delete_batch()
1880 max_count = attr->batch.count; in generic_map_delete_batch()
1884 if (put_user(0, &uattr->batch.count)) in generic_map_delete_batch()
1885 return -EFAULT; in generic_map_delete_batch()
1887 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_delete_batch()
1889 return -ENOMEM; in generic_map_delete_batch()
1892 err = -EFAULT; in generic_map_delete_batch()
1893 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_delete_batch()
1894 map->key_size)) in generic_map_delete_batch()
1904 err = map->ops->map_delete_elem(map, key); in generic_map_delete_batch()
1911 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) in generic_map_delete_batch()
1912 err = -EFAULT; in generic_map_delete_batch()
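generic_map_delete_batch() deletes keys one at a time and always writes back how many succeeded, so a partial failure is visible to the caller in batch.count. Through libbpf (sketch):

#include <bpf/bpf.h>

/* Delete *n keys; on return, *n holds how many were actually
 * deleted before any error occurred. */
static int delete_keys(int map_fd, __u32 *keys, __u32 *n)
{
	return bpf_map_delete_batch(map_fd, keys, n, NULL);
}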
1923 void __user *values = u64_to_user_ptr(attr->batch.values); in generic_map_update_batch()
1924 void __user *keys = u64_to_user_ptr(attr->batch.keys); in generic_map_update_batch()
1929 if (attr->batch.elem_flags & ~BPF_F_LOCK) in generic_map_update_batch()
1930 return -EINVAL; in generic_map_update_batch()
1932 if ((attr->batch.elem_flags & BPF_F_LOCK) && in generic_map_update_batch()
1933 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in generic_map_update_batch()
1934 return -EINVAL; in generic_map_update_batch()
1939 max_count = attr->batch.count; in generic_map_update_batch()
1943 if (put_user(0, &uattr->batch.count)) in generic_map_update_batch()
1944 return -EFAULT; in generic_map_update_batch()
1946 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_update_batch()
1948 return -ENOMEM; in generic_map_update_batch()
1953 return -ENOMEM; in generic_map_update_batch()
1957 err = -EFAULT; in generic_map_update_batch()
1958 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_update_batch()
1959 map->key_size) || in generic_map_update_batch()
1964 attr->batch.elem_flags); in generic_map_update_batch()
1971 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) in generic_map_update_batch()
1972 err = -EFAULT; in generic_map_update_batch()
1984 void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch); in generic_map_lookup_batch()
1985 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); in generic_map_lookup_batch()
1986 void __user *values = u64_to_user_ptr(attr->batch.values); in generic_map_lookup_batch()
1987 void __user *keys = u64_to_user_ptr(attr->batch.keys); in generic_map_lookup_batch()
1992 if (attr->batch.elem_flags & ~BPF_F_LOCK) in generic_map_lookup_batch()
1993 return -EINVAL; in generic_map_lookup_batch()
1995 if ((attr->batch.elem_flags & BPF_F_LOCK) && in generic_map_lookup_batch()
1996 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) in generic_map_lookup_batch()
1997 return -EINVAL; in generic_map_lookup_batch()
2001 max_count = attr->batch.count; in generic_map_lookup_batch()
2005 if (put_user(0, &uattr->batch.count)) in generic_map_lookup_batch()
2006 return -EFAULT; in generic_map_lookup_batch()
2008 buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
2010 return -ENOMEM; in generic_map_lookup_batch()
2012 buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
2015 return -ENOMEM; in generic_map_lookup_batch()
2018 err = -EFAULT; in generic_map_lookup_batch()
2020 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) in generic_map_lookup_batch()
2023 value = key + map->key_size; in generic_map_lookup_batch()
2029 err = map->ops->map_get_next_key(map, prev_key, key); in generic_map_lookup_batch()
2034 attr->batch.elem_flags); in generic_map_lookup_batch()
2036 if (err == -ENOENT) in generic_map_lookup_batch()
2042 if (copy_to_user(keys + cp * map->key_size, key, in generic_map_lookup_batch()
2043 map->key_size)) { in generic_map_lookup_batch()
2044 err = -EFAULT; in generic_map_lookup_batch()
2048 err = -EFAULT; in generic_map_lookup_batch()
2061 if (err == -EFAULT) in generic_map_lookup_batch()
2064 if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) || in generic_map_lookup_batch()
2065 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) in generic_map_lookup_batch()
2066 err = -EFAULT; in generic_map_lookup_batch()
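The batch protocol visible above: a NULL in_batch starts from the beginning, out_batch returns the cursor for the next call, count is in/out, and -ENOENT marks the final (possibly partial) batch rather than a failure. A libbpf sketch for a u32->u64 map:

#include <errno.h>
#include <bpf/bpf.h>

enum { BATCH_SZ = 64 };

static int dump_all(int map_fd)
{
	__u32 batch, count;
	__u32 keys[BATCH_SZ];
	__u64 vals[BATCH_SZ];
	void *in = NULL;		/* NULL in_batch: start from the top */
	int err;

	do {
		count = BATCH_SZ;
		err = bpf_map_lookup_batch(map_fd, in, &batch,
					   keys, vals, &count, NULL);
		if (err && err != -ENOENT)
			return err;
		/* consume keys[0..count-1] / vals[0..count-1] here */
		in = &batch;		/* resume after the returned cursor */
	} while (!err);
	return 0;
}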
2078 void __user *ukey = u64_to_user_ptr(attr->key); in map_lookup_and_delete_elem()
2079 void __user *uvalue = u64_to_user_ptr(attr->value); in map_lookup_and_delete_elem()
2086 return -EINVAL; in map_lookup_and_delete_elem()
2088 if (attr->flags & ~BPF_F_LOCK) in map_lookup_and_delete_elem()
2089 return -EINVAL; in map_lookup_and_delete_elem()
2091 CLASS(fd, f)(attr->map_fd); in map_lookup_and_delete_elem()
2098 err = -EPERM; in map_lookup_and_delete_elem()
2102 if (attr->flags && in map_lookup_and_delete_elem()
2103 (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
2104 map->map_type == BPF_MAP_TYPE_STACK)) { in map_lookup_and_delete_elem()
2105 err = -EINVAL; in map_lookup_and_delete_elem()
2109 if ((attr->flags & BPF_F_LOCK) && in map_lookup_and_delete_elem()
2110 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in map_lookup_and_delete_elem()
2111 err = -EINVAL; in map_lookup_and_delete_elem()
2115 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_and_delete_elem()
2123 err = -ENOMEM; in map_lookup_and_delete_elem()
2128 err = -ENOTSUPP; in map_lookup_and_delete_elem()
2129 if (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
2130 map->map_type == BPF_MAP_TYPE_STACK) { in map_lookup_and_delete_elem()
2131 err = map->ops->map_pop_elem(map, value); in map_lookup_and_delete_elem()
2132 } else if (map->map_type == BPF_MAP_TYPE_HASH || in map_lookup_and_delete_elem()
2133 map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in map_lookup_and_delete_elem()
2134 map->map_type == BPF_MAP_TYPE_LRU_HASH || in map_lookup_and_delete_elem()
2135 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in map_lookup_and_delete_elem()
2139 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); in map_lookup_and_delete_elem()
2149 err = -EFAULT; in map_lookup_and_delete_elem()
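For queue and stack maps this command routes to map_pop_elem(), so lookup-and-delete with a NULL key is a pop; for the hash variants it atomically removes and returns the element. A queue-pop sketch:

#include <bpf/bpf.h>

/* Pop the oldest element from a BPF_MAP_TYPE_QUEUE map holding u64
 * values. Returns -ENOENT when the queue is empty. */
static int queue_pop(int map_fd, __u64 *value)
{
	return bpf_map_lookup_and_delete_elem(map_fd, /*key=*/NULL, value);
}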
2172 return -EINVAL; in map_freeze()
2174 CLASS(fd, f)(attr->map_fd); in map_freeze()
2179 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) in map_freeze()
2180 return -ENOTSUPP; in map_freeze()
2183 return -EPERM; in map_freeze()
2185 mutex_lock(&map->freeze_mutex); in map_freeze()
2187 err = -EBUSY; in map_freeze()
2190 if (READ_ONCE(map->frozen)) { in map_freeze()
2191 err = -EBUSY; in map_freeze()
2195 WRITE_ONCE(map->frozen, true); in map_freeze()
2197 mutex_unlock(&map->freeze_mutex); in map_freeze()
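map_freeze() fails with -EBUSY while a writable mapping or in-flight write exists (the write_active check above) and is permanent once set, so the usual pattern is populate-then-freeze for read-only configuration. A sketch:

#include <bpf/bpf.h>

/* Populate slot 0, then make the map read-only from userspace.
 * After bpf_map_freeze(), syscall-side updates return -EPERM. */
static int populate_and_freeze(int map_fd)
{
	__u32 key = 0;
	__u64 val = 42;
	int err;

	err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
	if (err)
		return err;
	return bpf_map_freeze(map_fd);
}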
2217 return -EINVAL; in find_prog_type()
2221 return -EINVAL; in find_prog_type()
2223 if (!bpf_prog_is_offloaded(prog->aux)) in find_prog_type()
2224 prog->aux->ops = ops; in find_prog_type()
2226 prog->aux->ops = &bpf_offload_prog_ops; in find_prog_type()
2227 prog->type = type; in find_prog_type()
2256 audit_log_format(ab, "prog-id=%u op=%s", in bpf_audit_prog()
2257 prog->aux->id, bpf_audit_str[op]); in bpf_audit_prog()
2269 prog->aux->id = id; in bpf_prog_alloc_id()
2275 return -ENOSPC; in bpf_prog_alloc_id()
2286 * disappears - even if someone grabs an fd to them they are unusable, in bpf_prog_free_id()
2289 if (!prog->aux->id) in bpf_prog_free_id()
2293 idr_remove(&prog_idr, prog->aux->id); in bpf_prog_free_id()
2294 prog->aux->id = 0; in bpf_prog_free_id()
2302 kvfree(aux->func_info); in __bpf_prog_put_rcu()
2303 kfree(aux->func_info_aux); in __bpf_prog_put_rcu()
2304 free_uid(aux->user); in __bpf_prog_put_rcu()
2305 security_bpf_prog_free(aux->prog); in __bpf_prog_put_rcu()
2306 bpf_prog_free(aux->prog); in __bpf_prog_put_rcu()
2312 btf_put(prog->aux->btf); in __bpf_prog_put_noref()
2313 module_put(prog->aux->mod); in __bpf_prog_put_noref()
2314 kvfree(prog->aux->jited_linfo); in __bpf_prog_put_noref()
2315 kvfree(prog->aux->linfo); in __bpf_prog_put_noref()
2316 kfree(prog->aux->kfunc_tab); in __bpf_prog_put_noref()
2317 if (prog->aux->attach_btf) in __bpf_prog_put_noref()
2318 btf_put(prog->aux->attach_btf); in __bpf_prog_put_noref()
2321 if (prog->sleepable) in __bpf_prog_put_noref()
2322 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); in __bpf_prog_put_noref()
2324 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); in __bpf_prog_put_noref()
2326 __bpf_prog_put_rcu(&prog->aux->rcu); in __bpf_prog_put_noref()
2336 prog = aux->prog; in bpf_prog_put_deferred()
2345 struct bpf_prog_aux *aux = prog->aux; in __bpf_prog_put()
2347 if (atomic64_dec_and_test(&aux->refcnt)) { in __bpf_prog_put()
2349 INIT_WORK(&aux->work, bpf_prog_put_deferred); in __bpf_prog_put()
2350 schedule_work(&aux->work); in __bpf_prog_put()
2352 bpf_prog_put_deferred(&aux->work); in __bpf_prog_put()
2365 struct bpf_prog *prog = filp->private_data; in bpf_prog_release()
2382 stats = this_cpu_ptr(prog->stats); in bpf_prog_inc_misses_counter()
2383 flags = u64_stats_update_begin_irqsave(&stats->syncp); in bpf_prog_inc_misses_counter()
2384 u64_stats_inc(&stats->misses); in bpf_prog_inc_misses_counter()
2385 u64_stats_update_end_irqrestore(&stats->syncp, flags); in bpf_prog_inc_misses_counter()
2399 st = per_cpu_ptr(prog->stats, cpu); in bpf_prog_get_stats()
2401 start = u64_stats_fetch_begin(&st->syncp); in bpf_prog_get_stats()
2402 tnsecs = u64_stats_read(&st->nsecs); in bpf_prog_get_stats()
2403 tcnt = u64_stats_read(&st->cnt); in bpf_prog_get_stats()
2404 tmisses = u64_stats_read(&st->misses); in bpf_prog_get_stats()
2405 } while (u64_stats_fetch_retry(&st->syncp, start)); in bpf_prog_get_stats()
2410 stats->nsecs = nsecs; in bpf_prog_get_stats()
2411 stats->cnt = cnt; in bpf_prog_get_stats()
2412 stats->misses = misses; in bpf_prog_get_stats()
2418 const struct bpf_prog *prog = filp->private_data; in bpf_prog_show_fdinfo()
2419 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; in bpf_prog_show_fdinfo()
2423 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); in bpf_prog_show_fdinfo()
2434 prog->type, in bpf_prog_show_fdinfo()
2435 prog->jited, in bpf_prog_show_fdinfo()
2437 prog->pages * 1ULL << PAGE_SHIFT, in bpf_prog_show_fdinfo()
2438 prog->aux->id, in bpf_prog_show_fdinfo()
2442 prog->aux->verified_insns); in bpf_prog_show_fdinfo()
2463 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, in bpf_prog_new_fd()
2469 atomic64_add(i, &prog->aux->refcnt); in bpf_prog_add()
2480 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); in bpf_prog_sub()
2486 atomic64_inc(&prog->aux->refcnt); in bpf_prog_inc()
2495 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); in bpf_prog_inc_not_zero()
2498 return ERR_PTR(-ENOENT); in bpf_prog_inc_not_zero()
2511 if (prog->type != *attach_type) in bpf_prog_get_ok()
2513 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv) in bpf_prog_get_ok()
2526 return ERR_PTR(-EBADF); in __bpf_prog_get()
2527 if (fd_file(f)->f_op != &bpf_prog_fops) in __bpf_prog_get()
2528 return ERR_PTR(-EINVAL); in __bpf_prog_get()
2530 prog = fd_file(f)->private_data; in __bpf_prog_get()
2532 return ERR_PTR(-EINVAL); in __bpf_prog_get()
2564 switch (attr->prog_type) { in bpf_prog_load_fixup_attach_type()
2567 * exist so checking for non-zero is the way to go here. in bpf_prog_load_fixup_attach_type()
2569 if (!attr->expected_attach_type) in bpf_prog_load_fixup_attach_type()
2570 attr->expected_attach_type = in bpf_prog_load_fixup_attach_type()
2574 if (!attr->expected_attach_type) in bpf_prog_load_fixup_attach_type()
2575 attr->expected_attach_type = in bpf_prog_load_fixup_attach_type()
2589 return -EINVAL; in bpf_prog_load_check_attach()
2592 return -EINVAL; in bpf_prog_load_check_attach()
2601 return -EINVAL; in bpf_prog_load_check_attach()
2606 return -EINVAL; in bpf_prog_load_check_attach()
2610 return -EINVAL; in bpf_prog_load_check_attach()
2621 return -EINVAL; in bpf_prog_load_check_attach()
2644 return -EINVAL; in bpf_prog_load_check_attach()
2652 return -EINVAL; in bpf_prog_load_check_attach()
2660 return -EINVAL; in bpf_prog_load_check_attach()
2665 return -EINVAL; in bpf_prog_load_check_attach()
2672 return -EINVAL; in bpf_prog_load_check_attach()
2677 return -EINVAL; in bpf_prog_load_check_attach()
2681 return -EINVAL; in bpf_prog_load_check_attach()
2742 enum bpf_prog_type type = attr->prog_type; in bpf_prog_load()
2751 return -EINVAL; in bpf_prog_load()
2753 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | in bpf_prog_load()
2762 return -EINVAL; in bpf_prog_load()
2766 if (attr->prog_flags & BPF_F_TOKEN_FD) { in bpf_prog_load()
2767 token = bpf_token_get_from_fd(attr->prog_token_fd); in bpf_prog_load()
2772 * system-wide capabilities checks in bpf_prog_load()
2775 !bpf_token_allow_prog_type(token, attr->prog_type, in bpf_prog_load()
2776 attr->expected_attach_type)) { in bpf_prog_load()
2783 err = -EPERM; in bpf_prog_load()
2786 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && in bpf_prog_load()
2800 if (attr->insn_cnt == 0 || in bpf_prog_load()
2801 attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) { in bpf_prog_load()
2802 err = -E2BIG; in bpf_prog_load()
2818 if (attr->attach_prog_fd) { in bpf_prog_load()
2819 dst_prog = bpf_prog_get(attr->attach_prog_fd); in bpf_prog_load()
2822 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); in bpf_prog_load()
2824 err = -EINVAL; in bpf_prog_load()
2832 err = -ENOTSUPP; in bpf_prog_load()
2836 } else if (attr->attach_btf_id) { in bpf_prog_load()
2844 err = -EINVAL; in bpf_prog_load()
2850 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, in bpf_prog_load()
2851 attach_btf, attr->attach_btf_id, in bpf_prog_load()
2857 err = -EINVAL; in bpf_prog_load()
2862 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); in bpf_prog_load()
2868 err = -EINVAL; in bpf_prog_load()
2872 prog->expected_attach_type = attr->expected_attach_type; in bpf_prog_load()
2873 prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE); in bpf_prog_load()
2874 prog->aux->attach_btf = attach_btf; in bpf_prog_load()
2875 prog->aux->attach_btf_id = attr->attach_btf_id; in bpf_prog_load()
2876 prog->aux->dst_prog = dst_prog; in bpf_prog_load()
2877 prog->aux->dev_bound = !!attr->prog_ifindex; in bpf_prog_load()
2878 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; in bpf_prog_load()
2880 /* move token into prog->aux, reuse taken refcnt */ in bpf_prog_load()
2881 prog->aux->token = token; in bpf_prog_load()
2884 prog->aux->user = get_current_user(); in bpf_prog_load()
2885 prog->len = attr->insn_cnt; in bpf_prog_load()
2887 err = -EFAULT; in bpf_prog_load()
2888 if (copy_from_bpfptr(prog->insns, in bpf_prog_load()
2889 make_bpfptr(attr->insns, uattr.is_kernel), in bpf_prog_load()
2894 make_bpfptr(attr->license, uattr.is_kernel), in bpf_prog_load()
2895 sizeof(license) - 1) < 0) in bpf_prog_load()
2897 license[sizeof(license) - 1] = 0; in bpf_prog_load()
2899 /* eBPF programs must be GPL compatible to use GPL-ed functions */ in bpf_prog_load()
2900 prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0; in bpf_prog_load()
2902 prog->orig_prog = NULL; in bpf_prog_load()
2903 prog->jited = 0; in bpf_prog_load()
2905 atomic64_set(&prog->aux->refcnt, 1); in bpf_prog_load()
2907 if (bpf_prog_is_dev_bound(prog->aux)) { in bpf_prog_load()
2914 bpf_prog_is_dev_bound(dst_prog->aux)) { in bpf_prog_load()
2932 dst_prog->type == BPF_PROG_TYPE_TRACING) { in bpf_prog_load()
2933 prog->aux->attach_tracing_prog = true; in bpf_prog_load()
2941 prog->aux->load_time = ktime_get_boottime_ns(); in bpf_prog_load()
2942 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, in bpf_prog_load()
2943 sizeof(attr->prog_name)); in bpf_prog_load()
2992 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt); in bpf_prog_load()
2998 free_uid(prog->aux->user); in bpf_prog_load()
2999 if (prog->aux->attach_btf) in bpf_prog_load()
3000 btf_put(prog->aux->attach_btf); in bpf_prog_load()
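The fields consumed above are the minimum BPF_PROG_LOAD needs: instructions, count, license and a name, plus optional attach BTF and token fd. The smallest loadable program is "r0 = 0; exit"; a sketch through libbpf's bpf_prog_load() wrapper:

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Load a two-instruction socket filter that drops everything. */
static int load_trivial_prog(void)
{
	struct bpf_insn insns[] = {
		/* r0 = 0 */
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = 0 },
		/* exit */
		{ .code = BPF_JMP | BPF_EXIT },
	};

	return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "trivial", "GPL",
			     insns, sizeof(insns) / sizeof(insns[0]), NULL);
}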
3013 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD) in bpf_obj_pin()
3014 return -EINVAL; in bpf_obj_pin()
3017 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) in bpf_obj_pin()
3018 return -EINVAL; in bpf_obj_pin()
3020 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; in bpf_obj_pin()
3021 return bpf_obj_pin_user(attr->bpf_fd, path_fd, in bpf_obj_pin()
3022 u64_to_user_ptr(attr->pathname)); in bpf_obj_pin()
3029 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || in bpf_obj_get()
3030 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD)) in bpf_obj_get()
3031 return -EINVAL; in bpf_obj_get()
3034 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) in bpf_obj_get()
3035 return -EINVAL; in bpf_obj_get()
3037 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; in bpf_obj_get()
3038 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname), in bpf_obj_get()
3039 attr->file_flags); in bpf_obj_get()
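These two commands wrap bpf_obj_pin_user()/bpf_obj_get_user(); with BPF_F_PATH_FD the pathname is resolved relative to path_fd instead of the current directory. Plain usage through libbpf (the bpffs path is illustrative):

#include <bpf/bpf.h>

/* Pin an fd under bpffs, then reopen the same object by path. */
static int pin_and_reopen(int map_fd)
{
	int err = bpf_obj_pin(map_fd, "/sys/fs/bpf/example_map");

	if (err)
		return err;
	return bpf_obj_get("/sys/fs/bpf/example_map");	/* new fd, same map */
}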
3045 * detachment due to RCU Tasks Trace-based lifetime protection scheme.
3046 * BPF program itself can be non-sleepable, yet, because it's transitively
3054 WARN_ON(ops->dealloc && ops->dealloc_deferred); in bpf_link_init_sleepable()
3055 atomic64_set(&link->refcnt, 1); in bpf_link_init_sleepable()
3056 link->type = type; in bpf_link_init_sleepable()
3057 link->sleepable = sleepable; in bpf_link_init_sleepable()
3058 link->id = 0; in bpf_link_init_sleepable()
3059 link->ops = ops; in bpf_link_init_sleepable()
3060 link->prog = prog; in bpf_link_init_sleepable()
3090 primer->link->prog = NULL; in bpf_link_cleanup()
3091 bpf_link_free_id(primer->id); in bpf_link_cleanup()
3092 fput(primer->file); in bpf_link_cleanup()
3093 put_unused_fd(primer->fd); in bpf_link_cleanup()
3098 atomic64_inc(&link->refcnt); in bpf_link_inc()
3104 if (link->prog) in bpf_link_dealloc()
3105 bpf_prog_put(link->prog); in bpf_link_dealloc()
3108 if (link->ops->dealloc_deferred) in bpf_link_dealloc()
3109 link->ops->dealloc_deferred(link); in bpf_link_dealloc()
3111 link->ops->dealloc(link); in bpf_link_dealloc()
3132 const struct bpf_link_ops *ops = link->ops; in bpf_link_free()
3134 bpf_link_free_id(link->id); in bpf_link_free()
3136 if (link->prog) in bpf_link_free()
3137 ops->release(link); in bpf_link_free()
3138 if (ops->dealloc_deferred) { in bpf_link_free()
3147 if (link->sleepable || (link->prog && link->prog->sleepable)) in bpf_link_free()
3148 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp); in bpf_link_free()
3150 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp); in bpf_link_free()
3151 } else if (ops->dealloc) { in bpf_link_free()
3168 if (!atomic64_dec_and_test(&link->refcnt)) in bpf_link_put()
3171 INIT_WORK(&link->work, bpf_link_put_deferred); in bpf_link_put()
3172 schedule_work(&link->work); in bpf_link_put()
3178 if (!atomic64_dec_and_test(&link->refcnt)) in bpf_link_put_direct()
3185 struct bpf_link *link = filp->private_data; in bpf_link_release()
3205 const struct bpf_link *link = filp->private_data; in bpf_link_show_fdinfo()
3206 const struct bpf_prog *prog = link->prog; in bpf_link_show_fdinfo()
3207 enum bpf_link_type type = link->type; in bpf_link_show_fdinfo()
3208 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; in bpf_link_show_fdinfo()
3216 seq_printf(m, "link_id:\t%u\n", link->id); in bpf_link_show_fdinfo()
3219 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); in bpf_link_show_fdinfo()
3224 prog->aux->id); in bpf_link_show_fdinfo()
3226 if (link->ops->show_fdinfo) in bpf_link_show_fdinfo()
3227 link->ops->show_fdinfo(link, m); in bpf_link_show_fdinfo()
3233 struct bpf_link *link = file->private_data; in bpf_link_poll()
3235 return link->ops->poll(file, pts); in bpf_link_poll()
3270 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
3273 * user-space, if bpf_link is successfully attached. If not, bpf_link and
3274 * pre-allocated resources are to be freed with bpf_cleanup() call. All the
3300 link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops, in bpf_link_prime()
3308 primer->link = link; in bpf_link_prime()
3309 primer->file = file; in bpf_link_prime()
3310 primer->fd = fd; in bpf_link_prime()
3311 primer->id = id; in bpf_link_prime()
3317 /* make bpf_link fetchable by ID */ in bpf_link_settle()
3319 primer->link->id = primer->id; in bpf_link_settle()
3321 /* make bpf_link fetchable by FD */ in bpf_link_settle()
3322 fd_install(primer->fd, primer->file); in bpf_link_settle()
3324 return primer->fd; in bpf_link_settle()
3329 return anon_inode_getfd("bpf-link", in bpf_link_new_fd()
3330 link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops, in bpf_link_new_fd()
3340 return ERR_PTR(-EBADF); in bpf_link_get_from_fd()
3341 if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll) in bpf_link_get_from_fd()
3342 return ERR_PTR(-EINVAL); in bpf_link_get_from_fd()
3344 link = fd_file(f)->private_data; in bpf_link_get_from_fd()
3355 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, in bpf_tracing_link_release()
3356 tr_link->trampoline, in bpf_tracing_link_release()
3357 tr_link->tgt_prog)); in bpf_tracing_link_release()
3359 bpf_trampoline_put(tr_link->trampoline); in bpf_tracing_link_release()
3362 if (tr_link->tgt_prog) in bpf_tracing_link_release()
3363 bpf_prog_put(tr_link->tgt_prog); in bpf_tracing_link_release()
3381 bpf_trampoline_unpack_key(tr_link->trampoline->key, in bpf_tracing_link_show_fdinfo()
3387 tr_link->attach_type, in bpf_tracing_link_show_fdinfo()
3398 info->tracing.attach_type = tr_link->attach_type; in bpf_tracing_link_fill_link_info()
3399 bpf_trampoline_unpack_key(tr_link->trampoline->key, in bpf_tracing_link_fill_link_info()
3400 &info->tracing.target_obj_id, in bpf_tracing_link_fill_link_info()
3401 &info->tracing.target_btf_id); in bpf_tracing_link_fill_link_info()
3425 switch (prog->type) { in bpf_tracing_prog_attach()
3427 if (prog->expected_attach_type != BPF_TRACE_FENTRY && in bpf_tracing_prog_attach()
3428 prog->expected_attach_type != BPF_TRACE_FEXIT && in bpf_tracing_prog_attach()
3429 prog->expected_attach_type != BPF_MODIFY_RETURN) { in bpf_tracing_prog_attach()
3430 err = -EINVAL; in bpf_tracing_prog_attach()
3435 if (prog->expected_attach_type != 0) { in bpf_tracing_prog_attach()
3436 err = -EINVAL; in bpf_tracing_prog_attach()
3441 if (prog->expected_attach_type != BPF_LSM_MAC) { in bpf_tracing_prog_attach()
3442 err = -EINVAL; in bpf_tracing_prog_attach()
3447 err = -EINVAL; in bpf_tracing_prog_attach()
3452 err = -EINVAL; in bpf_tracing_prog_attach()
3463 if (prog->type != BPF_PROG_TYPE_EXT) { in bpf_tracing_prog_attach()
3464 err = -EINVAL; in bpf_tracing_prog_attach()
3480 err = -ENOMEM; in bpf_tracing_prog_attach()
3483 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, in bpf_tracing_prog_attach()
3485 link->attach_type = prog->expected_attach_type; in bpf_tracing_prog_attach()
3486 link->link.cookie = bpf_cookie; in bpf_tracing_prog_attach()
3488 mutex_lock(&prog->aux->dst_mutex); in bpf_tracing_prog_attach()
3492 * - if prog->aux->dst_trampoline is set, the program was just loaded in bpf_tracing_prog_attach()
3494 * in prog->aux in bpf_tracing_prog_attach()
3496 * - if prog->aux->dst_trampoline is NULL, the program has already been in bpf_tracing_prog_attach()
3499 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + in bpf_tracing_prog_attach()
3502 * - if tgt_prog == NULL when this function was called using the old in bpf_tracing_prog_attach()
3503 * raw_tracepoint_open API, and we need a target from prog->aux in bpf_tracing_prog_attach()
3505 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program in bpf_tracing_prog_attach()
3506 * was detached and is going for re-attachment. in bpf_tracing_prog_attach()
3508 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf in bpf_tracing_prog_attach()
3512 if (!prog->aux->dst_trampoline && !tgt_prog) { in bpf_tracing_prog_attach()
3514 * Allow re-attach for TRACING and LSM programs. If it's in bpf_tracing_prog_attach()
3517 * re-attach in separate code path. in bpf_tracing_prog_attach()
3519 if (prog->type != BPF_PROG_TYPE_TRACING && in bpf_tracing_prog_attach()
3520 prog->type != BPF_PROG_TYPE_LSM) { in bpf_tracing_prog_attach()
3521 err = -EINVAL; in bpf_tracing_prog_attach()
3524 /* We can allow re-attach only if we have valid attach_btf. */ in bpf_tracing_prog_attach()
3525 if (!prog->aux->attach_btf) { in bpf_tracing_prog_attach()
3526 err = -EINVAL; in bpf_tracing_prog_attach()
3529 btf_id = prog->aux->attach_btf_id; in bpf_tracing_prog_attach()
3530 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); in bpf_tracing_prog_attach()
3533 if (!prog->aux->dst_trampoline || in bpf_tracing_prog_attach()
3534 (key && key != prog->aux->dst_trampoline->key)) { in bpf_tracing_prog_attach()
3547 module_put(prog->aux->mod); in bpf_tracing_prog_attach()
3548 prog->aux->mod = tgt_info.tgt_mod; in bpf_tracing_prog_attach()
3553 err = -ENOMEM; in bpf_tracing_prog_attach()
3562 * prog->aux are cleared below. in bpf_tracing_prog_attach()
3564 tr = prog->aux->dst_trampoline; in bpf_tracing_prog_attach()
3565 tgt_prog = prog->aux->dst_prog; in bpf_tracing_prog_attach()
3568 err = bpf_link_prime(&link->link.link, &link_primer); in bpf_tracing_prog_attach()
3572 err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog); in bpf_tracing_prog_attach()
3579 link->tgt_prog = tgt_prog; in bpf_tracing_prog_attach()
3580 link->trampoline = tr; in bpf_tracing_prog_attach()
3582 /* Always clear the trampoline and target prog from prog->aux to make in bpf_tracing_prog_attach()
3584 * program is (re-)attached to another target. in bpf_tracing_prog_attach()
3586 if (prog->aux->dst_prog && in bpf_tracing_prog_attach()
3587 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) in bpf_tracing_prog_attach()
3589 bpf_prog_put(prog->aux->dst_prog); in bpf_tracing_prog_attach()
3590 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) in bpf_tracing_prog_attach()
3592 bpf_trampoline_put(prog->aux->dst_trampoline); in bpf_tracing_prog_attach()
3594 prog->aux->dst_prog = NULL; in bpf_tracing_prog_attach()
3595 prog->aux->dst_trampoline = NULL; in bpf_tracing_prog_attach()
3596 mutex_unlock(&prog->aux->dst_mutex); in bpf_tracing_prog_attach()
3600 if (tr && tr != prog->aux->dst_trampoline) in bpf_tracing_prog_attach()
3602 mutex_unlock(&prog->aux->dst_mutex); in bpf_tracing_prog_attach()
3615 bpf_probe_unregister(raw_tp->btp, raw_tp); in bpf_raw_tp_link_release()
3616 bpf_put_raw_tracepoint(raw_tp->btp); in bpf_raw_tp_link_release()
3635 raw_tp_link->btp->tp->name); in bpf_raw_tp_link_show_fdinfo()
3643 return -EFAULT; in bpf_copy_to_user()
3647 if (copy_to_user(ubuf, buf, ulen - 1)) in bpf_copy_to_user()
3648 return -EFAULT; in bpf_copy_to_user()
3649 if (put_user(zero, ubuf + ulen - 1)) in bpf_copy_to_user()
3650 return -EFAULT; in bpf_copy_to_user()
3651 return -ENOSPC; in bpf_copy_to_user()
3662 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); in bpf_raw_tp_link_fill_link_info()
3663 const char *tp_name = raw_tp_link->btp->tp->name; in bpf_raw_tp_link_fill_link_info()
3664 u32 ulen = info->raw_tracepoint.tp_name_len; in bpf_raw_tp_link_fill_link_info()
3668 return -EINVAL; in bpf_raw_tp_link_fill_link_info()
3670 info->raw_tracepoint.tp_name_len = tp_len + 1; in bpf_raw_tp_link_fill_link_info()
3694 struct perf_event *event = perf_link->perf_file->private_data; in bpf_perf_link_release()
3697 fput(perf_link->perf_file); in bpf_perf_link_release()
3719 return -EINVAL; in bpf_perf_link_fill_common()
3743 return -EFAULT; in bpf_perf_link_fill_common()
3758 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name); in bpf_perf_link_fill_kprobe()
3759 ulen = info->perf_event.kprobe.name_len; in bpf_perf_link_fill_kprobe()
3765 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE; in bpf_perf_link_fill_kprobe()
3767 info->perf_event.type = BPF_PERF_EVENT_KPROBE; in bpf_perf_link_fill_kprobe()
3768 info->perf_event.kprobe.name_len = ulen; in bpf_perf_link_fill_kprobe()
3769 info->perf_event.kprobe.offset = offset; in bpf_perf_link_fill_kprobe()
3770 info->perf_event.kprobe.missed = missed; in bpf_perf_link_fill_kprobe()
3773 info->perf_event.kprobe.addr = addr; in bpf_perf_link_fill_kprobe()
3774 info->perf_event.kprobe.cookie = event->bpf_cookie; in bpf_perf_link_fill_kprobe()
3788 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); in bpf_perf_link_fill_uprobe()
3789 ulen = info->perf_event.uprobe.name_len; in bpf_perf_link_fill_uprobe()
3796 info->perf_event.type = BPF_PERF_EVENT_URETPROBE; in bpf_perf_link_fill_uprobe()
3798 info->perf_event.type = BPF_PERF_EVENT_UPROBE; in bpf_perf_link_fill_uprobe()
3799 info->perf_event.uprobe.name_len = ulen; in bpf_perf_link_fill_uprobe()
3800 info->perf_event.uprobe.offset = offset; in bpf_perf_link_fill_uprobe()
3801 info->perf_event.uprobe.cookie = event->bpf_cookie; in bpf_perf_link_fill_uprobe()
3810 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) in bpf_perf_link_fill_probe()
3814 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) in bpf_perf_link_fill_probe()
3817 return -EOPNOTSUPP; in bpf_perf_link_fill_probe()
3827 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name); in bpf_perf_link_fill_tracepoint()
3828 ulen = info->perf_event.tracepoint.name_len; in bpf_perf_link_fill_tracepoint()
3833 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT; in bpf_perf_link_fill_tracepoint()
3834 info->perf_event.tracepoint.name_len = ulen; in bpf_perf_link_fill_tracepoint()
3835 info->perf_event.tracepoint.cookie = event->bpf_cookie; in bpf_perf_link_fill_tracepoint()
3842 info->perf_event.event.type = event->attr.type; in bpf_perf_link_fill_perf_event()
3843 info->perf_event.event.config = event->attr.config; in bpf_perf_link_fill_perf_event()
3844 info->perf_event.event.cookie = event->bpf_cookie; in bpf_perf_link_fill_perf_event()
3845 info->perf_event.type = BPF_PERF_EVENT_EVENT; in bpf_perf_link_fill_perf_event()
3856 event = perf_get_event(perf_link->perf_file); in bpf_perf_link_fill_link_info()
3860 switch (event->prog->type) { in bpf_perf_link_fill_link_info()
3868 return -EOPNOTSUPP; in bpf_perf_link_fill_link_info()
3886 if (attr->link_create.flags) in bpf_perf_link_attach()
3887 return -EINVAL; in bpf_perf_link_attach()
3889 perf_file = perf_event_get(attr->link_create.target_fd); in bpf_perf_link_attach()
3895 err = -ENOMEM; in bpf_perf_link_attach()
3898 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); in bpf_perf_link_attach()
3899 link->perf_file = perf_file; in bpf_perf_link_attach()
3901 err = bpf_link_prime(&link->link, &link_primer); in bpf_perf_link_attach()
3907 event = perf_file->private_data; in bpf_perf_link_attach()
3908 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); in bpf_perf_link_attach()
3925 return -EOPNOTSUPP; in bpf_perf_link_attach()
3939 switch (prog->type) { in bpf_raw_tp_link_attach()
3947 return -EINVAL; in bpf_raw_tp_link_attach()
3948 if (prog->type == BPF_PROG_TYPE_TRACING && in bpf_raw_tp_link_attach()
3949 prog->expected_attach_type == BPF_TRACE_RAW_TP) { in bpf_raw_tp_link_attach()
3950 tp_name = prog->aux->attach_func_name; in bpf_raw_tp_link_attach()
3956 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) in bpf_raw_tp_link_attach()
3957 return -EFAULT; in bpf_raw_tp_link_attach()
3958 buf[sizeof(buf) - 1] = 0; in bpf_raw_tp_link_attach()
3962 return -EINVAL; in bpf_raw_tp_link_attach()
3967 return -ENOENT; in bpf_raw_tp_link_attach()
3971 err = -ENOMEM; in bpf_raw_tp_link_attach()
3974 bpf_link_init_sleepable(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, in bpf_raw_tp_link_attach()
3976 tracepoint_is_faultable(btp->tp)); in bpf_raw_tp_link_attach()
3977 link->btp = btp; in bpf_raw_tp_link_attach()
3978 link->cookie = cookie; in bpf_raw_tp_link_attach()
3980 err = bpf_link_prime(&link->link, &link_primer); in bpf_raw_tp_link_attach()
3986 err = bpf_probe_register(link->btp, link); in bpf_raw_tp_link_attach()
4009 return -EINVAL; in bpf_raw_tracepoint_open()
4011 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); in bpf_raw_tracepoint_open()
4015 tp_name = u64_to_user_ptr(attr->raw_tracepoint.name); in bpf_raw_tracepoint_open()
4016 cookie = attr->raw_tracepoint.cookie; in bpf_raw_tracepoint_open()
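BPF_RAW_TRACEPOINT_OPEN takes a tracepoint name (unless the target is baked in at load time, the BPF_TRACE_RAW_TP case handled above) plus a program fd, and returns a link fd. Via libbpf:

#include <bpf/bpf.h>

/* Attach a BPF_PROG_TYPE_RAW_TRACEPOINT program to sched_switch;
 * the returned fd is the attachment: closing it detaches the prog. */
static int attach_sched_switch(int prog_fd)
{
	return bpf_raw_tracepoint_open("sched_switch", prog_fd);
}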
4101 switch (prog->type) { in bpf_prog_attach_check_attach_type()
4106 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; in bpf_prog_attach_check_attach_type()
4108 if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN)) in bpf_prog_attach_check_attach_type()
4109 /* cg-skb progs can be loaded by unpriv user. in bpf_prog_attach_check_attach_type()
4112 return -EPERM; in bpf_prog_attach_check_attach_type()
4115 if (prog->type != ptype) in bpf_prog_attach_check_attach_type()
4116 return -EINVAL; in bpf_prog_attach_check_attach_type()
4118 return prog->enforce_expected_attach_type && in bpf_prog_attach_check_attach_type()
4119 prog->expected_attach_type != attach_type ? in bpf_prog_attach_check_attach_type()
4120 -EINVAL : 0; in bpf_prog_attach_check_attach_type()
4125 return -EINVAL; in bpf_prog_attach_check_attach_type()
4130 return -EINVAL; in bpf_prog_attach_check_attach_type()
4133 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && in bpf_prog_attach_check_attach_type()
4135 return -EINVAL; in bpf_prog_attach_check_attach_type()
4136 if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION && in bpf_prog_attach_check_attach_type()
4138 return -EINVAL; in bpf_prog_attach_check_attach_type()
4139 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && in bpf_prog_attach_check_attach_type()
4141 return -EINVAL; in bpf_prog_attach_check_attach_type()
4142 if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION && in bpf_prog_attach_check_attach_type()
4144 return -EINVAL; in bpf_prog_attach_check_attach_type()
4150 return -EINVAL; in bpf_prog_attach_check_attach_type()
4157 return -EINVAL; in bpf_prog_attach_check_attach_type()
4161 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) in bpf_prog_attach_check_attach_type()
4162 return -EINVAL; in bpf_prog_attach_check_attach_type()
4188 return -EINVAL; in bpf_prog_attach()
4190 ptype = attach_type_to_prog_type(attr->attach_type); in bpf_prog_attach()
4192 return -EINVAL; in bpf_prog_attach()
4194 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) in bpf_prog_attach()
4195 return -EINVAL; in bpf_prog_attach()
4197 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE) in bpf_prog_attach()
4198 return -EINVAL; in bpf_prog_attach()
4199 if (attr->relative_fd || in bpf_prog_attach()
4200 attr->expected_revision) in bpf_prog_attach()
4201 return -EINVAL; in bpf_prog_attach()
4204 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); in bpf_prog_attach()
4208 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { in bpf_prog_attach()
4210 return -EINVAL; in bpf_prog_attach()
4233 prog->expected_attach_type != BPF_LSM_CGROUP) in bpf_prog_attach()
4234 ret = -EINVAL; in bpf_prog_attach()
4239 if (attr->attach_type == BPF_TCX_INGRESS || in bpf_prog_attach()
4240 attr->attach_type == BPF_TCX_EGRESS) in bpf_prog_attach()
4246 ret = -EINVAL; in bpf_prog_attach()
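/*
 * Sketch of the legacy BPF_PROG_ATTACH path for a cgroup program,
 * assuming cgroup_fd is an open cgroup-v2 directory fd.  The attach_type
 * picks the expected program type via attach_type_to_prog_type() above,
 * and attach_flags must stay within the mask the handler checks.
 */
static int attach_cgroup_prog(int cgroup_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cgroup_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.attach_flags = BPF_F_ALLOW_MULTI;
	return sys_bpf(BPF_PROG_ATTACH, &attr);
}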
4263 return -EINVAL; in bpf_prog_detach()
4265 ptype = attach_type_to_prog_type(attr->attach_type); in bpf_prog_detach()
4268 return -EINVAL; in bpf_prog_detach()
4269 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) in bpf_prog_detach()
4270 return -EINVAL; in bpf_prog_detach()
4271 if (attr->attach_bpf_fd) { in bpf_prog_detach()
4272 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); in bpf_prog_detach()
4276 } else if (attr->attach_flags || in bpf_prog_detach()
4277 attr->relative_fd || in bpf_prog_detach()
4278 attr->expected_revision) { in bpf_prog_detach()
4279 return -EINVAL; in bpf_prog_detach()
4304 if (attr->attach_type == BPF_TCX_INGRESS || in bpf_prog_detach()
4305 attr->attach_type == BPF_TCX_EGRESS) in bpf_prog_detach()
4311 ret = -EINVAL; in bpf_prog_detach()
4325 return -EPERM; in bpf_prog_query()
4327 return -EINVAL; in bpf_prog_query()
4328 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) in bpf_prog_query()
4329 return -EINVAL; in bpf_prog_query()
4331 switch (attr->query.attach_type) { in bpf_prog_query()
4379 return -EINVAL; in bpf_prog_query()
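/*
 * Sketch of BPF_PROG_QUERY: list the programs attached at one attach
 * point.  prog_cnt is in/out: the kernel reports the total count, so a
 * first call with prog_ids == NULL can be used to size the buffer.
 */
static int query_cgroup_progs(int cgroup_fd, __u32 *ids, __u32 *cnt)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = cgroup_fd;
	attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.query.prog_ids = (__u64)(unsigned long)ids;
	attr.query.prog_cnt = *cnt;
	err = sys_bpf(BPF_PROG_QUERY, &attr);
	if (!err)
		*cnt = attr.query.prog_cnt;
	return err;
}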
4389 int ret = -ENOTSUPP; in bpf_prog_test_run()
4392 return -EINVAL; in bpf_prog_test_run()
4394 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || in bpf_prog_test_run()
4395 (!attr->test.ctx_size_in && attr->test.ctx_in)) in bpf_prog_test_run()
4396 return -EINVAL; in bpf_prog_test_run()
4398 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || in bpf_prog_test_run()
4399 (!attr->test.ctx_size_out && attr->test.ctx_out)) in bpf_prog_test_run()
4400 return -EINVAL; in bpf_prog_test_run()
4402 prog = bpf_prog_get(attr->test.prog_fd); in bpf_prog_test_run()
4406 if (prog->aux->ops->test_run) in bpf_prog_test_run()
4407 ret = prog->aux->ops->test_run(prog, attr, uattr); in bpf_prog_test_run()
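/*
 * Sketch of BPF_PROG_TEST_RUN for a program type whose ops provide
 * ->test_run (e.g. a packet-processing program fed data_in).  Note the
 * pairing rule enforced above: ctx_in requires ctx_size_in and vice versa.
 */
static int test_run_once(int prog_fd, void *pkt, __u32 pkt_len, __u32 *retval)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = (__u64)(unsigned long)pkt;
	attr.test.data_size_in = pkt_len;
	attr.test.repeat = 1;
	err = sys_bpf(BPF_PROG_TEST_RUN, &attr);
	if (!err)
		*retval = attr.test.retval;	/* program's return value */
	return err;
}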
4420 u32 next_id = attr->start_id; in bpf_obj_get_next_id()
4424 return -EINVAL; in bpf_obj_get_next_id()
4427 return -EPERM; in bpf_obj_get_next_id()
4432 err = -ENOENT; in bpf_obj_get_next_id()
4436 err = put_user(next_id, &uattr->next_id); in bpf_obj_get_next_id()
4486 return ERR_PTR(-ENOENT); in bpf_prog_by_id()
4493 prog = ERR_PTR(-ENOENT); in bpf_prog_by_id()
4501 u32 id = attr->prog_id; in bpf_prog_get_fd_by_id()
4505 return -EINVAL; in bpf_prog_get_fd_by_id()
4508 return -EPERM; in bpf_prog_get_fd_by_id()
4526 u32 id = attr->map_id; in bpf_map_get_fd_by_id()
4531 attr->open_flags & ~BPF_OBJ_FLAG_MASK) in bpf_map_get_fd_by_id()
4532 return -EINVAL; in bpf_map_get_fd_by_id()
4535 return -EPERM; in bpf_map_get_fd_by_id()
4537 f_flags = bpf_get_file_flag(attr->open_flags); in bpf_map_get_fd_by_id()
4546 map = ERR_PTR(-ENOENT); in bpf_map_get_fd_by_id()
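/*
 * Sketch combining BPF_PROG_GET_NEXT_ID and BPF_PROG_GET_FD_BY_ID, the
 * two handlers above, to walk every loaded program.  Both commands are
 * CAP_SYS_ADMIN-only, and an id can vanish between the two calls, so a
 * failed fd lookup is simply skipped.
 */
static void for_each_prog_fd(void (*cb)(int fd))
{
	union bpf_attr attr;
	__u32 id = 0;
	int fd;

	for (;;) {
		memset(&attr, 0, sizeof(attr));
		attr.start_id = id;
		if (sys_bpf(BPF_PROG_GET_NEXT_ID, &attr))
			break;	/* errno == ENOENT: no more programs */
		id = attr.next_id;

		memset(&attr, 0, sizeof(attr));
		attr.prog_id = id;
		fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr);
		if (fd < 0)
			continue;	/* raced with unload */
		cb(fd);
		close(fd);
	}
}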
4566 mutex_lock(&prog->aux->used_maps_mutex); in bpf_map_from_imm()
4567 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { in bpf_map_from_imm()
4568 map = prog->aux->used_maps[i]; in bpf_map_from_imm()
4573 if (!map->ops->map_direct_value_meta) in bpf_map_from_imm()
4575 if (!map->ops->map_direct_value_meta(map, addr, off)) { in bpf_map_from_imm()
4583 mutex_unlock(&prog->aux->used_maps_mutex); in bpf_map_from_imm()
4597 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), in bpf_insn_prepare_dump()
4602 for (i = 0; i < prog->len; i++) { in bpf_insn_prepare_dump()
4608 /* fall-through */ in bpf_insn_prepare_dump()
4636 insns[i].imm = map->id; in bpf_insn_prepare_dump()
4657 if ((info->nr_func_info || info->func_info_rec_size) && in set_info_rec_size()
4658 info->func_info_rec_size != sizeof(struct bpf_func_info)) in set_info_rec_size()
4659 return -EINVAL; in set_info_rec_size()
4661 if ((info->nr_line_info || info->line_info_rec_size) && in set_info_rec_size()
4662 info->line_info_rec_size != sizeof(struct bpf_line_info)) in set_info_rec_size()
4663 return -EINVAL; in set_info_rec_size()
4665 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && in set_info_rec_size()
4666 info->jited_line_info_rec_size != sizeof(__u64)) in set_info_rec_size()
4667 return -EINVAL; in set_info_rec_size()
4669 info->func_info_rec_size = sizeof(struct bpf_func_info); in set_info_rec_size()
4670 info->line_info_rec_size = sizeof(struct bpf_line_info); in set_info_rec_size()
4671 info->jited_line_info_rec_size = sizeof(__u64); in set_info_rec_size()
4681 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); in bpf_prog_get_info_by_fd()
4684 u32 info_len = attr->info.info_len; in bpf_prog_get_info_by_fd()
4697 return -EFAULT; in bpf_prog_get_info_by_fd()
4699 info.type = prog->type; in bpf_prog_get_info_by_fd()
4700 info.id = prog->aux->id; in bpf_prog_get_info_by_fd()
4701 info.load_time = prog->aux->load_time; in bpf_prog_get_info_by_fd()
4703 prog->aux->user->uid); in bpf_prog_get_info_by_fd()
4704 info.gpl_compatible = prog->gpl_compatible; in bpf_prog_get_info_by_fd()
4706 memcpy(info.tag, prog->tag, sizeof(prog->tag)); in bpf_prog_get_info_by_fd()
4707 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); in bpf_prog_get_info_by_fd()
4709 mutex_lock(&prog->aux->used_maps_mutex); in bpf_prog_get_info_by_fd()
4711 info.nr_map_ids = prog->aux->used_map_cnt; in bpf_prog_get_info_by_fd()
4718 if (put_user(prog->aux->used_maps[i]->id, in bpf_prog_get_info_by_fd()
4720 mutex_unlock(&prog->aux->used_maps_mutex); in bpf_prog_get_info_by_fd()
4721 return -EFAULT; in bpf_prog_get_info_by_fd()
4724 mutex_unlock(&prog->aux->used_maps_mutex); in bpf_prog_get_info_by_fd()
4735 info.verified_insns = prog->aux->verified_insns; in bpf_prog_get_info_by_fd()
4754 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
4758 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); in bpf_prog_get_info_by_fd()
4760 return -ENOMEM; in bpf_prog_get_info_by_fd()
4766 return -EFAULT; in bpf_prog_get_info_by_fd()
4769 if (bpf_prog_is_offloaded(prog->aux)) { in bpf_prog_get_info_by_fd()
4781 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
4785 for (i = 0; i < prog->aux->func_cnt; i++) in bpf_prog_get_info_by_fd()
4786 info.jited_prog_len += prog->aux->func[i]->jited_len; in bpf_prog_get_info_by_fd()
4788 info.jited_prog_len = prog->jited_len; in bpf_prog_get_info_by_fd()
4792 if (bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
4796 /* for multi-function programs, copy the JITed instructions for all the functions */ in bpf_prog_get_info_by_fd()
4799 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
4804 for (i = 0; i < prog->aux->func_cnt; i++) { in bpf_prog_get_info_by_fd()
4805 len = prog->aux->func[i]->jited_len; in bpf_prog_get_info_by_fd()
4807 img = (u8 *) prog->aux->func[i]->bpf_func; in bpf_prog_get_info_by_fd()
4809 return -EFAULT; in bpf_prog_get_info_by_fd()
4811 free -= len; in bpf_prog_get_info_by_fd()
4816 if (copy_to_user(uinsns, prog->bpf_func, ulen)) in bpf_prog_get_info_by_fd()
4817 return -EFAULT; in bpf_prog_get_info_by_fd()
4825 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; in bpf_prog_get_info_by_fd()
4827 if (bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
4837 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
4840 prog->aux->func[i]->bpf_func; in bpf_prog_get_info_by_fd()
4843 return -EFAULT; in bpf_prog_get_info_by_fd()
4846 ksym_addr = (unsigned long) prog->bpf_func; in bpf_prog_get_info_by_fd()
4848 return -EFAULT; in bpf_prog_get_info_by_fd()
4856 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1; in bpf_prog_get_info_by_fd()
4858 if (bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
4865 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
4868 prog->aux->func[i]->jited_len; in bpf_prog_get_info_by_fd()
4870 return -EFAULT; in bpf_prog_get_info_by_fd()
4873 func_len = prog->jited_len; in bpf_prog_get_info_by_fd()
4875 return -EFAULT; in bpf_prog_get_info_by_fd()
4882 if (prog->aux->btf) in bpf_prog_get_info_by_fd()
4883 info.btf_id = btf_obj_id(prog->aux->btf); in bpf_prog_get_info_by_fd()
4884 info.attach_btf_id = prog->aux->attach_btf_id; in bpf_prog_get_info_by_fd()
4889 info.nr_func_info = prog->aux->func_info_cnt; in bpf_prog_get_info_by_fd()
4895 if (copy_to_user(user_finfo, prog->aux->func_info, in bpf_prog_get_info_by_fd()
4897 return -EFAULT; in bpf_prog_get_info_by_fd()
4901 info.nr_line_info = prog->aux->nr_linfo; in bpf_prog_get_info_by_fd()
4907 if (copy_to_user(user_linfo, prog->aux->linfo, in bpf_prog_get_info_by_fd()
4909 return -EFAULT; in bpf_prog_get_info_by_fd()
4913 if (prog->aux->jited_linfo) in bpf_prog_get_info_by_fd()
4914 info.nr_jited_line_info = prog->aux->nr_linfo; in bpf_prog_get_info_by_fd()
4918 if (bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
4926 line_addr = (unsigned long)prog->aux->jited_linfo[i]; in bpf_prog_get_info_by_fd()
4928 return -EFAULT; in bpf_prog_get_info_by_fd()
4936 info.nr_prog_tags = prog->aux->func_cnt ? : 1; in bpf_prog_get_info_by_fd()
4943 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
4946 prog->aux->func[i]->tag, in bpf_prog_get_info_by_fd()
4948 return -EFAULT; in bpf_prog_get_info_by_fd()
4952 prog->tag, BPF_TAG_SIZE)) in bpf_prog_get_info_by_fd()
4953 return -EFAULT; in bpf_prog_get_info_by_fd()
4959 put_user(info_len, &uattr->info.info_len)) in bpf_prog_get_info_by_fd()
4960 return -EFAULT; in bpf_prog_get_info_by_fd()
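/*
 * Sketch of BPF_OBJ_GET_INFO_BY_FD on a program fd.  As the handler above
 * shows for map_ids, variable-length fields follow a two-call contract:
 * the kernel writes back the real element count and copies at most as
 * many entries as userspace declared, so callers size the buffer first.
 */
static int get_prog_map_ids(int prog_fd, __u32 *map_ids, __u32 *nr)
{
	struct bpf_prog_info info;
	union bpf_attr attr;
	int err;

	memset(&info, 0, sizeof(info));
	info.nr_map_ids = *nr;
	info.map_ids = (__u64)(unsigned long)map_ids;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;
	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr);
	if (!err)
		*nr = info.nr_map_ids;	/* real count; may exceed the input */
	return err;
}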
4970 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); in bpf_map_get_info_by_fd()
4972 u32 info_len = attr->info.info_len; in bpf_map_get_info_by_fd()
4981 info.type = map->map_type; in bpf_map_get_info_by_fd()
4982 info.id = map->id; in bpf_map_get_info_by_fd()
4983 info.key_size = map->key_size; in bpf_map_get_info_by_fd()
4984 info.value_size = map->value_size; in bpf_map_get_info_by_fd()
4985 info.max_entries = map->max_entries; in bpf_map_get_info_by_fd()
4986 info.map_flags = map->map_flags; in bpf_map_get_info_by_fd()
4987 info.map_extra = map->map_extra; in bpf_map_get_info_by_fd()
4988 memcpy(info.name, map->name, sizeof(map->name)); in bpf_map_get_info_by_fd()
4990 if (map->btf) { in bpf_map_get_info_by_fd()
4991 info.btf_id = btf_obj_id(map->btf); in bpf_map_get_info_by_fd()
4992 info.btf_key_type_id = map->btf_key_type_id; in bpf_map_get_info_by_fd()
4993 info.btf_value_type_id = map->btf_value_type_id; in bpf_map_get_info_by_fd()
4995 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; in bpf_map_get_info_by_fd()
4996 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) in bpf_map_get_info_by_fd()
5006 put_user(info_len, &uattr->info.info_len)) in bpf_map_get_info_by_fd()
5007 return -EFAULT; in bpf_map_get_info_by_fd()
5017 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); in bpf_btf_get_info_by_fd()
5018 u32 info_len = attr->info.info_len; in bpf_btf_get_info_by_fd()
5033 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); in bpf_link_get_info_by_fd()
5035 u32 info_len = attr->info.info_len; in bpf_link_get_info_by_fd()
5045 return -EFAULT; in bpf_link_get_info_by_fd()
5047 info.type = link->type; in bpf_link_get_info_by_fd()
5048 info.id = link->id; in bpf_link_get_info_by_fd()
5049 if (link->prog) in bpf_link_get_info_by_fd()
5050 info.prog_id = link->prog->aux->id; in bpf_link_get_info_by_fd()
5052 if (link->ops->fill_link_info) { in bpf_link_get_info_by_fd()
5053 err = link->ops->fill_link_info(link, &info); in bpf_link_get_info_by_fd()
5059 put_user(info_len, &uattr->info.info_len)) in bpf_link_get_info_by_fd()
5060 return -EFAULT; in bpf_link_get_info_by_fd()
5072 return -EINVAL; in bpf_obj_get_info_by_fd()
5074 CLASS(fd, f)(attr->info.bpf_fd); in bpf_obj_get_info_by_fd()
5076 return -EBADFD; in bpf_obj_get_info_by_fd()
5078 if (fd_file(f)->f_op == &bpf_prog_fops) in bpf_obj_get_info_by_fd()
5079 return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, in bpf_obj_get_info_by_fd()
5081 else if (fd_file(f)->f_op == &bpf_map_fops) in bpf_obj_get_info_by_fd()
5082 return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, in bpf_obj_get_info_by_fd()
5084 else if (fd_file(f)->f_op == &btf_fops) in bpf_obj_get_info_by_fd()
5085 return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr); in bpf_obj_get_info_by_fd()
5086 else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll) in bpf_obj_get_info_by_fd()
5087 return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data, in bpf_obj_get_info_by_fd()
5089 return -EINVAL; in bpf_obj_get_info_by_fd()
5099 return -EINVAL; in bpf_btf_load()
5101 if (attr->btf_flags & ~BPF_F_TOKEN_FD) in bpf_btf_load()
5102 return -EINVAL; in bpf_btf_load()
5104 if (attr->btf_flags & BPF_F_TOKEN_FD) { in bpf_btf_load()
5105 token = bpf_token_get_from_fd(attr->btf_token_fd); in bpf_btf_load()
5116 return -EPERM; in bpf_btf_load()
5129 return -EINVAL; in bpf_btf_get_fd_by_id()
5132 return -EPERM; in bpf_btf_get_fd_by_id()
5134 return btf_get_fd_by_id(attr->btf_id); in bpf_btf_get_fd_by_id()
5143 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); in bpf_task_fd_query_copy()
5147 if (put_user(len, &uattr->task_fd_query.buf_len)) in bpf_task_fd_query_copy()
5148 return -EFAULT; in bpf_task_fd_query_copy()
5149 input_len = attr->task_fd_query.buf_len; in bpf_task_fd_query_copy()
5156 return -EFAULT; in bpf_task_fd_query_copy()
5160 return -EFAULT; in bpf_task_fd_query_copy()
5167 err = -ENOSPC; in bpf_task_fd_query_copy()
5168 if (copy_to_user(ubuf, buf, input_len - 1)) in bpf_task_fd_query_copy()
5169 return -EFAULT; in bpf_task_fd_query_copy()
5170 if (put_user(zero, ubuf + input_len - 1)) in bpf_task_fd_query_copy()
5171 return -EFAULT; in bpf_task_fd_query_copy()
5175 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || in bpf_task_fd_query_copy()
5176 put_user(fd_type, &uattr->task_fd_query.fd_type) || in bpf_task_fd_query_copy()
5177 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || in bpf_task_fd_query_copy()
5178 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) in bpf_task_fd_query_copy()
5179 return -EFAULT; in bpf_task_fd_query_copy()
5189 pid_t pid = attr->task_fd_query.pid; in bpf_task_fd_query()
5190 u32 fd = attr->task_fd_query.fd; in bpf_task_fd_query()
5197 return -EINVAL; in bpf_task_fd_query()
5200 return -EPERM; in bpf_task_fd_query()
5202 if (attr->task_fd_query.flags != 0) in bpf_task_fd_query()
5203 return -EINVAL; in bpf_task_fd_query()
5209 return -ENOENT; in bpf_task_fd_query()
5215 return -EBADF; in bpf_task_fd_query()
5217 if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) { in bpf_task_fd_query()
5218 struct bpf_link *link = file->private_data; in bpf_task_fd_query()
5220 if (link->ops == &bpf_raw_tp_link_lops) { in bpf_task_fd_query()
5223 struct bpf_raw_event_map *btp = raw_tp->btp; in bpf_task_fd_query()
5226 raw_tp->link.prog->aux->id, in bpf_task_fd_query()
5228 btp->tp->name, 0, 0); in bpf_task_fd_query()
5252 err = -ENOTSUPP; in bpf_task_fd_query()
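/*
 * Sketch of BPF_TASK_FD_QUERY: given a pid and one of its fds (a perf
 * event or a raw tracepoint link), recover the attached program id and
 * the attach point name.  Per bpf_task_fd_query_copy() above, a too-small
 * buffer still yields the ids plus a NUL-terminated, truncated name, with
 * errno set to ENOSPC.
 */
static int query_task_fd(pid_t pid, int fd, char *buf, __u32 buf_len,
			 __u32 *prog_id, __u32 *fd_type)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.buf = (__u64)(unsigned long)buf;
	attr.task_fd_query.buf_len = buf_len;
	err = sys_bpf(BPF_TASK_FD_QUERY, &attr);
	if (!err || errno == ENOSPC) {	/* ids are valid even when truncated */
		*prog_id = attr.task_fd_query.prog_id;
		*fd_type = attr.task_fd_query.fd_type;
	}
	return err;
}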
5263 err = -ENOTSUPP; \ in BPF_DO_BATCH()
5280 return -EINVAL; in bpf_map_do_batch()
5282 CLASS(fd, f)(attr->batch.map_fd); in bpf_map_do_batch()
5290 err = -EPERM; in bpf_map_do_batch()
5294 err = -EPERM; in bpf_map_do_batch()
5299 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr); in bpf_map_do_batch()
5301 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr); in bpf_map_do_batch()
5303 BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr); in bpf_map_do_batch()
5305 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr); in bpf_map_do_batch()
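/*
 * Sketch of BPF_MAP_LOOKUP_BATCH, dispatched by bpf_map_do_batch() above.
 * in_batch/out_batch carry an opaque cursor: pass NULL in_batch on the
 * first call, then feed out_batch back in.  count is in/out and is lowered
 * to the number of elements actually copied; errno == ENOENT marks the
 * final (possibly partial) batch.
 */
static int map_lookup_batch(int map_fd, void *in_batch, void *out_batch,
			    void *keys, void *values, __u32 *count)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = map_fd;
	attr.batch.in_batch = (__u64)(unsigned long)in_batch;
	attr.batch.out_batch = (__u64)(unsigned long)out_batch;
	attr.batch.keys = (__u64)(unsigned long)keys;
	attr.batch.values = (__u64)(unsigned long)values;
	attr.batch.count = *count;
	err = sys_bpf(BPF_MAP_LOOKUP_BATCH, &attr);
	*count = attr.batch.count;
	return err;
}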
5321 return -EINVAL; in link_create()
5323 if (attr->link_create.attach_type == BPF_STRUCT_OPS) in link_create()
5326 prog = bpf_prog_get(attr->link_create.prog_fd); in link_create()
5331 attr->link_create.attach_type); in link_create()
5335 switch (prog->type) { in link_create()
5347 attr->link_create.target_fd, in link_create()
5348 attr->link_create.target_btf_id, in link_create()
5349 attr->link_create.tracing.cookie); in link_create()
5353 if (attr->link_create.attach_type != prog->expected_attach_type) { in link_create()
5354 ret = -EINVAL; in link_create()
5357 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) in link_create()
5358 ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie); in link_create()
5359 else if (prog->expected_attach_type == BPF_TRACE_ITER) in link_create()
5361 else if (prog->expected_attach_type == BPF_LSM_CGROUP) in link_create()
5365 attr->link_create.target_fd, in link_create()
5366 attr->link_create.target_btf_id, in link_create()
5367 attr->link_create.tracing.cookie); in link_create()
5382 if (attr->link_create.attach_type == BPF_TCX_INGRESS || in link_create()
5383 attr->link_create.attach_type == BPF_TCX_EGRESS) in link_create()
5397 if (attr->link_create.attach_type == BPF_PERF_EVENT) in link_create()
5399 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI || in link_create()
5400 attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION) in link_create()
5402 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI || in link_create()
5403 attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION) in link_create()
5407 ret = -EINVAL; in link_create()
5421 new_map = bpf_map_get(attr->link_update.new_map_fd); in link_update_map()
5425 if (attr->link_update.flags & BPF_F_REPLACE) { in link_update_map()
5426 old_map = bpf_map_get(attr->link_update.old_map_fd); in link_update_map()
5431 } else if (attr->link_update.old_map_fd) { in link_update_map()
5432 ret = -EINVAL; in link_update_map()
5436 ret = link->ops->update_map(link, new_map, old_map); in link_update_map()
5455 return -EINVAL; in link_update()
5457 flags = attr->link_update.flags; in link_update()
5459 return -EINVAL; in link_update()
5461 link = bpf_link_get_from_fd(attr->link_update.link_fd); in link_update()
5465 if (link->ops->update_map) { in link_update()
5470 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); in link_update()
5477 old_prog = bpf_prog_get(attr->link_update.old_prog_fd); in link_update()
5483 } else if (attr->link_update.old_prog_fd) { in link_update()
5484 ret = -EINVAL; in link_update()
5488 if (link->ops->update_prog) in link_update()
5489 ret = link->ops->update_prog(link, new_prog, old_prog); in link_update()
5491 ret = -EINVAL; in link_update()
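/*
 * Sketch of BPF_LINK_UPDATE swapping the program behind a live link.
 * With BPF_F_REPLACE the kernel additionally verifies that old_prog_fd
 * still names the currently attached program, as the handler above shows.
 */
static int link_replace_prog(int link_fd, int new_prog_fd, int old_prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.old_prog_fd = old_prog_fd;
	attr.link_update.flags = BPF_F_REPLACE;
	return sys_bpf(BPF_LINK_UPDATE, &attr);
}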
5511 return -EINVAL; in link_detach()
5513 link = bpf_link_get_from_fd(attr->link_detach.link_fd); in link_detach()
5517 if (link->ops->detach) in link_detach()
5518 ret = link->ops->detach(link); in link_detach()
5520 ret = -EOPNOTSUPP; in link_detach()
5528 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); in bpf_link_inc_not_zero()
5537 return ERR_PTR(-ENOENT); in bpf_link_by_id()
5543 if (link->id) in bpf_link_by_id()
5546 link = ERR_PTR(-EAGAIN); in bpf_link_by_id()
5548 link = ERR_PTR(-ENOENT); in bpf_link_by_id()
5578 u32 id = attr->link_id; in bpf_link_get_fd_by_id()
5582 return -EINVAL; in bpf_link_get_fd_by_id()
5585 return -EPERM; in bpf_link_get_fd_by_id()
5621 return -EBUSY; in bpf_enable_runtime_stats()
5624 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); in bpf_enable_runtime_stats()
5638 return -EINVAL; in bpf_enable_stats()
5641 return -EPERM; in bpf_enable_stats()
5643 switch (attr->enable_stats.type) { in bpf_enable_stats()
5649 return -EINVAL; in bpf_enable_stats()
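/*
 * Sketch of BPF_ENABLE_STATS.  The returned anonymous fd is the switch:
 * run-time accounting stays enabled until the last such fd is closed,
 * after which bpf_prog_info.run_time_ns/run_cnt stop advancing.
 */
static int enable_runtime_stats(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.enable_stats.type = BPF_STATS_RUN_TIME;
	return sys_bpf(BPF_ENABLE_STATS, &attr);	/* close() to disable */
}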
5660 return -EINVAL; in bpf_iter_create()
5662 if (attr->iter_create.flags) in bpf_iter_create()
5663 return -EINVAL; in bpf_iter_create()
5665 link = bpf_link_get_from_fd(attr->iter_create.link_fd); in bpf_iter_create()
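/*
 * Sketch of BPF_ITER_CREATE: turn an attached iterator link into a
 * readable fd.  read(2) drives the iterator program; a 0 return means
 * the walk is complete.
 */
static int iter_read_all(int iter_link_fd, char *out, size_t out_sz)
{
	union bpf_attr attr;
	ssize_t n, total = 0;
	int iter_fd;

	memset(&attr, 0, sizeof(attr));
	attr.iter_create.link_fd = iter_link_fd;
	iter_fd = sys_bpf(BPF_ITER_CREATE, &attr);
	if (iter_fd < 0)
		return iter_fd;
	while ((n = read(iter_fd, out + total, out_sz - total)) > 0)
		total += n;
	close(iter_fd);
	return n < 0 ? -1 : (int)total;
}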
5685 return -EINVAL; in bpf_prog_bind_map()
5687 if (attr->prog_bind_map.flags) in bpf_prog_bind_map()
5688 return -EINVAL; in bpf_prog_bind_map()
5690 prog = bpf_prog_get(attr->prog_bind_map.prog_fd); in bpf_prog_bind_map()
5694 map = bpf_map_get(attr->prog_bind_map.map_fd); in bpf_prog_bind_map()
5700 mutex_lock(&prog->aux->used_maps_mutex); in bpf_prog_bind_map()
5702 used_maps_old = prog->aux->used_maps; in bpf_prog_bind_map()
5704 for (i = 0; i < prog->aux->used_map_cnt; i++) in bpf_prog_bind_map()
5710 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, in bpf_prog_bind_map()
5714 ret = -ENOMEM; in bpf_prog_bind_map()
5721 if (prog->sleepable) in bpf_prog_bind_map()
5722 atomic64_inc(&map->sleepable_refcnt); in bpf_prog_bind_map()
5724 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); in bpf_prog_bind_map()
5725 used_maps_new[prog->aux->used_map_cnt] = map; in bpf_prog_bind_map()
5727 prog->aux->used_map_cnt++; in bpf_prog_bind_map()
5728 prog->aux->used_maps = used_maps_new; in bpf_prog_bind_map()
5733 mutex_unlock(&prog->aux->used_maps_mutex); in bpf_prog_bind_map()
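/*
 * Sketch of BPF_PROG_BIND_MAP, implemented by the handler above: tie a
 * map's lifetime to a program that does not reference it in its
 * instructions (libbpf uses this for metadata maps, for instance).
 * Binding a map the program already uses is caught by the scan over
 * used_maps above.
 */
static int prog_bind_map(int prog_fd, int map_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	return sys_bpf(BPF_PROG_BIND_MAP, &attr);
}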
5747 return -EINVAL; in token_create()
5750 if (attr->token_create.flags) in token_create()
5751 return -EINVAL; in token_create()
5769 return -EFAULT; in __sys_bpf()
5893 err = -EINVAL; in __sys_bpf()
5931 return -EINVAL; in BPF_CALL_3()
5937 /* To shut up -Wmissing-prototypes; kern_sys_bpf() is used by the kernel light skeleton (see tools/lib/bpf/skel_internal.h). */
5952 if (attr->test.data_in || attr->test.data_out || in kern_sys_bpf()
5953 attr->test.ctx_out || attr->test.duration || in kern_sys_bpf()
5954 attr->test.repeat || attr->test.flags) in kern_sys_bpf()
5955 return -EINVAL; in kern_sys_bpf()
5957 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL); in kern_sys_bpf()
5961 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset || in kern_sys_bpf()
5962 attr->test.ctx_size_in > U16_MAX) { in kern_sys_bpf()
5964 return -EINVAL; in kern_sys_bpf()
5972 return -EBUSY; in kern_sys_bpf()
5974 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in); in kern_sys_bpf()
6006 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close in BPF_CALL_1()
6022 return -EINVAL; in BPF_CALL_4()
6024 if (name_sz <= 1 || name[name_sz - 1]) in BPF_CALL_4()
6025 return -EINVAL; in BPF_CALL_4()
6028 return -EPERM; in BPF_CALL_4()
6031 return *res ? 0 : -ENOENT; in BPF_CALL_4()
6050 return !bpf_token_capable(prog->aux->token, CAP_PERFMON) in syscall_prog_func_proto()
6076 struct static_key *key = (struct static_key *)table->data; in bpf_stats_handler()
6082 .mode = table->mode, in bpf_stats_handler()
6088 return -EPERM; in bpf_stats_handler()
6111 int ret, unpriv_enable = *(int *)table->data; in bpf_unpriv_handler()
6116 return -EPERM; in bpf_unpriv_handler()
6122 return -EPERM; in bpf_unpriv_handler()
6123 *(int *)table->data = unpriv_enable; in bpf_unpriv_handler()