Lines Matching +full:cpu +full:- +full:map

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
26 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_free_percpu()
27 free_percpu(array->pptrs[i]); in bpf_array_free_percpu()
37 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_alloc_percpu()
38 ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8, in bpf_array_alloc_percpu()
42 return -ENOMEM; in bpf_array_alloc_percpu()
44 array->pptrs[i] = ptr; in bpf_array_alloc_percpu()
54 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; in array_map_alloc_check()
58 if (attr->max_entries == 0 || attr->key_size != 4 || in array_map_alloc_check()
59 attr->value_size == 0 || in array_map_alloc_check()
60 attr->map_flags & ~ARRAY_CREATE_FLAG_MASK || in array_map_alloc_check()
61 !bpf_map_flags_access_ok(attr->map_flags) || in array_map_alloc_check()
63 return -EINVAL; in array_map_alloc_check()
65 if (attr->map_type != BPF_MAP_TYPE_ARRAY && in array_map_alloc_check()
66 attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP)) in array_map_alloc_check()
67 return -EINVAL; in array_map_alloc_check()
69 if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY && in array_map_alloc_check()
70 attr->map_flags & BPF_F_PRESERVE_ELEMS) in array_map_alloc_check()
71 return -EINVAL; in array_map_alloc_check()
73 /* avoid overflow on round_up(map->value_size) */ in array_map_alloc_check()
74 if (attr->value_size > INT_MAX) in array_map_alloc_check()
75 return -E2BIG; in array_map_alloc_check()
76 /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */ in array_map_alloc_check()
77 if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE) in array_map_alloc_check()
78 return -E2BIG; in array_map_alloc_check()
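
The checks above pin down what a valid array map looks like: a 4-byte key, a non-zero value_size no larger than INT_MAX, at least one entry, and (for BPF_MAP_TYPE_PERCPU_ARRAY) a per-CPU value that still fits in PCPU_MIN_UNIT_SIZE after rounding up to 8 bytes. A minimal user-space sketch that satisfies these constraints, assuming libbpf's bpf_map_create(); the map name and sizes are illustrative, not taken from this file:

#include <bpf/bpf.h>    /* bpf_map_create(), libbpf >= 0.7 assumed */

/* Create a plain BPF_MAP_TYPE_ARRAY that satisfies array_map_alloc_check():
 * a 4-byte key, a non-zero value_size and max_entries > 0, with no special
 * map_flags.
 */
static int create_array_map(__u32 value_size, __u32 max_entries)
{
        return bpf_map_create(BPF_MAP_TYPE_ARRAY, "example_array",
                              sizeof(__u32), value_size, max_entries, NULL);
}
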
85 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; in array_map_alloc()
92 elem_size = round_up(attr->value_size, 8); in array_map_alloc()
94 max_entries = attr->max_entries; in array_map_alloc()
100 mask64 = fls_long(max_entries - 1); in array_map_alloc()
102 mask64 -= 1; in array_map_alloc()
107 * since cpu will speculate within index_mask limits in array_map_alloc()
111 if (max_entries < attr->max_entries) in array_map_alloc()
112 return ERR_PTR(-E2BIG); in array_map_alloc()
119 /* rely on vmalloc() to return page-aligned memory and in array_map_alloc()
120 * ensure array->value is exactly page-aligned in array_map_alloc()
122 if (attr->map_flags & BPF_F_MMAPABLE) { in array_map_alloc()
130 /* allocate all map elements and zero-initialize them */ in array_map_alloc()
131 if (attr->map_flags & BPF_F_MMAPABLE) { in array_map_alloc()
137 return ERR_PTR(-ENOMEM); in array_map_alloc()
139 - offsetof(struct bpf_array, value); in array_map_alloc()
144 return ERR_PTR(-ENOMEM); in array_map_alloc()
145 array->index_mask = index_mask; in array_map_alloc()
146 array->map.bypass_spec_v1 = bypass_spec_v1; in array_map_alloc()
148 /* copy mandatory map attributes */ in array_map_alloc()
149 bpf_map_init_from_attr(&array->map, attr); in array_map_alloc()
150 array->elem_size = elem_size; in array_map_alloc()
154 return ERR_PTR(-ENOMEM); in array_map_alloc()
157 return &array->map; in array_map_alloc()
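
The mask64/index_mask lines above round max_entries up to the next power of two and subtract one, so a masked index can never reach past the memory that was actually allocated even when the CPU speculates beyond the bounds check (Spectre v1). When the mitigation is active the map is sized for index_mask + 1 entries, and the "max_entries < attr->max_entries" check catches the u32 round-up overflowing. A small user-space sketch of the same arithmetic, assuming a fls_long()-style most-significant-bit helper:

#include <stdint.h>

/* Sketch of the index_mask computation in array_map_alloc(): round
 * max_entries up to a power of two and subtract one.  For max_entries = 5
 * this yields 0x7, so "index & index_mask" stays inside the eight slots the
 * map was actually sized for, even under speculation.
 */
static uint64_t index_mask_for(uint32_t max_entries)
{
        uint64_t msb;

        if (max_entries <= 1)
                return 0;       /* fls_long(0) == 0 in the kernel */
        msb = 64 - __builtin_clzll(max_entries - 1);    /* ~ fls_long() */
        return (1ULL << msb) - 1;
}
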
162 return array->value + (u64)array->elem_size * index; in array_map_elem_ptr()
166 static void *array_map_lookup_elem(struct bpf_map *map, void *key) in array_map_lookup_elem() argument
168 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_lookup_elem()
171 if (unlikely(index >= array->map.max_entries)) in array_map_lookup_elem()
174 return array->value + (u64)array->elem_size * (index & array->index_mask); in array_map_lookup_elem()
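
array_map_lookup_elem() turns the 4-byte key directly into an offset in the flat value area, which is what makes array maps the cheapest lookup path. From a BPF program the same code is reached through the bpf_map_lookup_elem() helper; a minimal sketch, where the map name, value type and the XDP hook are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 64);
        __type(key, __u32);
        __type(value, __u64);
} counters SEC(".maps");

SEC("xdp")
int count_packets(struct xdp_md *ctx)
{
        __u32 key = 0;
        __u64 *val = bpf_map_lookup_elem(&counters, &key);

        if (val)        /* NULL when the key is >= max_entries */
                __sync_fetch_and_add(val, 1);
        return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";
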
177 static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, in array_map_direct_value_addr() argument
180 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_direct_value_addr()
182 if (map->max_entries != 1) in array_map_direct_value_addr()
183 return -ENOTSUPP; in array_map_direct_value_addr()
184 if (off >= map->value_size) in array_map_direct_value_addr()
185 return -EINVAL; in array_map_direct_value_addr()
187 *imm = (unsigned long)array->value; in array_map_direct_value_addr()
191 static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm, in array_map_direct_value_meta() argument
194 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_direct_value_meta()
195 u64 base = (unsigned long)array->value; in array_map_direct_value_meta()
196 u64 range = array->elem_size; in array_map_direct_value_meta()
198 if (map->max_entries != 1) in array_map_direct_value_meta()
199 return -ENOTSUPP; in array_map_direct_value_meta()
201 return -ENOENT; in array_map_direct_value_meta()
203 *off = imm - base; in array_map_direct_value_meta()
208 static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) in array_map_gen_lookup() argument
210 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_gen_lookup()
212 u32 elem_size = array->elem_size; in array_map_gen_lookup()
217 if (map->map_flags & BPF_F_INNER_MAP) in array_map_gen_lookup()
218 return -EOPNOTSUPP; in array_map_gen_lookup()
222 if (!map->bypass_spec_v1) { in array_map_gen_lookup()
223 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4); in array_map_gen_lookup()
224 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); in array_map_gen_lookup()
226 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); in array_map_gen_lookup()
237 return insn - insn_buf; in array_map_gen_lookup()
241 static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key) in percpu_array_map_lookup_elem() argument
243 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_lookup_elem()
246 if (unlikely(index >= array->map.max_entries)) in percpu_array_map_lookup_elem()
249 return this_cpu_ptr(array->pptrs[index & array->index_mask]); in percpu_array_map_lookup_elem()
253 static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) in percpu_array_map_gen_lookup() argument
255 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_gen_lookup()
259 return -EOPNOTSUPP; in percpu_array_map_gen_lookup()
261 if (map->map_flags & BPF_F_INNER_MAP) in percpu_array_map_gen_lookup()
262 return -EOPNOTSUPP; in percpu_array_map_gen_lookup()
264 BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0); in percpu_array_map_gen_lookup()
268 if (!map->bypass_spec_v1) { in percpu_array_map_gen_lookup()
269 *insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6); in percpu_array_map_gen_lookup()
270 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask); in percpu_array_map_gen_lookup()
272 *insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5); in percpu_array_map_gen_lookup()
281 return insn - insn_buf; in percpu_array_map_gen_lookup()
284 static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) in percpu_array_map_lookup_percpu_elem() argument
286 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_lookup_percpu_elem()
289 if (cpu >= nr_cpu_ids) in percpu_array_map_lookup_percpu_elem()
292 if (unlikely(index >= array->map.max_entries)) in percpu_array_map_lookup_percpu_elem()
295 return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu); in percpu_array_map_lookup_percpu_elem()
298 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) in bpf_percpu_array_copy() argument
300 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_percpu_array_copy()
303 int cpu, off = 0; in bpf_percpu_array_copy() local
306 if (unlikely(index >= array->map.max_entries)) in bpf_percpu_array_copy()
307 return -ENOENT; in bpf_percpu_array_copy()
309 /* per_cpu areas are zero-filled and bpf programs can only in bpf_percpu_array_copy()
313 size = array->elem_size; in bpf_percpu_array_copy()
315 pptr = array->pptrs[index & array->index_mask]; in bpf_percpu_array_copy()
316 for_each_possible_cpu(cpu) { in bpf_percpu_array_copy()
317 copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu)); in bpf_percpu_array_copy()
318 check_and_init_map_value(map, value + off); in bpf_percpu_array_copy()
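
bpf_percpu_array_copy() services the user-space lookup of a BPF_MAP_TYPE_PERCPU_ARRAY slot: it concatenates one copy of the value per possible CPU into the caller's buffer, each slot padded to an 8-byte multiple. The buffer therefore has to be sized for every possible CPU, not just the online ones. A hedged libbpf sketch; the __u64 counter value type is an assumption:

#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>         /* libbpf_num_possible_cpus() */

/* Read one per-CPU array slot from user space and sum the per-CPU copies.
 * The kernel fills one value per possible CPU (see the
 * for_each_possible_cpu() loop above); with a __u64 value no extra padding
 * is needed to hit the 8-byte stride.
 */
static int read_percpu_counter(int map_fd, __u32 key, __u64 *sum)
{
        int ncpus = libbpf_num_possible_cpus();
        __u64 *vals;
        int err, cpu;

        if (ncpus < 0)
                return ncpus;
        vals = calloc(ncpus, sizeof(*vals));
        if (!vals)
                return -ENOMEM;

        err = bpf_map_lookup_elem(map_fd, &key, vals);
        if (!err) {
                *sum = 0;
                for (cpu = 0; cpu < ncpus; cpu++)
                        *sum += vals[cpu];
        }
        free(vals);
        return err;
}
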
326 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in array_map_get_next_key() argument
328 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_get_next_key()
332 if (index >= array->map.max_entries) { in array_map_get_next_key()
337 if (index == array->map.max_entries - 1) in array_map_get_next_key()
338 return -ENOENT; in array_map_get_next_key()
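
Array keys are dense, so array_map_get_next_key() just steps the index: an out-of-range or missing key restarts at 0, the last slot returns -ENOENT, and anything else yields index + 1. That makes a user-space walk over all slots trivial; a sketch:

#include <bpf/bpf.h>

/* Walk every slot of an array map from user space.  Keys are dense, so this
 * yields 0, 1, ..., max_entries - 1 and then stops on -ENOENT.
 */
static void dump_array_keys(int map_fd)
{
        __u32 key, next_key;
        void *prev = NULL;

        while (!bpf_map_get_next_key(map_fd, prev, &next_key)) {
                key = next_key;
                prev = &key;
                /* a bpf_map_lookup_elem(map_fd, &key, ...) call would go here */
        }
}
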
345 static long array_map_update_elem(struct bpf_map *map, void *key, void *value, in array_map_update_elem() argument
348 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_update_elem()
354 return -EINVAL; in array_map_update_elem()
356 if (unlikely(index >= array->map.max_entries)) in array_map_update_elem()
357 /* all elements were pre-allocated, cannot insert a new one */ in array_map_update_elem()
358 return -E2BIG; in array_map_update_elem()
362 return -EEXIST; in array_map_update_elem()
365 !btf_record_has_field(map->record, BPF_SPIN_LOCK))) in array_map_update_elem()
366 return -EINVAL; in array_map_update_elem()
368 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in array_map_update_elem()
369 val = this_cpu_ptr(array->pptrs[index & array->index_mask]); in array_map_update_elem()
370 copy_map_value(map, val, value); in array_map_update_elem()
371 bpf_obj_free_fields(array->map.record, val); in array_map_update_elem()
373 val = array->value + in array_map_update_elem()
374 (u64)array->elem_size * (index & array->index_mask); in array_map_update_elem()
376 copy_map_value_locked(map, val, value, false); in array_map_update_elem()
378 copy_map_value(map, val, value); in array_map_update_elem()
379 bpf_obj_free_fields(array->map.record, val); in array_map_update_elem()
384 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, in bpf_percpu_array_update() argument
387 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_percpu_array_update()
390 int cpu, off = 0; in bpf_percpu_array_update() local
395 return -EINVAL; in bpf_percpu_array_update()
397 if (unlikely(index >= array->map.max_entries)) in bpf_percpu_array_update()
398 /* all elements were pre-allocated, cannot insert a new one */ in bpf_percpu_array_update()
399 return -E2BIG; in bpf_percpu_array_update()
403 return -EEXIST; in bpf_percpu_array_update()
406 * will be copied into per-cpu area. bpf programs can only access in bpf_percpu_array_update()
408 * returned or zeros which were zero-filled by percpu_alloc, in bpf_percpu_array_update()
411 size = array->elem_size; in bpf_percpu_array_update()
413 pptr = array->pptrs[index & array->index_mask]; in bpf_percpu_array_update()
414 for_each_possible_cpu(cpu) { in bpf_percpu_array_update()
415 copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off); in bpf_percpu_array_update()
416 bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu)); in bpf_percpu_array_update()
424 static long array_map_delete_elem(struct bpf_map *map, void *key) in array_map_delete_elem() argument
426 return -EINVAL; in array_map_delete_elem()
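
Because every slot of an array map is pre-allocated, updates with BPF_NOEXIST always fail with -EEXIST (there is no "new" element to insert), and array_map_delete_elem() above unconditionally rejects deletes with -EINVAL. Practical updates therefore use BPF_ANY or BPF_EXIST; a short user-space sketch:

#include <bpf/bpf.h>

/* All slots of an array map pre-exist, so BPF_NOEXIST can never succeed;
 * BPF_ANY (or BPF_EXIST) overwrites the slot in place.
 */
static int set_slot(int map_fd, __u32 key, __u64 value)
{
        return bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
}
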
434 static void array_map_free_timers_wq(struct bpf_map *map) in array_map_free_timers_wq() argument
436 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_free_timers_wq()
442 if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE)) { in array_map_free_timers_wq()
443 for (i = 0; i < array->map.max_entries; i++) { in array_map_free_timers_wq()
444 if (btf_record_has_field(map->record, BPF_TIMER)) in array_map_free_timers_wq()
445 bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i)); in array_map_free_timers_wq()
446 if (btf_record_has_field(map->record, BPF_WORKQUEUE)) in array_map_free_timers_wq()
447 bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i)); in array_map_free_timers_wq()
452 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
453 static void array_map_free(struct bpf_map *map) in array_map_free() argument
455 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_free()
458 if (!IS_ERR_OR_NULL(map->record)) { in array_map_free()
459 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in array_map_free()
460 for (i = 0; i < array->map.max_entries; i++) { in array_map_free()
461 void __percpu *pptr = array->pptrs[i & array->index_mask]; in array_map_free()
462 int cpu; in array_map_free() local
464 for_each_possible_cpu(cpu) { in array_map_free()
465 bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu)); in array_map_free()
470 for (i = 0; i < array->map.max_entries; i++) in array_map_free()
471 bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i)); in array_map_free()
475 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) in array_map_free()
478 if (array->map.map_flags & BPF_F_MMAPABLE) in array_map_free()
484 static void array_map_seq_show_elem(struct bpf_map *map, void *key, in array_map_seq_show_elem() argument
491 value = array_map_lookup_elem(map, key); in array_map_seq_show_elem()
497 if (map->btf_key_type_id) in array_map_seq_show_elem()
499 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); in array_map_seq_show_elem()
505 static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key, in percpu_array_map_seq_show_elem() argument
508 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_seq_show_elem()
511 int cpu; in percpu_array_map_seq_show_elem() local
516 pptr = array->pptrs[index & array->index_mask]; in percpu_array_map_seq_show_elem()
517 for_each_possible_cpu(cpu) { in percpu_array_map_seq_show_elem()
518 seq_printf(m, "\tcpu%d: ", cpu); in percpu_array_map_seq_show_elem()
519 btf_type_seq_show(map->btf, map->btf_value_type_id, in percpu_array_map_seq_show_elem()
520 per_cpu_ptr(pptr, cpu), m); in percpu_array_map_seq_show_elem()
528 static int array_map_check_btf(const struct bpf_map *map, in array_map_check_btf() argument
535 /* One exception for keyless BTF: .bss/.data/.rodata map */ in array_map_check_btf()
537 if (map->map_type != BPF_MAP_TYPE_ARRAY || in array_map_check_btf()
538 map->max_entries != 1) in array_map_check_btf()
539 return -EINVAL; in array_map_check_btf()
541 if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC) in array_map_check_btf()
542 return -EINVAL; in array_map_check_btf()
547 if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT) in array_map_check_btf()
548 return -EINVAL; in array_map_check_btf()
555 return -EINVAL; in array_map_check_btf()
560 static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) in array_map_mmap() argument
562 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_mmap()
565 if (!(map->map_flags & BPF_F_MMAPABLE)) in array_map_mmap()
566 return -EINVAL; in array_map_mmap()
568 if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > in array_map_mmap()
569 PAGE_ALIGN((u64)array->map.max_entries * array->elem_size)) in array_map_mmap()
570 return -EINVAL; in array_map_mmap()
573 vma->vm_pgoff + pgoff); in array_map_mmap()
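
array_map_mmap() only succeeds for maps created with BPF_F_MMAPABLE, whose value area was vmalloc'ed page-aligned in array_map_alloc() above, and it rejects ranges running past PAGE_ALIGN(max_entries * elem_size). User space can then read and write values without any syscalls; a sketch, with the total size computation left to the caller:

#include <stddef.h>
#include <sys/mman.h>

/* Map the value area of a BPF_F_MMAPABLE array directly into user space.
 * total_size is assumed to be PAGE_ALIGN(max_entries * round_up(value_size, 8));
 * returns MAP_FAILED on error, as mmap() does.
 */
static void *mmap_array_values(int map_fd, size_t total_size)
{
        return mmap(NULL, total_size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, map_fd, 0);
}
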
581 return meta0->map_flags & BPF_F_INNER_MAP ? true : in array_map_meta_equal()
582 meta0->max_entries == meta1->max_entries; in array_map_meta_equal()
586 struct bpf_map *map; member
593 struct bpf_iter_seq_array_map_info *info = seq->private; in bpf_array_map_seq_start()
594 struct bpf_map *map = info->map; in bpf_array_map_seq_start() local
598 if (info->index >= map->max_entries) in bpf_array_map_seq_start()
603 array = container_of(map, struct bpf_array, map); in bpf_array_map_seq_start()
604 index = info->index & array->index_mask; in bpf_array_map_seq_start()
605 if (info->percpu_value_buf) in bpf_array_map_seq_start()
606 return (void *)(uintptr_t)array->pptrs[index]; in bpf_array_map_seq_start()
612 struct bpf_iter_seq_array_map_info *info = seq->private; in bpf_array_map_seq_next()
613 struct bpf_map *map = info->map; in bpf_array_map_seq_next() local
618 ++info->index; in bpf_array_map_seq_next()
619 if (info->index >= map->max_entries) in bpf_array_map_seq_next()
622 array = container_of(map, struct bpf_array, map); in bpf_array_map_seq_next()
623 index = info->index & array->index_mask; in bpf_array_map_seq_next()
624 if (info->percpu_value_buf) in bpf_array_map_seq_next()
625 return (void *)(uintptr_t)array->pptrs[index]; in bpf_array_map_seq_next()
631 struct bpf_iter_seq_array_map_info *info = seq->private; in __bpf_array_map_seq_show()
633 struct bpf_map *map = info->map; in __bpf_array_map_seq_show() local
634 struct bpf_array *array = container_of(map, struct bpf_array, map); in __bpf_array_map_seq_show()
637 int off = 0, cpu = 0; in __bpf_array_map_seq_show() local
647 ctx.map = info->map; in __bpf_array_map_seq_show()
649 ctx.key = &info->index; in __bpf_array_map_seq_show()
651 if (!info->percpu_value_buf) { in __bpf_array_map_seq_show()
655 size = array->elem_size; in __bpf_array_map_seq_show()
656 for_each_possible_cpu(cpu) { in __bpf_array_map_seq_show()
657 copy_map_value_long(map, info->percpu_value_buf + off, in __bpf_array_map_seq_show()
658 per_cpu_ptr(pptr, cpu)); in __bpf_array_map_seq_show()
659 check_and_init_map_value(map, info->percpu_value_buf + off); in __bpf_array_map_seq_show()
662 ctx.value = info->percpu_value_buf; in __bpf_array_map_seq_show()
684 struct bpf_map *map = aux->map; in bpf_iter_init_array_map() local
685 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_iter_init_array_map()
689 if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_iter_init_array_map()
690 buf_size = array->elem_size * num_possible_cpus(); in bpf_iter_init_array_map()
693 return -ENOMEM; in bpf_iter_init_array_map()
695 seq_info->percpu_value_buf = value_buf; in bpf_iter_init_array_map()
698 /* bpf_iter_attach_map() acquires a map uref, and the uref may be in bpf_iter_init_array_map()
699 * released before or in the middle of iterating map elements, so in bpf_iter_init_array_map()
700 * acquire an extra map uref for iterator. in bpf_iter_init_array_map()
702 bpf_map_inc_with_uref(map); in bpf_iter_init_array_map()
703 seq_info->map = map; in bpf_iter_init_array_map()
711 bpf_map_put_with_uref(seq_info->map); in bpf_iter_fini_array_map()
712 kfree(seq_info->percpu_value_buf); in bpf_iter_fini_array_map()
729 static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn, in bpf_for_each_array_elem() argument
741 return -EINVAL; in bpf_for_each_array_elem()
743 is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; in bpf_for_each_array_elem()
744 array = container_of(map, struct bpf_array, map); in bpf_for_each_array_elem()
745 for (i = 0; i < map->max_entries; i++) { in bpf_for_each_array_elem()
747 val = this_cpu_ptr(array->pptrs[i]); in bpf_for_each_array_elem()
752 ret = callback_fn((u64)(long)map, (u64)(long)&key, in bpf_for_each_array_elem()
754 /* return value: 0 - continue, 1 - stop and return */ in bpf_for_each_array_elem()
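
bpf_for_each_array_elem() backs the bpf_for_each_map_elem() helper: it invokes the callback once per index, handing out the per-CPU pointer for percpu arrays, and stops early when the callback returns 1. A sketch of the program side; the map layout, hook point and callback logic are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 16);
        __type(key, __u32);
        __type(value, __u64);
} vals SEC(".maps");

/* Callback invoked once per index by bpf_for_each_array_elem(); returning 0
 * continues the walk, 1 stops it (see the return-value comment above).
 */
static __u64 count_nonzero(struct bpf_map *map, __u32 *key, __u64 *val, long *ctx)
{
        if (*val)
                (*ctx)++;
        return 0;
}

SEC("xdp")
int walk_array(struct xdp_md *xdp)
{
        long nonzero = 0;

        bpf_for_each_map_elem(&vals, count_nonzero, &nonzero, 0);
        return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";
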
762 static u64 array_map_mem_usage(const struct bpf_map *map) in array_map_mem_usage() argument
764 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_mem_usage()
765 bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; in array_map_mem_usage()
766 u32 elem_size = array->elem_size; in array_map_mem_usage()
767 u64 entries = map->max_entries; in array_map_mem_usage()
774 if (map->map_flags & BPF_F_MMAPABLE) { in array_map_mem_usage()
834 /* only file descriptors can be stored in this type of map */ in fd_array_map_alloc_check()
835 if (attr->value_size != sizeof(u32)) in fd_array_map_alloc_check()
836 return -EINVAL; in fd_array_map_alloc_check()
837 /* Program read-only/write-only not supported for special maps yet. */ in fd_array_map_alloc_check()
838 if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) in fd_array_map_alloc_check()
839 return -EINVAL; in fd_array_map_alloc_check()
843 static void fd_array_map_free(struct bpf_map *map) in fd_array_map_free() argument
845 struct bpf_array *array = container_of(map, struct bpf_array, map); in fd_array_map_free()
849 for (i = 0; i < array->map.max_entries; i++) in fd_array_map_free()
850 BUG_ON(array->ptrs[i] != NULL); in fd_array_map_free()
855 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) in fd_array_map_lookup_elem() argument
857 return ERR_PTR(-EOPNOTSUPP); in fd_array_map_lookup_elem()
861 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) in bpf_fd_array_map_lookup_elem() argument
866 if (!map->ops->map_fd_sys_lookup_elem) in bpf_fd_array_map_lookup_elem()
867 return -ENOTSUPP; in bpf_fd_array_map_lookup_elem()
870 elem = array_map_lookup_elem(map, key); in bpf_fd_array_map_lookup_elem()
872 *value = map->ops->map_fd_sys_lookup_elem(ptr); in bpf_fd_array_map_lookup_elem()
874 ret = -ENOENT; in bpf_fd_array_map_lookup_elem()
881 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, in bpf_fd_array_map_update_elem() argument
884 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_fd_array_map_update_elem()
889 return -EINVAL; in bpf_fd_array_map_update_elem()
891 if (index >= array->map.max_entries) in bpf_fd_array_map_update_elem()
892 return -E2BIG; in bpf_fd_array_map_update_elem()
895 new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); in bpf_fd_array_map_update_elem()
899 if (map->ops->map_poke_run) { in bpf_fd_array_map_update_elem()
900 mutex_lock(&array->aux->poke_mutex); in bpf_fd_array_map_update_elem()
901 old_ptr = xchg(array->ptrs + index, new_ptr); in bpf_fd_array_map_update_elem()
902 map->ops->map_poke_run(map, index, old_ptr, new_ptr); in bpf_fd_array_map_update_elem()
903 mutex_unlock(&array->aux->poke_mutex); in bpf_fd_array_map_update_elem()
905 old_ptr = xchg(array->ptrs + index, new_ptr); in bpf_fd_array_map_update_elem()
909 map->ops->map_fd_put_ptr(map, old_ptr, true); in bpf_fd_array_map_update_elem()
913 static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer) in __fd_array_map_delete_elem() argument
915 struct bpf_array *array = container_of(map, struct bpf_array, map); in __fd_array_map_delete_elem()
919 if (index >= array->map.max_entries) in __fd_array_map_delete_elem()
920 return -E2BIG; in __fd_array_map_delete_elem()
922 if (map->ops->map_poke_run) { in __fd_array_map_delete_elem()
923 mutex_lock(&array->aux->poke_mutex); in __fd_array_map_delete_elem()
924 old_ptr = xchg(array->ptrs + index, NULL); in __fd_array_map_delete_elem()
925 map->ops->map_poke_run(map, index, old_ptr, NULL); in __fd_array_map_delete_elem()
926 mutex_unlock(&array->aux->poke_mutex); in __fd_array_map_delete_elem()
928 old_ptr = xchg(array->ptrs + index, NULL); in __fd_array_map_delete_elem()
932 map->ops->map_fd_put_ptr(map, old_ptr, need_defer); in __fd_array_map_delete_elem()
935 return -ENOENT; in __fd_array_map_delete_elem()
939 static long fd_array_map_delete_elem(struct bpf_map *map, void *key) in fd_array_map_delete_elem() argument
941 return __fd_array_map_delete_elem(map, key, true); in fd_array_map_delete_elem()
944 static void *prog_fd_array_get_ptr(struct bpf_map *map, in prog_fd_array_get_ptr() argument
953 if (prog->type == BPF_PROG_TYPE_EXT || in prog_fd_array_get_ptr()
954 !bpf_prog_map_compatible(map, prog)) { in prog_fd_array_get_ptr()
956 return ERR_PTR(-EINVAL); in prog_fd_array_get_ptr()
959 mutex_lock(&prog->aux->ext_mutex); in prog_fd_array_get_ptr()
960 is_extended = prog->aux->is_extended; in prog_fd_array_get_ptr()
962 prog->aux->prog_array_member_cnt++; in prog_fd_array_get_ptr()
963 mutex_unlock(&prog->aux->ext_mutex); in prog_fd_array_get_ptr()
967 * tail callee prog entry -> tail callee prog subprog -> in prog_fd_array_get_ptr()
968 * freplace prog entry --tailcall-> tail callee prog entry. in prog_fd_array_get_ptr()
971 return ERR_PTR(-EBUSY); in prog_fd_array_get_ptr()
977 static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) in prog_fd_array_put_ptr() argument
981 mutex_lock(&prog->aux->ext_mutex); in prog_fd_array_put_ptr()
982 prog->aux->prog_array_member_cnt--; in prog_fd_array_put_ptr()
983 mutex_unlock(&prog->aux->ext_mutex); in prog_fd_array_put_ptr()
990 return ((struct bpf_prog *)ptr)->aux->id; in prog_fd_array_sys_lookup_elem()
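
prog_fd_array_get_ptr()/put_ptr() manage BPF_MAP_TYPE_PROG_ARRAY, the map behind bpf_tail_call(); the extension-program bookkeeping above exists to forbid the freplace-plus-tail-call combination described in the comment. A minimal dispatcher sketch, with the slot index and hook chosen only for illustration:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 4);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int dispatcher(struct xdp_md *ctx)
{
        /* Jump to the program stored in slot 0; on an empty slot the call
         * falls through and the dispatcher returns XDP_PASS itself.
         */
        bpf_tail_call(ctx, &jmp_table, 0);
        return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";
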
993 /* decrement refcnt of all bpf_progs that are stored in this map */
994 static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer) in bpf_fd_array_map_clear() argument
996 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_fd_array_map_clear()
999 for (i = 0; i < array->map.max_entries; i++) in bpf_fd_array_map_clear()
1000 __fd_array_map_delete_elem(map, &i, need_defer); in bpf_fd_array_map_clear()
1003 static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key, in prog_array_map_seq_show_elem() argument
1011 elem = array_map_lookup_elem(map, key); in prog_array_map_seq_show_elem()
1017 btf_type_seq_show(map->btf, map->btf_value_type_id, in prog_array_map_seq_show_elem()
1031 static int prog_array_map_poke_track(struct bpf_map *map, in prog_array_map_poke_track() argument
1038 aux = container_of(map, struct bpf_array, map)->aux; in prog_array_map_poke_track()
1039 mutex_lock(&aux->poke_mutex); in prog_array_map_poke_track()
1040 list_for_each_entry(elem, &aux->poke_progs, list) { in prog_array_map_poke_track()
1041 if (elem->aux == prog_aux) in prog_array_map_poke_track()
1047 ret = -ENOMEM; in prog_array_map_poke_track()
1051 INIT_LIST_HEAD(&elem->list); in prog_array_map_poke_track()
1056 elem->aux = prog_aux; in prog_array_map_poke_track()
1058 list_add_tail(&elem->list, &aux->poke_progs); in prog_array_map_poke_track()
1060 mutex_unlock(&aux->poke_mutex); in prog_array_map_poke_track()
1064 static void prog_array_map_poke_untrack(struct bpf_map *map, in prog_array_map_poke_untrack() argument
1070 aux = container_of(map, struct bpf_array, map)->aux; in prog_array_map_poke_untrack()
1071 mutex_lock(&aux->poke_mutex); in prog_array_map_poke_untrack()
1072 list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) { in prog_array_map_poke_untrack()
1073 if (elem->aux == prog_aux) { in prog_array_map_poke_untrack()
1074 list_del_init(&elem->list); in prog_array_map_poke_untrack()
1079 mutex_unlock(&aux->poke_mutex); in prog_array_map_poke_untrack()
1088 static void prog_array_map_poke_run(struct bpf_map *map, u32 key, in prog_array_map_poke_run() argument
1095 aux = container_of(map, struct bpf_array, map)->aux; in prog_array_map_poke_run()
1096 WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex)); in prog_array_map_poke_run()
1098 list_for_each_entry(elem, &aux->poke_progs, list) { in prog_array_map_poke_run()
1102 for (i = 0; i < elem->aux->size_poke_tab; i++) { in prog_array_map_poke_run()
1103 poke = &elem->aux->poke_tab[i]; in prog_array_map_poke_run()
1108 * not aux->prog since it might not be stable yet and in prog_array_map_poke_run()
1112 * entry. We skip these as poke->tailcall_target_stable in prog_array_map_poke_run()
1115 * poke->tailcall_target_stable are successively in prog_array_map_poke_run()
1118 * non-activated poke entries. in prog_array_map_poke_run()
1124 if (!READ_ONCE(poke->tailcall_target_stable)) in prog_array_map_poke_run()
1126 if (poke->reason != BPF_POKE_REASON_TAIL_CALL) in prog_array_map_poke_run()
1128 if (poke->tail_call.map != map || in prog_array_map_poke_run()
1129 poke->tail_call.key != key) in prog_array_map_poke_run()
1139 struct bpf_map *map = container_of(work, struct bpf_array_aux, in prog_array_map_clear_deferred() local
1140 work)->map; in prog_array_map_clear_deferred()
1141 bpf_fd_array_map_clear(map, true); in prog_array_map_clear_deferred()
1142 bpf_map_put(map); in prog_array_map_clear_deferred()
1145 static void prog_array_map_clear(struct bpf_map *map) in prog_array_map_clear() argument
1147 struct bpf_array_aux *aux = container_of(map, struct bpf_array, in prog_array_map_clear()
1148 map)->aux; in prog_array_map_clear()
1149 bpf_map_inc(map); in prog_array_map_clear()
1150 schedule_work(&aux->work); in prog_array_map_clear()
1156 struct bpf_map *map; in prog_array_map_alloc() local
1160 return ERR_PTR(-ENOMEM); in prog_array_map_alloc()
1162 INIT_WORK(&aux->work, prog_array_map_clear_deferred); in prog_array_map_alloc()
1163 INIT_LIST_HEAD(&aux->poke_progs); in prog_array_map_alloc()
1164 mutex_init(&aux->poke_mutex); in prog_array_map_alloc()
1166 map = array_map_alloc(attr); in prog_array_map_alloc()
1167 if (IS_ERR(map)) { in prog_array_map_alloc()
1169 return map; in prog_array_map_alloc()
1172 container_of(map, struct bpf_array, map)->aux = aux; in prog_array_map_alloc()
1173 aux->map = map; in prog_array_map_alloc()
1175 return map; in prog_array_map_alloc()
1178 static void prog_array_map_free(struct bpf_map *map) in prog_array_map_free() argument
1183 aux = container_of(map, struct bpf_array, map)->aux; in prog_array_map_free()
1184 list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) { in prog_array_map_free()
1185 list_del_init(&elem->list); in prog_array_map_free()
1189 fd_array_map_free(map); in prog_array_map_free()
1192 /* prog_array->aux->{type,jited} is a runtime binding.
1223 ee->event = perf_file->private_data; in bpf_event_entry_gen()
1224 ee->perf_file = perf_file; in bpf_event_entry_gen()
1225 ee->map_file = map_file; in bpf_event_entry_gen()
1236 fput(ee->perf_file); in __bpf_event_entry_free()
1242 call_rcu(&ee->rcu, __bpf_event_entry_free); in bpf_event_entry_free_rcu()
1245 static void *perf_event_fd_array_get_ptr(struct bpf_map *map, in perf_event_fd_array_get_ptr() argument
1257 ee = ERR_PTR(-EOPNOTSUPP); in perf_event_fd_array_get_ptr()
1258 event = perf_file->private_data; in perf_event_fd_array_get_ptr()
1259 if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP) in perf_event_fd_array_get_ptr()
1265 ee = ERR_PTR(-ENOMEM); in perf_event_fd_array_get_ptr()
1271 static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) in perf_event_fd_array_put_ptr() argument
1277 static void perf_event_fd_array_release(struct bpf_map *map, in perf_event_fd_array_release() argument
1280 struct bpf_array *array = container_of(map, struct bpf_array, map); in perf_event_fd_array_release()
1284 if (map->map_flags & BPF_F_PRESERVE_ELEMS) in perf_event_fd_array_release()
1288 for (i = 0; i < array->map.max_entries; i++) { in perf_event_fd_array_release()
1289 ee = READ_ONCE(array->ptrs[i]); in perf_event_fd_array_release()
1290 if (ee && ee->map_file == map_file) in perf_event_fd_array_release()
1291 __fd_array_map_delete_elem(map, &i, true); in perf_event_fd_array_release()
1296 static void perf_event_fd_array_map_free(struct bpf_map *map) in perf_event_fd_array_map_free() argument
1298 if (map->map_flags & BPF_F_PRESERVE_ELEMS) in perf_event_fd_array_map_free()
1299 bpf_fd_array_map_clear(map, false); in perf_event_fd_array_map_free()
1300 fd_array_map_free(map); in perf_event_fd_array_map_free()
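
BPF_MAP_TYPE_PERF_EVENT_ARRAY slots are filled with perf events by perf_event_fd_array_get_ptr() and, unless BPF_F_PRESERVE_ELEMS is set, dropped again when the map's file goes away (perf_event_fd_array_release() above). Programs stream records through such a map with bpf_perf_event_output(); a sketch, with the payload and hook chosen only for illustration:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} events SEC(".maps");

SEC("xdp")
int emit_sample(struct xdp_md *ctx)
{
        __u64 ts = bpf_ktime_get_ns();

        /* BPF_F_CURRENT_CPU picks the slot belonging to the current CPU */
        bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &ts, sizeof(ts));
        return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";
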
1320 static void *cgroup_fd_array_get_ptr(struct bpf_map *map, in cgroup_fd_array_get_ptr() argument
1327 static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) in cgroup_fd_array_put_ptr() argument
1333 static void cgroup_fd_array_free(struct bpf_map *map) in cgroup_fd_array_free() argument
1335 bpf_fd_array_map_clear(map, false); in cgroup_fd_array_free()
1336 fd_array_map_free(map); in cgroup_fd_array_free()
1357 struct bpf_map *map, *inner_map_meta; in array_of_map_alloc() local
1359 inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); in array_of_map_alloc()
1363 map = array_map_alloc(attr); in array_of_map_alloc()
1364 if (IS_ERR(map)) { in array_of_map_alloc()
1366 return map; in array_of_map_alloc()
1369 map->inner_map_meta = inner_map_meta; in array_of_map_alloc()
1371 return map; in array_of_map_alloc()
1374 static void array_of_map_free(struct bpf_map *map) in array_of_map_free() argument
1376 /* map->inner_map_meta is only accessed by syscall which in array_of_map_free()
1379 bpf_map_meta_free(map->inner_map_meta); in array_of_map_free()
1380 bpf_fd_array_map_clear(map, false); in array_of_map_free()
1381 fd_array_map_free(map); in array_of_map_free()
1384 static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) in array_of_map_lookup_elem() argument
1386 struct bpf_map **inner_map = array_map_lookup_elem(map, key); in array_of_map_lookup_elem()
1394 static int array_of_map_gen_lookup(struct bpf_map *map, in array_of_map_gen_lookup() argument
1397 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_of_map_gen_lookup()
1398 u32 elem_size = array->elem_size; in array_of_map_gen_lookup()
1406 if (!map->bypass_spec_v1) { in array_of_map_gen_lookup()
1407 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6); in array_of_map_gen_lookup()
1408 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); in array_of_map_gen_lookup()
1410 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); in array_of_map_gen_lookup()
1422 return insn - insn_buf; in array_of_map_gen_lookup()
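
array_of_map_lookup_elem() returns the inner map stored in a BPF_MAP_TYPE_ARRAY_OF_MAPS slot, and array_of_map_gen_lookup() inlines that outer lookup just like the plain array case. From a program, the outer lookup yields a map pointer that feeds a second lookup; a sketch in the BTF-defined map-in-map style used by the kernel selftests, with illustrative names and sizes:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct inner_map {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
} inner_a SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
        __uint(max_entries, 8);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
        __array(values, struct inner_map);
} outer SEC(".maps") = {
        .values = { [0] = &inner_a },
};

SEC("xdp")
int bump_inner(struct xdp_md *ctx)
{
        __u32 zero = 0;
        void *inner;
        __u64 *val;

        inner = bpf_map_lookup_elem(&outer, &zero);     /* outer lookup: inner map */
        if (!inner)
                return XDP_PASS;
        val = bpf_map_lookup_elem(inner, &zero);        /* inner lookup: value */
        if (val)
                __sync_fetch_and_add(val, 1);
        return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";
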