Lines Matching +full:cpu +full:- +full:nr

1 // SPDX-License-Identifier: GPL-2.0
4 #include <linux/generic-radix-tree.h>
83 * We're using a radix tree like a vector - we're just pushing elements
88 size_t nr; member
102 int cpu; member
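The match only surfaces the nr and cpu members, not the structures that hold them. A minimal sketch of that bookkeeping follows, under the assumption that every name apart from nr and cpu is illustrative rather than taken from the file:

#include <linux/generic-radix-tree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/*
 * Hedged sketch, not the file's definitions: objects waiting on one grace
 * period are pushed onto a genradix used as a growable array (the "vector"
 * of line 83), so nr doubles as the object count and the next free index;
 * cpu lets the per-CPU worker be rescheduled on the CPU that owns this state.
 */
struct pending_seq_sketch {
	GENRADIX(struct rcu_head *)	objs;		/* pushed-to-the-end storage */
	struct rcu_head			**cursor;	/* next free slot, or NULL */
	size_t				nr;		/* queued object count */
};

struct pending_pcpu_sketch {
	spinlock_t		lock;
	struct work_struct	work;
	int			cpu;			/* for schedule_work_on() */
};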
121 if (p->objs.nr) in __rcu_pending_has_pending()
124 static_array_for_each(p->lists, i) in __rcu_pending_has_pending()
125 if (i->head) in __rcu_pending_has_pending()
135 if (!l1->head) in rcu_pending_list_merge()
136 l1->head = l2->head; in rcu_pending_list_merge()
138 l1->tail->next = l2->head; in rcu_pending_list_merge()
140 if (!l1->head) in rcu_pending_list_merge()
141 l1->head = l2->head; in rcu_pending_list_merge()
143 l1->tail->next.next = (void *) l2->head; in rcu_pending_list_merge()
146 l1->tail = l2->tail; in rcu_pending_list_merge()
147 l2->head = l2->tail = NULL; in rcu_pending_list_merge()
154 if (!l->head) in rcu_pending_list_add()
155 l->head = n; in rcu_pending_list_add()
157 l->tail->next = n; in rcu_pending_list_add()
158 l->tail = n; in rcu_pending_list_add()
159 n->next = NULL; in rcu_pending_list_add()
161 if (!l->head) in rcu_pending_list_add()
162 l->head = n; in rcu_pending_list_add()
164 l->tail->next.next = (void *) n; in rcu_pending_list_add()
165 l->tail = n; in rcu_pending_list_add()
166 n->next.next = NULL; in rcu_pending_list_add()
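rcu_pending_list_add() shows up twice above because the file carries two layouts of rcu_head chaining: a plain next pointer and a wrapped next.next. Both branches are the same O(1) tail append onto a head/tail singly linked list; a self-contained sketch of the plain-pointer flavour, with the list type invented for illustration:

#include <linux/types.h>	/* struct rcu_head / callback_head */

struct cb_list_sketch {
	struct rcu_head *head;
	struct rcu_head *tail;
};

/* append n at the tail; keeping a tail pointer makes add and merge O(1) */
static void cb_list_add_sketch(struct cb_list_sketch *l, struct rcu_head *n)
{
	if (!l->head)
		l->head = n;
	else
		l->tail->next = n;
	l->tail = n;
	n->next = NULL;
}

rcu_pending_list_merge() is the matching splice: point l1's tail at l2's head, steal l2's tail, and clear l2, which is what lines 135-147 do for both head layouts.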
172 struct rcu_pending_list *expired = &p->lists[NUM_ACTIVE_RCU_POLL_OLDSTATE]; in merge_expired_lists()
174 for (struct rcu_pending_list *i = p->lists; i < expired; i++) in merge_expired_lists()
175 if (i->head && __poll_state_synchronize_rcu(p->parent->srcu, i->seq)) in merge_expired_lists()
180 static inline void kfree_bulk(size_t nr, void ** p) in kfree_bulk() argument
182 while (nr--) in kfree_bulk()
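Lines 180-182 define a local kfree_bulk() fallback, presumably only compiled where the slab bulk-free API is unavailable; the loop body is not part of the match. The obvious completion, offered as a guess rather than the file's exact body:

#include <linux/slab.h>

static inline void kfree_bulk(size_t nr, void **p)
{
	while (nr--)
		kfree(*p++);	/* assumed body: free each pointer in turn */
}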
196 struct rcu_pending_list *expired = &p->lists[NUM_ACTIVE_RCU_POLL_OLDSTATE]; in __process_finished_items()
200 if (p->objs.nr && in __process_finished_items()
201 __poll_state_synchronize_rcu(pending->srcu, p->objs.data[0].seq)) { in __process_finished_items()
202 objs = p->objs.data[0]; in __process_finished_items()
203 darray_remove_item(&p->objs, p->objs.data); in __process_finished_items()
208 list = expired->head; in __process_finished_items()
209 expired->head = expired->tail = NULL; in __process_finished_items()
211 spin_unlock_irqrestore(&p->lock, flags); in __process_finished_items()
213 switch ((ulong) pending->process) { in __process_finished_items()
215 for (size_t i = 0; i < objs.nr; ) { in __process_finished_items()
216 size_t nr_this_node = min(GENRADIX_NODE_SIZE / sizeof(void *), objs.nr - i); in __process_finished_items()
226 list = obj->next; in __process_finished_items()
228 list = (void *) obj->next.next; in __process_finished_items()
233 * to be freed - kvfree_rcu_mightsleep() in __process_finished_items()
237 void *ptr = (void *)(((unsigned long) obj->func) & ~1UL); in __process_finished_items()
238 bool free_head = ((unsigned long) obj->func) & 1UL; in __process_finished_items()
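Lines 237-238 split a single word back into a pointer and a flag: kmalloc()/kvmalloc() results are at least word-aligned, so bit 0 is spare and can record that the rcu_head was allocated on the side (the kvfree_rcu_mightsleep() case) and must be freed as well. A sketch of that low-bit tagging with invented helper names; the kvfree(--ptr) further down the listing strips the same tag by pointer arithmetic, in an error path where the tag is known to be set:

#include <linux/types.h>

static inline void *tag_mightsleep_sketch(void *ptr)
{
	return (void *) ((unsigned long) ptr | 1UL);	/* allocations are aligned, bit 0 is free */
}

static inline void *untag_sketch(void *tagged, bool *free_head)
{
	*free_head = (unsigned long) tagged & 1UL;	/* was the head allocated separately? */
	return (void *) ((unsigned long) tagged & ~1UL);
}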
248 for (size_t i = 0; i < objs.nr; i++) { in __process_finished_items()
250 obj->func(obj); in __process_finished_items()
257 list = obj->next; in __process_finished_items()
259 list = (void *) obj->next.next; in __process_finished_items()
261 obj->func(obj); in __process_finished_items()
266 for (size_t i = 0; i < objs.nr; i++) in __process_finished_items()
267 pending->process(pending, *genradix_ptr(&objs.objs, i)); in __process_finished_items()
273 list = obj->next; in __process_finished_items()
275 list = (void *) obj->next.next; in __process_finished_items()
277 pending->process(pending, obj); in __process_finished_items()
292 if ((p->objs.nr && __poll_state_synchronize_rcu(pending->srcu, p->objs.data[0].seq)) || in process_finished_items()
293 (p->lists[0].head && __poll_state_synchronize_rcu(pending->srcu, p->lists[0].seq)) || in process_finished_items()
294 (p->lists[1].head && __poll_state_synchronize_rcu(pending->srcu, p->lists[1].seq)) || in process_finished_items()
295 p->lists[2].head) { in process_finished_items()
307 struct rcu_pending *pending = p->parent; in rcu_pending_work()
311 spin_lock_irqsave(&p->lock, flags); in rcu_pending_work()
314 spin_unlock_irqrestore(&p->lock, flags); in rcu_pending_work()
321 schedule_work_on(p->cpu, &p->work); in rcu_pending_rcu_cb()
324 spin_lock_irqsave(&p->lock, flags); in rcu_pending_rcu_cb()
326 spin_unlock_irqrestore(&p->lock, flags); in rcu_pending_rcu_cb()
327 __call_rcu(p->parent->srcu, &p->cb, rcu_pending_rcu_cb); in rcu_pending_rcu_cb()
329 p->cb_armed = false; in rcu_pending_rcu_cb()
330 spin_unlock_irqrestore(&p->lock, flags); in rcu_pending_rcu_cb()
337 darray_for_each_reverse(p->objs, objs) in get_object_radix()
338 if (rcu_gp_poll_cookie_eq(objs->seq, seq)) in get_object_radix()
341 if (darray_push_gfp(&p->objs, ((struct rcu_pending_seq) { .seq = seq }), GFP_ATOMIC)) in get_object_radix()
344 return &darray_last(p->objs); in get_object_radix()
363 spin_unlock_irqrestore(&p->lock, *flags); in rcu_pending_enqueue_list()
369 if (unlikely(__poll_state_synchronize_rcu(p->parent->srcu, seq))) { in rcu_pending_enqueue_list()
370 kvfree(--ptr); in rcu_pending_enqueue_list()
372 spin_lock_irqsave(&p->lock, *flags); in rcu_pending_enqueue_list()
378 head->func = ptr; in rcu_pending_enqueue_list()
381 for (struct rcu_pending_list *i = p->lists; in rcu_pending_enqueue_list()
382 i < p->lists + NUM_ACTIVE_RCU_POLL_OLDSTATE; i++) { in rcu_pending_enqueue_list()
383 if (rcu_gp_poll_cookie_eq(i->seq, seq)) { in rcu_pending_enqueue_list()
389 for (struct rcu_pending_list *i = p->lists; in rcu_pending_enqueue_list()
390 i < p->lists + NUM_ACTIVE_RCU_POLL_OLDSTATE; i++) { in rcu_pending_enqueue_list()
391 if (!i->head) { in rcu_pending_enqueue_list()
392 i->seq = seq; in rcu_pending_enqueue_list()
404 * pending->process) once grace period elapses.
409 * - If @ptr is NULL, we're enqueuing an item for a generic @pending with a
412 * - If @ptr and @head are both not NULL, we're kvfree_rcu()
414 * - If @ptr is not NULL and @head is, we're kvfree_rcu_mightsleep()
416 * - If @may_sleep is true, will do GFP_KERNEL memory allocations and process
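Putting the bullets above together, a hedged sketch of the three argument combinations; the parameter order of __rcu_pending_enqueue() is assumed rather than shown by the match, and a real rcu_pending uses either the generic mode or the kvfree modes but not both (see the BUG_ON at line 430):

/* obj_head and alloc are illustrative caller-owned memory */
static void enqueue_modes_sketch(struct rcu_pending *pending,
				 struct rcu_head *obj_head, void *alloc)
{
	/* generic item: pending->process(pending, obj_head) runs after a GP */
	__rcu_pending_enqueue(pending, obj_head, NULL, true);

	/* kvfree_rcu(): the allocation together with its embedded rcu_head */
	__rcu_pending_enqueue(pending, obj_head, alloc, true);

	/* kvfree_rcu_mightsleep(): no rcu_head, one is allocated internally */
	__rcu_pending_enqueue(pending, NULL, alloc, true);
}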
430 BUG_ON((ptr != NULL) != (pending->process == RCU_PENDING_KVFREE_FN)); in __rcu_pending_enqueue()
433 p = this_cpu_ptr(pending->p); in __rcu_pending_enqueue()
434 spin_lock(&p->lock); in __rcu_pending_enqueue()
435 rcu_gp_poll_state_t seq = __get_state_synchronize_rcu(pending->srcu); in __rcu_pending_enqueue()
443 * that we can do kfree_bulk() - vmalloc pointers always use the linked in __rcu_pending_enqueue()
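Line 443's point is that only slab allocations can be batched through kfree_bulk(), so the enqueue path has to route vmalloc memory onto the linked lists instead. A sketch of that routing test; is_vmalloc_addr() is the stock kernel helper, the surrounding decision is assumed:

#include <linux/mm.h>	/* is_vmalloc_addr() */

static bool bulk_freeable_sketch(void *ptr)
{
	/* vmalloc memory must be kvfree()d individually, never bulk-freed */
	return ptr && !is_vmalloc_addr(ptr);
}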
453 if (unlikely(!objs->cursor)) { in __rcu_pending_enqueue()
455 * New radix tree nodes must be added under @p->lock because the in __rcu_pending_enqueue()
458 * nodes) - hence preallocation and the retry loop: in __rcu_pending_enqueue()
460 objs->cursor = genradix_ptr_alloc_preallocated_inlined(&objs->objs, in __rcu_pending_enqueue()
461 objs->nr, &new_node, GFP_ATOMIC|__GFP_NOWARN); in __rcu_pending_enqueue()
462 if (unlikely(!objs->cursor)) { in __rcu_pending_enqueue()
464 spin_unlock_irqrestore(&p->lock, flags); in __rcu_pending_enqueue()
481 *objs->cursor++ = ptr ?: head; in __rcu_pending_enqueue()
483 if (!(((ulong) objs->cursor) & (GENRADIX_NODE_SIZE - 1))) in __rcu_pending_enqueue()
484 objs->cursor = NULL; in __rcu_pending_enqueue()
485 start_gp = !objs->nr; in __rcu_pending_enqueue()
486 objs->nr++; in __rcu_pending_enqueue()
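Lines 481-486 are a bump-pointer fast path over the genradix: cursor points at the next free slot in the current leaf, and since leaves are GENRADIX_NODE_SIZE bytes, a cursor that is node-aligned after the post-increment has just stepped off its leaf and is reset so the next enqueue re-derives it (allocating a new node if needed, with the preallocate-and-retry handling the comment at line 458 mentions). A condensed sketch reusing the illustrative struct from the first code block:

static bool push_obj_sketch(struct pending_seq_sketch *objs, struct rcu_head *obj)
{
	if (!objs->cursor) {
		/* re-derive the slot; may allocate a new leaf node */
		objs->cursor = genradix_ptr_alloc(&objs->objs, objs->nr,
						  GFP_ATOMIC|__GFP_NOWARN);
		if (!objs->cursor)
			return false;	/* caller retries with a preallocated node */
	}

	*objs->cursor++ = obj;
	/* node-aligned now? we just used the last slot of this leaf */
	if (!(((unsigned long) objs->cursor) & (GENRADIX_NODE_SIZE - 1)))
		objs->cursor = NULL;
	objs->nr++;
	return true;
}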
491 * every outstanding grace period) - so if our callback is in __rcu_pending_enqueue()
495 if (!p->cb_armed) { in __rcu_pending_enqueue()
496 p->cb_armed = true; in __rcu_pending_enqueue()
497 __call_rcu(pending->srcu, &p->cb, rcu_pending_rcu_cb); in __rcu_pending_enqueue()
499 __start_poll_synchronize_rcu(pending->srcu); in __rcu_pending_enqueue()
502 spin_unlock_irqrestore(&p->lock, flags); in __rcu_pending_enqueue()
508 if (unlikely(__poll_state_synchronize_rcu(pending->srcu, seq))) { in __rcu_pending_enqueue()
509 switch ((ulong) pending->process) { in __rcu_pending_enqueue()
514 head->func(head); in __rcu_pending_enqueue()
517 pending->process(pending, head); in __rcu_pending_enqueue()
524 p = this_cpu_ptr(pending->p); in __rcu_pending_enqueue()
525 spin_lock(&p->lock); in __rcu_pending_enqueue()
538 spin_lock_irq(&p->lock); in rcu_pending_pcpu_dequeue()
539 darray_for_each(p->objs, objs) in rcu_pending_pcpu_dequeue()
540 if (objs->nr) { in rcu_pending_pcpu_dequeue()
541 ret = *genradix_ptr(&objs->objs, --objs->nr); in rcu_pending_pcpu_dequeue()
542 objs->cursor = NULL; in rcu_pending_pcpu_dequeue()
543 if (!objs->nr) in rcu_pending_pcpu_dequeue()
544 genradix_free(&objs->objs); in rcu_pending_pcpu_dequeue()
548 static_array_for_each(p->lists, i) in rcu_pending_pcpu_dequeue()
549 if (i->head) { in rcu_pending_pcpu_dequeue()
550 ret = i->head; in rcu_pending_pcpu_dequeue()
552 i->head = ret->next; in rcu_pending_pcpu_dequeue()
554 i->head = (void *) ret->next.next; in rcu_pending_pcpu_dequeue()
556 if (!i->head) in rcu_pending_pcpu_dequeue()
557 i->tail = NULL; in rcu_pending_pcpu_dequeue()
561 spin_unlock_irq(&p->lock); in rcu_pending_pcpu_dequeue()
568 return rcu_pending_pcpu_dequeue(raw_cpu_ptr(pending->p)); in rcu_pending_dequeue()
578 int cpu; in rcu_pending_dequeue_from_all() local
579 for_each_possible_cpu(cpu) { in rcu_pending_dequeue_from_all()
580 ret = rcu_pending_pcpu_dequeue(per_cpu_ptr(pending->p, cpu)); in rcu_pending_dequeue_from_all()
589 int cpu; in rcu_pending_has_pending_or_armed() local
590 for_each_possible_cpu(cpu) { in rcu_pending_has_pending_or_armed()
591 struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu); in rcu_pending_has_pending_or_armed()
592 spin_lock_irq(&p->lock); in rcu_pending_has_pending_or_armed()
593 if (__rcu_pending_has_pending(p) || p->cb_armed) { in rcu_pending_has_pending_or_armed()
594 spin_unlock_irq(&p->lock); in rcu_pending_has_pending_or_armed()
597 spin_unlock_irq(&p->lock); in rcu_pending_has_pending_or_armed()
605 int cpu; in rcu_pending_exit() local
607 if (!pending->p) in rcu_pending_exit()
611 __rcu_barrier(pending->srcu); in rcu_pending_exit()
613 for_each_possible_cpu(cpu) { in rcu_pending_exit()
614 struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu); in rcu_pending_exit()
615 flush_work(&p->work); in rcu_pending_exit()
619 for_each_possible_cpu(cpu) { in rcu_pending_exit()
620 struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu); in rcu_pending_exit()
621 flush_work(&p->work); in rcu_pending_exit()
624 for_each_possible_cpu(cpu) { in rcu_pending_exit()
625 struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu); in rcu_pending_exit()
627 static_array_for_each(p->lists, i) in rcu_pending_exit()
628 WARN_ON(i->head); in rcu_pending_exit()
629 WARN_ON(p->objs.nr); in rcu_pending_exit()
630 darray_exit(&p->objs); in rcu_pending_exit()
632 free_percpu(pending->p); in rcu_pending_exit()
636 * rcu_pending_init - initialize a rcu_pending
648 pending->p = alloc_percpu(struct rcu_pending_pcpu); in rcu_pending_init()
649 if (!pending->p) in rcu_pending_init()
650 return -ENOMEM; in rcu_pending_init()
652 int cpu; in rcu_pending_init() local
653 for_each_possible_cpu(cpu) { in rcu_pending_init()
654 struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu); in rcu_pending_init()
655 p->parent = pending; in rcu_pending_init()
656 p->cpu = cpu; in rcu_pending_init()
657 spin_lock_init(&p->lock); in rcu_pending_init()
658 darray_init(&p->objs); in rcu_pending_init()
659 INIT_WORK(&p->work, rcu_pending_work); in rcu_pending_init()
662 pending->srcu = srcu; in rcu_pending_init()
663 pending->process = process; in rcu_pending_init()
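Finally, a hedged caller-side sketch of initialization plus a process callback. The callback signature is inferred from how pending->process is invoked earlier (pending, obj); the argument order of rcu_pending_init() and NULL meaning plain RCU for the srcu argument are assumptions, not shown by the match:

#include <linux/kernel.h>	/* container_of() */
#include <linux/slab.h>
/* the header declaring struct rcu_pending and rcu_pending_init() is assumed */

struct my_item_sketch {		/* illustrative caller type */
	struct rcu_head	rcu;
	int		payload;
};

static void my_process_sketch(struct rcu_pending *pending, struct rcu_head *obj)
{
	kfree(container_of(obj, struct my_item_sketch, rcu));
}

static int my_subsys_init_sketch(struct rcu_pending *pending)
{
	/* line 650: rcu_pending_init() returns -ENOMEM if per-CPU allocation fails */
	return rcu_pending_init(pending, NULL, my_process_sketch);
}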