Lines matching full:bc (uses of struct btree_cache *bc in fs/bcachefs/btree_cache.c)

23 		bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_##counter]++;	 \
55 struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]); in btree_cache_can_free() local
59 can_free = max_t(ssize_t, 0, can_free - bc->nr_reserve); in btree_cache_can_free()
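Two details are packed into btree_cache_can_free() above. First, each live list stores its own index (set up in bch2_fs_btree_cache_init_early() further down), which is what lets container_of(list, struct btree_cache, live[list->idx]) recover the enclosing cache from a bare list pointer. Second, the shrinker never reports the reserve as freeable. A minimal userspace model of both, under simplified, illustrative types (none of the names below are the kernel's):

#include <stddef.h>
#include <stdio.h>

struct live_list { unsigned idx; size_t nr; };

struct cache {
	size_t nr_reserve;
	struct live_list live[2];
};

static struct cache *list_to_cache(struct live_list *l)
{
	/* Step back to live[0], then to the start of the struct; this is
	 * the same arithmetic container_of() does with live[l->idx]. */
	return (struct cache *)((char *)(l - l->idx) -
				offsetof(struct cache, live));
}

static long cache_can_free(struct live_list *l)
{
	struct cache *bc = list_to_cache(l);
	long can_free = (long)l->nr - (long)bc->nr_reserve;

	return can_free > 0 ? can_free : 0;	/* max_t(ssize_t, 0, ...) */
}

int main(void)
{
	struct cache c = {
		.nr_reserve = 16,
		.live = { { .idx = 0, .nr = 10 }, { .idx = 1, .nr = 100 } },
	};

	printf("%d %ld\n", list_to_cache(&c.live[1]) == &c,
	       cache_can_free(&c.live[1]));	/* prints: 1 84 */
	return 0;
}

(The listing shows only the subtraction itself; the sketch applies the reserve unconditionally for brevity.)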
63 static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b) in btree_node_to_freedlist() argument
68 list_add(&b->list, &bc->freed_pcpu); in btree_node_to_freedlist()
70 list_add(&b->list, &bc->freed_nonpcpu); in btree_node_to_freedlist()
73 static void __bch2_btree_node_to_freelist(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_to_freelist() argument
78 bc->nr_freeable++; in __bch2_btree_node_to_freelist()
79 list_add(&b->list, &bc->freeable); in __bch2_btree_node_to_freelist()
84 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_to_freelist() local
86 mutex_lock(&bc->lock); in bch2_btree_node_to_freelist()
87 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_to_freelist()
88 mutex_unlock(&bc->lock); in bch2_btree_node_to_freelist()
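The matches above trace two tiers of free bookkeeping: freeable holds nodes that still own a data buffer (counted by nr_freeable), while freed_pcpu/freed_nonpcpu hold empty shells, split by whether the node's lock was per-CPU. A minimal userspace sketch of the locked move in bch2_btree_node_to_freelist(), with a toy intrusive list standing in for the kernel's list_head:

#include <pthread.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

struct btree { struct list_head list; };

struct btree_cache {
	pthread_mutex_t lock;
	struct list_head freeable;
	size_t nr_freeable;
};

/* Mirrors bch2_btree_node_to_freelist(): the list and its counter
 * only ever change together, under bc->lock. */
static void node_to_freelist(struct btree_cache *bc, struct btree *b)
{
	pthread_mutex_lock(&bc->lock);
	bc->nr_freeable++;
	list_add(&b->list, &bc->freeable);
	pthread_mutex_unlock(&bc->lock);
}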
94 static void __btree_node_data_free(struct btree_cache *bc, struct btree *b) in __btree_node_data_free() argument
122 btree_node_to_freedlist(bc, b); in __btree_node_data_free()
125 static void btree_node_data_free(struct btree_cache *bc, struct btree *b) in btree_node_data_free() argument
129 --bc->nr_freeable; in btree_node_data_free()
130 __btree_node_data_free(bc, b); in btree_node_data_free()
194 struct btree_cache *bc = &c->btree_cache; in __bch2_btree_node_mem_alloc() local
208 __bch2_btree_node_to_freelist(bc, b); in __bch2_btree_node_mem_alloc()
212 static inline bool __btree_node_pinned(struct btree_cache *bc, struct btree *b) in __btree_node_pinned() argument
216 u64 mask = bc->pinned_nodes_mask[!!b->c.level]; in __btree_node_pinned()
219 bbpos_cmp(bc->pinned_nodes_start, pos) < 0 && in __btree_node_pinned()
220 bbpos_cmp(bc->pinned_nodes_end, pos) >= 0); in __btree_node_pinned()
225 struct btree_cache *bc = &c->btree_cache; in bch2_node_pin() local
227 mutex_lock(&bc->lock); in bch2_node_pin()
230 list_move(&b->list, &bc->live[1].list); in bch2_node_pin()
231 bc->live[0].nr--; in bch2_node_pin()
232 bc->live[1].nr++; in bch2_node_pin()
234 mutex_unlock(&bc->lock); in bch2_node_pin()
239 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_unpin() local
242 mutex_lock(&bc->lock); in bch2_btree_cache_unpin()
246 list_for_each_entry_safe(b, n, &bc->live[1].list, list) { in bch2_btree_cache_unpin()
248 list_move(&b->list, &bc->live[0].list); in bch2_btree_cache_unpin()
249 bc->live[0].nr++; in bch2_btree_cache_unpin()
250 bc->live[1].nr--; in bch2_btree_cache_unpin()
253 mutex_unlock(&bc->lock); in bch2_btree_cache_unpin()
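Pinning is a predicate plus a list migration. __btree_node_pinned() checks the btree's bit in a per-level mask (index !!level: 0 for leaves, 1 for anything interior) and that the node's position lies in the half-open range (pinned_nodes_start, pinned_nodes_end]; bch2_node_pin() and bch2_btree_cache_unpin() then move nodes between live[0] (shrinkable) and live[1] (pinned), keeping both counters in step. A sketch of the predicate with a simplified position type (the real bbpos carries a full btree position, not a single offset):

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for bbpos: btree id plus one key dimension. */
struct bbpos { uint32_t btree; uint64_t offset; };

static int bbpos_cmp(struct bbpos l, struct bbpos r)
{
	if (l.btree != r.btree)
		return l.btree < r.btree ? -1 : 1;
	if (l.offset != r.offset)
		return l.offset < r.offset ? -1 : 1;
	return 0;
}

struct pin_range {
	uint64_t mask[2];		/* [0] leaves, [1] interior */
	struct bbpos start, end;
};

/* Mirrors __btree_node_pinned(): bit test, then (start, end] range. */
static bool node_pinned(const struct pin_range *p, unsigned level,
			struct bbpos pos)
{
	uint64_t mask = p->mask[!!level];

	return (mask & (1ULL << pos.btree)) &&
	       bbpos_cmp(p->start, pos) < 0 &&
	       bbpos_cmp(p->end, pos) >= 0;
}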
258 void __bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_hash_remove() argument
260 lockdep_assert_held(&bc->lock); in __bch2_btree_node_hash_remove()
262 int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); in __bch2_btree_node_hash_remove()
269 --bc->nr_by_btree[b->c.btree_id]; in __bch2_btree_node_hash_remove()
270 --bc->live[btree_node_pinned(b)].nr; in __bch2_btree_node_hash_remove()
274 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) in bch2_btree_node_hash_remove() argument
276 __bch2_btree_node_hash_remove(bc, b); in bch2_btree_node_hash_remove()
277 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_hash_remove()
280 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_hash_insert() argument
286 int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash, in __bch2_btree_node_hash_insert()
292 bc->nr_by_btree[b->c.btree_id]++; in __bch2_btree_node_hash_insert()
294 bool p = __btree_node_pinned(bc, b); in __bch2_btree_node_hash_insert()
297 list_add_tail(&b->list, &bc->live[p].list); in __bch2_btree_node_hash_insert()
298 bc->live[p].nr++; in __bch2_btree_node_hash_insert()
302 int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b, in bch2_btree_node_hash_insert() argument
308 mutex_lock(&bc->lock); in bch2_btree_node_hash_insert()
309 int ret = __bch2_btree_node_hash_insert(bc, b); in bch2_btree_node_hash_insert()
310 mutex_unlock(&bc->lock); in bch2_btree_node_hash_insert()
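Insertion and removal keep three structures consistent at once: the rhashtable (lookup), the live list membership, and the nr_by_btree/live[].nr counters. On a hash collision, __bch2_btree_node_hash_insert() returns the error and the caller recycles the node back onto the freelist (visible in bch2_btree_node_fill() further down). A toy model with a fixed array standing in for the rhashtable; locking is elided since both kernel helpers run under bc->lock:

#include <assert.h>
#include <stddef.h>

#define TABLE_SLOTS 64
#define NR_BTREES   16

struct btree { unsigned hash_val, btree_id; int pinned; };

struct btree_cache {
	struct btree *table[TABLE_SLOTS];	/* stand-in for the rhashtable */
	size_t nr_by_btree[NR_BTREES];
	size_t live_nr[2];			/* [0] unpinned, [1] pinned */
};

/* Mirrors __bch2_btree_node_hash_insert(): the node becomes findable
 * and joins the counters in the same critical section. */
static int hash_insert(struct btree_cache *bc, struct btree *b)
{
	unsigned slot = b->hash_val % TABLE_SLOTS;

	if (bc->table[slot])
		return -1;			/* collision: caller recycles b */
	bc->table[slot] = b;
	bc->nr_by_btree[b->btree_id]++;
	bc->live_nr[b->pinned]++;
	return 0;
}

/* Mirrors __bch2_btree_node_hash_remove(): the exact reverse, with the
 * caller holding the lock (the kernel checks this via lockdep). */
static void hash_remove(struct btree_cache *bc, struct btree *b)
{
	unsigned slot = b->hash_val % TABLE_SLOTS;

	assert(bc->table[slot] == b);
	bc->table[slot] = NULL;
	bc->nr_by_btree[b->btree_id]--;
	bc->live_nr[b->pinned]--;
}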
345 static inline struct btree *btree_cache_find(struct btree_cache *bc, in btree_cache_find() argument
350 return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params); in btree_cache_find()
359 struct btree_cache *bc = &c->btree_cache; in __btree_node_reclaim() local
362 lockdep_assert_held(&bc->lock); in __btree_node_reclaim()
468 struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]); in bch2_btree_cache_scan() local
469 struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache); in bch2_btree_cache_scan()
477 bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >= list->nr * 3 / 4; in bch2_btree_cache_scan()
482 mutex_lock(&bc->lock); in bch2_btree_cache_scan()
496 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_btree_cache_scan()
510 btree_node_data_free(bc, b); in bch2_btree_cache_scan()
514 bc->nr_freed++; in bch2_btree_cache_scan()
523 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++; in bch2_btree_cache_scan()
526 __bch2_btree_node_hash_remove(bc, b); in bch2_btree_cache_scan()
527 __btree_node_data_free(bc, b); in bch2_btree_cache_scan()
530 bc->nr_freed++; in bch2_btree_cache_scan()
543 mutex_unlock(&bc->lock); in bch2_btree_cache_scan()
548 mutex_lock(&bc->lock); in bch2_btree_cache_scan()
559 mutex_unlock(&bc->lock); in bch2_btree_cache_scan()
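The scan hits show the accounting contract: every node examined either bumps bc->nr_freed or one of the bc->not_freed[] reason counters (the BCH_BTREE_CACHE_NOT_FREED_##counter macro at the top of the listing feeds the latter). Nodes on freeable only need their data released; live nodes are unhashed first, then freed. The writeback trigger is the one piece of arithmetic, isolated here as a pure function:

#include <stdbool.h>
#include <stddef.h>

/* The trigger_writes test from bch2_btree_cache_scan(): start btree
 * writeback once dirty nodes plus the reclaim target cover three
 * quarters of the list, so later scans find clean, freeable nodes. */
static bool should_trigger_writes(size_t nr_dirty, size_t nr_to_scan,
				  size_t list_nr)
{
	return nr_dirty + nr_to_scan >= list_nr * 3 / 4;
}

For example, with 1000 nodes on the list and a scan target of 50, writeback kicks in once 700 of them are dirty.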
580 struct btree_cache *bc = &c->btree_cache; in bch2_fs_btree_cache_exit() local
584 shrinker_free(bc->live[1].shrink); in bch2_fs_btree_cache_exit()
585 shrinker_free(bc->live[0].shrink); in bch2_fs_btree_cache_exit()
589 mutex_lock(&bc->lock); in bch2_fs_btree_cache_exit()
592 list_move(&c->verify_data->list, &bc->live[0].list); in bch2_fs_btree_cache_exit()
600 list_add(&r->b->list, &bc->live[0].list); in bch2_fs_btree_cache_exit()
603 list_for_each_entry_safe(b, t, &bc->live[1].list, list) in bch2_fs_btree_cache_exit()
604 bch2_btree_node_hash_remove(bc, b); in bch2_fs_btree_cache_exit()
605 list_for_each_entry_safe(b, t, &bc->live[0].list, list) in bch2_fs_btree_cache_exit()
606 bch2_btree_node_hash_remove(bc, b); in bch2_fs_btree_cache_exit()
608 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_fs_btree_cache_exit()
612 btree_node_data_free(bc, b); in bch2_fs_btree_cache_exit()
618 list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu); in bch2_fs_btree_cache_exit()
620 list_for_each_entry_safe(b, t, &bc->freed_nonpcpu, list) { in bch2_fs_btree_cache_exit()
626 mutex_unlock(&bc->lock); in bch2_fs_btree_cache_exit()
629 for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) in bch2_fs_btree_cache_exit()
630 BUG_ON(bc->nr_by_btree[i]); in bch2_fs_btree_cache_exit()
631 BUG_ON(bc->live[0].nr); in bch2_fs_btree_cache_exit()
632 BUG_ON(bc->live[1].nr); in bch2_fs_btree_cache_exit()
633 BUG_ON(bc->nr_freeable); in bch2_fs_btree_cache_exit()
635 if (bc->table_init_done) in bch2_fs_btree_cache_exit()
636 rhashtable_destroy(&bc->table); in bch2_fs_btree_cache_exit()
641 struct btree_cache *bc = &c->btree_cache; in bch2_fs_btree_cache_init() local
646 ret = rhashtable_init(&bc->table, &bch_btree_cache_params); in bch2_fs_btree_cache_init()
650 bc->table_init_done = true; in bch2_fs_btree_cache_init()
654 for (i = 0; i < bc->nr_reserve; i++) in bch2_fs_btree_cache_init()
658 list_splice_init(&bc->live[0].list, &bc->freeable); in bch2_fs_btree_cache_init()
665 bc->live[0].shrink = shrink; in bch2_fs_btree_cache_init()
669 shrink->private_data = &bc->live[0]; in bch2_fs_btree_cache_init()
675 bc->live[1].shrink = shrink; in bch2_fs_btree_cache_init()
679 shrink->private_data = &bc->live[1]; in bch2_fs_btree_cache_init()
687 void bch2_fs_btree_cache_init_early(struct btree_cache *bc) in bch2_fs_btree_cache_init_early() argument
689 mutex_init(&bc->lock); in bch2_fs_btree_cache_init_early()
690 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) { in bch2_fs_btree_cache_init_early()
691 bc->live[i].idx = i; in bch2_fs_btree_cache_init_early()
692 INIT_LIST_HEAD(&bc->live[i].list); in bch2_fs_btree_cache_init_early()
694 INIT_LIST_HEAD(&bc->freeable); in bch2_fs_btree_cache_init_early()
695 INIT_LIST_HEAD(&bc->freed_pcpu); in bch2_fs_btree_cache_init_early()
696 INIT_LIST_HEAD(&bc->freed_nonpcpu); in bch2_fs_btree_cache_init_early()
708 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_cannibalize_unlock() local
710 if (bc->alloc_lock == current) { in bch2_btree_cache_cannibalize_unlock()
712 bc->alloc_lock = NULL; in bch2_btree_cache_cannibalize_unlock()
713 closure_wake_up(&bc->alloc_wait); in bch2_btree_cache_cannibalize_unlock()
720 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_cannibalize_lock() local
724 if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) in bch2_btree_cache_cannibalize_lock()
732 closure_wait(&bc->alloc_wait, cl); in bch2_btree_cache_cannibalize_lock()
736 if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) { in bch2_btree_cache_cannibalize_lock()
738 closure_wake_up(&bc->alloc_wait); in bch2_btree_cache_cannibalize_lock()
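The cannibalize lock is not a mutex: it is a bare owner pointer (bc->alloc_lock) claimed with a compare-and-swap and reentrant for the current task; losers park on the bc->alloc_wait closure list and are woken at unlock. A userspace model of just the ownership protocol, with the wait/wake side elided and tasks represented as opaque pointers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static _Atomic(void *) alloc_lock;	/* NULL when unowned */

/* Mirrors the try_cmpxchg() in bch2_btree_cache_cannibalize_lock():
 * succeed if unowned, or if we already own it (reentrant). */
static bool cannibalize_trylock(void *task)
{
	void *old = NULL;

	return atomic_compare_exchange_strong(&alloc_lock, &old, task) ||
	       old == task;
}

/* Mirrors bch2_btree_cache_cannibalize_unlock(): only the owner
 * releases; anyone else's store is refused by the cmpxchg. */
static void cannibalize_unlock(void *task)
{
	void *expected = task;

	atomic_compare_exchange_strong(&alloc_lock, &expected, NULL);
}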
752 struct btree_cache *bc = &c->btree_cache; in btree_node_cannibalize() local
755 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) in btree_node_cannibalize()
756 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
761 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) in btree_node_cannibalize()
762 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
778 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_mem_alloc() local
780 ? &bc->freed_pcpu in bch2_btree_node_mem_alloc()
781 : &bc->freed_nonpcpu; in bch2_btree_node_mem_alloc()
785 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
801 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
807 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
818 list_for_each_entry(b2, &bc->freeable, list) in bch2_btree_node_mem_alloc()
824 --bc->nr_freeable; in bch2_btree_node_mem_alloc()
825 btree_node_to_freedlist(bc, b2); in bch2_btree_node_mem_alloc()
826 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
833 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
867 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
870 if (bc->alloc_lock == current) { in bch2_btree_node_mem_alloc()
873 __bch2_btree_node_hash_remove(bc, b2); in bch2_btree_node_mem_alloc()
878 btree_node_to_freedlist(bc, b2); in bch2_btree_node_mem_alloc()
886 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
892 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
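Reading the lock/unlock pairs above end to end gives the allocator's fallback chain: pop an empty shell off the matching freed list (per-CPU or not), steal a data buffer from a node on freeable, fall back to a fresh allocation outside the lock, and only as a last resort, while holding the cannibalize lock, evict a live node via btree_node_cannibalize(). A sketch of that chain; every helper here is a hypothetical stub, not a kernel function:

#include <stdbool.h>
#include <stddef.h>

/* Stubs standing in for the real steps; each returns NULL on failure. */
static void *try_freed_shell(void)	{ return NULL; }
static void *try_freeable_buffer(void)	{ return NULL; }
static void *try_fresh_alloc(void)	{ return NULL; }
static void *try_cannibalize(void)	{ return NULL; }

static void *node_mem_alloc(bool holds_cannibalize_lock)
{
	void *b;

	if ((b = try_freed_shell()))		/* reuse an empty struct btree */
		return b;
	if ((b = try_freeable_buffer()))	/* take another node's buffer */
		return b;
	if ((b = try_fresh_alloc()))		/* plain memory allocation */
		return b;
	if (holds_cannibalize_lock)		/* last resort: evict a live node */
		return try_cannibalize();
	return NULL;
}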
906 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_fill() local
957 if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) { in bch2_btree_node_fill()
963 mutex_lock(&bc->lock); in bch2_btree_node_fill()
964 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_fill()
965 mutex_unlock(&bc->lock); in bch2_btree_node_fill()
1045 struct btree_cache *bc = &c->btree_cache; in __bch2_btree_node_get() local
1052 b = btree_cache_find(bc, k); in __bch2_btree_node_get()
1244 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_get_noiter() local
1256 b = btree_cache_find(bc, k); in bch2_btree_node_get_noiter()
1327 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_prefetch() local
1332 struct btree *b = btree_cache_find(bc, k); in bch2_btree_node_prefetch()
1349 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_evict() local
1352 b = btree_cache_find(bc, k); in bch2_btree_node_evict()
1380 mutex_lock(&bc->lock); in bch2_btree_node_evict()
1381 bch2_btree_node_hash_remove(bc, b); in bch2_btree_node_evict()
1382 btree_node_data_free(bc, b); in bch2_btree_node_evict()
1383 mutex_unlock(&bc->lock); in bch2_btree_node_evict()
1486 void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc) in bch2_btree_cache_to_text() argument
1488 struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache); in bch2_btree_cache_to_text()
1493 prt_btree_cache_line(out, c, "live:", bc->live[0].nr); in bch2_btree_cache_to_text()
1494 prt_btree_cache_line(out, c, "pinned:", bc->live[1].nr); in bch2_btree_cache_to_text()
1495 prt_btree_cache_line(out, c, "freeable:", bc->nr_freeable); in bch2_btree_cache_to_text()
1496 prt_btree_cache_line(out, c, "dirty:", atomic_long_read(&bc->nr_dirty)); in bch2_btree_cache_to_text()
1497 prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock); in bch2_btree_cache_to_text()
1500 for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) { in bch2_btree_cache_to_text()
1503 prt_human_readable_u64(out, bc->nr_by_btree[i] * c->opts.btree_node_size); in bch2_btree_cache_to_text()
1504 prt_printf(out, " (%zu)\n", bc->nr_by_btree[i]); in bch2_btree_cache_to_text()
1508 prt_printf(out, "freed:\t%zu\n", bc->nr_freed); in bch2_btree_cache_to_text()
1511 for (unsigned i = 0; i < ARRAY_SIZE(bc->not_freed); i++) in bch2_btree_cache_to_text()
1513 bch2_btree_cache_not_freed_reasons_strs[i], bc->not_freed[i]); in bch2_btree_cache_to_text()