Lines Matching full:bc: uses of bc, the struct btree_key_cache pointer (as a function argument or local), in the bcachefs btree key cache code.

104 static void bkey_cached_free(struct btree_key_cache *bc,  in bkey_cached_free()  argument
115 rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu); in bkey_cached_free()
116 this_cpu_inc(*bc->nr_pending); in bkey_cached_free()
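
Taken together, these lines show a deferred-free pattern: bkey_cached_free() does not release the entry immediately, it parks it on one of two rcu_pending lists (selected by a pcpu_readers flag) and bumps a per-cpu pending counter, presumably so the memory can be recycled by the allocator below or reclaimed after an RCU grace period. The following is a minimal standalone model of that shape, not the kernel code; a plain linked list and a single counter stand in for rcu_pending and the per-cpu counter:

#include <stdbool.h>
#include <stddef.h>

struct cached_key {
        struct cached_key *next;
        bool pcpu_readers;              /* which pending list this entry uses */
};

struct key_cache {
        struct cached_key *pending[2];  /* [0] = normal, [1] = per-cpu readers */
        size_t nr_pending;              /* single counter; the kernel keeps one per CPU */
};

static void cached_key_free(struct key_cache *kc, struct cached_key *ck)
{
        bool pcpu_readers = ck->pcpu_readers;

        /* Defer the free: park the entry on the matching pending list. */
        ck->next = kc->pending[pcpu_readers];
        kc->pending[pcpu_readers] = ck;

        kc->nr_pending++;               /* kernel: this_cpu_inc(*bc->nr_pending) */
}
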
139 struct btree_key_cache *bc = &c->btree_key_cache; in bkey_cached_alloc() local
144 rcu_pending_dequeue(&bc->pending[pcpu_readers]), in bkey_cached_alloc()
164 ck = container_of_or_null(rcu_pending_dequeue_from_all(&bc->pending[pcpu_readers]), in bkey_cached_alloc()
206 struct btree_key_cache *bc = &c->btree_key_cache; in btree_key_cache_create() local
228 ck = bkey_cached_reuse(bc); in btree_key_cache_create()
267 ret = rhashtable_lookup_insert_fast(&bc->table, &ck->hash, bch2_btree_key_cache_params); in btree_key_cache_create()
274 atomic_long_inc(&bc->nr_keys); in btree_key_cache_create()
284 bkey_cached_free(bc, ck); in btree_key_cache_create()
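
The allocation and create paths appear to close the loop on that pending list: bkey_cached_alloc() first tries to dequeue a previously freed entry (rcu_pending_dequeue(), with a dequeue-from-all fallback), and btree_key_cache_create() reuses or allocates an entry, inserts it into the hash table with rhashtable_lookup_insert_fast(), counts it in nr_keys, and routes it back through bkey_cached_free() if the insert fails. A rough standalone sketch of that flow; pop_pending() and table_insert() are hypothetical stand-ins, not the kernel API:

#include <stdbool.h>
#include <stdlib.h>

struct cached_key {
        struct cached_key *next;
        unsigned long pos;
};

struct key_cache {
        struct cached_key *pending[2];
        long nr_keys;
};

/* Recycle a previously freed entry if one is waiting on a pending list. */
static struct cached_key *pop_pending(struct key_cache *kc, bool pcpu_readers)
{
        struct cached_key *ck = kc->pending[pcpu_readers];
        if (ck)
                kc->pending[pcpu_readers] = ck->next;
        return ck;
}

/* Stand-in hash table insert: 0 on success, nonzero if the key already exists. */
static int table_insert(struct key_cache *kc, struct cached_key *ck)
{
        (void)kc; (void)ck;
        return 0;
}

static struct cached_key *key_cache_create(struct key_cache *kc,
                                           unsigned long pos, bool pcpu_readers)
{
        struct cached_key *ck = pop_pending(kc, pcpu_readers);

        if (!ck)
                ck = calloc(1, sizeof(*ck));
        if (!ck)
                return NULL;
        ck->pos = pos;

        if (table_insert(kc, ck)) {
                free(ck);       /* kernel: bkey_cached_free(bc, ck) */
                return NULL;
        }

        kc->nr_keys++;          /* kernel: atomic_long_inc(&bc->nr_keys) */
        return ck;
}
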
599 struct btree_key_cache *bc = &c->btree_key_cache; in bch2_btree_key_cache_drop() local
612 bkey_cached_evict(bc, ck); in bch2_btree_key_cache_drop()
613 bkey_cached_free(bc, ck); in bch2_btree_key_cache_drop()
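
The drop path pairs an evict with the same deferred free: the entry is removed from the table (and, judging by the exit loop further down that spins while nr_keys is nonzero, the key count is dropped there too) and then handed to bkey_cached_free(). A tiny standalone model, with an immediate free() standing in for the deferred free shown above:

#include <stdbool.h>
#include <stdlib.h>

struct cached_key { bool in_table; };
struct key_cache  { long nr_keys; };

static bool cached_key_evict(struct key_cache *kc, struct cached_key *ck)
{
        if (!ck->in_table)
                return false;
        ck->in_table = false;           /* kernel: remove from the rhashtable */
        kc->nr_keys--;
        return true;
}

static void key_cache_drop(struct key_cache *kc, struct cached_key *ck)
{
        if (cached_key_evict(kc, ck))
                free(ck);               /* kernel: bkey_cached_free(), i.e. deferred */
}
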
634 struct btree_key_cache *bc = &c->btree_key_cache; in bch2_btree_key_cache_scan() local
644 tbl = rht_dereference_rcu(bc->table.tbl, &bc->table); in bch2_btree_key_cache_scan()
659 iter = bc->shrink_iter; in bch2_btree_key_cache_scan()
674 bc->skipped_dirty++; in bch2_btree_key_cache_scan()
677 bc->skipped_accessed++; in bch2_btree_key_cache_scan()
679 bc->skipped_lock_fail++; in bch2_btree_key_cache_scan()
680 } else if (bkey_cached_evict(bc, ck)) { in bch2_btree_key_cache_scan()
681 bkey_cached_free(bc, ck); in bch2_btree_key_cache_scan()
682 bc->freed++; in bch2_btree_key_cache_scan()
701 bc->shrink_iter = iter; in bch2_btree_key_cache_scan()
713 struct btree_key_cache *bc = &c->btree_key_cache; in bch2_btree_key_cache_count() local
714 long nr = atomic_long_read(&bc->nr_keys) - in bch2_btree_key_cache_count()
715 atomic_long_read(&bc->nr_dirty); in bch2_btree_key_cache_count()
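
The shrinker pair can be read off these counters: the count callback reports how many entries look reclaimable (total keys minus dirty ones), and the scan callback walks the table from a remembered position (shrink_iter), skipping entries that are dirty, recently accessed, or whose lock can't be taken, bumping a per-reason statistic each time, and evicting/freeing the rest. A simplified standalone model follows; the flat array and boolean flags are stand-ins for the kernel's rhashtable walk and lock/flag checks:

#include <stdbool.h>
#include <stddef.h>

struct cached_key {
        bool present, dirty, accessed, locked;
};

struct key_cache {
        struct cached_key table[64];
        long nr_keys, nr_dirty;
        size_t shrink_iter;                     /* resume point between scans */
        unsigned long freed, skipped_dirty,
                      skipped_accessed, skipped_lock_fail;
};

static unsigned long key_cache_count(const struct key_cache *kc)
{
        long nr = kc->nr_keys - kc->nr_dirty;   /* only clean keys are reclaimable */
        return nr > 0 ? (unsigned long)nr : 0;
}

static unsigned long key_cache_scan(struct key_cache *kc, unsigned long nr_to_scan)
{
        size_t tbl_size = sizeof(kc->table) / sizeof(kc->table[0]);
        size_t iter = kc->shrink_iter;
        unsigned long freed = 0;

        while (nr_to_scan--) {
                struct cached_key *ck = &kc->table[iter];

                if (ck->present) {
                        if (ck->dirty) {
                                kc->skipped_dirty++;
                        } else if (ck->accessed) {
                                ck->accessed = false;   /* clear the hint; reclaim on a later pass */
                                kc->skipped_accessed++;
                        } else if (ck->locked) {
                                kc->skipped_lock_fail++;
                        } else {
                                ck->present = false;    /* evict + free */
                                kc->nr_keys--;
                                kc->freed++;
                                freed++;
                        }
                }
                iter = (iter + 1) % tbl_size;
        }

        kc->shrink_iter = iter;                 /* continue here on the next call */
        return freed;
}
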
728 void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc) in bch2_fs_btree_key_cache_exit() argument
730 struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache); in bch2_fs_btree_key_cache_exit()
737 shrinker_free(bc->shrink); in bch2_fs_btree_key_cache_exit()
742 while (atomic_long_read(&bc->nr_keys)) { in bch2_fs_btree_key_cache_exit()
744 tbl = rht_dereference_rcu(bc->table.tbl, &bc->table); in bch2_fs_btree_key_cache_exit()
749 mutex_lock(&bc->table.mutex); in bch2_fs_btree_key_cache_exit()
750 mutex_unlock(&bc->table.mutex); in bch2_fs_btree_key_cache_exit()
756 BUG_ON(!bkey_cached_evict(bc, ck)); in bch2_fs_btree_key_cache_exit()
764 if (atomic_long_read(&bc->nr_dirty) && in bch2_fs_btree_key_cache_exit()
768 atomic_long_read(&bc->nr_dirty)); in bch2_fs_btree_key_cache_exit()
770 if (atomic_long_read(&bc->nr_keys)) in bch2_fs_btree_key_cache_exit()
772 atomic_long_read(&bc->nr_keys)); in bch2_fs_btree_key_cache_exit()
774 if (bc->table_init_done) in bch2_fs_btree_key_cache_exit()
775 rhashtable_destroy(&bc->table); in bch2_fs_btree_key_cache_exit()
777 rcu_pending_exit(&bc->pending[0]); in bch2_fs_btree_key_cache_exit()
778 rcu_pending_exit(&bc->pending[1]); in bch2_fs_btree_key_cache_exit()
780 free_percpu(bc->nr_pending); in bch2_fs_btree_key_cache_exit()
787 int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc) in bch2_fs_btree_key_cache_init() argument
789 struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache); in bch2_fs_btree_key_cache_init()
792 bc->nr_pending = alloc_percpu(size_t); in bch2_fs_btree_key_cache_init()
793 if (!bc->nr_pending) in bch2_fs_btree_key_cache_init()
796 if (rcu_pending_init(&bc->pending[0], &c->btree_trans_barrier, __bkey_cached_free) || in bch2_fs_btree_key_cache_init()
797 rcu_pending_init(&bc->pending[1], &c->btree_trans_barrier, __bkey_cached_free)) in bch2_fs_btree_key_cache_init()
800 if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params)) in bch2_fs_btree_key_cache_init()
803 bc->table_init_done = true; in bch2_fs_btree_key_cache_init()
808 bc->shrink = shrink; in bch2_fs_btree_key_cache_init()
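
Init and exit read as mirror images: init allocates the per-cpu pending counter, sets up the two rcu_pending instances, initializes the hash table and records table_init_done, then registers the shrinker; exit frees the shrinker, evicts whatever is left (warning if nr_dirty or nr_keys is still nonzero), destroys the table only if table_init_done is set, and tears down the pending structures and the per-cpu counter. A minimal standalone sketch of that ordering, with calloc/free standing in for the kernel allocators and the eviction loop omitted:

#include <stdbool.h>
#include <stdlib.h>

struct key_cache {
        size_t *nr_pending;     /* kernel: one counter per CPU (alloc_percpu) */
        void *pending[2];       /* kernel: a pair of struct rcu_pending */
        bool table_init_done;   /* only destroy the table if init got that far */
};

static int key_cache_init(struct key_cache *kc)
{
        kc->nr_pending = calloc(1, sizeof(*kc->nr_pending));
        if (!kc->nr_pending)
                return -1;

        kc->pending[0] = calloc(1, 1);
        kc->pending[1] = calloc(1, 1);
        if (!kc->pending[0] || !kc->pending[1])
                return -1;      /* caller is expected to run key_cache_exit() */

        /* hash table init and shrinker registration would go here */
        kc->table_init_done = true;
        return 0;
}

static void key_cache_exit(struct key_cache *kc)
{
        if (kc->table_init_done)
                kc->table_init_done = false;    /* kernel: rhashtable_destroy() */

        free(kc->pending[0]);                   /* kernel: rcu_pending_exit() */
        free(kc->pending[1]);
        free(kc->nr_pending);                   /* kernel: free_percpu() */
}
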
818 void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *bc) in bch2_btree_key_cache_to_text() argument
823 prt_printf(out, "keys:\t%lu\r\n", atomic_long_read(&bc->nr_keys)); in bch2_btree_key_cache_to_text()
824 prt_printf(out, "dirty:\t%lu\r\n", atomic_long_read(&bc->nr_dirty)); in bch2_btree_key_cache_to_text()
825 prt_printf(out, "table size:\t%u\r\n", bc->table.tbl->size); in bch2_btree_key_cache_to_text()
828 prt_printf(out, "requested_to_free:\t%lu\r\n", bc->requested_to_free); in bch2_btree_key_cache_to_text()
829 prt_printf(out, "freed:\t%lu\r\n", bc->freed); in bch2_btree_key_cache_to_text()
830 prt_printf(out, "skipped_dirty:\t%lu\r\n", bc->skipped_dirty); in bch2_btree_key_cache_to_text()
831 prt_printf(out, "skipped_accessed:\t%lu\r\n", bc->skipped_accessed); in bch2_btree_key_cache_to_text()
832 prt_printf(out, "skipped_lock_fail:\t%lu\r\n", bc->skipped_lock_fail); in bch2_btree_key_cache_to_text()
834 prt_printf(out, "pending:\t%zu\r\n", per_cpu_sum(bc->nr_pending)); in bch2_btree_key_cache_to_text()
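
The to_text helper dumps the same statistics through the bcachefs printbuf: the "\t" and "\r" in the format strings appear to be printbuf column/alignment controls rather than literal tab and carriage-return characters, and the pending figure is a sum over the per-cpu counters. An equivalent plain-stdio dump of the same fields, as a rough illustration only:

#include <stdio.h>

struct key_cache_stats {
        long nr_keys, nr_dirty;
        unsigned table_size;
        unsigned long requested_to_free, freed;
        unsigned long skipped_dirty, skipped_accessed, skipped_lock_fail;
        size_t nr_pending;
};

static void key_cache_to_text(FILE *out, const struct key_cache_stats *s)
{
        fprintf(out, "keys:              %ld\n", s->nr_keys);
        fprintf(out, "dirty:             %ld\n", s->nr_dirty);
        fprintf(out, "table size:        %u\n",  s->table_size);
        fprintf(out, "requested_to_free: %lu\n", s->requested_to_free);
        fprintf(out, "freed:             %lu\n", s->freed);
        fprintf(out, "skipped_dirty:     %lu\n", s->skipped_dirty);
        fprintf(out, "skipped_accessed:  %lu\n", s->skipped_accessed);
        fprintf(out, "skipped_lock_fail: %lu\n", s->skipped_lock_fail);
        fprintf(out, "pending:           %zu\n", s->nr_pending);
}
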