Lines matching full:ck — references to the variable ck (a struct bkey_cached pointer) in the bcachefs btree key cache code. Each entry gives the source line number, the matching fragment, and the enclosing function.
27 const struct bkey_cached *ck = obj; in bch2_btree_key_cache_cmp_fn() local
30 return ck->key.btree_id != key->btree_id || in bch2_btree_key_cache_cmp_fn()
31 !bpos_eq(ck->key.pos, key->pos); in bch2_btree_key_cache_cmp_fn()
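The three fragments above (lines 27, 30-31) come from the rhashtable comparison callback used for key cache lookups. A hedged reconstruction follows; the declaration of key from arg->key is inferred rather than shown in the fragments. Per the kernel rhashtable obj_cmpfn convention, it returns nonzero when the cached entry's (btree_id, pos) pair does not match the search key:

static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
				       const void *obj)
{
	const struct bkey_cached *ck = obj;
	const struct bkey_cached_key *key = arg->key;	/* inferred from context */

	/* nonzero means "no match" */
	return ck->key.btree_id != key->btree_id ||
		!bpos_eq(ck->key.pos, key->pos);
}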
43 struct bkey_cached *ck, in btree_path_cached_set() argument
46 path->l[0].lock_seq = six_lock_seq(&ck->c.lock); in btree_path_cached_set()
47 path->l[0].b = (void *) ck; in btree_path_cached_set()
64 static bool bkey_cached_lock_for_evict(struct bkey_cached *ck) in bkey_cached_lock_for_evict() argument
66 if (!six_trylock_intent(&ck->c.lock)) in bkey_cached_lock_for_evict()
69 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bkey_cached_lock_for_evict()
70 six_unlock_intent(&ck->c.lock); in bkey_cached_lock_for_evict()
74 if (!six_trylock_write(&ck->c.lock)) { in bkey_cached_lock_for_evict()
75 six_unlock_intent(&ck->c.lock); in bkey_cached_lock_for_evict()
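bkey_cached_lock_for_evict() (lines 64-75) is the opportunistic locking step that precedes eviction: try the intent lock, refuse dirty entries (they still need to be flushed), then try to take the write lock as well. A hedged reconstruction; only the return statements are inferred:

static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
{
	if (!six_trylock_intent(&ck->c.lock))
		return false;

	/* dirty entries can't be evicted until they've been flushed */
	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	if (!six_trylock_write(&ck->c.lock)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	return true;
}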
83 struct bkey_cached *ck) in bkey_cached_evict() argument
85 bool ret = !rhashtable_remove_fast(&c->table, &ck->hash, in bkey_cached_evict()
88 memset(&ck->key, ~0, sizeof(ck->key)); in bkey_cached_evict()
98 struct bkey_cached *ck = container_of(rcu, struct bkey_cached, rcu); in __bkey_cached_free() local
101 kmem_cache_free(bch2_key_cache, ck); in __bkey_cached_free()
105 struct bkey_cached *ck) in bkey_cached_free() argument
107 kfree(ck->k); in bkey_cached_free()
108 ck->k = NULL; in bkey_cached_free()
109 ck->u64s = 0; in bkey_cached_free()
111 six_unlock_write(&ck->c.lock); in bkey_cached_free()
112 six_unlock_intent(&ck->c.lock); in bkey_cached_free()
114 bool pcpu_readers = ck->c.lock.readers != NULL; in bkey_cached_free()
115 rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu); in bkey_cached_free()
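bkey_cached_free() (lines 105-115) returns an entry for reuse: the out-of-line key buffer is freed immediately, both locks are dropped, and the bkey_cached itself is queued on an RCU-pending list (indexed by whether the lock uses per-cpu readers) instead of being freed right away, so lockless lookups that may still hold a pointer to it remain safe. A hedged reconstruction from the fragments; any counter or statistics updates in the real function are omitted:

static void bkey_cached_free(struct btree_key_cache *bc,
			     struct bkey_cached *ck)
{
	kfree(ck->k);
	ck->k		= NULL;
	ck->u64s	= 0;

	six_unlock_write(&ck->c.lock);
	six_unlock_intent(&ck->c.lock);

	/* defer the actual free until after an RCU grace period */
	bool pcpu_readers = ck->c.lock.readers != NULL;
	rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu);
}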
123 struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp); in __bkey_cached_alloc() local
124 if (unlikely(!ck)) in __bkey_cached_alloc()
126 ck->k = kmalloc(key_u64s * sizeof(u64), gfp); in __bkey_cached_alloc()
127 if (unlikely(!ck->k)) { in __bkey_cached_alloc()
128 kmem_cache_free(bch2_key_cache, ck); in __bkey_cached_alloc()
131 ck->u64s = key_u64s; in __bkey_cached_alloc()
132 return ck; in __bkey_cached_alloc()
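__bkey_cached_alloc() (lines 123-132) appears almost completely in the fragments; the signature and the two early returns are inferred. It allocates the bkey_cached from its slab cache plus a separate buffer of key_u64s 64-bit words to hold the key itself:

static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp)
{
	struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp);
	if (unlikely(!ck))
		return NULL;

	ck->k = kmalloc(key_u64s * sizeof(u64), gfp);
	if (unlikely(!ck->k)) {
		kmem_cache_free(bch2_key_cache, ck);
		return NULL;
	}
	ck->u64s = key_u64s;
	return ck;
}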
143 struct bkey_cached *ck = container_of_or_null( in bkey_cached_alloc() local
146 if (ck) in bkey_cached_alloc()
149 ck = allocate_dropping_locks(trans, ret, in bkey_cached_alloc()
152 if (ck) in bkey_cached_alloc()
153 kfree(ck->k); in bkey_cached_alloc()
154 kmem_cache_free(bch2_key_cache, ck); in bkey_cached_alloc()
158 if (ck) { in bkey_cached_alloc()
159 bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL); in bkey_cached_alloc()
160 ck->c.cached = true; in bkey_cached_alloc()
164 ck = container_of_or_null(rcu_pending_dequeue_from_all(&bc->pending[pcpu_readers]), in bkey_cached_alloc()
166 if (ck) in bkey_cached_alloc()
169 six_lock_intent(&ck->c.lock, NULL, NULL); in bkey_cached_alloc()
170 six_lock_write(&ck->c.lock, NULL, NULL); in bkey_cached_alloc()
171 return ck; in bkey_cached_alloc()
179 struct bkey_cached *ck; in bkey_cached_reuse() local
185 rht_for_each_entry_rcu(ck, pos, tbl, i, hash) { in bkey_cached_reuse()
186 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) && in bkey_cached_reuse()
187 bkey_cached_lock_for_evict(ck)) { in bkey_cached_reuse()
188 if (bkey_cached_evict(c, ck)) in bkey_cached_reuse()
190 six_unlock_write(&ck->c.lock); in bkey_cached_reuse()
191 six_unlock_intent(&ck->c.lock); in bkey_cached_reuse()
194 ck = NULL; in bkey_cached_reuse()
197 return ck; in bkey_cached_reuse()
222 struct bkey_cached *ck = bkey_cached_alloc(trans, ck_path, key_u64s); in btree_key_cache_create() local
223 int ret = PTR_ERR_OR_ZERO(ck); in btree_key_cache_create()
227 if (unlikely(!ck)) { in btree_key_cache_create()
228 ck = bkey_cached_reuse(bc); in btree_key_cache_create()
229 if (unlikely(!ck)) { in btree_key_cache_create()
236 ck->c.level = 0; in btree_key_cache_create()
237 ck->c.btree_id = ck_path->btree_id; in btree_key_cache_create()
238 ck->key.btree_id = ck_path->btree_id; in btree_key_cache_create()
239 ck->key.pos = ck_path->pos; in btree_key_cache_create()
240 ck->flags = 1U << BKEY_CACHED_ACCESSED; in btree_key_cache_create()
242 if (unlikely(key_u64s > ck->u64s)) { in btree_key_cache_create()
249 bch2_btree_id_str(ck->key.btree_id), key_u64s); in btree_key_cache_create()
256 kfree(ck->k); in btree_key_cache_create()
257 ck->k = new_k; in btree_key_cache_create()
258 ck->u64s = key_u64s; in btree_key_cache_create()
261 bkey_reassemble(ck->k, k); in btree_key_cache_create()
267 ret = rhashtable_lookup_insert_fast(&bc->table, &ck->hash, bch2_btree_key_cache_params); in btree_key_cache_create()
275 six_unlock_write(&ck->c.lock); in btree_key_cache_create()
279 six_lock_downgrade(&ck->c.lock); in btree_key_cache_create()
280 btree_path_cached_set(trans, ck_path, ck, (enum btree_node_locked_type) lock_want); in btree_key_cache_create()
284 bkey_cached_free(bc, ck); in btree_key_cache_create()
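Within btree_key_cache_create() (lines 222-284), if the incoming key needs more space than the entry's preallocated buffer (line 242), a larger buffer is allocated, the old one is freed, and the key is then copied in via bkey_reassemble(). The following is a standalone userspace sketch of that resize-then-copy step, not bcachefs code: cached_key_set is a hypothetical helper and a plain memcpy stands in for bkey_reassemble().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cached_key {
	uint64_t	*k;	/* key storage */
	unsigned	u64s;	/* capacity, in 64-bit words */
};

static int cached_key_set(struct cached_key *ck, const uint64_t *key, unsigned key_u64s)
{
	if (key_u64s > ck->u64s) {
		uint64_t *new_k = malloc(key_u64s * sizeof(uint64_t));

		if (!new_k)
			return -1;
		free(ck->k);
		ck->k = new_k;
		ck->u64s = key_u64s;
	}
	memcpy(ck->k, key, key_u64s * sizeof(uint64_t));
	return 0;
}

int main(void)
{
	struct cached_key ck = { .k = malloc(2 * sizeof(uint64_t)), .u64s = 2 };
	uint64_t key[4] = { 1, 2, 3, 4 };

	if (!ck.k || cached_key_set(&ck, key, 4))
		return 1;
	printf("u64s=%u last=%llu\n", ck.u64s, (unsigned long long)ck.k[3]);
	free(ck.k);
	return 0;
}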
344 struct bkey_cached *ck; in btree_path_traverse_cached_fast() local
346 ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos); in btree_path_traverse_cached_fast()
347 if (!ck) in btree_path_traverse_cached_fast()
352 int ret = btree_node_lock(trans, path, (void *) ck, 0, lock_want, _THIS_IP_); in btree_path_traverse_cached_fast()
356 if (ck->key.btree_id != path->btree_id || in btree_path_traverse_cached_fast()
357 !bpos_eq(ck->key.pos, path->pos)) { in btree_path_traverse_cached_fast()
358 six_unlock_type(&ck->c.lock, lock_want); in btree_path_traverse_cached_fast()
362 if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) in btree_path_traverse_cached_fast()
363 set_bit(BKEY_CACHED_ACCESSED, &ck->flags); in btree_path_traverse_cached_fast()
365 btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want); in btree_path_traverse_cached_fast()
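btree_path_traverse_cached_fast() (lines 344-365) looks the entry up without holding its lock, locks it, and then re-checks the key (lines 356-357): between lookup and lock the entry may have been evicted and reused for a different position, in which case the lock is dropped and the fast path gives up. It also sets BKEY_CACHED_ACCESSED so the shrinker gives the entry a second chance. A standalone userspace sketch of the lookup-lock-revalidate pattern, not bcachefs code (pthread mutexes stand in for the six locks):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cached {
	pthread_mutex_t	lock;
	uint64_t	key;
	bool		accessed;
};

/* lockless lookup: nothing pins the entry between here and locking it */
static struct cached *lookup(struct cached *tbl, unsigned n, uint64_t key)
{
	for (unsigned i = 0; i < n; i++)
		if (tbl[i].key == key)
			return &tbl[i];
	return NULL;
}

static struct cached *lookup_and_lock(struct cached *tbl, unsigned n, uint64_t key)
{
	struct cached *ck = lookup(tbl, n, key);

	if (!ck)
		return NULL;

	pthread_mutex_lock(&ck->lock);
	if (ck->key != key) {		/* entry was reused underneath us */
		pthread_mutex_unlock(&ck->lock);
		return NULL;		/* caller retries or takes the slow path */
	}
	ck->accessed = true;		/* analogous to setting BKEY_CACHED_ACCESSED */
	return ck;			/* returned locked */
}

int main(void)
{
	struct cached tbl[2] = {
		{ PTHREAD_MUTEX_INITIALIZER, 10, false },
		{ PTHREAD_MUTEX_INITIALIZER, 20, false },
	};
	struct cached *ck = lookup_and_lock(tbl, 2, 20);

	printf("found=%d accessed=%d\n", ck != NULL, ck ? ck->accessed : 0);
	if (ck)
		pthread_mutex_unlock(&ck->lock);
	return 0;
}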
403 struct bkey_cached *ck = NULL; in btree_key_cache_flush_pos() local
419 ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b; in btree_key_cache_flush_pos()
420 if (!ck) in btree_key_cache_flush_pos()
423 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in btree_key_cache_flush_pos()
429 if (journal_seq && ck->journal.seq != journal_seq) in btree_key_cache_flush_pos()
432 trans->journal_res.seq = ck->journal.seq; in btree_key_cache_flush_pos()
440 if (ck->journal.seq == journal_last_seq(j)) in btree_key_cache_flush_pos()
443 if (ck->journal.seq != journal_last_seq(j) || in btree_key_cache_flush_pos()
455 ret = bch2_trans_update(trans, &b_iter, ck->k, in btree_key_cache_flush_pos()
472 bch2_journal_pin_drop(j, &ck->journal); in btree_key_cache_flush_pos()
478 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in btree_key_cache_flush_pos()
479 clear_bit(BKEY_CACHED_DIRTY, &ck->flags); in btree_key_cache_flush_pos()
490 bch2_btree_node_lock_write_nofail(trans, path, &ck->c); in btree_key_cache_flush_pos()
492 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in btree_key_cache_flush_pos()
493 clear_bit(BKEY_CACHED_DIRTY, &ck->flags); in btree_key_cache_flush_pos()
498 if (bkey_cached_evict(&c->btree_key_cache, ck)) { in btree_key_cache_flush_pos()
499 bkey_cached_free(&c->btree_key_cache, ck); in btree_key_cache_flush_pos()
501 six_unlock_write(&ck->c.lock); in btree_key_cache_flush_pos()
502 six_unlock_intent(&ck->c.lock); in btree_key_cache_flush_pos()
515 struct bkey_cached *ck = in bch2_btree_key_cache_journal_flush() local
522 btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read); in bch2_btree_key_cache_journal_flush()
523 key = ck->key; in bch2_btree_key_cache_journal_flush()
525 if (ck->journal.seq != seq || in bch2_btree_key_cache_journal_flush()
526 !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bch2_btree_key_cache_journal_flush()
527 six_unlock_read(&ck->c.lock); in bch2_btree_key_cache_journal_flush()
531 if (ck->seq != seq) { in bch2_btree_key_cache_journal_flush()
532 bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal, in bch2_btree_key_cache_journal_flush()
534 six_unlock_read(&ck->c.lock); in bch2_btree_key_cache_journal_flush()
537 six_unlock_read(&ck->c.lock); in bch2_btree_key_cache_journal_flush()
554 struct bkey_cached *ck = (void *) (trans->paths + insert_entry->path)->l[0].b; in bch2_btree_insert_key_cached() local
558 BUG_ON(insert->k.u64s > ck->u64s); in bch2_btree_insert_key_cached()
560 bkey_copy(ck->k, insert); in bch2_btree_insert_key_cached()
562 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bch2_btree_insert_key_cached()
564 set_bit(BKEY_CACHED_DIRTY, &ck->flags); in bch2_btree_insert_key_cached()
584 !journal_pin_active(&ck->journal)) { in bch2_btree_insert_key_cached()
585 ck->seq = trans->journal_res.seq; in bch2_btree_insert_key_cached()
588 &ck->journal, bch2_btree_key_cache_journal_flush); in bch2_btree_insert_key_cached()
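btree_key_cache_flush_pos() and bch2_btree_insert_key_cached() above cooperate on a dirty-flag / journal-pin lifecycle: the first update to a clean cached key sets BKEY_CACHED_DIRTY and pins the journal sequence the update was written at (lines 562-588), and flushing the key back into the btree drops the pin and clears the dirty bit (lines 472-479). A standalone userspace sketch of that lifecycle, not bcachefs code; the struct and helpers here are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cached_key {
	bool		dirty;
	bool		pinned;
	uint64_t	pin_seq;	/* journal sequence this key holds back */
};

static void insert_key_cached(struct cached_key *ck, uint64_t journal_seq)
{
	ck->dirty = true;
	if (!ck->pinned) {		/* only the first dirtying takes the pin */
		ck->pinned = true;
		ck->pin_seq = journal_seq;
	}
}

static void flush_key(struct cached_key *ck)
{
	if (!ck->dirty)
		return;
	/* ...write the key back into the underlying btree here... */
	ck->pinned = false;		/* analogous to bch2_journal_pin_drop() */
	ck->dirty = false;
}

int main(void)
{
	struct cached_key ck = { 0 };

	insert_key_cached(&ck, 100);
	insert_key_cached(&ck, 101);	/* already pinned: pin stays at seq 100 */
	printf("dirty=%d pin_seq=%lu\n", ck.dirty, (unsigned long)ck.pin_seq);

	flush_key(&ck);
	printf("dirty=%d pinned=%d\n", ck.dirty, ck.pinned);
	return 0;
}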
600 struct bkey_cached *ck = (void *) path->l[0].b; in bch2_btree_key_cache_drop() local
606 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bch2_btree_key_cache_drop()
607 clear_bit(BKEY_CACHED_DIRTY, &ck->flags); in bch2_btree_key_cache_drop()
609 bch2_journal_pin_drop(&c->journal, &ck->journal); in bch2_btree_key_cache_drop()
612 bkey_cached_evict(bc, ck); in bch2_btree_key_cache_drop()
613 bkey_cached_free(bc, ck); in bch2_btree_key_cache_drop()
620 if (path2->l[0].b == (void *) ck) { in bch2_btree_key_cache_drop()
636 struct bkey_cached *ck; in bch2_btree_key_cache_scan() local
671 ck = container_of(pos, struct bkey_cached, hash); in bch2_btree_key_cache_scan()
673 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bch2_btree_key_cache_scan()
675 } else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) { in bch2_btree_key_cache_scan()
676 clear_bit(BKEY_CACHED_ACCESSED, &ck->flags); in bch2_btree_key_cache_scan()
678 } else if (!bkey_cached_lock_for_evict(ck)) { in bch2_btree_key_cache_scan()
680 } else if (bkey_cached_evict(bc, ck)) { in bch2_btree_key_cache_scan()
681 bkey_cached_free(bc, ck); in bch2_btree_key_cache_scan()
685 six_unlock_write(&ck->c.lock); in bch2_btree_key_cache_scan()
686 six_unlock_intent(&ck->c.lock); in bch2_btree_key_cache_scan()
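bch2_btree_key_cache_scan() (lines 636-686) is the shrinker pass, and the if/else chain at lines 673-681 implements a second-chance policy: dirty entries are skipped, entries with BKEY_CACHED_ACCESSED set have the bit cleared and survive this pass, and anything else that can be locked is evicted and freed. A standalone userspace sketch of that policy, not bcachefs code:

#include <stdbool.h>
#include <stdio.h>

struct entry {
	bool present;
	bool dirty;
	bool accessed;
};

static unsigned scan(struct entry *e, unsigned n)
{
	unsigned freed = 0;

	for (unsigned i = 0; i < n; i++) {
		if (!e[i].present || e[i].dirty)
			continue;		/* dirty entries can't be evicted */
		if (e[i].accessed) {
			e[i].accessed = false;	/* second chance: survive one pass */
			continue;
		}
		e[i].present = false;		/* evict and free */
		freed++;
	}
	return freed;
}

int main(void)
{
	struct entry e[] = {
		{ .present = true,  .dirty = true },
		{ .present = true,  .accessed = true },
		{ .present = true },
		{ .present = true,  .accessed = true },
		{ .present = true },
	};
	unsigned n = sizeof(e) / sizeof(e[0]);

	printf("freed on pass 1: %u\n", scan(e, n));	/* 2 */
	printf("freed on pass 2: %u\n", scan(e, n));	/* 2: the formerly-accessed ones */
	return 0;
}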
732 struct bkey_cached *ck; in bch2_fs_btree_key_cache_exit() local
755 ck = container_of(pos, struct bkey_cached, hash); in bch2_fs_btree_key_cache_exit()
756 BUG_ON(!bkey_cached_evict(bc, ck)); in bch2_fs_btree_key_cache_exit()
757 kfree(ck->k); in bch2_fs_btree_key_cache_exit()
758 kmem_cache_free(bch2_key_cache, ck); in bch2_fs_btree_key_cache_exit()