Lines matching "last", "-", "level" in drivers/md/bcache/btree.c
1 // SPDX-License-Identifier: GPL-2.0
21 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
100 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
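/*
 * Editor's note (gloss, not from the source): PTR_HASH() above folds the
 * pointer's generation into the bucket number, so a stale key left over
 * from before a bucket was reused (its gen since bumped) no longer
 * matches a cached node occupying the same bucket.
 */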
104 #define insert_lock(s, b) ((b)->level <= (s)->lock)
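/*
 * Editor's sketch (not from btree.c) of what the insert_lock() test above
 * decides during traversal: an op carries "lock", the highest tree level
 * it intends to modify, and nodes at or below that level get write locks
 * while everything higher is only read locked. Standalone, with
 * hypothetical stand-in types:
 */
#include <stdbool.h>
#include <stdio.h>

struct op_sk   { int lock; };    /* highest level to write-lock */
struct node_sk { int level; };   /* 0 == leaf */

static bool want_write_lock(const struct op_sk *s, const struct node_sk *b)
{
        return b->level <= s->lock;  /* same comparison as insert_lock() */
}

int main(void)
{
        struct op_sk s = { .lock = 0 };             /* plain leaf insert */
        struct node_sk leaf = { 0 }, parent = { 1 };

        printf("%d %d\n", want_write_lock(&s, &leaf),    /* 1: write lock */
                          want_write_lock(&s, &parent)); /* 0: read lock  */
        return 0;
}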
109 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache); in write_block()
115 if (b->level && b->keys.nsets) in bch_btree_init_next()
116 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
118 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
120 if (b->written < btree_blocks(b)) in bch_btree_init_next()
121 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_init_next()
122 bset_magic(&b->c->cache->sb)); in bch_btree_init_next()
134 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); in bkey_put()
141 uint64_t crc = b->key.ptr[0]; in btree_csum_set()
144 crc = crc64_be(crc, data, end - data); in btree_csum_set()
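/*
 * Editor's sketch of the checksum pattern above: the CRC is seeded with
 * the node's first on-disk pointer, so a bset that is bit-for-bit intact
 * but read back from the wrong bucket still fails verification. A minimal
 * sketch, assuming the kernel's crc64_be(seed, buf, len) from
 * <linux/crc64.h>; the final inversion mirrors what btree_csum_set()
 * returns.
 */
#include <linux/crc64.h>
#include <linux/types.h>

static u64 csum_sketch(u64 first_ptr, const void *data, const void *end)
{
        u64 crc = first_ptr;                 /* b->key.ptr[0] above */

        crc = crc64_be(crc, data, end - data);
        return crc ^ 0xffffffffffffffffULL;
}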
155 * c->fill_iter can allocate an iterator with more memory space in bch_btree_node_read_done()
157 * See the comment around cache_set->fill_iter. in bch_btree_node_read_done()
159 iter.heap.data = mempool_alloc(&b->c->fill_iter, GFP_NOIO); in bch_btree_node_read_done()
160 iter.heap.size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size; in bch_btree_node_read_done()
164 iter.b = &b->keys; in bch_btree_node_read_done()
167 if (!i->seq) in bch_btree_node_read_done()
171 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; in bch_btree_node_read_done()
174 if (i->version > BCACHE_BSET_VERSION) in bch_btree_node_read_done()
178 if (b->written + set_blocks(i, block_bytes(b->c->cache)) > in bch_btree_node_read_done()
183 if (i->magic != bset_magic(&b->c->cache->sb)) in bch_btree_node_read_done()
187 switch (i->version) { in bch_btree_node_read_done()
189 if (i->csum != csum_set(i)) in bch_btree_node_read_done()
193 if (i->csum != btree_csum_set(b, i)) in bch_btree_node_read_done()
199 if (i != b->keys.set[0].data && !i->keys) in bch_btree_node_read_done()
202 bch_btree_iter_push(&iter, i->start, bset_bkey_last(i)); in bch_btree_node_read_done()
204 b->written += set_blocks(i, block_bytes(b->c->cache)); in bch_btree_node_read_done()
209 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); in bch_btree_node_read_done()
210 i = ((void *) i) + block_bytes(b->c->cache)) in bch_btree_node_read_done()
211 if (i->seq == b->keys.set[0].data->seq) in bch_btree_node_read_done()
214 bch_btree_sort_and_fix_extents(&b->keys, &iter, &b->c->sort); in bch_btree_node_read_done()
216 i = b->keys.set[0].data; in bch_btree_node_read_done()
218 if (b->keys.set[0].size && in bch_btree_node_read_done()
219 bkey_cmp(&b->key, &b->keys.set[0].end) < 0) in bch_btree_node_read_done()
222 if (b->written < btree_blocks(b)) in bch_btree_node_read_done()
223 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_node_read_done()
224 bset_magic(&b->c->cache->sb)); in bch_btree_node_read_done()
226 mempool_free(iter.heap.data, &b->c->fill_iter); in bch_btree_node_read_done()
230 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
231 err, PTR_BUCKET_NR(b->c, &b->key, 0), in bch_btree_node_read_done()
232 bset_block_offset(b, i), i->keys); in bch_btree_node_read_done()
238 struct closure *cl = bio->bi_private; in btree_node_read_endio()
253 bio = bch_bbio_alloc(b->c); in bch_btree_node_read()
254 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; in bch_btree_node_read()
255 bio->bi_end_io = btree_node_read_endio; in bch_btree_node_read()
256 bio->bi_private = &cl; in bch_btree_node_read()
257 bio->bi_opf = REQ_OP_READ | REQ_META; in bch_btree_node_read()
259 bch_bio_map(bio, b->keys.set[0].data); in bch_btree_node_read()
261 bch_submit_bbio(bio, b->c, &b->key, 0); in bch_btree_node_read()
264 if (bio->bi_status) in bch_btree_node_read()
267 bch_bbio_free(bio, b->c); in bch_btree_node_read()
273 bch_time_stats_update(&b->c->btree_read_time, start_time); in bch_btree_node_read()
277 bch_cache_set_error(b->c, "io error reading bucket %zu", in bch_btree_node_read()
278 PTR_BUCKET_NR(b->c, &b->key, 0)); in bch_btree_node_read()
283 if (w->prio_blocked && in btree_complete_write()
284 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) in btree_complete_write()
285 wake_up_allocators(b->c); in btree_complete_write()
287 if (w->journal) { in btree_complete_write()
288 atomic_dec_bug(w->journal); in btree_complete_write()
289 __closure_wake_up(&b->c->journal.wait); in btree_complete_write()
292 w->prio_blocked = 0; in btree_complete_write()
293 w->journal = NULL; in btree_complete_write()
300 up(&b->io_mutex); in CLOSURE_CALLBACK()
308 bch_bbio_free(b->bio, b->c); in CLOSURE_CALLBACK()
309 b->bio = NULL; in CLOSURE_CALLBACK()
313 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); in CLOSURE_CALLBACK()
322 bio_free_pages(b->bio); in CLOSURE_CALLBACK()
323 __btree_node_write_done(&cl->work); in CLOSURE_CALLBACK()
328 struct closure *cl = bio->bi_private; in btree_node_write_endio()
331 if (bio->bi_status) in btree_node_write_endio()
334 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); in btree_node_write_endio()
340 struct closure *cl = &b->io; in do_btree_node_write()
344 i->version = BCACHE_BSET_VERSION; in do_btree_node_write()
345 i->csum = btree_csum_set(b, i); in do_btree_node_write()
347 BUG_ON(b->bio); in do_btree_node_write()
348 b->bio = bch_bbio_alloc(b->c); in do_btree_node_write()
350 b->bio->bi_end_io = btree_node_write_endio; in do_btree_node_write()
351 b->bio->bi_private = cl; in do_btree_node_write()
352 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache)); in do_btree_node_write()
353 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA; in do_btree_node_write()
354 bch_bio_map(b->bio, i); in do_btree_node_write()
357 * If we're appending to a leaf node, we don't technically need FUA - in do_btree_node_write()
361 * Similarly if we're writing a new btree root - the pointer is going to in do_btree_node_write()
371 bkey_copy(&k.key, &b->key); in do_btree_node_write()
373 bset_sector_offset(&b->keys, i)); in do_btree_node_write()
375 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { in do_btree_node_write()
377 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); in do_btree_node_write()
380 bio_for_each_segment_all(bv, b->bio, iter_all) { in do_btree_node_write()
381 memcpy(page_address(bv->bv_page), addr, PAGE_SIZE); in do_btree_node_write()
385 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
393 b->bio->bi_vcnt = 0; in do_btree_node_write()
394 bch_bio_map(b->bio, i); in do_btree_node_write()
396 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
407 lockdep_assert_held(&b->write_lock); in __bch_btree_node_write()
411 BUG_ON(current->bio_list); in __bch_btree_node_write()
412 BUG_ON(b->written >= btree_blocks(b)); in __bch_btree_node_write()
413 BUG_ON(b->written && !i->keys); in __bch_btree_node_write()
414 BUG_ON(btree_bset_first(b)->seq != i->seq); in __bch_btree_node_write()
415 bch_check_keys(&b->keys, "writing"); in __bch_btree_node_write()
417 cancel_delayed_work(&b->work); in __bch_btree_node_write()
420 down(&b->io_mutex); in __bch_btree_node_write()
421 closure_init(&b->io, parent ?: &b->c->cl); in __bch_btree_node_write()
423 clear_bit(BTREE_NODE_dirty, &b->flags); in __bch_btree_node_write()
424 change_bit(BTREE_NODE_write_idx, &b->flags); in __bch_btree_node_write()
428 atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size, in __bch_btree_node_write()
429 &b->c->cache->btree_sectors_written); in __bch_btree_node_write()
431 b->written += set_blocks(i, block_bytes(b->c->cache)); in __bch_btree_node_write()
436 unsigned int nsets = b->keys.nsets; in bch_btree_node_write()
438 lockdep_assert_held(&b->lock); in bch_btree_node_write()
446 if (nsets && !b->keys.nsets) in bch_btree_node_write()
458 mutex_lock(&b->write_lock); in bch_btree_node_write_sync()
460 mutex_unlock(&b->write_lock); in bch_btree_node_write_sync()
469 mutex_lock(&b->write_lock); in btree_node_write_work()
472 mutex_unlock(&b->write_lock); in btree_node_write_work()
480 lockdep_assert_held(&b->write_lock); in bch_btree_leaf_dirty()
482 BUG_ON(!b->written); in bch_btree_leaf_dirty()
483 BUG_ON(!i->keys); in bch_btree_leaf_dirty()
486 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); in bch_btree_leaf_dirty()
491 * w->journal is always the oldest journal pin of all bkeys in bch_btree_leaf_dirty()
496 if (w->journal && in bch_btree_leaf_dirty()
497 journal_pin_cmp(b->c, w->journal, journal_ref)) { in bch_btree_leaf_dirty()
498 atomic_dec_bug(w->journal); in bch_btree_leaf_dirty()
499 w->journal = NULL; in bch_btree_leaf_dirty()
502 if (!w->journal) { in bch_btree_leaf_dirty()
503 w->journal = journal_ref; in bch_btree_leaf_dirty()
504 atomic_inc(w->journal); in bch_btree_leaf_dirty()
509 if (set_bytes(i) > PAGE_SIZE - 48 && in bch_btree_leaf_dirty()
510 !current->bio_list) in bch_btree_leaf_dirty()
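/*
 * Editor's sketch of the flush policy above (kernel context, simplified):
 * the first dirtying of a leaf arms a 30 second delayed write, and
 * re-dirtying while that work is pending does not push the deadline back,
 * because queue_delayed_work() is a no-op for an already-queued item.
 * io_wq and flush_dwork are hypothetical stand-ins.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *io_wq;      /* created at init */
static struct delayed_work flush_dwork;     /* INIT_DELAYED_WORK()ed at init */

static void leaf_dirtied(void)
{
        /* returns false if already pending: the original 30s deadline stands */
        queue_delayed_work(io_wq, &flush_dwork, 30 * HZ);
}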
515 * Btree in memory cache - allocation/freeing
516 * mca -> memory cache
519 #define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \
520 ? c->root->level : 1) * 8 + 16)
522 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
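/*
 * Worked example (editor's note): with a three-level tree
 * (c->root->level == 2) the reserve is 2 * 8 + 16 == 32 nodes, so a cache
 * holding 40 nodes reports mca_can_free() == 8; a single-level tree still
 * reserves 1 * 8 + 16 == 24.
 */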
526 BUG_ON(b->io_mutex.count != 1); in mca_data_free()
528 bch_btree_keys_free(&b->keys); in mca_data_free()
530 b->c->btree_cache_used--; in mca_data_free()
531 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_free()
538 b->key.ptr[0] = 0; in mca_bucket_free()
539 hlist_del_init_rcu(&b->hash); in mca_bucket_free()
540 list_move(&b->list, &b->c->btree_cache_freeable); in mca_bucket_free()
550 if (!bch_btree_keys_alloc(&b->keys, in mca_data_alloc()
552 ilog2(b->c->btree_pages), in mca_data_alloc()
555 b->c->btree_cache_used++; in mca_data_alloc()
556 list_move(&b->list, &b->c->btree_cache); in mca_data_alloc()
558 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_alloc()
562 #define cmp_int(l, r) (((l) > (r)) - ((l) < (r)))
571 return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key); in btree_lock_cmp_fn()
578 printk(KERN_CONT " l=%u %llu:%llu", b->level, in btree_lock_print_fn()
579 KEY_INODE(&b->key), KEY_OFFSET(&b->key)); in btree_lock_print_fn()
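/*
 * Editor's sketch of the comparator idiom above: GNU C's "x ?: y" yields
 * x unless x is zero, so three-way comparators chain naturally. Higher
 * level nodes order first (note the negation), and ties fall through to
 * the key comparison. Standalone illustration with stand-in types:
 */
#include <stdio.h>

#define cmp_int_sk(l, r) (((l) > (r)) - ((l) < (r)))

struct lk { int level; long key; };

static int lk_cmp(const struct lk *a, const struct lk *b)
{
        return -cmp_int_sk(a->level, b->level) ?: cmp_int_sk(a->key, b->key);
}

int main(void)
{
        struct lk root = { 1, 100 }, leaf1 = { 0, 5 }, leaf2 = { 0, 9 };

        printf("%d %d\n", lk_cmp(&root, &leaf1),   /* -1: higher level first */
                          lk_cmp(&leaf1, &leaf2)); /* -1: then by key */
        return 0;
}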
595 init_rwsem(&b->lock); in mca_bucket_alloc()
596 lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn); in mca_bucket_alloc()
597 mutex_init(&b->write_lock); in mca_bucket_alloc()
598 lockdep_set_novalidate_class(&b->write_lock); in mca_bucket_alloc()
599 INIT_LIST_HEAD(&b->list); in mca_bucket_alloc()
600 INIT_DELAYED_WORK(&b->work, btree_node_write_work); in mca_bucket_alloc()
601 b->c = c; in mca_bucket_alloc()
602 sema_init(&b->io_mutex, 1); in mca_bucket_alloc()
613 lockdep_assert_held(&b->c->bucket_lock); in mca_reap()
615 if (!down_write_trylock(&b->lock)) in mca_reap()
616 return -ENOMEM; in mca_reap()
618 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); in mca_reap()
620 if (b->keys.page_order < min_order) in mca_reap()
627 if (down_trylock(&b->io_mutex)) in mca_reap()
629 up(&b->io_mutex); in mca_reap()
636 * b->write_lock before checking BTREE_NODE_dirty bit. in mca_reap()
638 mutex_lock(&b->write_lock); in mca_reap()
646 mutex_unlock(&b->write_lock); in mca_reap()
653 mutex_unlock(&b->write_lock); in mca_reap()
658 down(&b->io_mutex); in mca_reap()
659 up(&b->io_mutex); in mca_reap()
664 return -ENOMEM; in mca_reap()
670 struct cache_set *c = shrink->private_data; in bch_mca_scan()
672 unsigned long i, nr = sc->nr_to_scan; in bch_mca_scan()
676 if (c->shrinker_disabled) in bch_mca_scan()
679 if (c->btree_cache_alloc_lock) in bch_mca_scan()
682 /* Return -1 if we can't do anything right now */ in bch_mca_scan()
683 if (sc->gfp_mask & __GFP_IO) in bch_mca_scan()
684 mutex_lock(&c->bucket_lock); in bch_mca_scan()
685 else if (!mutex_trylock(&c->bucket_lock)) in bch_mca_scan()
686 return -1; in bch_mca_scan()
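/*
 * Editor's note on the locking above: a reclaim context that may perform
 * IO (__GFP_IO set) is allowed to sleep on bucket_lock; any other context
 * only trylocks and bails out with -1 ("can't do anything right now")
 * rather than risk stalling memory reclaim behind the lock.
 */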
689 * It's _really_ critical that we don't free too many btree nodes - we in bch_mca_scan()
695 nr /= c->btree_pages; in bch_mca_scan()
701 btree_cache_used = c->btree_cache_used; in bch_mca_scan()
702 list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) { in bch_mca_scan()
711 nr--; in bch_mca_scan()
715 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { in bch_mca_scan()
726 nr--; in bch_mca_scan()
730 mutex_unlock(&c->bucket_lock); in bch_mca_scan()
731 return freed * c->btree_pages; in bch_mca_scan()
737 struct cache_set *c = shrink->private_data; in bch_mca_count()
739 if (c->shrinker_disabled) in bch_mca_count()
742 if (c->btree_cache_alloc_lock) in bch_mca_count()
745 return mca_can_free(c) * c->btree_pages; in bch_mca_count()
755 if (c->shrink) in bch_btree_cache_free()
756 shrinker_free(c->shrink); in bch_btree_cache_free()
758 mutex_lock(&c->bucket_lock); in bch_btree_cache_free()
761 if (c->verify_data) in bch_btree_cache_free()
762 list_move(&c->verify_data->list, &c->btree_cache); in bch_btree_cache_free()
764 free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb))); in bch_btree_cache_free()
767 list_splice(&c->btree_cache_freeable, in bch_btree_cache_free()
768 &c->btree_cache); in bch_btree_cache_free()
770 while (!list_empty(&c->btree_cache)) { in bch_btree_cache_free()
771 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_btree_cache_free()
776 * b->write_lock before clearing BTREE_NODE_dirty anymore. in bch_btree_cache_free()
780 clear_bit(BTREE_NODE_dirty, &b->flags); in bch_btree_cache_free()
785 while (!list_empty(&c->btree_cache_freed)) { in bch_btree_cache_free()
786 b = list_first_entry(&c->btree_cache_freed, in bch_btree_cache_free()
788 list_del(&b->list); in bch_btree_cache_free()
789 cancel_delayed_work_sync(&b->work); in bch_btree_cache_free()
793 mutex_unlock(&c->bucket_lock); in bch_btree_cache_free()
802 return -ENOMEM; in bch_btree_cache_alloc()
804 list_splice_init(&c->btree_cache, in bch_btree_cache_alloc()
805 &c->btree_cache_freeable); in bch_btree_cache_alloc()
808 mutex_init(&c->verify_lock); in bch_btree_cache_alloc()
810 c->verify_ondisk = (void *) in bch_btree_cache_alloc()
812 ilog2(meta_bucket_pages(&c->cache->sb))); in bch_btree_cache_alloc()
813 if (!c->verify_ondisk) { in bch_btree_cache_alloc()
816 * allocated in previous for-loop, they will be in bch_btree_cache_alloc()
819 return -ENOMEM; in bch_btree_cache_alloc()
822 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); in bch_btree_cache_alloc()
824 if (c->verify_data && in bch_btree_cache_alloc()
825 c->verify_data->keys.set->data) in bch_btree_cache_alloc()
826 list_del_init(&c->verify_data->list); in bch_btree_cache_alloc()
828 c->verify_data = NULL; in bch_btree_cache_alloc()
831 c->shrink = shrinker_alloc(0, "md-bcache:%pU", c->set_uuid); in bch_btree_cache_alloc()
832 if (!c->shrink) { in bch_btree_cache_alloc()
837 c->shrink->count_objects = bch_mca_count; in bch_btree_cache_alloc()
838 c->shrink->scan_objects = bch_mca_scan; in bch_btree_cache_alloc()
839 c->shrink->seeks = 4; in bch_btree_cache_alloc()
840 c->shrink->batch = c->btree_pages * 2; in bch_btree_cache_alloc()
841 c->shrink->private_data = c; in bch_btree_cache_alloc()
843 shrinker_register(c->shrink); in bch_btree_cache_alloc()
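/*
 * Editor's consolidation of the shrinker setup above into one unit, as a
 * sketch against the Linux 6.7+ shrinker_alloc() API (error unwinding
 * trimmed; mca_shrinker_init is a hypothetical name):
 */
static int mca_shrinker_init(struct cache_set *c)
{
        struct shrinker *s = shrinker_alloc(0, "md-bcache:%pU", c->set_uuid);

        if (!s)
                return -ENOMEM;

        s->count_objects = bch_mca_count;   /* "how much could you free?" */
        s->scan_objects  = bch_mca_scan;    /* "free up to nr_to_scan"    */
        s->seeks = 4;                       /* nodes are costly to re-read */
        s->batch = c->btree_pages * 2;
        s->private_data = c;
        c->shrink = s;

        shrinker_register(s);
        return 0;
}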
848 /* Btree in memory cache - hash table */
852 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)]; in mca_hash()
861 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) in mca_find()
871 spin_lock(&c->btree_cannibalize_lock); in mca_cannibalize_lock()
872 if (likely(c->btree_cache_alloc_lock == NULL)) { in mca_cannibalize_lock()
873 c->btree_cache_alloc_lock = current; in mca_cannibalize_lock()
874 } else if (c->btree_cache_alloc_lock != current) { in mca_cannibalize_lock()
876 prepare_to_wait(&c->btree_cache_wait, &op->wait, in mca_cannibalize_lock()
878 spin_unlock(&c->btree_cannibalize_lock); in mca_cannibalize_lock()
879 return -EINTR; in mca_cannibalize_lock()
881 spin_unlock(&c->btree_cannibalize_lock); in mca_cannibalize_lock()
894 return ERR_PTR(-EINTR); in mca_cannibalize()
896 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
900 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
905 return ERR_PTR(-ENOMEM); in mca_cannibalize()
916 spin_lock(&c->btree_cannibalize_lock); in bch_cannibalize_unlock()
917 if (c->btree_cache_alloc_lock == current) { in bch_cannibalize_unlock()
918 c->btree_cache_alloc_lock = NULL; in bch_cannibalize_unlock()
919 wake_up(&c->btree_cache_wait); in bch_cannibalize_unlock()
921 spin_unlock(&c->btree_cannibalize_lock); in bch_cannibalize_unlock()
925 struct bkey *k, int level) in mca_alloc() argument
929 BUG_ON(current->bio_list); in mca_alloc()
931 lockdep_assert_held(&c->bucket_lock); in mca_alloc()
939 list_for_each_entry(b, &c->btree_cache_freeable, list) in mca_alloc()
946 list_for_each_entry(b, &c->btree_cache_freed, list) in mca_alloc()
949 if (!b->keys.set[0].data) in mca_alloc()
959 BUG_ON(!down_write_trylock(&b->lock)); in mca_alloc()
960 if (!b->keys.set->data) in mca_alloc()
963 BUG_ON(b->io_mutex.count != 1); in mca_alloc()
965 bkey_copy(&b->key, k); in mca_alloc()
966 list_move(&b->list, &c->btree_cache); in mca_alloc()
967 hlist_del_init_rcu(&b->hash); in mca_alloc()
968 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); in mca_alloc()
970 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); in mca_alloc()
971 b->parent = (void *) ~0UL; in mca_alloc()
972 b->flags = 0; in mca_alloc()
973 b->written = 0; in mca_alloc()
974 b->level = level; in mca_alloc()
976 if (!b->level) in mca_alloc()
977 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, in mca_alloc()
978 &b->c->expensive_debug_checks); in mca_alloc()
980 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, in mca_alloc()
981 &b->c->expensive_debug_checks); in mca_alloc()
996 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
999 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
1002 * level and op->lock.
1008 struct bkey *k, int level, bool write, in bch_btree_node_get() argument
1014 BUG_ON(level < 0); in bch_btree_node_get()
1019 if (current->bio_list) in bch_btree_node_get()
1020 return ERR_PTR(-EAGAIN); in bch_btree_node_get()
1022 mutex_lock(&c->bucket_lock); in bch_btree_node_get()
1023 b = mca_alloc(c, op, k, level); in bch_btree_node_get()
1024 mutex_unlock(&c->bucket_lock); in bch_btree_node_get()
1034 downgrade_write(&b->lock); in bch_btree_node_get()
1036 rw_lock(write, b, level); in bch_btree_node_get()
1037 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { in bch_btree_node_get()
1041 BUG_ON(b->level != level); in bch_btree_node_get()
1046 return ERR_PTR(-EIO); in bch_btree_node_get()
1049 BUG_ON(!b->written); in bch_btree_node_get()
1051 b->parent = parent; in bch_btree_node_get()
1053 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { in bch_btree_node_get()
1054 prefetch(b->keys.set[i].tree); in bch_btree_node_get()
1055 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1058 for (; i <= b->keys.nsets; i++) in bch_btree_node_get()
1059 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1068 mutex_lock(&parent->c->bucket_lock); in btree_node_prefetch()
1069 b = mca_alloc(parent->c, NULL, k, parent->level - 1); in btree_node_prefetch()
1070 mutex_unlock(&parent->c->bucket_lock); in btree_node_prefetch()
1073 b->parent = parent; in btree_node_prefetch()
1085 BUG_ON(b == b->c->root); in btree_node_free()
1088 mutex_lock(&b->write_lock); in btree_node_free()
1096 mutex_unlock(&b->write_lock); in btree_node_free()
1104 clear_bit(BTREE_NODE_dirty, &b->flags); in btree_node_free()
1107 mutex_unlock(&b->write_lock); in btree_node_free()
1109 cancel_delayed_work(&b->work); in btree_node_free()
1111 mutex_lock(&b->c->bucket_lock); in btree_node_free()
1112 bch_bucket_free(b->c, &b->key); in btree_node_free()
1114 mutex_unlock(&b->c->bucket_lock); in btree_node_free()
1122 int level, bool wait, in __bch_btree_node_alloc() argument
1128 mutex_lock(&c->bucket_lock); in __bch_btree_node_alloc()
1130 /* return ERR_PTR(-EAGAIN) when it fails */ in __bch_btree_node_alloc()
1131 b = ERR_PTR(-EAGAIN); in __bch_btree_node_alloc()
1136 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); in __bch_btree_node_alloc()
1138 b = mca_alloc(c, op, &k.key, level); in __bch_btree_node_alloc()
1148 b->parent = parent; in __bch_btree_node_alloc()
1149 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb)); in __bch_btree_node_alloc()
1151 mutex_unlock(&c->bucket_lock); in __bch_btree_node_alloc()
1158 mutex_unlock(&c->bucket_lock); in __bch_btree_node_alloc()
1165 struct btree_op *op, int level, in bch_btree_node_alloc() argument
1168 return __bch_btree_node_alloc(c, op, level, op != NULL, parent); in bch_btree_node_alloc()
1174 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_node_alloc_replacement()
1177 mutex_lock(&n->write_lock); in btree_node_alloc_replacement()
1178 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1179 bkey_copy_key(&n->key, &b->key); in btree_node_alloc_replacement()
1180 mutex_unlock(&n->write_lock); in btree_node_alloc_replacement()
1190 mutex_lock(&b->c->bucket_lock); in make_btree_freeing_key()
1192 atomic_inc(&b->c->prio_blocked); in make_btree_freeing_key()
1194 bkey_copy(k, &b->key); in make_btree_freeing_key()
1199 bch_inc_gen(b->c->cache, in make_btree_freeing_key()
1200 PTR_BUCKET(b->c, &b->key, i))); in make_btree_freeing_key()
1202 mutex_unlock(&b->c->bucket_lock); in make_btree_freeing_key()
1207 struct cache_set *c = b->c; in btree_check_reserve()
1208 struct cache *ca = c->cache; in btree_check_reserve()
1209 unsigned int reserve = (c->root->level - b->level) * 2 + 1; in btree_check_reserve()
1211 mutex_lock(&c->bucket_lock); in btree_check_reserve()
1213 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) { in btree_check_reserve()
1215 prepare_to_wait(&c->btree_cache_wait, &op->wait, in btree_check_reserve()
1217 mutex_unlock(&c->bucket_lock); in btree_check_reserve()
1218 return -EINTR; in btree_check_reserve()
1221 mutex_unlock(&c->bucket_lock); in btree_check_reserve()
1223 return mca_cannibalize_lock(b->c, op); in btree_check_reserve()
1228 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, in __bch_btree_mark_key() argument
1249 if (gen_after(g->last_gc, PTR_GEN(k, i))) in __bch_btree_mark_key()
1250 g->last_gc = PTR_GEN(k, i); in __bch_btree_mark_key()
1258 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0), in __bch_btree_mark_key()
1259 c, "inconsistent ptrs: mark = %llu, level = %i", in __bch_btree_mark_key()
1260 GC_MARK(g), level); in __bch_btree_mark_key()
1262 if (level) in __bch_btree_mark_key()
1280 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1282 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) in bch_initial_mark_key() argument
1291 b->gen = PTR_GEN(k, i); in bch_initial_mark_key()
1293 if (level && bkey_cmp(k, &ZERO_KEY)) in bch_initial_mark_key()
1294 b->prio = BTREE_PRIO; in bch_initial_mark_key()
1295 else if (!level && b->prio == BTREE_PRIO) in bch_initial_mark_key()
1296 b->prio = INITIAL_PRIO; in bch_initial_mark_key()
1299 __bch_btree_mark_key(c, level, k); in bch_initial_mark_key()
1304 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets; in bch_update_bucket_in_use()
1317 gc->nodes++; in btree_gc_mark_node()
1319 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { in btree_gc_mark_node()
1323 if (bch_ptr_bad(&b->keys, k)) in btree_gc_mark_node()
1326 gc->key_bytes += bkey_u64s(k); in btree_gc_mark_node()
1327 gc->nkeys++; in btree_gc_mark_node()
1330 gc->data += KEY_SIZE(k); in btree_gc_mark_node()
1333 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) in btree_gc_mark_node()
1334 btree_bug_on(t->size && in btree_gc_mark_node()
1335 bset_written(&b->keys, t) && in btree_gc_mark_node()
1336 bkey_cmp(&b->key, &t->end) < 0, in btree_gc_mark_node()
1339 if (b->c->gc_always_rewrite) in btree_gc_mark_node()
1345 if ((keys - good_keys) * 2 > keys) in btree_gc_mark_node()
1383 blocks = btree_default_blocks(b->c) * 2 / 3; in btree_gc_coalesce()
1386 __set_blocks(b->keys.set[0].data, keys, in btree_gc_coalesce()
1387 block_bytes(b->c->cache)) > blocks * (nodes - 1)) in btree_gc_coalesce()
1398 * nodes, to make sure the insert below will succeed - we also check in btree_gc_coalesce()
1406 mutex_lock(&new_nodes[i]->write_lock); in btree_gc_coalesce()
1408 for (i = nodes - 1; i > 0; --i) { in btree_gc_coalesce()
1410 struct bset *n2 = btree_bset_first(new_nodes[i - 1]); in btree_gc_coalesce()
1411 struct bkey *k, *last = NULL; in btree_gc_coalesce() local
1416 for (k = n2->start; in btree_gc_coalesce()
1419 if (__set_blocks(n1, n1->keys + keys + in btree_gc_coalesce()
1421 block_bytes(b->c->cache)) > blocks) in btree_gc_coalesce()
1424 last = k; in btree_gc_coalesce()
1429 * Last node we're not getting rid of - we're getting in btree_gc_coalesce()
1436 if (__set_blocks(n1, n1->keys + n2->keys, in btree_gc_coalesce()
1437 block_bytes(b->c->cache)) > in btree_gc_coalesce()
1441 keys = n2->keys; in btree_gc_coalesce()
1443 last = &r->b->key; in btree_gc_coalesce()
1446 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) > in btree_gc_coalesce()
1449 if (last) in btree_gc_coalesce()
1450 bkey_copy_key(&new_nodes[i]->key, last); in btree_gc_coalesce()
1453 n2->start, in btree_gc_coalesce()
1454 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start); in btree_gc_coalesce()
1456 n1->keys += keys; in btree_gc_coalesce()
1457 r[i].keys = n1->keys; in btree_gc_coalesce()
1459 memmove(n2->start, in btree_gc_coalesce()
1461 (void *) bset_bkey_last(n2) - in btree_gc_coalesce()
1464 n2->keys -= keys; in btree_gc_coalesce()
1467 bkey_u64s(&new_nodes[i]->key))) in btree_gc_coalesce()
1471 bch_keylist_add(&keylist, &new_nodes[i]->key); in btree_gc_coalesce()
1475 mutex_unlock(&new_nodes[i]->write_lock); in btree_gc_coalesce()
1480 BUG_ON(btree_bset_first(new_nodes[0])->keys); in btree_gc_coalesce()
1486 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) in btree_gc_coalesce()
1503 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1)); in btree_gc_coalesce()
1504 r[nodes - 1].b = ERR_PTR(-EINTR); in btree_gc_coalesce()
1507 gc->nodes--; in btree_gc_coalesce()
1512 return -EINTR; in btree_gc_coalesce()
1516 mutex_unlock(&new_nodes[i]->write_lock); in btree_gc_coalesce()
1523 atomic_dec(&b->c->prio_blocked); in btree_gc_coalesce()
1557 bch_keylist_add(&keys, &n->key); in btree_gc_rewrite_node()
1569 return -EINTR; in btree_gc_rewrite_node()
1580 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) in btree_gc_count_keys()
1594 * GC would last a long time, and the front side I/Os in btree_gc_min_nodes()
1604 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES; in btree_gc_min_nodes()
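/*
 * Worked example (editor's note, assuming bcache's MAX_GC_TIMES of 100):
 * a tree last measured at 20000 nodes gives min_nodes == 200, so a GC
 * pass that yields to foreground I/O still retires at least 200 nodes
 * before returning -EAGAIN, bounding a full GC to roughly 100 resumptions.
 */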
1620 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1; in btree_gc_recurse() local
1623 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1626 i->b = ERR_PTR(-EINTR); in btree_gc_recurse()
1629 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); in btree_gc_recurse()
1631 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, in btree_gc_recurse()
1633 if (IS_ERR(r->b)) { in btree_gc_recurse()
1634 ret = PTR_ERR(r->b); in btree_gc_recurse()
1638 r->keys = btree_gc_count_keys(r->b); in btree_gc_recurse()
1645 if (!last->b) in btree_gc_recurse()
1648 if (!IS_ERR(last->b)) { in btree_gc_recurse()
1649 should_rewrite = btree_gc_mark_node(last->b, gc); in btree_gc_recurse()
1651 ret = btree_gc_rewrite_node(b, op, last->b); in btree_gc_recurse()
1656 if (last->b->level) { in btree_gc_recurse()
1657 ret = btree_gc_recurse(last->b, op, writes, gc); in btree_gc_recurse()
1662 bkey_copy_key(&b->c->gc_done, &last->b->key); in btree_gc_recurse()
1668 mutex_lock(&last->b->write_lock); in btree_gc_recurse()
1669 if (btree_node_dirty(last->b)) in btree_gc_recurse()
1670 bch_btree_node_write(last->b, writes); in btree_gc_recurse()
1671 mutex_unlock(&last->b->write_lock); in btree_gc_recurse()
1672 rw_unlock(true, last->b); in btree_gc_recurse()
1675 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1)); in btree_gc_recurse()
1676 r->b = NULL; in btree_gc_recurse()
1678 if (atomic_read(&b->c->search_inflight) && in btree_gc_recurse()
1679 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) { in btree_gc_recurse()
1680 gc->nodes_pre = gc->nodes; in btree_gc_recurse()
1681 ret = -EAGAIN; in btree_gc_recurse()
1686 ret = -EAGAIN; in btree_gc_recurse()
1692 if (!IS_ERR_OR_NULL(i->b)) { in btree_gc_recurse()
1693 mutex_lock(&i->b->write_lock); in btree_gc_recurse()
1694 if (btree_node_dirty(i->b)) in btree_gc_recurse()
1695 bch_btree_node_write(i->b, writes); in btree_gc_recurse()
1696 mutex_unlock(&i->b->write_lock); in btree_gc_recurse()
1697 rw_unlock(true, i->b); in btree_gc_recurse()
1721 return -EINTR; in bch_btree_gc_root()
1725 __bch_btree_mark_key(b->c, b->level + 1, &b->key); in bch_btree_gc_root()
1727 if (b->level) { in bch_btree_gc_root()
1733 bkey_copy_key(&b->c->gc_done, &b->key); in bch_btree_gc_root()
1743 if (!c->gc_mark_valid) in btree_gc_start()
1746 mutex_lock(&c->bucket_lock); in btree_gc_start()
1748 c->gc_done = ZERO_KEY; in btree_gc_start()
1750 ca = c->cache; in btree_gc_start()
1752 b->last_gc = b->gen; in btree_gc_start()
1754 b->reclaimable_in_gc = 1; in btree_gc_start()
1755 if (!atomic_read(&b->pin)) { in btree_gc_start()
1761 c->gc_mark_valid = 0; in btree_gc_start()
1762 mutex_unlock(&c->bucket_lock); in btree_gc_start()
1772 mutex_lock(&c->bucket_lock); in bch_btree_gc_finish()
1775 c->gc_mark_valid = 1; in bch_btree_gc_finish()
1776 c->need_gc = 0; in bch_btree_gc_finish()
1778 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) in bch_btree_gc_finish()
1779 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), in bch_btree_gc_finish()
1784 for (i = 0; i < c->devices_max_used; i++) { in bch_btree_gc_finish()
1785 struct bcache_device *d = c->devices[i]; in bch_btree_gc_finish()
1789 if (!d || UUID_FLASH_ONLY(&c->uuids[i])) in bch_btree_gc_finish()
1793 spin_lock(&dc->writeback_keys.lock); in bch_btree_gc_finish()
1795 &dc->writeback_keys.keys, node) in bch_btree_gc_finish()
1796 for (j = 0; j < KEY_PTRS(&w->key); j++) in bch_btree_gc_finish()
1797 SET_GC_MARK(PTR_BUCKET(c, &w->key, j), in bch_btree_gc_finish()
1799 spin_unlock(&dc->writeback_keys.lock); in bch_btree_gc_finish()
1803 c->avail_nbuckets = 0; in bch_btree_gc_finish()
1805 ca = c->cache; in bch_btree_gc_finish()
1806 ca->invalidate_needs_gc = 0; in bch_btree_gc_finish()
1808 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++) in bch_btree_gc_finish()
1809 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA); in bch_btree_gc_finish()
1811 for (k = ca->prio_buckets; in bch_btree_gc_finish()
1812 k < ca->prio_buckets + prio_buckets(ca) * 2; k++) in bch_btree_gc_finish()
1813 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA); in bch_btree_gc_finish()
1816 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); in bch_btree_gc_finish()
1818 if (b->reclaimable_in_gc) in bch_btree_gc_finish()
1819 b->reclaimable_in_gc = 0; in bch_btree_gc_finish()
1821 if (atomic_read(&b->pin)) in bch_btree_gc_finish()
1827 c->avail_nbuckets++; in bch_btree_gc_finish()
1830 mutex_unlock(&c->bucket_lock); in bch_btree_gc_finish()
1855 if (ret == -EAGAIN) in bch_btree_gc()
1860 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags)); in bch_btree_gc()
1865 bch_time_stats_update(&c->btree_gc_time, start_time); in bch_btree_gc()
1870 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); in bch_btree_gc()
1879 struct cache *ca = c->cache; in gc_should_run()
1881 if (ca->invalidate_needs_gc) in gc_should_run()
1884 if (atomic_read(&c->sectors_to_gc) < 0) in gc_should_run()
1895 wait_event_interruptible(c->gc_wait, in bch_gc_thread()
1897 test_bit(CACHE_SET_IO_DISABLE, &c->flags) || in bch_gc_thread()
1901 test_bit(CACHE_SET_IO_DISABLE, &c->flags)) in bch_gc_thread()
1914 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc"); in bch_gc_thread_start()
1915 return PTR_ERR_OR_ZERO(c->gc_thread); in bch_gc_thread_start()
1928 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) in bch_btree_check_recurse()
1929 bch_initial_mark_key(b->c, b->level, k); in bch_btree_check_recurse()
1931 bch_initial_mark_key(b->c, b->level + 1, &b->key); in bch_btree_check_recurse()
1933 if (b->level) { in bch_btree_check_recurse()
1934 bch_btree_iter_init(&b->keys, &iter, NULL); in bch_btree_check_recurse()
1937 k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_check_recurse()
1942 * initialize c->gc_stats.nodes in bch_btree_check_recurse()
1945 b->c->gc_stats.nodes++; in bch_btree_check_recurse()
1963 struct btree_check_state *check_state = info->state; in bch_btree_check_thread()
1964 struct cache_set *c = check_state->c; in bch_btree_check_thread()
1976 bch_btree_iter_init(&c->root->keys, &iter, NULL); in bch_btree_check_thread()
1977 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); in bch_btree_check_thread()
1985 * sub-tree indexed by the fetched key. in bch_btree_check_thread()
1987 spin_lock(&check_state->idx_lock); in bch_btree_check_thread()
1988 cur_idx = check_state->key_idx; in bch_btree_check_thread()
1989 check_state->key_idx++; in bch_btree_check_thread()
1990 spin_unlock(&check_state->idx_lock); in bch_btree_check_thread()
1992 skip_nr = cur_idx - prev_idx; in bch_btree_check_thread()
1996 &c->root->keys, in bch_btree_check_thread()
2006 atomic_set(&check_state->enough, 1); in bch_btree_check_thread()
2007 /* Update check_state->enough earlier */ in bch_btree_check_thread()
2011 skip_nr--; in bch_btree_check_thread()
2018 btree_node_prefetch(c->root, p); in bch_btree_check_thread()
2019 c->gc_stats.nodes++; in bch_btree_check_thread()
2021 ret = bcache_btree(check_recurse, p, c->root, &op); in bch_btree_check_thread()
2030 finish_wait(&c->btree_cache_wait, &op.wait); in bch_btree_check_thread()
2040 info->result = ret; in bch_btree_check_thread()
2041 /* update check_state->started among all CPUs */ in bch_btree_check_thread()
2043 if (atomic_dec_and_test(&check_state->started)) in bch_btree_check_thread()
2044 wake_up(&check_state->wait); in bch_btree_check_thread()
2074 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) in bch_btree_check()
2075 bch_initial_mark_key(c, c->root->level, k); in bch_btree_check()
2077 bch_initial_mark_key(c, c->root->level + 1, &c->root->key); in bch_btree_check()
2079 if (c->root->level == 0) in bch_btree_check()
2091 rw_lock(0, c->root, c->root->level); in bch_btree_check()
2094 * if check_state.enough is non-zero, it means current in bch_btree_check()
2113 for (--i; i >= 0; i--) in bch_btree_check()
2115 ret = -ENOMEM; in bch_btree_check()
2134 rw_unlock(0, c->root); in bch_btree_check()
2140 struct cache *ca = c->cache; in bch_initial_gc_finish()
2145 mutex_lock(&c->bucket_lock); in bch_initial_gc_finish()
2149 * order to get the allocator thread started - it needs freed buckets in in bch_initial_gc_finish()
2157 if (fifo_full(&ca->free[RESERVE_PRIO]) && in bch_initial_gc_finish()
2158 fifo_full(&ca->free[RESERVE_BTREE])) in bch_initial_gc_finish()
2164 if (!fifo_push(&ca->free[RESERVE_PRIO], in bch_initial_gc_finish()
2165 b - ca->buckets)) in bch_initial_gc_finish()
2166 fifo_push(&ca->free[RESERVE_BTREE], in bch_initial_gc_finish()
2167 b - ca->buckets); in bch_initial_gc_finish()
2171 mutex_unlock(&c->bucket_lock); in bch_initial_gc_finish()
2181 BUG_ON(bkey_cmp(k, &b->key) > 0); in btree_insert_key()
2183 status = bch_btree_insert_key(&b->keys, k, replace_key); in btree_insert_key()
2185 bch_check_keys(&b->keys, "%u for %s", status, in btree_insert_key()
2197 long ret = bch_btree_keys_u64s_remaining(&b->keys); in insert_u64s_remaining()
2202 if (b->keys.ops->is_extents) in insert_u64s_remaining()
2203 ret -= KEY_MAX_U64S; in insert_u64s_remaining()
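/*
 * Editor's note on the reserve above: an insert into an extent btree can
 * overwrite the middle of an existing extent, splitting it into a front
 * and a back half. That is one extra, possibly maximal-size key beyond
 * what the caller hands in, hence KEY_MAX_U64S is held back.
 */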
2213 int oldsize = bch_count_data(&b->keys); in bch_btree_insert_keys()
2216 struct bkey *k = insert_keys->keys; in bch_btree_insert_keys()
2221 if (bkey_cmp(k, &b->key) <= 0) { in bch_btree_insert_keys()
2222 if (!b->level) in bch_btree_insert_keys()
2223 bkey_put(b->c, k); in bch_btree_insert_keys()
2227 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { in bch_btree_insert_keys()
2229 bkey_copy(&temp.key, insert_keys->keys); in bch_btree_insert_keys()
2231 bch_cut_back(&b->key, &temp.key); in bch_btree_insert_keys()
2232 bch_cut_front(&b->key, insert_keys->keys); in bch_btree_insert_keys()
2242 op->insert_collision = true; in bch_btree_insert_keys()
2244 BUG_ON(!bch_keylist_empty(insert_keys) && b->level); in bch_btree_insert_keys()
2246 BUG_ON(bch_count_data(&b->keys) < oldsize); in bch_btree_insert_keys()
2264 if (!b->level) in btree_split()
2265 return -EINTR; in btree_split()
2275 block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5; in btree_split()
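/*
 * Editor's note: the 4/5 threshold above is the split trigger. If the
 * sorted result would fill more than 80% of a node, btree_split() really
 * splits (n1 keeping roughly 3/5 of the keys, see below); otherwise it
 * degenerates into an in-place rewrite, traced as a "compact".
 */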
2280 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); in btree_split()
2282 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_split()
2286 if (!b->parent) { in btree_split()
2287 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); in btree_split()
2292 mutex_lock(&n1->write_lock); in btree_split()
2293 mutex_lock(&n2->write_lock); in btree_split()
2302 while (keys < (btree_bset_first(n1)->keys * 3) / 5) in btree_split()
2306 bkey_copy_key(&n1->key, in btree_split()
2310 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys; in btree_split()
2311 btree_bset_first(n1)->keys = keys; in btree_split()
2313 memcpy(btree_bset_first(n2)->start, in btree_split()
2315 btree_bset_first(n2)->keys * sizeof(uint64_t)); in btree_split()
2317 bkey_copy_key(&n2->key, &b->key); in btree_split()
2319 bch_keylist_add(&parent_keys, &n2->key); in btree_split()
2321 mutex_unlock(&n2->write_lock); in btree_split()
2324 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); in btree_split()
2326 mutex_lock(&n1->write_lock); in btree_split()
2330 bch_keylist_add(&parent_keys, &n1->key); in btree_split()
2332 mutex_unlock(&n1->write_lock); in btree_split()
2336 mutex_lock(&n3->write_lock); in btree_split()
2337 bkey_copy_key(&n3->key, &MAX_KEY); in btree_split()
2340 mutex_unlock(&n3->write_lock); in btree_split()
2345 } else if (!b->parent) { in btree_split()
2355 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); in btree_split()
2362 bch_time_stats_update(&b->c->btree_split_time, start_time); in btree_split()
2366 bkey_put(b->c, &n2->key); in btree_split()
2370 bkey_put(b->c, &n1->key); in btree_split()
2374 WARN(1, "bcache: btree split failed (level %u)", b->level); in btree_split()
2376 if (n3 == ERR_PTR(-EAGAIN) || in btree_split()
2377 n2 == ERR_PTR(-EAGAIN) || in btree_split()
2378 n1 == ERR_PTR(-EAGAIN)) in btree_split()
2379 return -EAGAIN; in btree_split()
2381 return -ENOMEM; in btree_split()
2391 BUG_ON(b->level && replace_key); in bch_btree_insert_node()
2395 mutex_lock(&b->write_lock); in bch_btree_insert_node()
2398 b->keys.last_set_unwritten) in bch_btree_insert_node()
2402 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2409 if (!b->level) in bch_btree_insert_node()
2415 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2422 if (current->bio_list) { in bch_btree_insert_node()
2423 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2424 return -EAGAIN; in bch_btree_insert_node()
2425 } else if (op->lock <= b->c->root->level) { in bch_btree_insert_node()
2426 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2427 return -EINTR; in bch_btree_insert_node()
2435 return -EINTR; in bch_btree_insert_node()
2443 int ret = -EINTR; in bch_btree_insert_check_key()
2444 uint64_t btree_ptr = b->key.ptr[0]; in bch_btree_insert_check_key()
2445 unsigned long seq = b->seq; in bch_btree_insert_check_key()
2447 bool upgrade = op->lock == -1; in bch_btree_insert_check_key()
2453 rw_lock(true, b, b->level); in bch_btree_insert_check_key()
2455 if (b->key.ptr[0] != btree_ptr || in bch_btree_insert_check_key()
2456 b->seq != seq + 1) { in bch_btree_insert_check_key()
2457 op->lock = b->level; in bch_btree_insert_check_key()
2463 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t)); in bch_btree_insert_check_key()
2474 downgrade_write(&b->lock); in bch_btree_insert_check_key()
2490 int ret = bch_btree_insert_node(b, &op->op, op->keys, in btree_insert_fn()
2491 op->journal_ref, op->replace_key); in btree_insert_fn()
2492 if (ret && !bch_keylist_empty(op->keys)) in btree_insert_fn()
2504 BUG_ON(current->bio_list); in bch_btree_insert()
2515 &START_KEY(keys->keys), in bch_btree_insert()
2527 ret = -ESRCH; in bch_btree_insert()
2541 BUG_ON(!b->written); in bch_btree_set_root()
2543 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_btree_set_root()
2544 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); in bch_btree_set_root()
2546 mutex_lock(&b->c->bucket_lock); in bch_btree_set_root()
2547 list_del_init(&b->list); in bch_btree_set_root()
2548 mutex_unlock(&b->c->bucket_lock); in bch_btree_set_root()
2550 b->c->root = b; in bch_btree_set_root()
2552 bch_journal_meta(b->c, &cl); in bch_btree_set_root()
2564 if (b->level) { in bch_btree_map_nodes_recurse()
2569 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_nodes_recurse()
2571 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_map_nodes_recurse()
2582 if (!b->level || flags == MAP_ALL_NODES) in bch_btree_map_nodes_recurse()
2603 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_keys_recurse()
2605 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { in bch_btree_map_keys_recurse()
2606 ret = !b->level in bch_btree_map_keys_recurse()
2616 if (!b->level && (flags & MAP_END_KEY)) in bch_btree_map_keys_recurse()
2617 ret = fn(op, b, &KEY(KEY_INODE(&b->key), in bch_btree_map_keys_recurse()
2618 KEY_OFFSET(&b->key), 0)); in bch_btree_map_keys_recurse()
2634 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0) in keybuf_cmp()
2635 return -1; in keybuf_cmp()
2636 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0) in keybuf_cmp()
2644 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1); in keybuf_nonoverlapping_cmp()
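/*
 * Editor's sketch of why the clamp above matters: bkey_cmp() returns a
 * 64-bit difference, and truncating that to int could zero or flip the
 * sign. Standalone illustration:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t diff = INT64_C(1) << 32;     /* l > r, but low 32 bits are 0 */
        int64_t clamped = diff > 1 ? 1 : (diff < -1 ? -1 : diff);

        printf("truncated: %d\n", (int)diff);    /* 0, falsely "equal" */
        printf("clamped:   %d\n", (int)clamped); /* 1, sign preserved */
        return 0;
}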
2659 struct keybuf *buf = refill->buf; in refill_keybuf_fn()
2662 if (bkey_cmp(k, refill->end) > 0) { in refill_keybuf_fn()
2670 if (refill->pred(buf, k)) { in refill_keybuf_fn()
2673 spin_lock(&buf->lock); in refill_keybuf_fn()
2675 w = array_alloc(&buf->freelist); in refill_keybuf_fn()
2677 spin_unlock(&buf->lock); in refill_keybuf_fn()
2681 w->private = NULL; in refill_keybuf_fn()
2682 bkey_copy(&w->key, k); in refill_keybuf_fn()
2684 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) in refill_keybuf_fn()
2685 array_free(&buf->freelist, w); in refill_keybuf_fn()
2687 refill->nr_found++; in refill_keybuf_fn()
2689 if (array_freelist_empty(&buf->freelist)) in refill_keybuf_fn()
2692 spin_unlock(&buf->lock); in refill_keybuf_fn()
2695 buf->last_scanned = *k; in refill_keybuf_fn()
2702 struct bkey start = buf->last_scanned; in bch_refill_keybuf()
2707 bch_btree_op_init(&refill.op, -1); in bch_refill_keybuf()
2713 bch_btree_map_keys(&refill.op, c, &buf->last_scanned, in bch_refill_keybuf()
2718 KEY_INODE(&buf->last_scanned), in bch_refill_keybuf()
2719 KEY_OFFSET(&buf->last_scanned)); in bch_refill_keybuf()
2721 spin_lock(&buf->lock); in bch_refill_keybuf()
2723 if (!RB_EMPTY_ROOT(&buf->keys)) { in bch_refill_keybuf()
2726 w = RB_FIRST(&buf->keys, struct keybuf_key, node); in bch_refill_keybuf()
2727 buf->start = START_KEY(&w->key); in bch_refill_keybuf()
2729 w = RB_LAST(&buf->keys, struct keybuf_key, node); in bch_refill_keybuf()
2730 buf->end = w->key; in bch_refill_keybuf()
2732 buf->start = MAX_KEY; in bch_refill_keybuf()
2733 buf->end = MAX_KEY; in bch_refill_keybuf()
2736 spin_unlock(&buf->lock); in bch_refill_keybuf()
2741 rb_erase(&w->node, &buf->keys); in __bch_keybuf_del()
2742 array_free(&buf->freelist, w); in __bch_keybuf_del()
2747 spin_lock(&buf->lock); in bch_keybuf_del()
2749 spin_unlock(&buf->lock); in bch_keybuf_del()
2760 if (bkey_cmp(end, &buf->start) <= 0 || in bch_keybuf_check_overlapping()
2761 bkey_cmp(start, &buf->end) >= 0) in bch_keybuf_check_overlapping()
2764 spin_lock(&buf->lock); in bch_keybuf_check_overlapping()
2765 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp); in bch_keybuf_check_overlapping()
2767 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) { in bch_keybuf_check_overlapping()
2771 if (p->private) in bch_keybuf_check_overlapping()
2777 spin_unlock(&buf->lock); in bch_keybuf_check_overlapping()
2785 spin_lock(&buf->lock); in bch_keybuf_next()
2787 w = RB_FIRST(&buf->keys, struct keybuf_key, node); in bch_keybuf_next()
2789 while (w && w->private) in bch_keybuf_next()
2793 w->private = ERR_PTR(-EINTR); in bch_keybuf_next()
2795 spin_unlock(&buf->lock); in bch_keybuf_next()
2811 if (bkey_cmp(&buf->last_scanned, end) >= 0) { in bch_keybuf_next_rescan()
2824 buf->last_scanned = MAX_KEY; in bch_keybuf_init()
2825 buf->keys = RB_ROOT; in bch_keybuf_init()
2827 spin_lock_init(&buf->lock); in bch_keybuf_init()
2828 array_allocator_init(&buf->freelist); in bch_keybuf_init()
2841 return -ENOMEM; in bch_btree_init()