Lines Matching full:keys (drivers/md/bcache/btree.c)
18 * as keys are inserted we only sort the pages that have not yet been written.
52 * Check for bad keys in replay
115 if (b->level && b->keys.nsets) in bch_btree_init_next()
116 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
118 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
121 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_init_next()
164 iter.b = &b->keys; in bch_btree_node_read_done()
171 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; in bch_btree_node_read_done()
199 if (i != b->keys.set[0].data && !i->keys) in bch_btree_node_read_done()
209 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); in bch_btree_node_read_done()
211 if (i->seq == b->keys.set[0].data->seq) in bch_btree_node_read_done()
214 bch_btree_sort_and_fix_extents(&b->keys, &iter, &b->c->sort); in bch_btree_node_read_done()
216 i = b->keys.set[0].data; in bch_btree_node_read_done()
218 if (b->keys.set[0].size && in bch_btree_node_read_done()
219 bkey_cmp(&b->key, &b->keys.set[0].end) < 0) in bch_btree_node_read_done()
223 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_node_read_done()
230 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
232 bset_block_offset(b, i), i->keys); in bch_btree_node_read_done()
259 bch_bio_map(bio, b->keys.set[0].data); in bch_btree_node_read()
373 bset_sector_offset(&b->keys, i)); in do_btree_node_write()
413 BUG_ON(b->written && !i->keys); in __bch_btree_node_write()
415 bch_check_keys(&b->keys, "writing"); in __bch_btree_node_write()
436 unsigned int nsets = b->keys.nsets; in bch_btree_node_write()
446 if (nsets && !b->keys.nsets) in bch_btree_node_write()
483 BUG_ON(!i->keys); in bch_btree_leaf_dirty()
528 bch_btree_keys_free(&b->keys); in mca_data_free()
550 if (!bch_btree_keys_alloc(&b->keys, in mca_data_alloc()
618 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); in mca_reap()
620 if (b->keys.page_order < min_order) in mca_reap()
692 * succeed, so that inserting keys into the btree can always succeed and in bch_mca_scan()
825 c->verify_data->keys.set->data) in bch_btree_cache_alloc()
949 if (!b->keys.set[0].data) in mca_alloc()
960 if (!b->keys.set->data) in mca_alloc()
977 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, in mca_alloc()
980 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, in mca_alloc()
1053 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { in bch_btree_node_get()
1054 prefetch(b->keys.set[i].tree); in bch_btree_node_get()
1055 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1058 for (; i <= b->keys.nsets; i++) in bch_btree_node_get()
1059 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1149 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb)); in __bch_btree_node_alloc()
1178 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1236 * ptr_invalid() can't return true for the keys that mark btree nodes as in __bch_btree_mark_key()
1310 unsigned int keys = 0, good_keys = 0; in btree_gc_mark_node() local
1319 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { in btree_gc_mark_node()
1321 keys++; in btree_gc_mark_node()
1323 if (bch_ptr_bad(&b->keys, k)) in btree_gc_mark_node()
1333 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) in btree_gc_mark_node()
1335 bset_written(&b->keys, t) && in btree_gc_mark_node()
1345 if ((keys - good_keys) * 2 > keys) in btree_gc_mark_node()
1355 unsigned int keys; member
1366 unsigned int i, nodes = 0, keys = 0, blocks; in btree_gc_coalesce() local
1381 keys += r[nodes++].keys; in btree_gc_coalesce()
1386 __set_blocks(b->keys.set[0].data, keys, in btree_gc_coalesce()
1413 keys = 0; in btree_gc_coalesce()
1419 if (__set_blocks(n1, n1->keys + keys + in btree_gc_coalesce()
1425 keys += bkey_u64s(k); in btree_gc_coalesce()
1431 * the remaining keys into this node; we can't ensure in btree_gc_coalesce()
1433 * length keys (shouldn't be possible in practice, in btree_gc_coalesce()
1436 if (__set_blocks(n1, n1->keys + n2->keys, in btree_gc_coalesce()
1441 keys = n2->keys; in btree_gc_coalesce()
1446 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) > in btree_gc_coalesce()
1454 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start); in btree_gc_coalesce()
1456 n1->keys += keys; in btree_gc_coalesce()
1457 r[i].keys = n1->keys; in btree_gc_coalesce()
1460 bset_bkey_idx(n2, keys), in btree_gc_coalesce()
1462 (void *) bset_bkey_idx(n2, keys)); in btree_gc_coalesce()
1464 n2->keys -= keys; in btree_gc_coalesce()
1480 BUG_ON(btree_bset_first(new_nodes[0])->keys); in btree_gc_coalesce()
1537 struct keylist keys; in btree_gc_rewrite_node() local
1556 bch_keylist_init(&keys); in btree_gc_rewrite_node()
1557 bch_keylist_add(&keys, &n->key); in btree_gc_rewrite_node()
1559 make_btree_freeing_key(replace, keys.top); in btree_gc_rewrite_node()
1560 bch_keylist_push(&keys); in btree_gc_rewrite_node()
1562 bch_btree_insert_node(b, op, &keys, NULL, NULL); in btree_gc_rewrite_node()
1563 BUG_ON(!bch_keylist_empty(&keys)); in btree_gc_rewrite_node()
1580 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) in btree_gc_count_keys()
1623 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1629 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); in btree_gc_recurse()
1638 r->keys = btree_gc_count_keys(r->b); in btree_gc_recurse()
1782 /* don't reclaim buckets to which writeback keys point */ in bch_btree_gc_finish()
1795 &dc->writeback_keys.keys, node) in bch_btree_gc_finish()
1808 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++) in bch_btree_gc_finish()
1928 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) in bch_btree_check_recurse()
1934 bch_btree_iter_init(&b->keys, &iter, NULL); in bch_btree_check_recurse()
1937 k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_check_recurse()
1975 /* root node keys are checked before thread created */ in bch_btree_check_thread()
1976 bch_btree_iter_init(&c->root->keys, &iter, NULL); in bch_btree_check_thread()
1977 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); in bch_btree_check_thread()
1983 * Fetch a root node key index, skip the keys which in bch_btree_check_thread()
1996 &c->root->keys, in bch_btree_check_thread()
2002 * No more keys to check in root node, in bch_btree_check_thread()
2073 /* check and mark root node keys */ in bch_btree_check()
2074 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) in bch_btree_check()
2183 status = bch_btree_insert_key(&b->keys, k, replace_key); in btree_insert_key()
2185 bch_check_keys(&b->keys, "%u for %s", status, in btree_insert_key()
2197 long ret = bch_btree_keys_u64s_remaining(&b->keys); in insert_u64s_remaining()
2202 if (b->keys.ops->is_extents) in insert_u64s_remaining()
2213 int oldsize = bch_count_data(&b->keys); in bch_btree_insert_keys()
2216 struct bkey *k = insert_keys->keys; in bch_btree_insert_keys()
2229 bkey_copy(&temp.key, insert_keys->keys); in bch_btree_insert_keys()
2232 bch_cut_front(&b->key, insert_keys->keys); in bch_btree_insert_keys()
2246 BUG_ON(bch_count_data(&b->keys) < oldsize); in bch_btree_insert_keys()
2278 unsigned int keys = 0; in btree_split() local
2280 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); in btree_split()
2302 while (keys < (btree_bset_first(n1)->keys * 3) / 5) in btree_split()
2303 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), in btree_split()
2304 keys)); in btree_split()
2307 bset_bkey_idx(btree_bset_first(n1), keys)); in btree_split()
2308 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys)); in btree_split()
2310 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys; in btree_split()
2311 btree_bset_first(n1)->keys = keys; in btree_split()
2315 btree_bset_first(n2)->keys * sizeof(uint64_t)); in btree_split()
2324 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); in btree_split()
2398 b->keys.last_set_unwritten) in bch_btree_insert_node()
2480 struct keylist *keys; member
2490 int ret = bch_btree_insert_node(b, &op->op, op->keys, in btree_insert_fn()
2492 if (ret && !bch_keylist_empty(op->keys)) in btree_insert_fn()
2498 int bch_btree_insert(struct cache_set *c, struct keylist *keys, in bch_btree_insert() argument
2505 BUG_ON(bch_keylist_empty(keys)); in bch_btree_insert()
2508 op.keys = keys; in bch_btree_insert()
2512 while (!ret && !bch_keylist_empty(keys)) { in bch_btree_insert()
2515 &START_KEY(keys->keys), in bch_btree_insert()
2524 while ((k = bch_keylist_pop(keys))) in bch_btree_insert()
2556 /* Map across nodes or keys */
2569 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_nodes_recurse()
2571 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_map_nodes_recurse()
2603 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_keys_recurse()
2605 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { in bch_btree_map_keys_recurse()
2633 /* Overlapping keys compare equal */ in keybuf_cmp()
2684 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) in refill_keybuf_fn()
2723 if (!RB_EMPTY_ROOT(&buf->keys)) { in bch_refill_keybuf()
2726 w = RB_FIRST(&buf->keys, struct keybuf_key, node); in bch_refill_keybuf()
2729 w = RB_LAST(&buf->keys, struct keybuf_key, node); in bch_refill_keybuf()
2741 rb_erase(&w->node, &buf->keys); in __bch_keybuf_del()
2765 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp); in bch_keybuf_check_overlapping()
2787 w = RB_FIRST(&buf->keys, struct keybuf_key, node); in bch_keybuf_next()
2825 buf->keys = RB_ROOT; in bch_keybuf_init()