Lines matching +full:no +full:- +full:wp in fs/bcachefs/alloc_foreground.c (search-result excerpt; only the matching lines of each function are shown)
1 // SPDX-License-Identifier: GPL-2.0
58 * - They track buckets that have been partially allocated, allowing for
59 * sub-bucket sized allocations - they're used by the sector allocator below
61 * - They provide a reference to the buckets they own that mark and sweep GC
66 * with a reference to an open bucket - the caller is required to put that
74 memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor)); in bch2_reset_alloc_cursors()
80 open_bucket_idx_t idx = ob - c->open_buckets; in bch2_open_bucket_hash_add()
81 open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket); in bch2_open_bucket_hash_add()
83 ob->hash = *slot; in bch2_open_bucket_hash_add()
89 open_bucket_idx_t idx = ob - c->open_buckets; in bch2_open_bucket_hash_remove()
90 open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket); in bch2_open_bucket_hash_remove()
94 slot = &c->open_buckets[*slot].hash; in bch2_open_bucket_hash_remove()
97 *slot = ob->hash; in bch2_open_bucket_hash_remove()
98 ob->hash = 0; in bch2_open_bucket_hash_remove()
105 if (ob->ec) { in __bch2_open_bucket_put()
106 ec_stripe_new_put(c, ob->ec, STRIPE_REF_io); in __bch2_open_bucket_put()
110 spin_lock(&ob->lock); in __bch2_open_bucket_put()
111 ob->valid = false; in __bch2_open_bucket_put()
112 ob->data_type = 0; in __bch2_open_bucket_put()
113 spin_unlock(&ob->lock); in __bch2_open_bucket_put()
115 spin_lock(&c->freelist_lock); in __bch2_open_bucket_put()
118 ob->freelist = c->open_buckets_freelist; in __bch2_open_bucket_put()
119 c->open_buckets_freelist = ob - c->open_buckets; in __bch2_open_bucket_put()
121 c->open_buckets_nr_free++; in __bch2_open_bucket_put()
122 ca->nr_open_buckets--; in __bch2_open_bucket_put()
123 spin_unlock(&c->freelist_lock); in __bch2_open_bucket_put()
125 closure_wake_up(&c->open_buckets_wait); in __bch2_open_bucket_put()
136 if (ob->dev == dev && ob->ec) in bch2_open_bucket_write_error()
144 BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free); in bch2_open_bucket_alloc()
146 ob = c->open_buckets + c->open_buckets_freelist; in bch2_open_bucket_alloc()
147 c->open_buckets_freelist = ob->freelist; in bch2_open_bucket_alloc()
148 atomic_set(&ob->pin, 1); in bch2_open_bucket_alloc()
149 ob->data_type = 0; in bch2_open_bucket_alloc()
151 c->open_buckets_nr_free--; in bch2_open_bucket_alloc()
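
bch2_open_bucket_alloc() and __bch2_open_bucket_put() are the two halves of an intrusive LIFO freelist threaded through the same index scheme: put pushes the bucket's index onto c->open_buckets_freelist, alloc pops it, and index 0 again terminates the list, which is why slot 0 is never allocated. A rough sketch of the push/pop with the locking and the pin refcount omitted (names are illustrative, not the kernel API):

#include <stdio.h>

#define NR_OBJS 8

struct obj {
        unsigned freelist;      /* index of next free obj, 0 = end of list */
        int in_use;
};

static struct obj objs[NR_OBJS];        /* objs[0] reserved as terminator */
static unsigned freelist_head;
static unsigned nr_free;

static void obj_put(struct obj *o)      /* push, like __bch2_open_bucket_put() */
{
        o->in_use = 0;
        o->freelist = freelist_head;
        freelist_head = o - objs;
        nr_free++;
}

static struct obj *obj_alloc(void)      /* pop, like bch2_open_bucket_alloc() */
{
        if (!freelist_head)
                return NULL;

        struct obj *o = &objs[freelist_head];
        freelist_head = o->freelist;
        o->in_use = 1;
        nr_free--;
        return o;
}

int main(void)
{
        for (unsigned i = 1; i < NR_OBJS; i++)  /* skip sentinel index 0 */
                obj_put(&objs[i]);

        struct obj *o = obj_alloc();
        printf("allocated index %zu, %u still free\n",
               (size_t)(o - objs), nr_free);
        return 0;
}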
157 if (c->curr_recovery_pass > BCH_RECOVERY_PASS_trans_mark_dev_sbs) in is_superblock_bucket()
165 BUG_ON(c->open_buckets_partial_nr >= in open_bucket_free_unused()
166 ARRAY_SIZE(c->open_buckets_partial)); in open_bucket_free_unused()
168 spin_lock(&c->freelist_lock); in open_bucket_free_unused()
170 bch2_dev_rcu(c, ob->dev)->nr_partial_buckets++; in open_bucket_free_unused()
173 ob->on_partial_list = true; in open_bucket_free_unused()
174 c->open_buckets_partial[c->open_buckets_partial_nr++] = in open_bucket_free_unused()
175 ob - c->open_buckets; in open_bucket_free_unused()
176 spin_unlock(&c->freelist_lock); in open_bucket_free_unused()
178 closure_wake_up(&c->open_buckets_wait); in open_bucket_free_unused()
179 closure_wake_up(&c->freelist_wait); in open_bucket_free_unused()
204 s->skipped_open++; in may_alloc_bucket()
209 bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal, in may_alloc_bucket()
211 if (journal_seq_ready > c->journal.flushed_seq_ondisk) { in may_alloc_bucket()
212 if (journal_seq_ready > c->journal.flushing_seq) in may_alloc_bucket()
213 s->need_journal_commit++; in may_alloc_bucket()
214 s->skipped_need_journal_commit++; in may_alloc_bucket()
218 if (bch2_bucket_nocow_is_locked(&c->nocow_locks, bucket)) { in may_alloc_bucket()
219 s->skipped_nocow++; in may_alloc_bucket()
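
may_alloc_bucket() refuses to reuse a bucket until the journal entry that made it free is durable: if the bucket's ready sequence number is still ahead of what the journal has flushed to disk, the bucket is skipped, and if it is ahead even of the entry currently being flushed, the caller will need to kick off a journal commit. A hedged sketch of that predicate; the types and names below are stand-ins, not the bcachefs API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct alloc_stats {
        uint64_t need_journal_commit;
        uint64_t skipped_need_journal_commit;
};

/*
 * True if the bucket may be allocated: the journal sequence number
 * that frees it must already be flushed to disk.
 */
static bool bucket_journal_ready(uint64_t journal_seq_ready,
                                 uint64_t flushed_seq_ondisk,
                                 uint64_t flushing_seq,
                                 struct alloc_stats *s)
{
        if (journal_seq_ready > flushed_seq_ondisk) {
                if (journal_seq_ready > flushing_seq)
                        s->need_journal_commit++;       /* nobody is flushing it yet */
                s->skipped_need_journal_commit++;
                return false;
        }
        return true;
}

int main(void)
{
        struct alloc_stats s = {0};

        /* freed at seq 12, but only seq 10 is on disk and seq 11 in flight */
        printf("ready: %d\n", bucket_journal_ready(12, 10, 11, &s));
        printf("need commit: %llu\n",
               (unsigned long long)s.need_journal_commit);
        return 0;
}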
235 if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) { in __try_alloc_bucket()
236 s->skipped_nouse++; in __try_alloc_bucket()
240 spin_lock(&c->freelist_lock); in __try_alloc_bucket()
242 if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) { in __try_alloc_bucket()
244 closure_wait(&c->open_buckets_wait, cl); in __try_alloc_bucket()
246 track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true); in __try_alloc_bucket()
247 spin_unlock(&c->freelist_lock); in __try_alloc_bucket()
248 return ERR_PTR(-BCH_ERR_open_buckets_empty); in __try_alloc_bucket()
252 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) { in __try_alloc_bucket()
253 spin_unlock(&c->freelist_lock); in __try_alloc_bucket()
254 s->skipped_open++; in __try_alloc_bucket()
260 spin_lock(&ob->lock); in __try_alloc_bucket()
261 ob->valid = true; in __try_alloc_bucket()
262 ob->sectors_free = ca->mi.bucket_size; in __try_alloc_bucket()
263 ob->dev = ca->dev_idx; in __try_alloc_bucket()
264 ob->gen = gen; in __try_alloc_bucket()
265 ob->bucket = bucket; in __try_alloc_bucket()
266 spin_unlock(&ob->lock); in __try_alloc_bucket()
268 ca->nr_open_buckets++; in __try_alloc_bucket()
271 track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], false); in __try_alloc_bucket()
272 track_event_change(&c->times[BCH_TIME_blocked_allocate], false); in __try_alloc_bucket()
274 spin_unlock(&c->freelist_lock); in __try_alloc_bucket()
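
__try_alloc_bucket() gates the freelist on the caller's watermark: open_buckets_reserved() maps the watermark to a number of open buckets that must stay free, so low-priority allocations fail with -BCH_ERR_open_buckets_empty (and join open_buckets_wait) before they can starve copygc, btree, or reclaim allocations. A simplified sketch of that gate; the watermark names echo bcachefs, but the reserve sizes are invented for illustration:

#include <stdio.h>

enum watermark {                /* loosely modeled on the bcachefs watermarks */
        WATERMARK_stripe,
        WATERMARK_normal,
        WATERMARK_copygc,
        WATERMARK_btree,
        WATERMARK_reclaim,      /* highest priority, may drain the reserve */
};

/* how many free open buckets each watermark must leave untouched (made up) */
static unsigned reserved(enum watermark w)
{
        switch (w) {
        case WATERMARK_reclaim: return 0;
        case WATERMARK_btree:   return 8;
        case WATERMARK_copygc:  return 16;
        default:                return 24;
        }
}

static int try_alloc(unsigned nr_free, enum watermark w)
{
        if (nr_free <= reserved(w))
                return -1;      /* would eat into a higher-priority reserve */
        return 0;
}

int main(void)
{
        printf("normal with 20 free:  %d\n", try_alloc(20, WATERMARK_normal));
        printf("reclaim with 20 free: %d\n", try_alloc(20, WATERMARK_reclaim));
        return 0;
}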
284 struct bch_fs *c = trans->c; in try_alloc_bucket()
285 u64 b = freespace_iter->pos.offset & ~(~0ULL << 56); in try_alloc_bucket()
287 if (!may_alloc_bucket(c, POS(ca->dev_idx, b), s)) in try_alloc_bucket()
310 struct bch_fs *c = trans->c; in bch2_bucket_alloc_early()
314 u64 first_bucket = ca->mi.first_bucket; in bch2_bucket_alloc_early()
315 u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap]; in bch2_bucket_alloc_early()
323 * there is no other underlying protection for the associated key cache in bch2_bucket_alloc_early()
330 for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor), in bch2_bucket_alloc_early()
332 u64 bucket = k.k->p.offset; in bch2_bucket_alloc_early()
334 if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets))) in bch2_bucket_alloc_early()
337 if (s->btree_bitmap != BTREE_BITMAP_ANY && in bch2_bucket_alloc_early()
338 s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, in bch2_bucket_alloc_early()
339 bucket_to_sector(ca, bucket), ca->mi.bucket_size)) { in bch2_bucket_alloc_early()
340 if (s->btree_bitmap == BTREE_BITMAP_YES && in bch2_bucket_alloc_early()
341 bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift) in bch2_bucket_alloc_early()
346 1ULL << ca->mi.btree_bitmap_shift)); in bch2_bucket_alloc_early()
347 bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket)); in bch2_bucket_alloc_early()
348 s->buckets_seen++; in bch2_bucket_alloc_early()
349 s->skipped_mi_btree_bitmap++; in bch2_bucket_alloc_early()
355 if (a->data_type != BCH_DATA_free) in bch2_bucket_alloc_early()
359 ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached); in bch2_bucket_alloc_early()
365 if (a->data_type != BCH_DATA_free) in bch2_bucket_alloc_early()
368 s->buckets_seen++; in bch2_bucket_alloc_early()
370 ob = may_alloc_bucket(c, k.k->p, s) in bch2_bucket_alloc_early()
371 ? __try_alloc_bucket(c, ca, k.k->p.offset, a->gen, in bch2_bucket_alloc_early()
406 u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap]; in bch2_bucket_alloc_freelist()
407 u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor)); in bch2_bucket_alloc_freelist()
412 POS(ca->dev_idx, alloc_cursor), in bch2_bucket_alloc_freelist()
413 POS(ca->dev_idx, U64_MAX), in bch2_bucket_alloc_freelist()
416 * peek normally doesn't trim extents - they can span iter.pos, in bch2_bucket_alloc_freelist()
419 iter.k.size = iter.k.p.offset - iter.pos.offset; in bch2_bucket_alloc_freelist()
422 s->buckets_seen++; in bch2_bucket_alloc_freelist()
425 if (s->btree_bitmap != BTREE_BITMAP_ANY && in bch2_bucket_alloc_freelist()
426 s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, in bch2_bucket_alloc_freelist()
427 bucket_to_sector(ca, bucket), ca->mi.bucket_size)) { in bch2_bucket_alloc_freelist()
428 if (s->btree_bitmap == BTREE_BITMAP_YES && in bch2_bucket_alloc_freelist()
429 bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift) in bch2_bucket_alloc_freelist()
434 1ULL << ca->mi.btree_bitmap_shift)); in bch2_bucket_alloc_freelist()
437 bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor)); in bch2_bucket_alloc_freelist()
438 s->skipped_mi_btree_bitmap++; in bch2_bucket_alloc_freelist()
450 iter.k.size--; in bch2_bucket_alloc_freelist()
465 if (!ob && alloc_start > ca->mi.first_bucket) { in bch2_bucket_alloc_freelist()
466 alloc_cursor = alloc_start = ca->mi.first_bucket; in bch2_bucket_alloc_freelist()
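
The tail of bch2_bucket_alloc_freelist() shows the cursor wraparound: scanning starts at the per-device alloc_cursor, and only if nothing is found from there to the end of the device does it reset to ca->mi.first_bucket and scan once more. A minimal sketch of that scan-then-wrap pattern, where find_free() stands in for the freespace-btree iteration:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define FIRST_BUCKET    16
#define NBUCKETS        1024

static bool find_free(uint64_t b)
{
        return b == 20;         /* pretend bucket 20 is the only free one */
}

static int64_t alloc_bucket(uint64_t *cursor)
{
        uint64_t start = *cursor > FIRST_BUCKET ? *cursor : FIRST_BUCKET;

        /* first pass: from the cursor to the end of the device */
        for (uint64_t b = start; b < NBUCKETS; b++)
                if (find_free(b)) {
                        *cursor = b + 1;        /* next alloc resumes here */
                        return b;
                }

        /* nothing past the cursor: wrap to the start and retry once */
        for (uint64_t b = FIRST_BUCKET; b < start; b++)
                if (find_free(b)) {
                        *cursor = b + 1;
                        return b;
                }

        return -1;      /* device genuinely full */
}

int main(void)
{
        uint64_t cursor = 100;  /* earlier allocations advanced the cursor */
        printf("got bucket %lld\n", (long long)alloc_bucket(&cursor));
        return 0;
}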
485 prt_printf(&buf, "dev\t%s (%u)\n", ca->name, ca->dev_idx); in trace_bucket_alloc2()
489 prt_printf(&buf, "free\t%llu\n", usage->d[BCH_DATA_free].buckets); in trace_bucket_alloc2()
493 c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now)); in trace_bucket_alloc2()
494 prt_printf(&buf, "seen\t%llu\n", s->buckets_seen); in trace_bucket_alloc2()
495 prt_printf(&buf, "open\t%llu\n", s->skipped_open); in trace_bucket_alloc2()
496 prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit); in trace_bucket_alloc2()
497 prt_printf(&buf, "nocow\t%llu\n", s->skipped_nocow); in trace_bucket_alloc2()
498 prt_printf(&buf, "nouse\t%llu\n", s->skipped_nouse); in trace_bucket_alloc2()
499 prt_printf(&buf, "mi_btree_bitmap\t%llu\n", s->skipped_mi_btree_bitmap); in trace_bucket_alloc2()
502 prt_printf(&buf, "allocated\t%llu\n", ob->bucket); in trace_bucket_alloc2()
513 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
532 struct bch_fs *c = trans->c; in bch2_bucket_alloc_trans()
534 bool freespace = READ_ONCE(ca->mi.freespace_initialized); in bch2_bucket_alloc_trans()
544 if (usage->d[BCH_DATA_need_discard].buckets > avail) in bch2_bucket_alloc_trans()
547 if (usage->d[BCH_DATA_need_gc_gens].buckets > avail) in bch2_bucket_alloc_trans()
555 c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations) in bch2_bucket_alloc_trans()
559 closure_wait(&c->freelist_wait, cl); in bch2_bucket_alloc_trans()
564 track_event_change(&c->times[BCH_TIME_blocked_allocate], true); in bch2_bucket_alloc_trans()
566 ob = ERR_PTR(-BCH_ERR_freelist_empty); in bch2_bucket_alloc_trans()
571 closure_wake_up(&c->freelist_wait); in bch2_bucket_alloc_trans()
578 bch2_journal_flush_async(&c->journal, NULL); in bch2_bucket_alloc_trans()
585 if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) { in bch2_bucket_alloc_trans()
591 ob = ERR_PTR(-BCH_ERR_no_buckets_found); in bch2_bucket_alloc_trans()
594 ob->data_type = data_type; in bch2_bucket_alloc_trans()
626 return ((stripe->next_alloc[l] > stripe->next_alloc[r]) - in __dev_stripe_cmp()
627 (stripe->next_alloc[l] < stripe->next_alloc[r])); in __dev_stripe_cmp()
639 for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX) in bch2_dev_alloc_list()
650 u64 *v = stripe->next_alloc + ca->dev_idx; in bch2_dev_stripe_increment_inlined()
662 for (v = stripe->next_alloc; in bch2_dev_stripe_increment_inlined()
663 v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++) in bch2_dev_stripe_increment_inlined()
664 *v = *v < scale ? 0 : *v - scale; in bch2_dev_stripe_increment_inlined()
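
__dev_stripe_cmp() uses the overflow-safe three-way idiom (a > b) - (a < b) on the per-device next_alloc counters, so the least-recently-charged device sorts first; and after a counter is bumped, bch2_dev_stripe_increment_inlined() rescales every counter down toward zero (clamping at zero, as on line 664) so the u64s never overflow while the relative order is preserved. A compact standalone sketch of both pieces:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_DEVS 4

static uint64_t next_alloc[NR_DEVS];

/* three-way compare without subtraction, so huge u64s can't overflow */
static int dev_cmp(const void *l, const void *r)
{
        uint64_t a = next_alloc[*(const unsigned *)l];
        uint64_t b = next_alloc[*(const unsigned *)r];

        return (a > b) - (a < b);
}

/* after charging one device, shift all counters down, clamping at zero */
static void rescale(uint64_t scale)
{
        for (unsigned i = 0; i < NR_DEVS; i++)
                next_alloc[i] = next_alloc[i] < scale ? 0 : next_alloc[i] - scale;
}

int main(void)
{
        unsigned order[NR_DEVS] = {0, 1, 2, 3};

        next_alloc[0] = 300;    /* most heavily used */
        next_alloc[1] = 100;    /* least used: should be tried first */
        next_alloc[2] = 200;
        next_alloc[3] = 150;

        qsort(order, NR_DEVS, sizeof(order[0]), dev_cmp);
        printf("try device %u first\n", order[0]);      /* -> 1 */

        rescale(100);   /* device 1's counter clamps at 0 */
        printf("dev1 counter now %llu\n",
               (unsigned long long)next_alloc[1]);
        return 0;
}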
684 unsigned durability = ob_dev(c, ob)->mi.durability; in add_new_bucket()
688 __clear_bit(ob->dev, devs_may_alloc->d); in add_new_bucket()
696 if (ob->ec) in add_new_bucket()
713 struct bch_fs *c = trans->c; in bch2_bucket_alloc_set_trans()
714 int ret = -BCH_ERR_insufficient_devices; in bch2_bucket_alloc_set_trans()
724 if (!ca->mi.durability && *have_cache) { in bch2_bucket_alloc_set_trans()
764 struct write_point *wp, in bucket_alloc_from_stripe() argument
774 struct bch_fs *c = trans->c; in bucket_alloc_from_stripe()
784 bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl); in bucket_alloc_from_stripe()
790 struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc); in bucket_alloc_from_stripe()
792 for (unsigned ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) { in bucket_alloc_from_stripe()
793 if (!h->s->blocks[ec_idx]) in bucket_alloc_from_stripe()
796 struct open_bucket *ob = c->open_buckets + h->s->blocks[ec_idx]; in bucket_alloc_from_stripe()
797 if (ob->dev == *i && !test_and_set_bit(ec_idx, h->s->blocks_allocated)) { in bucket_alloc_from_stripe()
798 ob->ec_idx = ec_idx; in bucket_alloc_from_stripe()
799 ob->ec = h->s; in bucket_alloc_from_stripe()
800 ec_stripe_new_get(h->s, STRIPE_REF_io); in bucket_alloc_from_stripe()
816 struct write_point *wp, in want_bucket() argument
823 if (!test_bit(ob->dev, devs_may_alloc->d)) in want_bucket()
826 if (ob->data_type != wp->data_type) in want_bucket()
829 if (!ca->mi.durability && in want_bucket()
830 (wp->data_type == BCH_DATA_btree || ec || *have_cache)) in want_bucket()
833 if (ec != (ob->ec != NULL)) in want_bucket()
841 struct write_point *wp, in bucket_alloc_set_writepoint() argument
853 open_bucket_for_each(c, &wp->ptrs, ob, i) { in bucket_alloc_set_writepoint()
854 if (!ret && want_bucket(c, wp, devs_may_alloc, in bucket_alloc_set_writepoint()
862 wp->ptrs = ptrs_skip; in bucket_alloc_set_writepoint()
869 struct write_point *wp, in bucket_alloc_set_partial() argument
878 if (!c->open_buckets_partial_nr) in bucket_alloc_set_partial()
881 spin_lock(&c->freelist_lock); in bucket_alloc_set_partial()
883 if (!c->open_buckets_partial_nr) in bucket_alloc_set_partial()
886 for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) { in bucket_alloc_set_partial()
887 struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i]; in bucket_alloc_set_partial()
889 if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) { in bucket_alloc_set_partial()
895 avail = dev_buckets_free(ca, usage, watermark) + ca->nr_partial_buckets; in bucket_alloc_set_partial()
899 array_remove_item(c->open_buckets_partial, in bucket_alloc_set_partial()
900 c->open_buckets_partial_nr, in bucket_alloc_set_partial()
902 ob->on_partial_list = false; in bucket_alloc_set_partial()
905 bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--; in bucket_alloc_set_partial()
916 spin_unlock(&c->freelist_lock); in bucket_alloc_set_partial()
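
bucket_alloc_set_partial() walks c->open_buckets_partial backwards and splices entries out with array_remove_item(); walking from the top means removing entry i never shifts the entries still to be visited. A stand-in for that helper, assuming (plausibly, not verified here) that it is a memmove-style ordered remove:

#include <stdio.h>
#include <string.h>

/* shift everything after pos down one slot, like array_remove_item() */
static void array_remove_item(unsigned *arr, unsigned *nr, unsigned pos)
{
        memmove(&arr[pos], &arr[pos + 1], (*nr - pos - 1) * sizeof(arr[0]));
        --*nr;
}

int main(void)
{
        unsigned partial[5] = {3, 9, 4, 7, 2};
        unsigned nr = 5;

        /* walk backwards so removal doesn't disturb unvisited entries */
        for (int i = nr - 1; i >= 0; --i)
                if (partial[i] & 1)     /* remove the odd entries, say */
                        array_remove_item(partial, &nr, i);

        for (unsigned i = 0; i < nr; i++)
                printf("%u ", partial[i]);      /* -> 4 2 */
        printf("\n");
        return 0;
}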
922 struct write_point *wp, in __open_bucket_add_buckets() argument
933 struct bch_fs *c = trans->c; in __open_bucket_add_buckets()
940 devs = target_rw_devs(c, wp->data_type, target); in __open_bucket_add_buckets()
947 __clear_bit(ob->dev, devs.d); in __open_bucket_add_buckets()
949 ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs, in __open_bucket_add_buckets()
955 ret = bucket_alloc_set_partial(c, ptrs, wp, &devs, in __open_bucket_add_buckets()
962 ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs, in __open_bucket_add_buckets()
973 ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs, in __open_bucket_add_buckets()
975 flags, wp->data_type, watermark, cl); in __open_bucket_add_buckets()
990 struct write_point *wp, in open_bucket_add_buckets() argument
1003 if (erasure_code && !ec_open_bucket(trans->c, ptrs)) { in open_bucket_add_buckets()
1004 ret = __open_bucket_add_buckets(trans, ptrs, wp, in open_bucket_add_buckets()
1017 ret = __open_bucket_add_buckets(trans, ptrs, wp, in open_bucket_add_buckets()
1025 * should_drop_bucket - check if this open_bucket should go away in should_drop_bucket()
1035 * coding, or the entire filesystem - check if this open_bucket matches:
1041 return ob->ec != NULL; in should_drop_bucket()
1043 bool drop = ob->dev == ca->dev_idx; in should_drop_bucket()
1047 if (!drop && ob->ec) { in should_drop_bucket()
1050 mutex_lock(&ob->ec->lock); in should_drop_bucket()
1051 nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks; in should_drop_bucket()
1054 if (!ob->ec->blocks[i]) in should_drop_bucket()
1057 ob2 = c->open_buckets + ob->ec->blocks[i]; in should_drop_bucket()
1058 drop |= ob2->dev == ca->dev_idx; in should_drop_bucket()
1060 mutex_unlock(&ob->ec->lock); in should_drop_bucket()
1070 bool ec, struct write_point *wp) in bch2_writepoint_stop() argument
1076 mutex_lock(&wp->lock); in bch2_writepoint_stop()
1077 open_bucket_for_each(c, &wp->ptrs, ob, i) in bch2_writepoint_stop()
1082 wp->ptrs = ptrs; in bch2_writepoint_stop()
1083 mutex_unlock(&wp->lock); in bch2_writepoint_stop()
1092 for (i = 0; i < ARRAY_SIZE(c->write_points); i++) in bch2_open_buckets_stop()
1093 bch2_writepoint_stop(c, ca, ec, &c->write_points[i]); in bch2_open_buckets_stop()
1095 bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point); in bch2_open_buckets_stop()
1096 bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point); in bch2_open_buckets_stop()
1097 bch2_writepoint_stop(c, ca, ec, &c->btree_write_point); in bch2_open_buckets_stop()
1099 mutex_lock(&c->btree_reserve_cache_lock); in bch2_open_buckets_stop()
1100 while (c->btree_reserve_cache_nr) { in bch2_open_buckets_stop()
1102 &c->btree_reserve_cache[--c->btree_reserve_cache_nr]; in bch2_open_buckets_stop()
1104 bch2_open_buckets_put(c, &a->ob); in bch2_open_buckets_stop()
1106 mutex_unlock(&c->btree_reserve_cache_lock); in bch2_open_buckets_stop()
1108 spin_lock(&c->freelist_lock); in bch2_open_buckets_stop()
1110 while (i < c->open_buckets_partial_nr) { in bch2_open_buckets_stop()
1112 c->open_buckets + c->open_buckets_partial[i]; in bch2_open_buckets_stop()
1115 --c->open_buckets_partial_nr; in bch2_open_buckets_stop()
1116 swap(c->open_buckets_partial[i], in bch2_open_buckets_stop()
1117 c->open_buckets_partial[c->open_buckets_partial_nr]); in bch2_open_buckets_stop()
1119 ob->on_partial_list = false; in bch2_open_buckets_stop()
1122 bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--; in bch2_open_buckets_stop()
1125 spin_unlock(&c->freelist_lock); in bch2_open_buckets_stop()
1127 spin_lock(&c->freelist_lock); in bch2_open_buckets_stop()
1132 spin_unlock(&c->freelist_lock); in bch2_open_buckets_stop()
1141 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash))); in writepoint_hash()
1143 return &c->write_points_hash[hash]; in writepoint_hash()
1149 struct write_point *wp; in __writepoint_find() local
1152 hlist_for_each_entry_rcu(wp, head, node) in __writepoint_find()
1153 if (wp->write_point == write_point) in __writepoint_find()
1155 wp = NULL; in __writepoint_find()
1158 return wp; in __writepoint_find()
1163 u64 stranded = c->write_points_nr * c->bucket_size_max; in too_many_writepoints()
1171 struct write_point *wp; in try_increase_writepoints() local
1173 if (c->write_points_nr == ARRAY_SIZE(c->write_points) || in try_increase_writepoints()
1177 wp = c->write_points + c->write_points_nr++; in try_increase_writepoints()
1178 hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point)); in try_increase_writepoints()
1184 struct bch_fs *c = trans->c; in try_decrease_writepoints()
1185 struct write_point *wp; in try_decrease_writepoints() local
1189 mutex_lock(&c->write_points_hash_lock); in try_decrease_writepoints()
1190 if (c->write_points_nr < old_nr) { in try_decrease_writepoints()
1191 mutex_unlock(&c->write_points_hash_lock); in try_decrease_writepoints()
1195 if (c->write_points_nr == 1 || in try_decrease_writepoints()
1197 mutex_unlock(&c->write_points_hash_lock); in try_decrease_writepoints()
1201 wp = c->write_points + --c->write_points_nr; in try_decrease_writepoints()
1203 hlist_del_rcu(&wp->node); in try_decrease_writepoints()
1204 mutex_unlock(&c->write_points_hash_lock); in try_decrease_writepoints()
1206 bch2_trans_mutex_lock_norelock(trans, &wp->lock); in try_decrease_writepoints()
1207 open_bucket_for_each(c, &wp->ptrs, ob, i) in try_decrease_writepoints()
1209 wp->ptrs.nr = 0; in try_decrease_writepoints()
1210 mutex_unlock(&wp->lock); in try_decrease_writepoints()
1217 struct bch_fs *c = trans->c; in writepoint_find()
1218 struct write_point *wp, *oldest; in writepoint_find() local
1222 wp = (struct write_point *) write_point; in writepoint_find()
1223 bch2_trans_mutex_lock_norelock(trans, &wp->lock); in writepoint_find()
1224 return wp; in writepoint_find()
1229 wp = __writepoint_find(head, write_point); in writepoint_find()
1230 if (wp) { in writepoint_find()
1232 bch2_trans_mutex_lock_norelock(trans, &wp->lock); in writepoint_find()
1233 if (wp->write_point == write_point) in writepoint_find()
1235 mutex_unlock(&wp->lock); in writepoint_find()
1240 for (wp = c->write_points; in writepoint_find()
1241 wp < c->write_points + c->write_points_nr; wp++) in writepoint_find()
1242 if (!oldest || time_before64(wp->last_used, oldest->last_used)) in writepoint_find()
1243 oldest = wp; in writepoint_find()
1245 bch2_trans_mutex_lock_norelock(trans, &oldest->lock); in writepoint_find()
1246 bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock); in writepoint_find()
1247 if (oldest >= c->write_points + c->write_points_nr || in writepoint_find()
1249 mutex_unlock(&c->write_points_hash_lock); in writepoint_find()
1250 mutex_unlock(&oldest->lock); in writepoint_find()
1254 wp = __writepoint_find(head, write_point); in writepoint_find()
1255 if (wp && wp != oldest) { in writepoint_find()
1256 mutex_unlock(&c->write_points_hash_lock); in writepoint_find()
1257 mutex_unlock(&oldest->lock); in writepoint_find()
1261 wp = oldest; in writepoint_find()
1262 hlist_del_rcu(&wp->node); in writepoint_find()
1263 wp->write_point = write_point; in writepoint_find()
1264 hlist_add_head_rcu(&wp->node, head); in writepoint_find()
1265 mutex_unlock(&c->write_points_hash_lock); in writepoint_find()
1267 wp->last_used = local_clock(); in writepoint_find()
1268 return wp; in writepoint_find()
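
writepoint_find() is lookup-then-lock-then-revalidate: __writepoint_find() finds the write point in the RCU hash, its mutex is taken, and wp->write_point is checked again (line 1233) because the eviction path below can steal the oldest write point, re-key it, and rehash it between our lookup and our lock. A hedged sketch of the revalidation loop, using pthreads in place of the kernel primitives and a single chain instead of the hash for brevity:

#include <pthread.h>
#include <stddef.h>

struct write_point {
        pthread_mutex_t lock;
        unsigned long write_point;      /* identity: who this wp belongs to */
        struct write_point *next;       /* hash chain */
};

static struct write_point *find_in_chain(struct write_point *head,
                                         unsigned long id)
{
        for (struct write_point *wp = head; wp; wp = wp->next)
                if (wp->write_point == id)
                        return wp;
        return NULL;
}

static struct write_point *writepoint_get(struct write_point *head,
                                          unsigned long id)
{
        struct write_point *wp;

        while ((wp = find_in_chain(head, id))) {
                pthread_mutex_lock(&wp->lock);

                /* still ours? it may have been reused while we slept */
                if (wp->write_point == id)
                        return wp;      /* locked and validated */

                pthread_mutex_unlock(&wp->lock);
                /* lost a race: look it up again */
        }

        return NULL;    /* caller falls back to evicting the oldest wp */
}

int main(void)
{
        struct write_point a = { PTHREAD_MUTEX_INITIALIZER, 42, NULL };
        struct write_point *wp = writepoint_get(&a, 42);

        if (wp)
                pthread_mutex_unlock(&wp->lock);
        return 0;
}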
1282 unsigned d = ob_dev(c, ob)->mi.durability; in deallocate_extra_replicas()
1285 extra_replicas -= d; in deallocate_extra_replicas()
1310 struct bch_fs *c = trans->c; in bch2_alloc_sectors_start_trans()
1311 struct write_point *wp; in bch2_alloc_sectors_start_trans() local
1326 write_points_nr = c->write_points_nr; in bch2_alloc_sectors_start_trans()
1329 *wp_ret = wp = writepoint_find(trans, write_point.v); in bch2_alloc_sectors_start_trans()
1336 if (wp->data_type != BCH_DATA_user) in bch2_alloc_sectors_start_trans()
1340 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, in bch2_alloc_sectors_start_trans()
1351 int ret2 = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, in bch2_alloc_sectors_start_trans()
1370 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, in bch2_alloc_sectors_start_trans()
1376 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, in bch2_alloc_sectors_start_trans()
1388 if (ret == -BCH_ERR_insufficient_devices && in bch2_alloc_sectors_start_trans()
1396 deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas); in bch2_alloc_sectors_start_trans()
1399 open_bucket_for_each(c, &wp->ptrs, ob, i) in bch2_alloc_sectors_start_trans()
1402 wp->ptrs = ptrs; in bch2_alloc_sectors_start_trans()
1404 wp->sectors_free = UINT_MAX; in bch2_alloc_sectors_start_trans()
1406 open_bucket_for_each(c, &wp->ptrs, ob, i) in bch2_alloc_sectors_start_trans()
1407 wp->sectors_free = min(wp->sectors_free, ob->sectors_free); in bch2_alloc_sectors_start_trans()
1409 BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX); in bch2_alloc_sectors_start_trans()
1413 open_bucket_for_each(c, &wp->ptrs, ob, i) in bch2_alloc_sectors_start_trans()
1418 wp->ptrs = ptrs; in bch2_alloc_sectors_start_trans()
1420 mutex_unlock(&wp->lock); in bch2_alloc_sectors_start_trans()
1427 ret = -BCH_ERR_bucket_alloc_blocked; in bch2_alloc_sectors_start_trans()
1431 ret = -BCH_ERR_bucket_alloc_blocked; in bch2_alloc_sectors_start_trans()
1442 .gen = ob->gen, in bch2_ob_ptr()
1443 .dev = ob->dev, in bch2_ob_ptr()
1444 .offset = bucket_to_sector(ca, ob->bucket) + in bch2_ob_ptr()
1445 ca->mi.bucket_size - in bch2_ob_ptr()
1446 ob->sectors_free, in bch2_ob_ptr()
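
bch2_ob_ptr() computes where the next write to this open bucket lands: allocation proceeds from the front of the bucket and ob->sectors_free counts down, so the pointer offset is the bucket's starting sector plus bucket_size minus sectors_free. A worked example with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t bucket = 7;            /* which bucket on the device */
        unsigned bucket_size = 512;     /* sectors per bucket */
        unsigned sectors_free = 384;    /* 128 sectors already handed out */

        /* bucket_to_sector() reduces to bucket * bucket_size here */
        uint64_t offset = bucket * bucket_size + bucket_size - sectors_free;

        printf("next write lands at sector %llu\n",
               (unsigned long long)offset);     /* 7*512 + 128 = 3712 */
        return 0;
}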
1450 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp, in bch2_alloc_sectors_append_ptrs() argument
1454 bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached); in bch2_alloc_sectors_append_ptrs()
1461 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp) in bch2_alloc_sectors_done() argument
1463 bch2_alloc_sectors_done_inlined(c, wp); in bch2_alloc_sectors_done()
1466 static inline void writepoint_init(struct write_point *wp, in writepoint_init() argument
1469 mutex_init(&wp->lock); in writepoint_init()
1470 wp->data_type = type; in writepoint_init()
1472 INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates); in writepoint_init()
1473 INIT_LIST_HEAD(&wp->writes); in writepoint_init()
1474 spin_lock_init(&wp->writes_lock); in writepoint_init()
1480 struct write_point *wp; in bch2_fs_allocator_foreground_init() local
1482 mutex_init(&c->write_points_hash_lock); in bch2_fs_allocator_foreground_init()
1483 c->write_points_nr = ARRAY_SIZE(c->write_points); in bch2_fs_allocator_foreground_init()
1486 spin_lock_init(&c->open_buckets[0].lock); in bch2_fs_allocator_foreground_init()
1488 for (ob = c->open_buckets + 1; in bch2_fs_allocator_foreground_init()
1489 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) { in bch2_fs_allocator_foreground_init()
1490 spin_lock_init(&ob->lock); in bch2_fs_allocator_foreground_init()
1491 c->open_buckets_nr_free++; in bch2_fs_allocator_foreground_init()
1493 ob->freelist = c->open_buckets_freelist; in bch2_fs_allocator_foreground_init()
1494 c->open_buckets_freelist = ob - c->open_buckets; in bch2_fs_allocator_foreground_init()
1497 writepoint_init(&c->btree_write_point, BCH_DATA_btree); in bch2_fs_allocator_foreground_init()
1498 writepoint_init(&c->rebalance_write_point, BCH_DATA_user); in bch2_fs_allocator_foreground_init()
1499 writepoint_init(&c->copygc_write_point, BCH_DATA_user); in bch2_fs_allocator_foreground_init()
1501 for (wp = c->write_points; in bch2_fs_allocator_foreground_init()
1502 wp < c->write_points + c->write_points_nr; wp++) { in bch2_fs_allocator_foreground_init()
1503 writepoint_init(wp, BCH_DATA_user); in bch2_fs_allocator_foreground_init()
1505 wp->last_used = local_clock(); in bch2_fs_allocator_foreground_init()
1506 wp->write_point = (unsigned long) wp; in bch2_fs_allocator_foreground_init()
1507 hlist_add_head_rcu(&wp->node, in bch2_fs_allocator_foreground_init()
1508 writepoint_hash(c, wp->write_point)); in bch2_fs_allocator_foreground_init()
1515 unsigned data_type = ob->data_type; in bch2_open_bucket_to_text()
1519 ob - c->open_buckets, in bch2_open_bucket_to_text()
1520 atomic_read(&ob->pin)); in bch2_open_bucket_to_text()
1523 ob->dev, ob->bucket, ob->gen, in bch2_open_bucket_to_text()
1524 ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size); in bch2_open_bucket_to_text()
1525 if (ob->ec) in bch2_open_bucket_to_text()
1526 prt_printf(out, " ec idx %llu", ob->ec->idx); in bch2_open_bucket_to_text()
1527 if (ob->on_partial_list) in bch2_open_bucket_to_text()
1537 out->atomic++; in bch2_open_buckets_to_text()
1539 for (ob = c->open_buckets; in bch2_open_buckets_to_text()
1540 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); in bch2_open_buckets_to_text()
1542 spin_lock(&ob->lock); in bch2_open_buckets_to_text()
1543 if (ob->valid && (!ca || ob->dev == ca->dev_idx)) in bch2_open_buckets_to_text()
1545 spin_unlock(&ob->lock); in bch2_open_buckets_to_text()
1548 --out->atomic; in bch2_open_buckets_to_text()
1555 out->atomic++; in bch2_open_buckets_partial_to_text()
1556 spin_lock(&c->freelist_lock); in bch2_open_buckets_partial_to_text()
1558 for (i = 0; i < c->open_buckets_partial_nr; i++) in bch2_open_buckets_partial_to_text()
1560 c->open_buckets + c->open_buckets_partial[i]); in bch2_open_buckets_partial_to_text()
1562 spin_unlock(&c->freelist_lock); in bch2_open_buckets_partial_to_text()
1563 --out->atomic; in bch2_open_buckets_partial_to_text()
1574 struct write_point *wp) in bch2_write_point_to_text() argument
1579 prt_printf(out, "%lu: ", wp->write_point); in bch2_write_point_to_text()
1580 prt_human_readable_u64(out, wp->sectors_allocated); in bch2_write_point_to_text()
1583 bch2_pr_time_units(out, sched_clock() - wp->last_used); in bch2_write_point_to_text()
1587 bch2_pr_time_units(out, wp->time[i]); in bch2_write_point_to_text()
1593 open_bucket_for_each(c, &wp->ptrs, ob, i) in bch2_write_point_to_text()
1600 struct write_point *wp; in bch2_write_points_to_text() local
1603 for (wp = c->write_points; in bch2_write_points_to_text()
1604 wp < c->write_points + ARRAY_SIZE(c->write_points); in bch2_write_points_to_text()
1605 wp++) in bch2_write_points_to_text()
1606 bch2_write_point_to_text(out, c, wp); in bch2_write_points_to_text()
1609 bch2_write_point_to_text(out, c, &c->copygc_write_point); in bch2_write_points_to_text()
1612 bch2_write_point_to_text(out, c, &c->rebalance_write_point); in bch2_write_points_to_text()
1615 bch2_write_point_to_text(out, c, &c->btree_write_point); in bch2_write_points_to_text()
1624 for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++) in bch2_fs_alloc_debug_to_text()
1625 nr[c->open_buckets[i].data_type]++; in bch2_fs_alloc_debug_to_text()
1630 prt_printf(out, "capacity\t%llu\n", c->capacity); in bch2_fs_alloc_debug_to_text()
1631 prt_printf(out, "reserved\t%llu\n", c->reserved); in bch2_fs_alloc_debug_to_text()
1632 prt_printf(out, "hidden\t%llu\n", percpu_u64_get(&c->usage->hidden)); in bch2_fs_alloc_debug_to_text()
1633 prt_printf(out, "btree\t%llu\n", percpu_u64_get(&c->usage->btree)); in bch2_fs_alloc_debug_to_text()
1634 prt_printf(out, "data\t%llu\n", percpu_u64_get(&c->usage->data)); in bch2_fs_alloc_debug_to_text()
1635 prt_printf(out, "cached\t%llu\n", percpu_u64_get(&c->usage->cached)); in bch2_fs_alloc_debug_to_text()
1636 prt_printf(out, "reserved\t%llu\n", percpu_u64_get(&c->usage->reserved)); in bch2_fs_alloc_debug_to_text()
1637 prt_printf(out, "online_reserved\t%llu\n", percpu_u64_get(c->online_reserved)); in bch2_fs_alloc_debug_to_text()
1638 prt_printf(out, "nr_inodes\t%llu\n", percpu_u64_get(&c->usage->nr_inodes)); in bch2_fs_alloc_debug_to_text()
1641 prt_printf(out, "freelist_wait\t%s\n", c->freelist_wait.list.first ? "waiting" : "empty"); in bch2_fs_alloc_debug_to_text()
1642 prt_printf(out, "open buckets allocated\t%i\n", OPEN_BUCKETS_COUNT - c->open_buckets_nr_free); in bch2_fs_alloc_debug_to_text()
1644 prt_printf(out, "open_buckets_wait\t%s\n", c->open_buckets_wait.list.first ? "waiting" : "empty"); in bch2_fs_alloc_debug_to_text()
1647 prt_printf(out, "btree reserve cache\t%u\n", c->btree_reserve_cache_nr); in bch2_fs_alloc_debug_to_text()
1652 struct bch_fs *c = ca->fs; in bch2_dev_alloc_debug_to_text()
1658 for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++) in bch2_dev_alloc_debug_to_text()
1659 nr[c->open_buckets[i].data_type]++; in bch2_dev_alloc_debug_to_text()
1675 prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets); in bch2_dev_alloc_debug_to_text()
1684 c->opts.allocator_stuck_timeout); in bch2_print_allocator_stuck()
1693 prt_printf(&buf, "Dev %u:\n", ca->dev_idx); in bch2_print_allocator_stuck()
1708 bch2_journal_debug_to_text(&buf, &c->journal); in bch2_print_allocator_stuck()
1717 if (c->allocator_last_stuck && in allocator_wait_timeout()
1718 time_after(c->allocator_last_stuck + HZ * 60 * 2, jiffies)) in allocator_wait_timeout()
1721 return c->opts.allocator_stuck_timeout * HZ; in allocator_wait_timeout()
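
allocator_wait_timeout() implements a suppression window for the stuck-allocator report: if the warning already fired within the last two minutes (the time_after() check on line 1718), the wait is armed without the warning timeout; otherwise the configured allocator_stuck_timeout is used. The suppressed branch's body isn't visible in this excerpt, so the sketch below assumes it returns 0, meaning "no timeout":

#include <stdio.h>
#include <time.h>

#define STUCK_TIMEOUT_SECS      30      /* stands in for opts.allocator_stuck_timeout */
#define SUPPRESS_SECS           (60 * 2)

static time_t last_stuck;       /* 0 = the warning has never fired */

/* returns 0 to suppress the warning timeout, else the timeout to arm */
static unsigned wait_timeout(time_t now)
{
        if (last_stuck && now < last_stuck + SUPPRESS_SECS)
                return 0;       /* already warned recently: stay quiet */

        return STUCK_TIMEOUT_SECS;
}

int main(void)
{
        time_t now = time(NULL);

        printf("first wait arms a %us timeout\n", wait_timeout(now));
        last_stuck = now;       /* the warning fired */
        printf("next wait arms a %us timeout\n", wait_timeout(now + 10));
        return 0;
}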
1729 c->allocator_last_stuck = jiffies; in __bch2_wait_on_allocator()