Lines Matching +full:scrubber +full:- +full:done

1 // SPDX-License-Identifier: GPL-2.0-only
6 #include "slab-depot.h"
16 #include "memory-alloc.h"
19 #include "string-utils.h"
21 #include "action-manager.h"
22 #include "admin-state.h"
25 #include "data-vio.h"
27 #include "io-submitter.h"
28 #include "physical-zone.h"
29 #include "priority-table.h"
30 #include "recovery-journal.h"
32 #include "status-codes.h"
36 #include "wait-queue.h"
42 * get_lock() - Get the lock object for a slab journal block by sequence number.
51 return &journal->locks[sequence_number % journal->size]; in get_lock()
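The visible body indexes the lock array by sequence number modulo the journal size, so the locks form a ring keyed by on-disk block sequence number. A minimal standalone sketch of that indexing follows; the struct and field names here are simplified stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct journal_lock { unsigned int count; };

struct slab_journal_sketch {
	size_t size;                 /* journal length in blocks */
	struct journal_lock *locks;  /* one lock per journal block slot */
};

/* Same idea as get_lock(): sequence numbers wrap onto a fixed ring of locks. */
static struct journal_lock *sketch_get_lock(struct slab_journal_sketch *j,
					     uint64_t sequence_number)
{
	return &j->locks[sequence_number % j->size];
}

int main(void)
{
	struct journal_lock locks[8] = { { 0 } };
	struct slab_journal_sketch j = { .size = 8, .locks = locks };

	/* Sequence numbers 3 and 11 share a slot once the journal wraps. */
	printf("%d\n", sketch_get_lock(&j, 3) == sketch_get_lock(&j, 11));
	return 0;
}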
56 return (!vdo_is_state_quiescing(&slab->state) && in is_slab_open()
57 !vdo_is_state_quiescent(&slab->state)); in is_slab_open()
61 * must_make_entries_to_flush() - Check whether there are entry waiters which should delay a flush.
68 return ((journal->slab->status != VDO_SLAB_REBUILDING) && in must_make_entries_to_flush()
69 vdo_waitq_has_waiters(&journal->entry_waiters)); in must_make_entries_to_flush()
73 * is_reaping() - Check whether a reap is currently in progress.
80 return (journal->head != journal->unreapable); in is_reaping()
84 * initialize_tail_block() - Initialize tail block as a new block.
89 struct slab_journal_block_header *header = &journal->tail_header; in initialize_tail_block()
91 header->sequence_number = journal->tail; in initialize_tail_block()
92 header->entry_count = 0; in initialize_tail_block()
93 header->has_block_map_increments = false; in initialize_tail_block()
97 * initialize_journal_state() - Set all journal fields appropriately to start journaling.
102 journal->unreapable = journal->head; in initialize_journal_state()
103 journal->reap_lock = get_lock(journal, journal->unreapable); in initialize_journal_state()
104 journal->next_commit = journal->tail; in initialize_journal_state()
105 journal->summarized = journal->last_summarized = journal->tail; in initialize_journal_state()
110 * block_is_full() - Check whether a journal block is full.
117 journal_entry_count_t count = journal->tail_header.entry_count; in block_is_full()
119 return (journal->tail_header.has_block_map_increments ? in block_is_full()
120 (journal->full_entries_per_block == count) : in block_is_full()
121 (journal->entries_per_block == count)); in block_is_full()
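A block that contains block map increments also carries per-entry flag bits (see the entry_types array in encode_slab_journal_entry() below), so it holds fewer entries; the fullness test therefore compares against the smaller capacity in that case. A hedged sketch of the same branch; the capacity values are illustrative, not the real on-disk limits.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative capacities only; the real values come from the block layout. */
#define ENTRIES_PER_BLOCK       1365
#define FULL_ENTRIES_PER_BLOCK  1280

struct tail_header_sketch {
	unsigned int entry_count;
	bool has_block_map_increments;
};

/* Mirrors block_is_full(): the capacity shrinks when flag bits are present. */
static bool sketch_block_is_full(const struct tail_header_sketch *h)
{
	return h->has_block_map_increments ?
	       (h->entry_count == FULL_ENTRIES_PER_BLOCK) :
	       (h->entry_count == ENTRIES_PER_BLOCK);
}

int main(void)
{
	struct tail_header_sketch h = { FULL_ENTRIES_PER_BLOCK, true };

	printf("%d\n", sketch_block_is_full(&h)); /* prints 1 */
	return 0;
}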
129 * is_slab_journal_blank() - Check whether a slab's journal is blank.
137 return ((slab->journal.tail == 1) && in is_slab_journal_blank()
138 (slab->journal.tail_header.entry_count == 0)); in is_slab_journal_blank()
142 * mark_slab_journal_dirty() - Put a slab journal on the dirty ring of its allocator in the correct
150 struct list_head *dirty_list = &journal->slab->allocator->dirty_slab_journals; in mark_slab_journal_dirty()
152 VDO_ASSERT_LOG_ONLY(journal->recovery_lock == 0, "slab journal was clean"); in mark_slab_journal_dirty()
154 journal->recovery_lock = lock; in mark_slab_journal_dirty()
156 if (dirty_journal->recovery_lock <= journal->recovery_lock) in mark_slab_journal_dirty()
160 list_move_tail(&journal->dirty_entry, dirty_journal->dirty_entry.next); in mark_slab_journal_dirty()
165 journal->recovery_lock = 0; in mark_slab_journal_clean()
166 list_del_init(&journal->dirty_entry); in mark_slab_journal_clean()
172 struct slab_journal *journal = &slab->journal; in check_if_slab_drained()
175 if (!vdo_is_state_draining(&slab->state) || in check_if_slab_drained()
178 journal->waiting_to_commit || in check_if_slab_drained()
179 !list_empty(&journal->uncommitted_blocks) || in check_if_slab_drained()
180 journal->updating_slab_summary || in check_if_slab_drained()
181 (slab->active_count > 0)) in check_if_slab_drained()
185 code = vdo_get_admin_state_code(&slab->state); in check_if_slab_drained()
186 read_only = vdo_is_read_only(slab->allocator->depot->vdo); in check_if_slab_drained()
188 vdo_waitq_has_waiters(&slab->dirty_blocks) && in check_if_slab_drained()
193 vdo_finish_draining_with_result(&slab->state, in check_if_slab_drained()
200 * compute_fullness_hint() - Translate a slab's free block count into a 'fullness hint' that can be
224 hint = free_blocks >> depot->hint_shift; in compute_fullness_hint()
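The hint is the free-block count right-shifted by a depot-wide hint_shift, so it fits in the small slab summary field; update_tail_block_location() later recovers an approximate free count by shifting the hint back up. A minimal round-trip sketch, assuming an illustrative shift value and ignoring the clamping the real function may do.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only; the real shift is derived from the slab size. */
#define HINT_SHIFT 6

/* Compress a free-block count into a small hint, as compute_fullness_hint() does. */
static uint8_t fullness_hint(uint64_t free_blocks)
{
	return (uint8_t)(free_blocks >> HINT_SHIFT);
}

/* Recover an approximate free-block count from the hint, as the summary readers do. */
static uint64_t approximate_free_blocks(uint8_t hint)
{
	return ((uint64_t)hint) << HINT_SHIFT;
}

int main(void)
{
	uint64_t free_blocks = 1000;
	uint8_t hint = fullness_hint(free_blocks);

	/* The round trip loses only the low HINT_SHIFT bits of precision. */
	printf("hint=%u approx=%llu\n", hint,
	       (unsigned long long)approximate_free_blocks(hint));
	return 0;
}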
229  * check_summary_drain_complete() - Check whether an allocator's summary has finished draining.

233 if (!vdo_is_state_draining(&allocator->summary_state) || in check_summary_drain_complete()
234 (allocator->summary_write_count > 0)) in check_summary_drain_complete()
237 vdo_finish_operation(&allocator->summary_state, in check_summary_drain_complete()
238 (vdo_is_read_only(allocator->depot->vdo) ? in check_summary_drain_complete()
243 * notify_summary_waiters() - Wake all the waiters in a given queue.
250 int result = (vdo_is_read_only(allocator->depot->vdo) ? in notify_summary_waiters()
259 * finish_updating_slab_summary_block() - Finish processing a block which attempted to write,
265 notify_summary_waiters(block->allocator, &block->current_update_waiters); in finish_updating_slab_summary_block()
266 block->writing = false; in finish_updating_slab_summary_block()
267 block->allocator->summary_write_count--; in finish_updating_slab_summary_block()
268 if (vdo_waitq_has_waiters(&block->next_update_waiters)) in finish_updating_slab_summary_block()
271 check_summary_drain_complete(block->allocator); in finish_updating_slab_summary_block()
275 * finish_update() - This is the callback for a successful summary block write.
283 atomic64_inc(&block->allocator->depot->summary_statistics.blocks_written); in finish_update()
288 * handle_write_error() - Handle an error writing a slab summary block.
297 vdo_enter_read_only_mode(completion->vdo, completion->result); in handle_write_error()
303 struct vio *vio = bio->bi_private; in write_slab_summary_endio()
307 continue_vio_after_io(vio, finish_update, block->allocator->thread_id); in write_slab_summary_endio()
311 * launch_write() - Write a slab summary block unless it is currently out for writing.
316 struct block_allocator *allocator = block->allocator; in launch_write()
317 struct slab_depot *depot = allocator->depot; in launch_write()
320 if (block->writing) in launch_write()
323 allocator->summary_write_count++; in launch_write()
324 vdo_waitq_transfer_all_waiters(&block->next_update_waiters, in launch_write()
325 &block->current_update_waiters); in launch_write()
326 block->writing = true; in launch_write()
328 if (vdo_is_read_only(depot->vdo)) { in launch_write()
333 memcpy(block->outgoing_entries, block->entries, VDO_BLOCK_SIZE); in launch_write()
343 pbn = (depot->summary_origin + in launch_write()
344 (VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE * allocator->zone_number) + in launch_write()
345 block->index); in launch_write()
346 vdo_submit_metadata_vio(&block->vio, pbn, write_slab_summary_endio, in launch_write()
351 * update_slab_summary_entry() - Update the entry for a slab.
364 u8 index = slab->slab_number / VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK; in update_slab_summary_entry()
365 struct block_allocator *allocator = slab->allocator; in update_slab_summary_entry()
366 struct slab_summary_block *block = &allocator->summary_blocks[index]; in update_slab_summary_entry()
370 if (vdo_is_read_only(block->vio.completion.vdo)) { in update_slab_summary_entry()
372 waiter->callback(waiter, &result); in update_slab_summary_entry()
376 if (vdo_is_state_draining(&allocator->summary_state) || in update_slab_summary_entry()
377 vdo_is_state_quiescent(&allocator->summary_state)) { in update_slab_summary_entry()
379 waiter->callback(waiter, &result); in update_slab_summary_entry()
383 entry = &allocator->summary_entries[slab->slab_number]; in update_slab_summary_entry()
386 .load_ref_counts = (entry->load_ref_counts || load_ref_counts), in update_slab_summary_entry()
388 .fullness_hint = compute_fullness_hint(allocator->depot, free_blocks), in update_slab_summary_entry()
390 vdo_waitq_enqueue_waiter(&block->next_update_waiters, waiter); in update_slab_summary_entry()
395 * finish_reaping() - Actually advance the head of the journal now that any necessary flushes are
401 journal->head = journal->unreapable; in finish_reaping()
403 check_if_slab_drained(journal->slab); in finish_reaping()
409 * complete_reaping() - Finish reaping now that we have flushed the lower layer and then try
415 struct slab_journal *journal = completion->parent; in complete_reaping()
417 return_vio_to_pool(journal->slab->allocator->vio_pool, in complete_reaping()
424 * handle_flush_error() - Handle an error flushing the lower layer.
430 vdo_enter_read_only_mode(completion->vdo, completion->result); in handle_flush_error()
436 struct vio *vio = bio->bi_private; in flush_endio()
437 struct slab_journal *journal = vio->completion.parent; in flush_endio()
440 journal->slab->allocator->thread_id); in flush_endio()
444 * flush_for_reaping() - A waiter callback for getting a vio with which to flush the lower layer
454 struct vio *vio = &pooled->vio; in flush_for_reaping()
456 vio->completion.parent = journal; in flush_for_reaping()
461 * reap_slab_journal() - Conduct a reap on a slab journal to reclaim unreferenced blocks.
473 if ((journal->slab->status != VDO_SLAB_REBUILT) || in reap_slab_journal()
474 !vdo_is_state_normal(&journal->slab->state) || in reap_slab_journal()
475 vdo_is_read_only(journal->slab->allocator->depot->vdo)) { in reap_slab_journal()
477 * We must not reap in the first two cases, and there's no point in read-only mode. in reap_slab_journal()
487 while ((journal->unreapable < journal->tail) && (journal->reap_lock->count == 0)) { in reap_slab_journal()
489 journal->unreapable++; in reap_slab_journal()
490 journal->reap_lock++; in reap_slab_journal()
491 if (journal->reap_lock == &journal->locks[journal->size]) in reap_slab_journal()
492 journal->reap_lock = &journal->locks[0]; in reap_slab_journal()
505 * update is not persisted, they may still overwrite the to-be-reaped slab journal block in reap_slab_journal()
508 journal->flush_waiter.callback = flush_for_reaping; in reap_slab_journal()
509 acquire_vio_from_pool(journal->slab->allocator->vio_pool, in reap_slab_journal()
510 &journal->flush_waiter); in reap_slab_journal()
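The reap loop above walks forward from the oldest unreaped block, advancing the unreapable sequence number and the matching lock pointer in step, and wrapping the pointer back to locks[0] when it runs off the end of the ring. A standalone sketch of that wrap-around walk; the struct and field names are simplified stand-ins.

#include <stdint.h>
#include <stdio.h>

struct lock_sketch { unsigned int count; };

struct journal_sketch {
	uint64_t unreapable, tail;
	size_t size;
	struct lock_sketch *locks, *reap_lock;
};

/* Advance past every leading block whose lock count has dropped to zero,
 * wrapping the lock pointer around the ring, as reap_slab_journal() does. */
static void advance_reap_point(struct journal_sketch *j)
{
	while ((j->unreapable < j->tail) && (j->reap_lock->count == 0)) {
		j->unreapable++;
		j->reap_lock++;
		if (j->reap_lock == &j->locks[j->size])
			j->reap_lock = &j->locks[0];
	}
}

int main(void)
{
	struct lock_sketch locks[4] = { {0}, {0}, {2}, {0} };
	struct journal_sketch j = {
		.unreapable = 5, .tail = 9, .size = 4,
		.locks = locks, .reap_lock = &locks[5 % 4],
	};

	advance_reap_point(&j);
	/* Stops at sequence 6, whose lock (slot 2) is still held. */
	printf("unreapable=%llu\n", (unsigned long long)j.unreapable);
	return 0;
}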
514 * adjust_slab_journal_block_reference() - Adjust the reference count for a slab journal block.
530 if (journal->slab->status == VDO_SLAB_REPLAYING) { in adjust_slab_journal_block_reference()
535 VDO_ASSERT_LOG_ONLY((adjustment != 0), "adjustment must be non-zero"); in adjust_slab_journal_block_reference()
538 VDO_ASSERT_LOG_ONLY((-adjustment <= lock->count), in adjust_slab_journal_block_reference()
540 adjustment, lock->count, in adjust_slab_journal_block_reference()
544 lock->count += adjustment; in adjust_slab_journal_block_reference()
545 if (lock->count == 0) in adjust_slab_journal_block_reference()
550 * release_journal_locks() - Callback invoked after a slab summary update completes.
569 * read-only mode. in release_journal_locks()
572 (unsigned long long) journal->summarized); in release_journal_locks()
575 journal->updating_slab_summary = false; in release_journal_locks()
576 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); in release_journal_locks()
577 check_if_slab_drained(journal->slab); in release_journal_locks()
581 if (journal->partial_write_in_progress && (journal->summarized == journal->tail)) { in release_journal_locks()
582 journal->partial_write_in_progress = false; in release_journal_locks()
586 first = journal->last_summarized; in release_journal_locks()
587 journal->last_summarized = journal->summarized; in release_journal_locks()
588 for (i = journal->summarized - 1; i >= first; i--) { in release_journal_locks()
593 if (journal->recovery_journal != NULL) { in release_journal_locks()
594 zone_count_t zone_number = journal->slab->allocator->zone_number; in release_journal_locks()
597 vdo_release_recovery_journal_block_reference(journal->recovery_journal, in release_journal_locks()
598 lock->recovery_start, in release_journal_locks()
607 adjust_slab_journal_block_reference(journal, i, -1); in release_journal_locks()
610 journal->updating_slab_summary = false; in release_journal_locks()
619 * update_tail_block_location() - Update the tail block location in the slab summary, if necessary.
625 struct vdo_slab *slab = journal->slab; in update_tail_block_location()
627 if (journal->updating_slab_summary || in update_tail_block_location()
628 vdo_is_read_only(journal->slab->allocator->depot->vdo) || in update_tail_block_location()
629 (journal->last_summarized >= journal->next_commit)) { in update_tail_block_location()
634 if (slab->status != VDO_SLAB_REBUILT) { in update_tail_block_location()
635 u8 hint = slab->allocator->summary_entries[slab->slab_number].fullness_hint; in update_tail_block_location()
637 free_block_count = ((block_count_t) hint) << slab->allocator->depot->hint_shift; in update_tail_block_location()
639 free_block_count = slab->free_blocks; in update_tail_block_location()
642 journal->summarized = journal->next_commit; in update_tail_block_location()
643 journal->updating_slab_summary = true; in update_tail_block_location()
651 update_slab_summary_entry(slab, &journal->slab_summary_waiter, in update_tail_block_location()
652 journal->summarized % journal->size, in update_tail_block_location()
653 (journal->head > 1), false, free_block_count); in update_tail_block_location()
657 * reopen_slab_journal() - Reopen a slab's journal by emptying it and then adding pending entries.
661 struct slab_journal *journal = &slab->journal; in reopen_slab_journal()
664 VDO_ASSERT_LOG_ONLY(journal->tail_header.entry_count == 0, in reopen_slab_journal()
666 journal->head = journal->tail; in reopen_slab_journal()
670 for (block = 1; block <= journal->size; block++) { in reopen_slab_journal()
671 VDO_ASSERT_LOG_ONLY((get_lock(journal, block)->count == 0), in reopen_slab_journal()
682 (const struct packed_slab_journal_block *) vio->vio.data; in get_committing_sequence_number()
684 return __le64_to_cpu(block->header.sequence_number); in get_committing_sequence_number()
688 * complete_write() - Handle post-commit processing.
695 int result = completion->result; in complete_write()
697 struct slab_journal *journal = completion->parent; in complete_write()
700 list_del_init(&pooled->list_entry); in complete_write()
701 return_vio_to_pool(journal->slab->allocator->vio_pool, vdo_forget(pooled)); in complete_write()
707 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); in complete_write()
708 check_if_slab_drained(journal->slab); in complete_write()
712 WRITE_ONCE(journal->events->blocks_written, journal->events->blocks_written + 1); in complete_write()
714 if (list_empty(&journal->uncommitted_blocks)) { in complete_write()
716 journal->next_commit = journal->tail; in complete_write()
719 pooled = container_of(journal->uncommitted_blocks.next, in complete_write()
721 journal->next_commit = get_committing_sequence_number(pooled); in complete_write()
729 struct vio *vio = bio->bi_private; in write_slab_journal_endio()
730 struct slab_journal *journal = vio->completion.parent; in write_slab_journal_endio()
732 continue_vio_after_io(vio, complete_write, journal->slab->allocator->thread_id); in write_slab_journal_endio()
736 * write_slab_journal_block() - Write a slab journal block.
745 struct vio *vio = &pooled->vio; in write_slab_journal_block()
748 struct slab_journal_block_header *header = &journal->tail_header; in write_slab_journal_block()
749 int unused_entries = journal->entries_per_block - header->entry_count; in write_slab_journal_block()
753 header->head = journal->head; in write_slab_journal_block()
754 list_add_tail(&pooled->list_entry, &journal->uncommitted_blocks); in write_slab_journal_block()
755 vdo_pack_slab_journal_block_header(header, &journal->block->header); in write_slab_journal_block()
758 memcpy(pooled->vio.data, journal->block, VDO_BLOCK_SIZE); in write_slab_journal_block()
763 * Release the per-entry locks for any unused entries in the block we are about to in write_slab_journal_block()
766 adjust_slab_journal_block_reference(journal, header->sequence_number, in write_slab_journal_block()
767 -unused_entries); in write_slab_journal_block()
768 journal->partial_write_in_progress = !block_is_full(journal); in write_slab_journal_block()
771 block_number = journal->slab->journal_origin + in write_slab_journal_block()
772 (header->sequence_number % journal->size); in write_slab_journal_block()
773 vio->completion.parent = journal; in write_slab_journal_block()
784 journal->tail++; in write_slab_journal_block()
786 journal->waiting_to_commit = false; in write_slab_journal_block()
788 operation = vdo_get_admin_state_code(&journal->slab->state); in write_slab_journal_block()
790 vdo_finish_operation(&journal->slab->state, in write_slab_journal_block()
791 (vdo_is_read_only(journal->slab->allocator->depot->vdo) ? in write_slab_journal_block()
800 * commit_tail() - Commit the tail block of the slab journal.
805 if ((journal->tail_header.entry_count == 0) && must_make_entries_to_flush(journal)) { in commit_tail()
813 if (vdo_is_read_only(journal->slab->allocator->depot->vdo) || in commit_tail()
814 journal->waiting_to_commit || in commit_tail()
815 (journal->tail_header.entry_count == 0)) { in commit_tail()
818 * is in read-only mode. in commit_tail()
829 journal->waiting_to_commit = true; in commit_tail()
831 journal->resource_waiter.callback = write_slab_journal_block; in commit_tail()
832 acquire_vio_from_pool(journal->slab->allocator->vio_pool, in commit_tail()
833 &journal->resource_waiter); in commit_tail()
837 * encode_slab_journal_entry() - Encode a slab journal entry.
852 journal_entry_count_t entry_number = tail_header->entry_count++; in encode_slab_journal_entry()
855 if (!tail_header->has_block_map_increments) { in encode_slab_journal_entry()
856 memset(payload->full_entries.entry_types, 0, in encode_slab_journal_entry()
858 tail_header->has_block_map_increments = true; in encode_slab_journal_entry()
861 payload->full_entries.entry_types[entry_number / 8] |= in encode_slab_journal_entry()
865 vdo_pack_slab_journal_entry(&payload->entries[entry_number], sbn, increment); in encode_slab_journal_entry()
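As the fragment above shows, the first block map increment added to a block zeroes the entry_types flag bytes and sets has_block_map_increments; each subsequent block-map entry then sets one bit, indexed by entry_number / 8. A hedged sketch of that one-bit-per-entry flag array, in the spirit of encode_slab_journal_entry() rather than a copy of it; the capacity and struct names are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative capacity; the real value comes from the block layout. */
#define SKETCH_ENTRIES 16

struct payload_sketch {
	uint8_t entry_types[SKETCH_ENTRIES / 8]; /* one flag bit per entry */
};

struct header_sketch {
	unsigned int entry_count;
	bool has_block_map_increments;
};

/* Record that entry n is a block map increment, one bit per entry. */
static void mark_block_map_entry(struct header_sketch *h,
				 struct payload_sketch *p, unsigned int n)
{
	if (!h->has_block_map_increments) {
		memset(p->entry_types, 0, sizeof(p->entry_types));
		h->has_block_map_increments = true;
	}
	p->entry_types[n / 8] |= (uint8_t)(1 << (n % 8));
}

int main(void)
{
	struct header_sketch h = { 0, false };
	struct payload_sketch p;

	mark_block_map_entry(&h, &p, 10);
	printf("byte1=0x%02x\n", (unsigned int)p.entry_types[1]); /* bit 2 set: 0x04 */
	return 0;
}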
869 * expand_journal_point() - Convert a recovery journal journal_point which refers to both an
893 * add_entry() - Actually add an entry to the slab journal, potentially firing off a write if a
907 struct packed_slab_journal_block *block = journal->block; in add_entry()
910 result = VDO_ASSERT(vdo_before_journal_point(&journal->tail_header.recovery_point, in add_entry()
915 (unsigned long long) journal->tail_header.recovery_point.sequence_number, in add_entry()
916 journal->tail_header.recovery_point.entry_count); in add_entry()
918 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); in add_entry()
923 result = VDO_ASSERT((journal->tail_header.entry_count < in add_entry()
924 journal->full_entries_per_block), in add_entry()
927 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, in add_entry()
933 encode_slab_journal_entry(&journal->tail_header, &block->payload, in add_entry()
934 pbn - journal->slab->start, operation, increment); in add_entry()
935 journal->tail_header.recovery_point = recovery_point; in add_entry()
942 return journal->tail - journal->head; in journal_length()
946 * vdo_attempt_replay_into_slab() - Replay a recovery journal entry into a slab's journal.
962 struct slab_journal *journal = &slab->journal; in vdo_attempt_replay_into_slab()
963 struct slab_journal_block_header *header = &journal->tail_header; in vdo_attempt_replay_into_slab()
967 if (!vdo_before_journal_point(&journal->tail_header.recovery_point, &expanded)) in vdo_attempt_replay_into_slab()
970 if ((header->entry_count >= journal->full_entries_per_block) && in vdo_attempt_replay_into_slab()
971 (header->has_block_map_increments || (operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING))) { in vdo_attempt_replay_into_slab()
979 if (journal->waiting_to_commit) { in vdo_attempt_replay_into_slab()
980 vdo_start_operation_with_waiter(&journal->slab->state, in vdo_attempt_replay_into_slab()
986 if (journal_length(journal) >= journal->size) { in vdo_attempt_replay_into_slab()
992 journal->head++; in vdo_attempt_replay_into_slab()
993 journal->unreapable++; in vdo_attempt_replay_into_slab()
996 if (journal->slab->status == VDO_SLAB_REBUILT) in vdo_attempt_replay_into_slab()
997 journal->slab->status = VDO_SLAB_REPLAYING; in vdo_attempt_replay_into_slab()
1004 * requires_reaping() - Check whether the journal must be reaped before adding new entries.
1011 return (journal_length(journal) >= journal->blocking_threshold); in requires_reaping()
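journal_length() is simply tail minus head, and requires_reaping() compares that length against the blocking threshold. The same arithmetic as a self-contained sketch, with simplified stand-in types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct journal_span_sketch {
	uint64_t head, tail;
	uint64_t blocking_threshold;
};

/* Number of journal blocks currently in use, as journal_length() computes it. */
static uint64_t span_length(const struct journal_span_sketch *j)
{
	return j->tail - j->head;
}

/* New entries must wait for a reap once the span reaches the blocking threshold. */
static bool span_requires_reaping(const struct journal_span_sketch *j)
{
	return span_length(j) >= j->blocking_threshold;
}

int main(void)
{
	struct journal_span_sketch j = { .head = 10, .tail = 18, .blocking_threshold = 8 };

	printf("%d\n", span_requires_reaping(&j)); /* prints 1 */
	return 0;
}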
1014 /** finish_summary_update() - A waiter callback that resets the writing state of a slab. */
1020 slab->active_count--; in finish_summary_update()
1024 vdo_enter_read_only_mode(slab->allocator->depot->vdo, result); in finish_summary_update()
1033 * launch_reference_block_write() - Launch the write of a dirty reference block by first acquiring
1045 if (vdo_is_read_only(slab->allocator->depot->vdo)) in launch_reference_block_write()
1048 slab->active_count++; in launch_reference_block_write()
1049 container_of(waiter, struct reference_block, waiter)->is_writing = true; in launch_reference_block_write()
1050 waiter->callback = write_reference_block; in launch_reference_block_write()
1051 acquire_vio_from_pool(slab->allocator->vio_pool, waiter); in launch_reference_block_write()
1056 vdo_waitq_notify_all_waiters(&slab->dirty_blocks, in save_dirty_reference_blocks()
1062  * finish_reference_block_write() - After a reference block has been written, clean it, release its in finish_reference_block_write()
1070 struct reference_block *block = completion->parent; in finish_reference_block_write()
1071 struct vdo_slab *slab = block->slab; in finish_reference_block_write()
1074 slab->active_count--; in finish_reference_block_write()
1077 adjust_slab_journal_block_reference(&slab->journal, in finish_reference_block_write()
1078 block->slab_journal_lock_to_release, -1); in finish_reference_block_write()
1079 return_vio_to_pool(slab->allocator->vio_pool, pooled); in finish_reference_block_write()
1085 block->is_writing = false; in finish_reference_block_write()
1087 if (vdo_is_read_only(completion->vdo)) { in finish_reference_block_write()
1092 /* Re-queue the block if it was re-dirtied while it was writing. */ in finish_reference_block_write()
1093 if (block->is_dirty) { in finish_reference_block_write()
1094 vdo_waitq_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter); in finish_reference_block_write()
1095 if (vdo_is_state_draining(&slab->state)) { in finish_reference_block_write()
1107 if ((slab->active_count > 0) || vdo_waitq_has_waiters(&slab->dirty_blocks)) { in finish_reference_block_write()
1112 offset = slab->allocator->summary_entries[slab->slab_number].tail_block_offset; in finish_reference_block_write()
1113 slab->active_count++; in finish_reference_block_write()
1114 slab->summary_waiter.callback = finish_summary_update; in finish_reference_block_write()
1115 update_slab_summary_entry(slab, &slab->summary_waiter, offset, in finish_reference_block_write()
1116 true, true, slab->free_blocks); in finish_reference_block_write()
1120 * get_reference_counters_for_block() - Find the reference counters for a given block.
1127 size_t block_index = block - block->slab->reference_blocks; in get_reference_counters_for_block()
1129 return &block->slab->counters[block_index * COUNTS_PER_BLOCK]; in get_reference_counters_for_block()
1133 * pack_reference_block() - Copy data from a reference block to a buffer ready to be written out.
1144 vdo_pack_journal_point(&block->slab->slab_journal_point, &commit_point); in pack_reference_block()
1147 packed->sectors[i].commit_point = commit_point; in pack_reference_block()
1148 memcpy(packed->sectors[i].counts, counters + (i * COUNTS_PER_SECTOR), in pack_reference_block()
1155 struct vio *vio = bio->bi_private; in write_reference_block_endio()
1156 struct reference_block *block = vio->completion.parent; in write_reference_block_endio()
1157 thread_id_t thread_id = block->slab->allocator->thread_id; in write_reference_block_endio()
1163 * handle_io_error() - Handle an I/O error reading or writing a reference count block.
1168 int result = completion->result; in handle_io_error()
1170 struct vdo_slab *slab = ((struct reference_block *) completion->parent)->slab; in handle_io_error()
1173 return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio)); in handle_io_error()
1174 slab->active_count--; in handle_io_error()
1175 vdo_enter_read_only_mode(slab->allocator->depot->vdo, result); in handle_io_error()
1180 * write_reference_block() - After a dirty block waiter has gotten a VIO from the VIO pool, copy
1190 struct vdo_completion *completion = &pooled->vio.completion; in write_reference_block()
1194 pack_reference_block(block, pooled->vio.data); in write_reference_block()
1195 block_offset = (block - block->slab->reference_blocks); in write_reference_block()
1196 pbn = (block->slab->ref_counts_origin + block_offset); in write_reference_block()
1197 block->slab_journal_lock_to_release = block->slab_journal_lock; in write_reference_block()
1198 completion->parent = block; in write_reference_block()
1205 block->is_dirty = false; in write_reference_block()
1212 WRITE_ONCE(block->slab->allocator->ref_counts_statistics.blocks_written, in write_reference_block()
1213 block->slab->allocator->ref_counts_statistics.blocks_written + 1); in write_reference_block()
1215 completion->callback_thread_id = ((struct block_allocator *) pooled->context)->thread_id; in write_reference_block()
1216 vdo_submit_metadata_vio(&pooled->vio, pbn, write_reference_block_endio, in write_reference_block()
1223 struct vdo_slab *slab = journal->slab; in reclaim_journal_space()
1224 block_count_t write_count = vdo_waitq_num_waiters(&slab->dirty_blocks); in reclaim_journal_space()
1227 if ((length < journal->flushing_threshold) || (write_count == 0)) in reclaim_journal_space()
1231 WRITE_ONCE(journal->events->flush_count, journal->events->flush_count + 1); in reclaim_journal_space()
1232 if (length < journal->flushing_deadline) { in reclaim_journal_space()
1234 write_count /= journal->flushing_deadline - length + 1; in reclaim_journal_space()
1239 vdo_waitq_notify_next_waiter(&slab->dirty_blocks, in reclaim_journal_space()
1245 * reference_count_to_status() - Convert a reference count to a reference status.
1263 * dirty_block() - Mark a reference count block as dirty, potentially adding it to the dirty queue
1269 if (block->is_dirty) in dirty_block()
1272 block->is_dirty = true; in dirty_block()
1273 if (!block->is_writing) in dirty_block()
1274 vdo_waitq_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter); in dirty_block()
1278 * get_reference_block() - Get the reference block that covers the given block index.
1283 return &slab->reference_blocks[index / COUNTS_PER_BLOCK]; in get_reference_block()
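The in-memory counters form one flat array per slab, carved into COUNTS_PER_BLOCK-sized slices that correspond one-to-one with the reference_blocks array: get_reference_block() divides a block index down to its covering reference block, and get_reference_counters_for_block() (shown earlier) multiplies back up to find a block's slice. A sketch of that pairing; the COUNTS_PER_BLOCK value and struct names here are illustrative stand-ins.

#include <stdint.h>
#include <stdio.h>

/* Illustrative; the real value is derived from the on-disk sector layout. */
#define SKETCH_COUNTS_PER_BLOCK 4

struct ref_block_sketch { unsigned int allocated_count; };

struct slab_refs_sketch {
	uint8_t counters[16];                 /* one counter per data block */
	struct ref_block_sketch blocks[4];    /* one per COUNTS_PER_BLOCK counters */
};

/* Which reference block covers the counter at this index (cf. get_reference_block()). */
static struct ref_block_sketch *covering_block(struct slab_refs_sketch *s, unsigned int index)
{
	return &s->blocks[index / SKETCH_COUNTS_PER_BLOCK];
}

/* Which counters belong to this block (cf. get_reference_counters_for_block()). */
static uint8_t *counters_for(struct slab_refs_sketch *s, struct ref_block_sketch *block)
{
	size_t block_index = block - s->blocks;

	return &s->counters[block_index * SKETCH_COUNTS_PER_BLOCK];
}

int main(void)
{
	struct slab_refs_sketch s = { { 0 } };
	struct ref_block_sketch *block = covering_block(&s, 9); /* blocks[2] */

	printf("%d\n", counters_for(&s, block) == &s.counters[8]); /* prints 1 */
	return 0;
}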
1287 * slab_block_number_from_pbn() - Determine the index within the slab of a particular physical
1301 if (pbn < slab->start) in slab_block_number_from_pbn()
1304 slab_block_number = pbn - slab->start; in slab_block_number_from_pbn()
1305 if (slab_block_number >= slab->allocator->depot->slab_config.data_blocks) in slab_block_number_from_pbn()
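Converting a physical block number to a slab-relative index is a subtraction of the slab's start, guarded on both sides: PBNs below the start and offsets beyond the slab's data-block count are rejected. A standalone sketch of that conversion; the error code and struct names are simplified stand-ins for the kernel's.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's out-of-range error code. */
#define SKETCH_OUT_OF_RANGE (-1)

struct slab_sketch {
	uint64_t start;        /* first physical block of the slab */
	uint64_t data_blocks;  /* number of data blocks the slab covers */
};

/* Map a physical block number to an index within the slab, with bounds checks,
 * as slab_block_number_from_pbn() does. */
static int sbn_from_pbn(const struct slab_sketch *slab, uint64_t pbn, uint64_t *sbn)
{
	if (pbn < slab->start)
		return SKETCH_OUT_OF_RANGE;

	*sbn = pbn - slab->start;
	if (*sbn >= slab->data_blocks)
		return SKETCH_OUT_OF_RANGE;

	return 0;
}

int main(void)
{
	struct slab_sketch slab = { .start = 4096, .data_blocks = 1024 };
	uint64_t sbn;

	printf("%d %llu\n", sbn_from_pbn(&slab, 4100, &sbn), (unsigned long long)sbn);
	printf("%d\n", sbn_from_pbn(&slab, 100, &sbn));
	return 0;
}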
1313 * get_reference_counter() - Get the reference counter that covers the given physical block number.
1328 *counter_ptr = &slab->counters[index]; in get_reference_counter()
1335 block_count_t free_blocks = slab->free_blocks; in calculate_slab_priority()
1336 unsigned int unopened_slab_priority = slab->allocator->unopened_slab_priority; in calculate_slab_priority()
1348 * a better client of any underlying storage that is thinly-provisioned (though discarding in calculate_slab_priority()
1374 VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry), in prioritize_slab()
1376 slab->priority = calculate_slab_priority(slab); in prioritize_slab()
1377 vdo_priority_table_enqueue(slab->allocator->prioritized_slabs, in prioritize_slab()
1378 slab->priority, &slab->allocq_entry); in prioritize_slab()
1382 * adjust_free_block_count() - Adjust the free block count and (if needed) reprioritize the slab.
1387 struct block_allocator *allocator = slab->allocator; in adjust_free_block_count()
1389 WRITE_ONCE(allocator->allocated_blocks, in adjust_free_block_count()
1390 allocator->allocated_blocks + (incremented ? -1 : 1)); in adjust_free_block_count()
1393 if (slab == allocator->open_slab) in adjust_free_block_count()
1397 if (slab->priority == calculate_slab_priority(slab)) in adjust_free_block_count()
1402 * and re-enqueuing it with the new priority. in adjust_free_block_count()
1404 vdo_priority_table_remove(allocator->prioritized_slabs, &slab->allocq_entry); in adjust_free_block_count()
1409 * increment_for_data() - Increment the reference count for a data block.
1429 block->allocated_count++; in increment_for_data()
1430 slab->free_blocks--; in increment_for_data()
1445 slab->slab_number, block_number); in increment_for_data()
1456 * decrement_for_data() - Decrement the reference count for a data block.
1477 block_number, slab->slab_number); in decrement_for_data()
1481 if (updater->zpbn.zone != NULL) { in decrement_for_data()
1482 struct pbn_lock *lock = vdo_get_physical_zone_pbn_lock(updater->zpbn.zone, in decrement_for_data()
1483 updater->zpbn.pbn); in decrement_for_data()
1497 block->allocated_count--; in decrement_for_data()
1498 slab->free_blocks++; in decrement_for_data()
1506 (*counter_ptr)--; in decrement_for_data()
1513 * increment_for_block_map() - Increment the reference count for a block map page.
1541 slab->slab_number, block_number); in increment_for_block_map()
1545 block->allocated_count++; in increment_for_block_map()
1546 slab->free_blocks--; in increment_for_block_map()
1556 slab->slab_number, block_number); in increment_for_block_map()
1566 *counter_ptr, slab->slab_number, in increment_for_block_map()
1573 return ((point != NULL) && (point->sequence_number > 0)); in is_valid_journal_point()
1577 * update_reference_count() - Update the reference count of a block.
1597 vdo_refcount_t *counter_ptr = &slab->counters[block_number]; in update_reference_count()
1601 if (!updater->increment) { in update_reference_count()
1609 } else if (updater->operation == VDO_JOURNAL_DATA_REMAPPING) { in update_reference_count()
1611 updater->lock, counter_ptr, adjust_block_count); in update_reference_count()
1614 updater->lock, normal_operation, in update_reference_count()
1622 slab->slab_journal_point = *slab_journal_point; in update_reference_count()
1639 result = slab_block_number_from_pbn(slab, updater->zpbn.pbn, &block_number); in adjust_reference_count()
1650 if (block->is_dirty && (block->slab_journal_lock > 0)) { in adjust_reference_count()
1651 sequence_number_t entry_lock = slab_journal_point->sequence_number; in adjust_reference_count()
1654 * the last time it was clean. We must release the per-entry slab journal lock for in adjust_reference_count()
1662 adjust_slab_journal_block_reference(&slab->journal, entry_lock, -1); in adjust_reference_count()
1668 * entry to this block since the block was cleaned. Therefore, we convert the per-entry in adjust_reference_count()
1669 * slab journal lock to an uncommitted reference block lock, if there is a per-entry lock. in adjust_reference_count()
1672 block->slab_journal_lock = slab_journal_point->sequence_number; in adjust_reference_count()
1674 block->slab_journal_lock = 0; in adjust_reference_count()
1681 * add_entry_from_waiter() - Add an entry to the slab journal.
1695 struct slab_journal_block_header *header = &journal->tail_header; in add_entry_from_waiter()
1697 .sequence_number = header->sequence_number, in add_entry_from_waiter()
1698 .entry_count = header->entry_count, in add_entry_from_waiter()
1700 sequence_number_t recovery_block = data_vio->recovery_journal_point.sequence_number; in add_entry_from_waiter()
1702 if (header->entry_count == 0) { in add_entry_from_waiter()
1707 get_lock(journal, header->sequence_number)->recovery_start = recovery_block; in add_entry_from_waiter()
1708 if (journal->recovery_journal != NULL) { in add_entry_from_waiter()
1709 zone_count_t zone_number = journal->slab->allocator->zone_number; in add_entry_from_waiter()
1711 vdo_acquire_recovery_journal_block_reference(journal->recovery_journal, in add_entry_from_waiter()
1721 add_entry(journal, updater->zpbn.pbn, updater->operation, updater->increment, in add_entry_from_waiter()
1722 expand_journal_point(data_vio->recovery_journal_point, in add_entry_from_waiter()
1723 updater->increment)); in add_entry_from_waiter()
1725 if (journal->slab->status != VDO_SLAB_REBUILT) { in add_entry_from_waiter()
1731 slab_journal_point.sequence_number, -1); in add_entry_from_waiter()
1735 result = adjust_reference_count(journal->slab, updater, in add_entry_from_waiter()
1739 if (updater->increment) in add_entry_from_waiter()
1742 vdo_continue_completion(&data_vio->decrement_completion, result); in add_entry_from_waiter()
1746 * is_next_entry_a_block_map_increment() - Check whether the next entry to be made is a block map
1754 struct vdo_waiter *waiter = vdo_waitq_get_first_waiter(&journal->entry_waiters); in is_next_entry_a_block_map_increment()
1758 return (updater->operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING); in is_next_entry_a_block_map_increment()
1762 * add_entries() - Add as many entries as possible from the queue of vios waiting to make entries.
1770 if (journal->adding_entries) { in add_entries()
1771 /* Protect against re-entrancy. */ in add_entries()
1775 journal->adding_entries = true; in add_entries()
1776 while (vdo_waitq_has_waiters(&journal->entry_waiters)) { in add_entries()
1777 struct slab_journal_block_header *header = &journal->tail_header; in add_entries()
1779 if (journal->partial_write_in_progress || in add_entries()
1780 (journal->slab->status == VDO_SLAB_REBUILDING)) { in add_entries()
1788 if (journal->waiting_to_commit) { in add_entries()
1793 WRITE_ONCE(journal->events->tail_busy_count, in add_entries()
1794 journal->events->tail_busy_count + 1); in add_entries()
1797 (header->entry_count >= journal->full_entries_per_block)) { in add_entries()
1803 if (journal->waiting_to_commit) { in add_entries()
1804 WRITE_ONCE(journal->events->tail_busy_count, in add_entries()
1805 journal->events->tail_busy_count + 1); in add_entries()
1812 WRITE_ONCE(journal->events->blocked_count, in add_entries()
1813 journal->events->blocked_count + 1); in add_entries()
1814 save_dirty_reference_blocks(journal->slab); in add_entries()
1818 if (header->entry_count == 0) { in add_entries()
1820 get_lock(journal, header->sequence_number); in add_entries()
1826 if (lock->count > 0) { in add_entries()
1827 VDO_ASSERT_LOG_ONLY((journal->head + journal->size) == journal->tail, in add_entries()
1835 VDO_ASSERT_LOG_ONLY((journal->blocking_threshold >= journal->size), in add_entries()
1838 WRITE_ONCE(journal->events->disk_full_count, in add_entries()
1839 journal->events->disk_full_count + 1); in add_entries()
1840 save_dirty_reference_blocks(journal->slab); in add_entries()
1849 lock->count = journal->entries_per_block + 1; in add_entries()
1851 if (header->sequence_number == 1) { in add_entries()
1852 struct vdo_slab *slab = journal->slab; in add_entries()
1859 * counts are initialized. The lock acquisition must be done by the in add_entries()
1863 for (i = 0; i < slab->reference_block_count; i++) { in add_entries()
1864 slab->reference_blocks[i].slab_journal_lock = 1; in add_entries()
1865 dirty_block(&slab->reference_blocks[i]); in add_entries()
1869 slab->reference_block_count); in add_entries()
1873 vdo_waitq_notify_next_waiter(&journal->entry_waiters, in add_entries()
1877 journal->adding_entries = false; in add_entries()
1880 if (vdo_is_state_draining(&journal->slab->state) && in add_entries()
1881 !vdo_is_state_suspending(&journal->slab->state) && in add_entries()
1882 !vdo_waitq_has_waiters(&journal->entry_waiters)) in add_entries()
1887 * reset_search_cursor() - Reset the free block search back to the first reference counter in the
1892 struct search_cursor *cursor = &slab->search_cursor; in reset_search_cursor()
1894 cursor->block = cursor->first_block; in reset_search_cursor()
1895 cursor->index = 0; in reset_search_cursor()
1897 cursor->end_index = min_t(u32, COUNTS_PER_BLOCK, slab->block_count); in reset_search_cursor()
1901 * advance_search_cursor() - Advance the search cursor to the start of the next reference block in
1910 struct search_cursor *cursor = &slab->search_cursor; in advance_search_cursor()
1916 if (cursor->block == cursor->last_block) { in advance_search_cursor()
1922 cursor->block++; in advance_search_cursor()
1923 cursor->index = cursor->end_index; in advance_search_cursor()
1925 if (cursor->block == cursor->last_block) { in advance_search_cursor()
1927 cursor->end_index = slab->block_count; in advance_search_cursor()
1929 cursor->end_index += COUNTS_PER_BLOCK; in advance_search_cursor()
1936 * vdo_adjust_reference_count_for_rebuild() - Adjust the reference count of a block during rebuild.
1968 * replay_reference_count_change() - Replay the reference count adjustment from a slab journal
1990 if (!vdo_before_journal_point(&block->commit_points[sector], entry_point)) { in replay_reference_count_change()
2006  * find_zero_byte_in_word() - Find the array index of the first zero byte in a word-sized range of
2027 /* Assumes little-endian byte order, which we have on X86. */ in find_zero_byte_in_word()
2037 * find_free_block() - Find the first block with a reference count of zero in the specified
2049 slab_block_number next_index = slab->search_cursor.index; in find_free_block()
2050 slab_block_number end_index = slab->search_cursor.end_index; in find_free_block()
2051 u8 *next_counter = &slab->counters[next_index]; in find_free_block()
2052 u8 *end_counter = &slab->counters[end_index]; in find_free_block()
2072  * Now we're word-aligned; check a word at a time until we find a word containing a zero. in find_free_block()
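Once the cursor is word-aligned, the search scans one machine word at a time and only falls back to byte-by-byte inspection when a word is known to contain a zero byte. The comment above notes the byte-order assumption. Below is a hedged sketch of the textbook "does this word contain a zero byte" test such a scan can use; it illustrates the technique, it is not the kernel's exact code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Classic SWAR test: true if any byte of the 64-bit word is zero. */
static bool word_has_zero_byte(uint64_t word)
{
	return ((word - 0x0101010101010101ULL) & ~word & 0x8080808080808080ULL) != 0;
}

/* Scan an array of reference counters a word at a time, then narrow down
 * to the first zero byte; returns its index, or (size_t)-1 if none. */
static size_t find_first_zero_counter(const uint8_t *counters, size_t count)
{
	size_t i = 0;

	for (; i + sizeof(uint64_t) <= count; i += sizeof(uint64_t)) {
		uint64_t word;

		memcpy(&word, counters + i, sizeof(word));
		if (word_has_zero_byte(word))
			break;
	}
	for (; i < count; i++) {
		if (counters[i] == 0)
			return i;
	}
	return (size_t)-1;
}

int main(void)
{
	uint8_t counters[16];

	memset(counters, 1, sizeof(counters));
	counters[11] = 0;
	printf("%zu\n", find_first_zero_counter(counters, sizeof(counters))); /* 11 */
	return 0;
}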
2095 * search_current_reference_block() - Search the reference block currently saved in the search
2107 return ((slab->search_cursor.block->allocated_count < COUNTS_PER_BLOCK) && in search_current_reference_block()
2112 * search_reference_blocks() - Search each reference block for a reference count of zero.
2139 * make_provisional_reference() - Do the bookkeeping for making a provisional reference.
2150 slab->counters[block_number] = PROVISIONAL_REFERENCE_COUNT; in make_provisional_reference()
2153 block->allocated_count++; in make_provisional_reference()
2154 slab->free_blocks--; in make_provisional_reference()
2158 * dirty_all_reference_blocks() - Mark all reference count blocks in a slab as dirty.
2164 for (i = 0; i < slab->reference_block_count; i++) in dirty_all_reference_blocks()
2165 dirty_block(&slab->reference_blocks[i]); in dirty_all_reference_blocks()
2169 * clear_provisional_references() - Clear the provisional reference counts from a reference block.
2180 block->allocated_count--; in clear_provisional_references()
2193  * unpack_reference_block() - Unpack reference count blocks into the internal memory structure.
2202 struct vdo_slab *slab = block->slab; in unpack_reference_block()
2206 struct packed_reference_sector *sector = &packed->sectors[i]; in unpack_reference_block()
2208 vdo_unpack_journal_point(&sector->commit_point, &block->commit_points[i]); in unpack_reference_block()
2209 memcpy(counters + (i * COUNTS_PER_SECTOR), sector->counts, in unpack_reference_block()
2212 if (vdo_before_journal_point(&slab->slab_journal_point, in unpack_reference_block()
2213 &block->commit_points[i])) in unpack_reference_block()
2214 slab->slab_journal_point = block->commit_points[i]; in unpack_reference_block()
2217 !journal_points_equal(block->commit_points[0], in unpack_reference_block()
2218 block->commit_points[i])) { in unpack_reference_block()
2219 size_t block_index = block - block->slab->reference_blocks; in unpack_reference_block()
2222 i, block_index, block->slab->slab_number); in unpack_reference_block()
2226 block->allocated_count = 0; in unpack_reference_block()
2229 block->allocated_count++; in unpack_reference_block()
2234 * finish_reference_block_load() - After a reference block has been read, unpack it.
2241 struct reference_block *block = completion->parent; in finish_reference_block_load()
2242 struct vdo_slab *slab = block->slab; in finish_reference_block_load()
2244 unpack_reference_block((struct packed_reference_block *) vio->data, block); in finish_reference_block_load()
2245 return_vio_to_pool(slab->allocator->vio_pool, pooled); in finish_reference_block_load()
2246 slab->active_count--; in finish_reference_block_load()
2249 slab->free_blocks -= block->allocated_count; in finish_reference_block_load()
2255 struct vio *vio = bio->bi_private; in load_reference_block_endio()
2256 struct reference_block *block = vio->completion.parent; in load_reference_block_endio()
2259 block->slab->allocator->thread_id); in load_reference_block_endio()
2263 * load_reference_block() - After a block waiter has gotten a VIO from the VIO pool, load the
2271 struct vio *vio = &pooled->vio; in load_reference_block()
2274 size_t block_offset = (block - block->slab->reference_blocks); in load_reference_block()
2276 vio->completion.parent = block; in load_reference_block()
2277 vdo_submit_metadata_vio(vio, block->slab->ref_counts_origin + block_offset, in load_reference_block()
2283 * load_reference_blocks() - Load a slab's reference blocks from the underlying storage into a
2284 * pre-allocated reference counter.
2290 slab->free_blocks = slab->block_count; in load_reference_blocks()
2291 slab->active_count = slab->reference_block_count; in load_reference_blocks()
2292 for (i = 0; i < slab->reference_block_count; i++) { in load_reference_blocks()
2293 struct vdo_waiter *waiter = &slab->reference_blocks[i].waiter; in load_reference_blocks()
2295 waiter->callback = load_reference_block; in load_reference_blocks()
2296 acquire_vio_from_pool(slab->allocator->vio_pool, waiter); in load_reference_blocks()
2301 * drain_slab() - Drain all reference count I/O.
2310 const struct admin_state_code *state = vdo_get_admin_state_code(&slab->state); in drain_slab()
2317 commit_tail(&slab->journal); in drain_slab()
2319 if ((state == VDO_ADMIN_STATE_RECOVERING) || (slab->counters == NULL)) in drain_slab()
2323 load = slab->allocator->summary_entries[slab->slab_number].load_ref_counts; in drain_slab()
2338 * non-zero reference counts, or there are any slab journal blocks. in drain_slab()
2340 block_count_t data_blocks = slab->allocator->depot->slab_config.data_blocks; in drain_slab()
2342 if (load || (slab->free_blocks != data_blocks) || in drain_slab()
2348 save = (slab->status == VDO_SLAB_REBUILT); in drain_slab()
2350 vdo_finish_draining_with_result(&slab->state, VDO_SUCCESS); in drain_slab()
2363 result = VDO_ASSERT(slab->reference_blocks == NULL, in allocate_slab_counters()
2365 slab->slab_number); in allocate_slab_counters()
2369 result = vdo_allocate(slab->reference_block_count, struct reference_block, in allocate_slab_counters()
2370 __func__, &slab->reference_blocks); in allocate_slab_counters()
2375 * Allocate such that the runt slab has a full-length memory array, plus a little padding in allocate_slab_counters()
2376 * so we can word-search even at the very end. in allocate_slab_counters()
2378 bytes = (slab->reference_block_count * COUNTS_PER_BLOCK) + (2 * BYTES_PER_WORD); in allocate_slab_counters()
2380 &slab->counters); in allocate_slab_counters()
2382 vdo_free(vdo_forget(slab->reference_blocks)); in allocate_slab_counters()
2386 slab->search_cursor.first_block = slab->reference_blocks; in allocate_slab_counters()
2387 slab->search_cursor.last_block = &slab->reference_blocks[slab->reference_block_count - 1]; in allocate_slab_counters()
2390 for (index = 0; index < slab->reference_block_count; index++) { in allocate_slab_counters()
2391 slab->reference_blocks[index] = (struct reference_block) { in allocate_slab_counters()
2401 if (vdo_is_state_clean_load(&slab->state)) in allocate_counters_if_clean()
2410 struct slab_journal *journal = completion->parent; in finish_loading_journal()
2411 struct vdo_slab *slab = journal->slab; in finish_loading_journal()
2412 struct packed_slab_journal_block *block = (struct packed_slab_journal_block *) vio->data; in finish_loading_journal()
2415 vdo_unpack_slab_journal_block_header(&block->header, &header); in finish_loading_journal()
2419 (header.nonce == slab->allocator->nonce)) { in finish_loading_journal()
2420 journal->tail = header.sequence_number + 1; in finish_loading_journal()
2426 journal->head = (slab->allocator->summary_entries[slab->slab_number].is_dirty ? in finish_loading_journal()
2427 header.head : journal->tail); in finish_loading_journal()
2428 journal->tail_header = header; in finish_loading_journal()
2432 return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio)); in finish_loading_journal()
2433 vdo_finish_loading_with_result(&slab->state, allocate_counters_if_clean(slab)); in finish_loading_journal()
2438 struct vio *vio = bio->bi_private; in read_slab_journal_tail_endio()
2439 struct slab_journal *journal = vio->completion.parent; in read_slab_journal_tail_endio()
2442 journal->slab->allocator->thread_id); in read_slab_journal_tail_endio()
2447 int result = completion->result; in handle_load_error()
2448 struct slab_journal *journal = completion->parent; in handle_load_error()
2452 return_vio_to_pool(journal->slab->allocator->vio_pool, vio_as_pooled_vio(vio)); in handle_load_error()
2453 vdo_finish_loading_with_result(&journal->slab->state, result); in handle_load_error()
2457 * read_slab_journal_tail() - Read the slab journal tail block by using a vio acquired from the vio
2468 struct vdo_slab *slab = journal->slab; in read_slab_journal_tail()
2470 struct vio *vio = &pooled->vio; in read_slab_journal_tail()
2472 slab->allocator->summary_entries[slab->slab_number].tail_block_offset; in read_slab_journal_tail()
2479 (tail_block_offset_t)(journal->size - 1) : in read_slab_journal_tail()
2480 (last_commit_point - 1)); in read_slab_journal_tail()
2482 vio->completion.parent = journal; in read_slab_journal_tail()
2483 vio->completion.callback_thread_id = slab->allocator->thread_id; in read_slab_journal_tail()
2484 vdo_submit_metadata_vio(vio, slab->journal_origin + tail_block, in read_slab_journal_tail()
2490 * load_slab_journal() - Load a slab's journal by reading the journal's tail.
2494 struct slab_journal *journal = &slab->journal; in load_slab_journal()
2497 last_commit_point = slab->allocator->summary_entries[slab->slab_number].tail_block_offset; in load_slab_journal()
2499 !slab->allocator->summary_entries[slab->slab_number].load_ref_counts) { in load_slab_journal()
2501 * This slab claims that it has a tail block at (journal->size - 1), but a head of in load_slab_journal()
2505 VDO_ASSERT_LOG_ONLY(((journal->size < 16) || in load_slab_journal()
2506 (journal->scrubbing_threshold < (journal->size - 1))), in load_slab_journal()
2508 vdo_finish_loading_with_result(&slab->state, in load_slab_journal()
2513 journal->resource_waiter.callback = read_slab_journal_tail; in load_slab_journal()
2514 acquire_vio_from_pool(slab->allocator->vio_pool, &journal->resource_waiter); in load_slab_journal()
2519 struct slab_scrubber *scrubber = &slab->allocator->scrubber; in register_slab_for_scrubbing() local
2521 VDO_ASSERT_LOG_ONLY((slab->status != VDO_SLAB_REBUILT), in register_slab_for_scrubbing()
2524 if (slab->status != VDO_SLAB_REQUIRES_SCRUBBING) in register_slab_for_scrubbing()
2527 list_del_init(&slab->allocq_entry); in register_slab_for_scrubbing()
2528 if (!slab->was_queued_for_scrubbing) { in register_slab_for_scrubbing()
2529 WRITE_ONCE(scrubber->slab_count, scrubber->slab_count + 1); in register_slab_for_scrubbing()
2530 slab->was_queued_for_scrubbing = true; in register_slab_for_scrubbing()
2534 slab->status = VDO_SLAB_REQUIRES_HIGH_PRIORITY_SCRUBBING; in register_slab_for_scrubbing()
2535 list_add_tail(&slab->allocq_entry, &scrubber->high_priority_slabs); in register_slab_for_scrubbing()
2539 list_add_tail(&slab->allocq_entry, &scrubber->slabs); in register_slab_for_scrubbing()
2545 struct block_allocator *allocator = slab->allocator; in queue_slab()
2549 VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry), in queue_slab()
2552 if (vdo_is_read_only(allocator->depot->vdo)) in queue_slab()
2555 free_blocks = slab->free_blocks; in queue_slab()
2556 result = VDO_ASSERT((free_blocks <= allocator->depot->slab_config.data_blocks), in queue_slab()
2558 slab->slab_number, (unsigned long long) free_blocks, in queue_slab()
2559 (unsigned long long) allocator->depot->slab_config.data_blocks); in queue_slab()
2561 vdo_enter_read_only_mode(allocator->depot->vdo, result); in queue_slab()
2565 if (slab->status != VDO_SLAB_REBUILT) { in queue_slab()
2570 if (!vdo_is_state_resuming(&slab->state)) { in queue_slab()
2576 WRITE_ONCE(allocator->allocated_blocks, in queue_slab()
2577 allocator->allocated_blocks - free_blocks); in queue_slab()
2579 WRITE_ONCE(allocator->statistics.slabs_opened, in queue_slab()
2580 allocator->statistics.slabs_opened + 1); in queue_slab()
2584 if (allocator->depot->vdo->suspend_type == VDO_ADMIN_STATE_SAVING) in queue_slab()
2591 * initiate_slab_action() - Initiate a slab action.
2603 slab->status = VDO_SLAB_REBUILDING; in initiate_slab_action()
2625 * get_next_slab() - Get the next slab to scrub.
2626 * @scrubber: The slab scrubber.
2630 static struct vdo_slab *get_next_slab(struct slab_scrubber *scrubber) in get_next_slab() argument
2634 slab = list_first_entry_or_null(&scrubber->high_priority_slabs, in get_next_slab()
2639 return list_first_entry_or_null(&scrubber->slabs, struct vdo_slab, in get_next_slab()
2644 * has_slabs_to_scrub() - Check whether a scrubber has slabs to scrub.
2645 * @scrubber: The scrubber to check.
2647 * Return: true if the scrubber has slabs to scrub.
2649 static inline bool __must_check has_slabs_to_scrub(struct slab_scrubber *scrubber) in has_slabs_to_scrub() argument
2651 return (get_next_slab(scrubber) != NULL); in has_slabs_to_scrub()
2655 * uninitialize_scrubber_vio() - Clean up the slab_scrubber's vio.
2656 * @scrubber: The scrubber.
2658 static void uninitialize_scrubber_vio(struct slab_scrubber *scrubber) in uninitialize_scrubber_vio() argument
2660 vdo_free(vdo_forget(scrubber->vio.data)); in uninitialize_scrubber_vio()
2661 free_vio_components(&scrubber->vio); in uninitialize_scrubber_vio()
2665 * finish_scrubbing() - Stop scrubbing, either because there are no more slabs to scrub or because
2667 * @scrubber: The scrubber.
2669 static void finish_scrubbing(struct slab_scrubber *scrubber, int result) in finish_scrubbing() argument
2671 bool notify = vdo_waitq_has_waiters(&scrubber->waiters); in finish_scrubbing()
2672 bool done = !has_slabs_to_scrub(scrubber); in finish_scrubbing() local
2674 container_of(scrubber, struct block_allocator, scrubber); in finish_scrubbing()
2676 if (done) in finish_scrubbing()
2677 uninitialize_scrubber_vio(scrubber); in finish_scrubbing()
2679 if (scrubber->high_priority_only) { in finish_scrubbing()
2680 scrubber->high_priority_only = false; in finish_scrubbing()
2681 vdo_fail_completion(vdo_forget(scrubber->vio.completion.parent), result); in finish_scrubbing()
2682 } else if (done && (atomic_add_return(-1, &allocator->depot->zones_to_scrub) == 0)) { in finish_scrubbing()
2685 atomic_cmpxchg(&allocator->depot->vdo->state, VDO_RECOVERING, in finish_scrubbing()
2696  * the compare-swap above could have failed due to a read-only entry which our own in finish_scrubbing()
2706 * Note that the scrubber has stopped, and inform anyone who might be waiting for that to in finish_scrubbing()
2709 if (!vdo_finish_draining(&scrubber->admin_state)) in finish_scrubbing()
2710 WRITE_ONCE(scrubber->admin_state.current_state, in finish_scrubbing()
2718 vdo_waitq_notify_all_waiters(&scrubber->waiters, NULL, NULL); in finish_scrubbing()
2721 static void scrub_next_slab(struct slab_scrubber *scrubber);
2724 * slab_scrubbed() - Notify the scrubber that a slab has been scrubbed.
2731 struct slab_scrubber *scrubber = in slab_scrubbed() local
2733 struct vdo_slab *slab = scrubber->slab; in slab_scrubbed()
2735 slab->status = VDO_SLAB_REBUILT; in slab_scrubbed()
2738 WRITE_ONCE(scrubber->slab_count, scrubber->slab_count - 1); in slab_scrubbed()
2739 scrub_next_slab(scrubber); in slab_scrubbed()
2743 * abort_scrubbing() - Abort scrubbing due to an error.
2744 * @scrubber: The slab scrubber.
2747 static void abort_scrubbing(struct slab_scrubber *scrubber, int result) in abort_scrubbing() argument
2749 vdo_enter_read_only_mode(scrubber->vio.completion.vdo, result); in abort_scrubbing()
2750 finish_scrubbing(scrubber, result); in abort_scrubbing()
2754 * handle_scrubber_error() - Handle errors while rebuilding a slab.
2763 completion->result); in handle_scrubber_error()
2767 * apply_block_entries() - Apply all the entries in a block to the reference counts.
2784 slab_block_number max_sbn = slab->end - slab->start; in apply_block_entries()
2806 entry.sbn, slab->slab_number); in apply_block_entries()
2816 * apply_journal_entries() - Find the relevant vio of the slab journal and apply all valid entries.
2824 struct slab_scrubber *scrubber = in apply_journal_entries() local
2826 struct vdo_slab *slab = scrubber->slab; in apply_journal_entries()
2827 struct slab_journal *journal = &slab->journal; in apply_journal_entries()
2830 sequence_number_t tail = journal->tail; in apply_journal_entries()
2831 tail_block_offset_t end_index = (tail - 1) % journal->size; in apply_journal_entries()
2832 char *end_data = scrubber->vio.data + (end_index * VDO_BLOCK_SIZE); in apply_journal_entries()
2836 sequence_number_t head = __le64_to_cpu(end_block->header.head); in apply_journal_entries()
2837 tail_block_offset_t head_index = head % journal->size; in apply_journal_entries()
2840 struct journal_point ref_counts_point = slab->slab_journal_point; in apply_journal_entries()
2845 char *block_data = scrubber->vio.data + (index * VDO_BLOCK_SIZE); in apply_journal_entries()
2850 vdo_unpack_slab_journal_block_header(&block->header, &header); in apply_journal_entries()
2852 if ((header.nonce != slab->allocator->nonce) || in apply_journal_entries()
2855 (header.entry_count > journal->entries_per_block) || in apply_journal_entries()
2857 (header.entry_count > journal->full_entries_per_block))) { in apply_journal_entries()
2860 slab->slab_number); in apply_journal_entries()
2861 abort_scrubbing(scrubber, VDO_CORRUPT_JOURNAL); in apply_journal_entries()
2867 abort_scrubbing(scrubber, result); in apply_journal_entries()
2872 last_entry_applied.entry_count = header.entry_count - 1; in apply_journal_entries()
2874 if (index == journal->size) in apply_journal_entries()
2886 abort_scrubbing(scrubber, result); in apply_journal_entries()
2892 slab->allocator->thread_id, completion->parent); in apply_journal_entries()
2893 vdo_start_operation_with_waiter(&slab->state, in apply_journal_entries()
2900 struct vio *vio = bio->bi_private; in read_slab_journal_endio()
2901 struct slab_scrubber *scrubber = container_of(vio, struct slab_scrubber, vio); in read_slab_journal_endio() local
2903 continue_vio_after_io(bio->bi_private, apply_journal_entries, in read_slab_journal_endio()
2904 scrubber->slab->allocator->thread_id); in read_slab_journal_endio()
2908 * start_scrubbing() - Read the current slab's journal from disk now that it has been flushed.
2909 * @completion: The scrubber's vio completion.
2915 struct slab_scrubber *scrubber = in start_scrubbing() local
2917 struct vdo_slab *slab = scrubber->slab; in start_scrubbing()
2919 if (!slab->allocator->summary_entries[slab->slab_number].is_dirty) { in start_scrubbing()
2924 vdo_submit_metadata_vio(&scrubber->vio, slab->journal_origin, in start_scrubbing()
2930 * scrub_next_slab() - Scrub the next slab if there is one.
2931 * @scrubber: The scrubber.
2933 static void scrub_next_slab(struct slab_scrubber *scrubber) in scrub_next_slab() argument
2935 struct vdo_completion *completion = &scrubber->vio.completion; in scrub_next_slab()
2942 vdo_waitq_notify_all_waiters(&scrubber->waiters, NULL, NULL); in scrub_next_slab()
2944 if (vdo_is_read_only(completion->vdo)) { in scrub_next_slab()
2945 finish_scrubbing(scrubber, VDO_READ_ONLY); in scrub_next_slab()
2949 slab = get_next_slab(scrubber); in scrub_next_slab()
2951 (scrubber->high_priority_only && list_empty(&scrubber->high_priority_slabs))) { in scrub_next_slab()
2952 finish_scrubbing(scrubber, VDO_SUCCESS); in scrub_next_slab()
2956 if (vdo_finish_draining(&scrubber->admin_state)) in scrub_next_slab()
2959 list_del_init(&slab->allocq_entry); in scrub_next_slab()
2960 scrubber->slab = slab; in scrub_next_slab()
2962 slab->allocator->thread_id, completion->parent); in scrub_next_slab()
2963 vdo_start_operation_with_waiter(&slab->state, VDO_ADMIN_STATE_SCRUBBING, in scrub_next_slab()
2968 * scrub_slabs() - Scrub all of an allocator's slabs that are eligible for scrubbing.
2970 * @parent: The completion to notify when scrubbing is done, implies high_priority, may be NULL.
2974 struct slab_scrubber *scrubber = &allocator->scrubber; in scrub_slabs()
2976 scrubber->vio.completion.parent = parent; in scrub_slabs()
2977 scrubber->high_priority_only = (parent != NULL); in scrub_slabs()
2978 if (!has_slabs_to_scrub(scrubber)) { in scrub_slabs()
2979 finish_scrubbing(scrubber, VDO_SUCCESS); in scrub_slabs()
2983 if (scrubber->high_priority_only && in scrub_slabs()
2984 vdo_is_priority_table_empty(allocator->prioritized_slabs) && in scrub_slabs()
2985 list_empty(&scrubber->high_priority_slabs)) in scrub_slabs()
2986 register_slab_for_scrubbing(get_next_slab(scrubber), true); in scrub_slabs()
2988 vdo_resume_if_quiescent(&scrubber->admin_state); in scrub_slabs()
2989 scrub_next_slab(scrubber); in scrub_slabs()
3002 allocator->slab_count++; in register_slab_with_allocator()
3003 allocator->last_slab = slab->slab_number; in register_slab_with_allocator()
3007 * get_depot_slab_iterator() - Return a slab_iterator over the slabs in a slab_depot.
3021 struct vdo_slab **slabs = depot->slabs; in get_depot_slab_iterator()
3033 return get_depot_slab_iterator(allocator->depot, allocator->last_slab, in get_slab_iterator()
3034 allocator->zone_number, in get_slab_iterator()
3035 allocator->depot->zone_count); in get_slab_iterator()
3039 * next_slab() - Get the next slab from a slab_iterator and advance the iterator
3046 struct vdo_slab *slab = iterator->next; in next_slab()
3048 if ((slab == NULL) || (slab->slab_number < iterator->end + iterator->stride)) in next_slab()
3049 iterator->next = NULL; in next_slab()
3051 iterator->next = iterator->slabs[slab->slab_number - iterator->stride]; in next_slab()
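/*
 * Hedged sketch: slabs are striped across physical zones by slab number
 * (slab i belongs to zone i % zone_count), so an allocator can visit all of
 * its own slabs by starting at its highest-numbered slab and stepping
 * backwards by zone_count.  This standalone loop only demonstrates the
 * stride arithmetic used by the iterator above; it is not the kernel code
 * and the example sizes are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned int slab_count = 10, zone_count = 3, zone = 1;
	unsigned int last_slab_in_zone;

	/* Find the highest slab number owned by this zone. */
	for (last_slab_in_zone = slab_count - 1;
	     last_slab_in_zone % zone_count != zone; last_slab_in_zone--)
		;

	/* Walk the zone's slabs: 7, 4, 1 for zone 1 of a 10-slab, 3-zone depot. */
	for (unsigned int slab = last_slab_in_zone; ; slab -= zone_count) {
		printf("zone %u owns slab %u\n", zone, slab);
		if (slab < zone_count)
			break;
	}
	return 0;
}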
3057 * abort_waiter() - Abort vios waiting to make journal entries when read-only.
3060 * into read-only mode. Implements waiter_callback_fn.
3068 if (updater->increment) { in abort_waiter()
3073 vdo_continue_completion(&data_vio->decrement_completion, VDO_READ_ONLY); in abort_waiter()
3083 assert_on_allocator_thread(allocator->thread_id, __func__); in notify_block_allocator_of_read_only_mode()
3088 vdo_waitq_notify_all_waiters(&slab->journal.entry_waiters, in notify_block_allocator_of_read_only_mode()
3089 abort_waiter, &slab->journal); in notify_block_allocator_of_read_only_mode()
3097 * vdo_acquire_provisional_reference() - Acquire a provisional reference on behalf of a PBN lock if
3121 if (slab->counters[block_number] == EMPTY_REFERENCE_COUNT) { in vdo_acquire_provisional_reference()
3144 VDO_ASSERT_LOG_ONLY((slab->counters[free_index] == EMPTY_REFERENCE_COUNT), in allocate_slab_block()
3153 slab->search_cursor.index = (free_index + 1); in allocate_slab_block()
3155 *block_number_ptr = slab->start + free_index; in allocate_slab_block()
3160 * open_slab() - Prepare a slab to be allocated from.
3167 WRITE_ONCE(slab->allocator->statistics.slabs_opened, in open_slab()
3168 slab->allocator->statistics.slabs_opened + 1); in open_slab()
3171 WRITE_ONCE(slab->allocator->statistics.slabs_reopened, in open_slab()
3172 slab->allocator->statistics.slabs_reopened + 1); in open_slab()
3175 slab->allocator->open_slab = slab; in open_slab()
3189 if (allocator->open_slab != NULL) { in vdo_allocate_block()
3191 result = allocate_slab_block(allocator->open_slab, block_number_ptr); in vdo_allocate_block()
3196 prioritize_slab(allocator->open_slab); in vdo_allocate_block()
3200 open_slab(list_entry(vdo_priority_table_dequeue(allocator->prioritized_slabs), in vdo_allocate_block()
3207 return allocate_slab_block(allocator->open_slab, block_number_ptr); in vdo_allocate_block()
3211 * vdo_enqueue_clean_slab_waiter() - Wait for a clean slab.
3221 if (vdo_is_read_only(allocator->depot->vdo)) in vdo_enqueue_clean_slab_waiter()
3224 if (vdo_is_state_quiescent(&allocator->scrubber.admin_state)) in vdo_enqueue_clean_slab_waiter()
3227 vdo_waitq_enqueue_waiter(&allocator->scrubber.waiters, waiter); in vdo_enqueue_clean_slab_waiter()
3232 * vdo_modify_reference_count() - Modify the reference count of a block by first making a slab
3240 struct vdo_slab *slab = vdo_get_slab(completion->vdo->depot, updater->zpbn.pbn); in vdo_modify_reference_count()
3247 if (vdo_is_read_only(completion->vdo)) { in vdo_modify_reference_count()
3252 vdo_waitq_enqueue_waiter(&slab->journal.entry_waiters, &updater->waiter); in vdo_modify_reference_count()
3253 if ((slab->status != VDO_SLAB_REBUILT) && requires_reaping(&slab->journal)) in vdo_modify_reference_count()
3256 add_entries(&slab->journal); in vdo_modify_reference_count()
3276 return adjust_reference_count(vdo_get_slab(allocator->depot, pbn), in vdo_release_block_reference()
3295 if (info1->is_clean != info2->is_clean) in slab_status_is_less_than()
3296 return info1->is_clean; in slab_status_is_less_than()
3297 if (info1->emptiness != info2->emptiness) in slab_status_is_less_than()
3298 return info1->emptiness > info2->emptiness; in slab_status_is_less_than()
3299 return info1->slab_number < info2->slab_number; in slab_status_is_less_than()
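/*
 * Hedged sketch: the comparison above orders slab statuses so that clean
 * slabs come first, then emptier slabs, then lower slab numbers.  Below the
 * same three-key ordering is restated as a qsort() comparator over a
 * simplified status struct; the kernel feeds its predicate to its own heap
 * helpers rather than to qsort(), so this is only an illustration.
 */
#include <stdbool.h>
#include <stdint.h>

struct status {
	uint32_t slab_number;
	uint8_t emptiness;
	bool is_clean;
};

static int compare_statuses(const void *a, const void *b)
{
	const struct status *s1 = a, *s2 = b;

	if (s1->is_clean != s2->is_clean)
		return s1->is_clean ? -1 : 1;			/* clean slabs first */
	if (s1->emptiness != s2->emptiness)
		return (s1->emptiness > s2->emptiness) ? -1 : 1;	/* emptier first */
	return (s1->slab_number < s2->slab_number) ? -1 : 1;	/* lower number first */
}

/* Usage: qsort(statuses, count, sizeof(struct status), compare_statuses); */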
3311 struct slab_actor *actor = &allocator->slab_actor; in slab_action_callback()
3313 if (--actor->slab_action_count == 0) { in slab_action_callback()
3314 actor->callback(completion); in slab_action_callback()
3326 if (allocator->state.waiter != NULL) in handle_operation_error()
3327 vdo_set_completion_result(allocator->state.waiter, completion->result); in handle_operation_error()
3328 completion->callback(completion); in handle_operation_error()
3336 vdo_prepare_completion(&allocator->completion, slab_action_callback, in apply_to_slabs()
3337 handle_operation_error, allocator->thread_id, NULL); in apply_to_slabs()
3338 allocator->completion.requeue = false; in apply_to_slabs()
3344 allocator->open_slab = NULL; in apply_to_slabs()
3346 /* Ensure that we don't finish before we're done starting. */ in apply_to_slabs()
3347 allocator->slab_actor = (struct slab_actor) { in apply_to_slabs()
3355 vdo_get_admin_state_code(&allocator->state); in apply_to_slabs()
3358 list_del_init(&slab->allocq_entry); in apply_to_slabs()
3359 allocator->slab_actor.slab_action_count++; in apply_to_slabs()
3360 vdo_start_operation_with_waiter(&slab->state, operation, in apply_to_slabs()
3361 &allocator->completion, in apply_to_slabs()
3365 slab_action_callback(&allocator->completion); in apply_to_slabs()
3372 vdo_get_admin_state_code(&allocator->state); in finish_loading_allocator()
3374 if (allocator->eraser != NULL) in finish_loading_allocator()
3375 dm_kcopyd_client_destroy(vdo_forget(allocator->eraser)); in finish_loading_allocator()
3379 vdo_get_current_action_context(allocator->depot->action_manager); in finish_loading_allocator()
3385 vdo_finish_loading(&allocator->state); in finish_loading_allocator()
3393 int result = (((read_err == 0) && (write_err == 0)) ? VDO_SUCCESS : -EIO); in copy_callback()
3396 vdo_fail_completion(&allocator->completion, result); in copy_callback()
3403 /* erase_next_slab_journal() - Erase the next slab journal. */
3409 struct slab_depot *depot = allocator->depot; in erase_next_slab_journal()
3410 block_count_t blocks = depot->slab_config.slab_journal_blocks; in erase_next_slab_journal()
3412 if (allocator->slabs_to_erase.next == NULL) { in erase_next_slab_journal()
3413 vdo_finish_completion(&allocator->completion); in erase_next_slab_journal()
3417 slab = next_slab(&allocator->slabs_to_erase); in erase_next_slab_journal()
3418 pbn = slab->journal_origin - depot->vdo->geometry.bio_offset; in erase_next_slab_journal()
3420 .bdev = vdo_get_backing_device(depot->vdo), in erase_next_slab_journal()
3424 dm_kcopyd_zero(allocator->eraser, 1, regions, 0, copy_callback, allocator); in erase_next_slab_journal()
3439 vdo_prepare_completion_for_requeue(&allocator->completion, in initiate_load()
3442 allocator->thread_id, NULL); in initiate_load()
3443 allocator->eraser = dm_kcopyd_client_create(NULL); in initiate_load()
3444 if (IS_ERR(allocator->eraser)) { in initiate_load()
3445 vdo_fail_completion(&allocator->completion, in initiate_load()
3446 PTR_ERR(allocator->eraser)); in initiate_load()
3447 allocator->eraser = NULL; in initiate_load()
3450 allocator->slabs_to_erase = get_slab_iterator(allocator); in initiate_load()
3460 * vdo_notify_slab_journals_are_recovered() - Inform a block allocator that its slab journals have
3468 vdo_finish_loading_with_result(&allocator->state, completion->result); in vdo_notify_slab_journals_are_recovered()
3478 result = vdo_allocate(allocator->slab_count, struct slab_status, __func__, in get_slab_statuses()
3486 slab_count_t slab_number = next_slab(&iterator)->slab_number; in get_slab_statuses()
3490 .is_clean = !allocator->summary_entries[slab_number].is_dirty, in get_slab_statuses()
3491 .emptiness = allocator->summary_entries[slab_number].fullness_hint, in get_slab_statuses()
3505 struct slab_depot *depot = allocator->depot; in vdo_prepare_slabs_for_allocation()
3507 WRITE_ONCE(allocator->allocated_blocks, in vdo_prepare_slabs_for_allocation()
3508 allocator->slab_count * depot->slab_config.data_blocks); in vdo_prepare_slabs_for_allocation()
3516 .nr = allocator->slab_count, in vdo_prepare_slabs_for_allocation()
3517 .size = allocator->slab_count, in vdo_prepare_slabs_for_allocation()
3528 slab = depot->slabs[current_slab_status.slab_number]; in vdo_prepare_slabs_for_allocation()
3530 if ((depot->load_type == VDO_SLAB_DEPOT_REBUILD_LOAD) || in vdo_prepare_slabs_for_allocation()
3531 (!allocator->summary_entries[slab->slab_number].load_ref_counts && in vdo_prepare_slabs_for_allocation()
3537 slab->status = VDO_SLAB_REQUIRES_SCRUBBING; in vdo_prepare_slabs_for_allocation()
3538 journal = &slab->journal; in vdo_prepare_slabs_for_allocation()
3540 (depot->load_type == VDO_SLAB_DEPOT_NORMAL_LOAD)) || in vdo_prepare_slabs_for_allocation()
3541 (journal_length(journal) >= journal->scrubbing_threshold)); in vdo_prepare_slabs_for_allocation()
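/*
 * Hedged sketch: the fragment above decides whether a dirty slab should be
 * scrubbed at high priority.  Roughly, a slab jumps the queue when its
 * summary said it was clean during a normal load, or when its slab journal
 * has already grown past the scrubbing threshold.  The helper below restates
 * that predicate with invented parameter names; it is not the kernel code.
 */
#include <stdbool.h>
#include <stdint.h>

static bool needs_high_priority_scrub(bool summary_says_clean, bool normal_load,
				      uint64_t journal_length,
				      uint64_t scrubbing_threshold)
{
	return (summary_says_clean && normal_load) ||
	       (journal_length >= scrubbing_threshold);
}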
3571 const struct slab_scrubber *scrubber = &allocator->scrubber; in vdo_dump_block_allocator()
3573 vdo_log_info("block_allocator zone %u", allocator->zone_number); in vdo_dump_block_allocator()
3576 struct slab_journal *journal = &slab->journal; in vdo_dump_block_allocator()
3578 if (slab->reference_blocks != NULL) { in vdo_dump_block_allocator()
3580 vdo_log_info("slab %u: P%u, %llu free", slab->slab_number, in vdo_dump_block_allocator()
3581 slab->priority, in vdo_dump_block_allocator()
3582 (unsigned long long) slab->free_blocks); in vdo_dump_block_allocator()
3584 vdo_log_info("slab %u: status %s", slab->slab_number, in vdo_dump_block_allocator()
3585 status_to_string(slab->status)); in vdo_dump_block_allocator()
3589 vdo_waitq_num_waiters(&journal->entry_waiters), in vdo_dump_block_allocator()
3590 vdo_bool_to_string(journal->waiting_to_commit), in vdo_dump_block_allocator()
3591 vdo_bool_to_string(journal->updating_slab_summary), in vdo_dump_block_allocator()
3592 (unsigned long long) journal->head, in vdo_dump_block_allocator()
3593 (unsigned long long) journal->unreapable, in vdo_dump_block_allocator()
3594 (unsigned long long) journal->tail, in vdo_dump_block_allocator()
3595 (unsigned long long) journal->next_commit, in vdo_dump_block_allocator()
3596 (unsigned long long) journal->summarized, in vdo_dump_block_allocator()
3597 (unsigned long long) journal->last_summarized, in vdo_dump_block_allocator()
3598 (unsigned long long) journal->recovery_lock, in vdo_dump_block_allocator()
3599 vdo_bool_to_string(journal->recovery_lock != 0)); in vdo_dump_block_allocator()
3605 if (slab->counters != NULL) { in vdo_dump_block_allocator()
3608 slab->free_blocks, slab->block_count, in vdo_dump_block_allocator()
3609 slab->reference_block_count, in vdo_dump_block_allocator()
3610 vdo_waitq_num_waiters(&slab->dirty_blocks), in vdo_dump_block_allocator()
3611 slab->active_count, in vdo_dump_block_allocator()
3612 (unsigned long long) slab->slab_journal_point.sequence_number, in vdo_dump_block_allocator()
3613 slab->slab_journal_point.entry_count); in vdo_dump_block_allocator()
3629 READ_ONCE(scrubber->slab_count), in vdo_dump_block_allocator()
3630 vdo_waitq_num_waiters(&scrubber->waiters), in vdo_dump_block_allocator()
3631 vdo_get_admin_state_code(&scrubber->admin_state)->name, in vdo_dump_block_allocator()
3632 scrubber->high_priority_only ? ", high_priority_only " : ""); in vdo_dump_block_allocator()
3640 list_del(&slab->allocq_entry); in free_slab()
3641 vdo_free(vdo_forget(slab->journal.block)); in free_slab()
3642 vdo_free(vdo_forget(slab->journal.locks)); in free_slab()
3643 vdo_free(vdo_forget(slab->counters)); in free_slab()
3644 vdo_free(vdo_forget(slab->reference_blocks)); in free_slab()
3650 struct slab_journal *journal = &slab->journal; in initialize_slab_journal()
3651 const struct slab_config *slab_config = &slab->allocator->depot->slab_config; in initialize_slab_journal()
3654 result = vdo_allocate(slab_config->slab_journal_blocks, struct journal_lock, in initialize_slab_journal()
3655 __func__, &journal->locks); in initialize_slab_journal()
3660 (char **) &journal->block); in initialize_slab_journal()
3664 journal->slab = slab; in initialize_slab_journal()
3665 journal->size = slab_config->slab_journal_blocks; in initialize_slab_journal()
3666 journal->flushing_threshold = slab_config->slab_journal_flushing_threshold; in initialize_slab_journal()
3667 journal->blocking_threshold = slab_config->slab_journal_blocking_threshold; in initialize_slab_journal()
3668 journal->scrubbing_threshold = slab_config->slab_journal_scrubbing_threshold; in initialize_slab_journal()
3669 journal->entries_per_block = VDO_SLAB_JOURNAL_ENTRIES_PER_BLOCK; in initialize_slab_journal()
3670 journal->full_entries_per_block = VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK; in initialize_slab_journal()
3671 journal->events = &slab->allocator->slab_journal_statistics; in initialize_slab_journal()
3672 journal->recovery_journal = slab->allocator->depot->vdo->recovery_journal; in initialize_slab_journal()
3673 journal->tail = 1; in initialize_slab_journal()
3674 journal->head = 1; in initialize_slab_journal()
3676 journal->flushing_deadline = journal->flushing_threshold; in initialize_slab_journal()
3679 * hopefully all are done before blocking. in initialize_slab_journal()
3681 if ((journal->blocking_threshold - journal->flushing_threshold) > 5) in initialize_slab_journal()
3682 journal->flushing_deadline = journal->blocking_threshold - 5; in initialize_slab_journal()
3684 journal->slab_summary_waiter.callback = release_journal_locks; in initialize_slab_journal()
3686 INIT_LIST_HEAD(&journal->dirty_entry); in initialize_slab_journal()
3687 INIT_LIST_HEAD(&journal->uncommitted_blocks); in initialize_slab_journal()
3689 journal->tail_header.nonce = slab->allocator->nonce; in initialize_slab_journal()
3690 journal->tail_header.metadata_type = VDO_METADATA_SLAB_JOURNAL; in initialize_slab_journal()
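/*
 * Hedged sketch of the threshold relationship set up above: flushing of the
 * slab's dirty metadata starts once the journal holds flushing_threshold
 * blocks, and new entries block entirely at blocking_threshold.  The
 * flushing deadline is pulled to within five blocks of the blocking
 * threshold when the gap allows, so flushing can hopefully finish before
 * writers stall.  The values below are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t compute_flushing_deadline(uint64_t flushing_threshold,
					  uint64_t blocking_threshold)
{
	if ((blocking_threshold - flushing_threshold) > 5)
		return blocking_threshold - 5;
	return flushing_threshold;
}

int main(void)
{
	/* e.g. flush at 215 blocks, block at 223: the deadline becomes 218. */
	printf("deadline = %llu\n",
	       (unsigned long long) compute_flushing_deadline(215, 223));
	return 0;
}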
3696 * make_slab() - Construct a new, empty slab.
3711 const struct slab_config *slab_config = &allocator->depot->slab_config; in make_slab()
3722 .end = slab_origin + slab_config->slab_blocks, in make_slab()
3724 .ref_counts_origin = slab_origin + slab_config->data_blocks, in make_slab()
3727 .block_count = slab_config->data_blocks, in make_slab()
3728 .free_blocks = slab_config->data_blocks, in make_slab()
3730 vdo_get_saved_reference_count_size(slab_config->data_blocks), in make_slab()
3732 INIT_LIST_HEAD(&slab->allocq_entry); in make_slab()
3741 vdo_set_admin_state_code(&slab->state, VDO_ADMIN_STATE_NEW); in make_slab()
3748 vdo_set_admin_state_code(&slab->state, VDO_ADMIN_STATE_NORMAL_OPERATION); in make_slab()
3756 * allocate_slabs() - Allocate a new slab pointer array.
3773 "slab pointer array", &depot->new_slabs); in allocate_slabs()
3777 if (depot->slabs != NULL) { in allocate_slabs()
3778 memcpy(depot->new_slabs, depot->slabs, in allocate_slabs()
3779 depot->slab_count * sizeof(struct vdo_slab *)); in allocate_slabs()
3783 slab_size = depot->slab_config.slab_blocks; in allocate_slabs()
3784 slab_origin = depot->first_block + (depot->slab_count * slab_size); in allocate_slabs()
3786 for (depot->new_slab_count = depot->slab_count; in allocate_slabs()
3787 depot->new_slab_count < slab_count; in allocate_slabs()
3788 depot->new_slab_count++, slab_origin += slab_size) { in allocate_slabs()
3790 &depot->allocators[depot->new_slab_count % depot->zone_count]; in allocate_slabs()
3791 struct vdo_slab **slab_ptr = &depot->new_slabs[depot->new_slab_count]; in allocate_slabs()
3793 result = make_slab(slab_origin, allocator, depot->new_slab_count, in allocate_slabs()
3803 * vdo_abandon_new_slabs() - Abandon any new slabs in this depot, freeing them as needed.
3810 if (depot->new_slabs == NULL) in vdo_abandon_new_slabs()
3813 for (i = depot->slab_count; i < depot->new_slab_count; i++) in vdo_abandon_new_slabs()
3814 free_slab(vdo_forget(depot->new_slabs[i])); in vdo_abandon_new_slabs()
3815 depot->new_slab_count = 0; in vdo_abandon_new_slabs()
3816 depot->new_size = 0; in vdo_abandon_new_slabs()
3817 vdo_free(vdo_forget(depot->new_slabs)); in vdo_abandon_new_slabs()
3821 * get_allocator_thread_id() - Get the ID of the thread on which a given allocator operates.
3827 return ((struct slab_depot *) context)->allocators[zone_number].thread_id; in get_allocator_thread_id()
3831 * release_recovery_journal_lock() - Request the slab journal to release the recovery journal lock
3842 if (recovery_lock > journal->recovery_lock) { in release_recovery_journal_lock()
3843 VDO_ASSERT_LOG_ONLY((recovery_lock < journal->recovery_lock), in release_recovery_journal_lock()
3848 if ((recovery_lock < journal->recovery_lock) || in release_recovery_journal_lock()
3849 vdo_is_read_only(journal->slab->allocator->depot->vdo)) in release_recovery_journal_lock()
3868 struct list_head *list = &depot->allocators[zone_number].dirty_slab_journals; in release_tail_block_locks()
3872 depot->active_release_request)) in release_tail_block_locks()
3880 * prepare_for_tail_block_commit() - Prepare to commit oldest tail blocks.
3888 depot->active_release_request = depot->new_release_request; in prepare_for_tail_block_commit()
3893 * schedule_tail_block_commit() - Schedule a tail block commit if necessary.
3904 if (depot->new_release_request == depot->active_release_request) in schedule_tail_block_commit()
3907 return vdo_schedule_action(depot->action_manager, in schedule_tail_block_commit()
3914 * initialize_slab_scrubber() - Initialize an allocator's slab scrubber.
3921 struct slab_scrubber *scrubber = &allocator->scrubber; in initialize_slab_scrubber()
3923 allocator->depot->slab_config.slab_journal_blocks; in initialize_slab_scrubber()
3932 result = allocate_vio_components(allocator->completion.vdo, in initialize_slab_scrubber()
3936 journal_data, &scrubber->vio); in initialize_slab_scrubber()
3942 INIT_LIST_HEAD(&scrubber->high_priority_slabs); in initialize_slab_scrubber()
3943 INIT_LIST_HEAD(&scrubber->slabs); in initialize_slab_scrubber()
3944 vdo_set_admin_state_code(&scrubber->admin_state, VDO_ADMIN_STATE_SUSPENDED); in initialize_slab_scrubber()
3949 * initialize_slab_summary_block() - Initialize a slab_summary_block.
3958 struct slab_summary_block *block = &allocator->summary_blocks[index]; in initialize_slab_summary_block()
3961 result = vdo_allocate(VDO_BLOCK_SIZE, char, __func__, &block->outgoing_entries); in initialize_slab_summary_block()
3965 result = allocate_vio_components(allocator->depot->vdo, VIO_TYPE_SLAB_SUMMARY, in initialize_slab_summary_block()
3967 block->outgoing_entries, &block->vio); in initialize_slab_summary_block()
3971 block->allocator = allocator; in initialize_slab_summary_block()
3972 block->entries = &allocator->summary_entries[VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK * index]; in initialize_slab_summary_block()
3973 block->index = index; in initialize_slab_summary_block()
3982 struct block_allocator *allocator = &depot->allocators[zone]; in initialize_block_allocator()
3983 struct vdo *vdo = depot->vdo; in initialize_block_allocator()
3984 block_count_t max_free_blocks = depot->slab_config.data_blocks; in initialize_block_allocator()
3990 .thread_id = vdo->thread_config.physical_threads[zone], in initialize_block_allocator()
3991 .nonce = vdo->states.vdo.nonce, in initialize_block_allocator()
3994 INIT_LIST_HEAD(&allocator->dirty_slab_journals); in initialize_block_allocator()
3995 vdo_set_admin_state_code(&allocator->state, VDO_ADMIN_STATE_NORMAL_OPERATION); in initialize_block_allocator()
3998 allocator->thread_id); in initialize_block_allocator()
4002 vdo_initialize_completion(&allocator->completion, vdo, VDO_BLOCK_ALLOCATOR_COMPLETION); in initialize_block_allocator()
4003 result = make_vio_pool(vdo, BLOCK_ALLOCATOR_VIO_POOL_SIZE, allocator->thread_id, in initialize_block_allocator()
4005 allocator, &allocator->vio_pool); in initialize_block_allocator()
4013 result = vdo_make_priority_table(max_priority, &allocator->prioritized_slabs); in initialize_block_allocator()
4019 &allocator->summary_blocks); in initialize_block_allocator()
4023 vdo_set_admin_state_code(&allocator->summary_state, in initialize_block_allocator()
4025 allocator->summary_entries = depot->summary_entries + (MAX_VDO_SLABS * zone); in initialize_block_allocator()
4041 * ideal for the story, but anything less than a very high threshold (max_priority - 1) in initialize_block_allocator()
4048 allocator->unopened_slab_priority = (1 + ilog2((max_free_blocks * 3) / 4)); in initialize_block_allocator()
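/*
 * Hedged sketch of the priority computed above: a slab that has never been
 * opened is queued at 1 + ilog2(3/4 of a full slab's data blocks).  The
 * example below uses a plain log2 loop in place of the kernel's ilog2() and
 * a made-up slab size.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_u64(uint64_t n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	uint64_t max_free_blocks = 8192;	/* hypothetical data blocks per slab */
	unsigned int priority = 1 + ilog2_u64((max_free_blocks * 3) / 4);

	/* 3/4 of 8192 is 6144, ilog2(6144) is 12, so the priority is 13. */
	printf("unopened slab priority = %u\n", priority);
	return 0;
}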
4061 const struct thread_config *thread_config = &depot->vdo->thread_config; in allocate_components()
4063 result = vdo_make_action_manager(depot->zone_count, get_allocator_thread_id, in allocate_components()
4064 thread_config->journal_thread, depot, in allocate_components()
4066 depot->vdo, &depot->action_manager); in allocate_components()
4070 depot->origin = depot->first_block; in allocate_components()
4075 depot->summary_origin = summary_partition->offset; in allocate_components()
4076 depot->hint_shift = vdo_get_slab_summary_hint_shift(depot->slab_size_shift); in allocate_components()
4079 &depot->summary_entries); in allocate_components()
4085 hint = compute_fullness_hint(depot, depot->slab_config.data_blocks); in allocate_components()
4091 depot->summary_entries[i] = (struct slab_summary_entry) { in allocate_components()
4099 slab_count = vdo_compute_slab_count(depot->first_block, depot->last_block, in allocate_components()
4100 depot->slab_size_shift); in allocate_components()
4101 if (thread_config->physical_zone_count > slab_count) { in allocate_components()
4104 thread_config->physical_zone_count, in allocate_components()
4109 for (zone = 0; zone < depot->zone_count; zone++) { in allocate_components()
4121 for (i = depot->slab_count; i < depot->new_slab_count; i++) { in allocate_components()
4122 struct vdo_slab *slab = depot->new_slabs[i]; in allocate_components()
4124 register_slab_with_allocator(slab->allocator, slab); in allocate_components()
4125 WRITE_ONCE(depot->slab_count, depot->slab_count + 1); in allocate_components()
4128 depot->slabs = depot->new_slabs; in allocate_components()
4129 depot->new_slabs = NULL; in allocate_components()
4130 depot->new_slab_count = 0; in allocate_components()
4136 * vdo_decode_slab_depot() - Make a slab depot and configure it with the state read from the super
4166 vdo->thread_config.physical_zone_count, in vdo_decode_slab_depot()
4171 depot->vdo = vdo; in vdo_decode_slab_depot()
4172 depot->old_zone_count = state.zone_count; in vdo_decode_slab_depot()
4173 depot->zone_count = vdo->thread_config.physical_zone_count; in vdo_decode_slab_depot()
4174 depot->slab_config = state.slab_config; in vdo_decode_slab_depot()
4175 depot->first_block = state.first_block; in vdo_decode_slab_depot()
4176 depot->last_block = state.last_block; in vdo_decode_slab_depot()
4177 depot->slab_size_shift = slab_size_shift; in vdo_decode_slab_depot()
4193 if (allocator->summary_blocks == NULL) in uninitialize_allocator_summary()
4197 free_vio_components(&allocator->summary_blocks[i].vio); in uninitialize_allocator_summary()
4198 vdo_free(vdo_forget(allocator->summary_blocks[i].outgoing_entries)); in uninitialize_allocator_summary()
4201 vdo_free(vdo_forget(allocator->summary_blocks)); in uninitialize_allocator_summary()
4205 * vdo_free_slab_depot() - Destroy a slab depot.
4217 for (zone = 0; zone < depot->zone_count; zone++) { in vdo_free_slab_depot()
4218 struct block_allocator *allocator = &depot->allocators[zone]; in vdo_free_slab_depot()
4220 if (allocator->eraser != NULL) in vdo_free_slab_depot()
4221 dm_kcopyd_client_destroy(vdo_forget(allocator->eraser)); in vdo_free_slab_depot()
4224 uninitialize_scrubber_vio(&allocator->scrubber); in vdo_free_slab_depot()
4225 free_vio_pool(vdo_forget(allocator->vio_pool)); in vdo_free_slab_depot()
4226 vdo_free_priority_table(vdo_forget(allocator->prioritized_slabs)); in vdo_free_slab_depot()
4229 if (depot->slabs != NULL) { in vdo_free_slab_depot()
4232 for (i = 0; i < depot->slab_count; i++) in vdo_free_slab_depot()
4233 free_slab(vdo_forget(depot->slabs[i])); in vdo_free_slab_depot()
4236 vdo_free(vdo_forget(depot->slabs)); in vdo_free_slab_depot()
4237 vdo_free(vdo_forget(depot->action_manager)); in vdo_free_slab_depot()
4238 vdo_free(vdo_forget(depot->summary_entries)); in vdo_free_slab_depot()
4243 * vdo_record_slab_depot() - Record the state of a slab depot for encoding into the super block.
4256 zone_count_t zones_to_record = depot->zone_count; in vdo_record_slab_depot()
4258 if (depot->zone_count == 0) in vdo_record_slab_depot()
4259 zones_to_record = depot->old_zone_count; in vdo_record_slab_depot()
4262 .slab_config = depot->slab_config, in vdo_record_slab_depot()
4263 .first_block = depot->first_block, in vdo_record_slab_depot()
4264 .last_block = depot->last_block, in vdo_record_slab_depot()
4272 * vdo_allocate_reference_counters() - Allocate the reference counters for all slabs in the depot.
4281 get_depot_slab_iterator(depot, depot->slab_count - 1, 0, 1); in vdo_allocate_reference_counters()
4294 * get_slab_number() - Get the number of the slab that contains a specified block.
4307 if (pbn < depot->first_block) in get_slab_number()
4310 slab_number = (pbn - depot->first_block) >> depot->slab_size_shift; in get_slab_number()
4311 if (slab_number >= depot->slab_count) in get_slab_number()
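/*
 * Hedged sketch of the lookup above: every slab spans exactly
 * 2^slab_size_shift blocks, so the owning slab of a physical block number is
 * (pbn - first_block) >> slab_size_shift, rejected when it falls before the
 * depot or past the last slab.  The numbers and names below are invented.
 */
#include <stdint.h>
#include <stdio.h>

static int get_slab_number_sketch(uint64_t pbn, uint64_t first_block,
				  unsigned int slab_size_shift,
				  uint32_t slab_count, uint32_t *slab_number)
{
	if (pbn < first_block)
		return -1;

	*slab_number = (uint32_t) ((pbn - first_block) >> slab_size_shift);
	if (*slab_number >= slab_count)
		return -1;
	return 0;
}

int main(void)
{
	uint32_t slab_number;

	/* 8192-block slabs (shift 13): PBN 20000 with first_block 1024 -> slab 2. */
	if (get_slab_number_sketch(20000, 1024, 13, 16, &slab_number) == 0)
		printf("slab %u\n", slab_number);
	return 0;
}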
4319 * vdo_get_slab() - Get the slab object for the slab that contains a specified block.
4323 * Will put the VDO in read-only mode if the PBN is not a valid data block nor the zero block.
4339 vdo_enter_read_only_mode(depot->vdo, result); in vdo_get_slab()
4343 return depot->slabs[slab_number]; in vdo_get_slab()
4347 * vdo_get_increment_limit() - Determine how many new references a block can acquire.
4361 if ((slab == NULL) || (slab->status != VDO_SLAB_REBUILT)) in vdo_get_increment_limit()
4369 return (MAXIMUM_REFERENCE_COUNT - 1); in vdo_get_increment_limit()
4371 return (MAXIMUM_REFERENCE_COUNT - *counter_ptr); in vdo_get_increment_limit()
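/*
 * Hedged sketch of the limit above: a block whose slab has not yet been
 * rebuilt can accept no new references; otherwise the limit is the maximum
 * reference count minus the references the block already holds, with a
 * provisionally referenced block treated as holding exactly one.
 * MAX_REFERENCES and the helper below are illustrative stand-ins, not the
 * kernel's constants.
 */
#include <stdbool.h>
#include <stdint.h>

#define MAX_REFERENCES 254	/* illustrative ceiling only */

static unsigned int increment_limit_sketch(bool slab_rebuilt, bool provisional,
					   uint8_t current_references)
{
	if (!slab_rebuilt)
		return 0;
	if (provisional)
		return MAX_REFERENCES - 1;
	return MAX_REFERENCES - current_references;
}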
4375 * vdo_is_physical_data_block() - Determine whether the given PBN refers to a data block.
4389 (slab_block_number_from_pbn(depot->slabs[slab_number], pbn, &sbn) == in vdo_is_physical_data_block()
4394 * vdo_get_slab_depot_allocated_blocks() - Get the total number of data blocks allocated across all
4398 * This is the total number of blocks with a non-zero reference count.
4402 * Return: The total number of blocks with a non-zero reference count.
4409 for (zone = 0; zone < depot->zone_count; zone++) { in vdo_get_slab_depot_allocated_blocks()
4411 total += READ_ONCE(depot->allocators[zone].allocated_blocks); in vdo_get_slab_depot_allocated_blocks()
4418 * vdo_get_slab_depot_data_blocks() - Get the total number of data blocks in all the slabs in the
4428 return (READ_ONCE(depot->slab_count) * depot->slab_config.data_blocks); in vdo_get_slab_depot_data_blocks()
4432 * finish_combining_zones() - Clean up after saving out the combined slab summary.
4437 int result = completion->result; in finish_combining_zones()
4438 struct vdo_completion *parent = completion->parent; in finish_combining_zones()
4452 struct vio *vio = bio->bi_private; in write_summary_endio()
4453 struct vdo *vdo = vio->completion.vdo; in write_summary_endio()
4456 vdo->thread_config.admin_thread); in write_summary_endio()
4460 * combine_summaries() - Treating the current entries buffer as the on-disk value of all zones,
4471 struct slab_summary_entry *entries = depot->summary_entries; in combine_summaries()
4473 if (depot->old_zone_count > 1) { in combine_summaries()
4484 if (zone == depot->old_zone_count) in combine_summaries()
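/*
 * Hedged sketch of the combining step above: each zone only maintained the
 * summary entries for its own slabs (slab i belongs to zone
 * i % zone_count), so after loading, slab i's authoritative entry sits in
 * the saved region belonging to that zone.  Combining pulls each slab's
 * entry into zone 0's region, which then serves as the single source of
 * truth.  Sizes and entry types below are simplified stand-ins.
 */
#include <stdint.h>

#define SLOTS_PER_ZONE 8192	/* stand-in for the per-zone entry count */

static void combine_summaries_sketch(uint8_t *entries, unsigned int old_zone_count,
				     unsigned int slab_count)
{
	for (unsigned int slab = 0; slab < slab_count; slab++) {
		unsigned int zone = slab % old_zone_count;

		if (zone != 0)
			entries[slab] = entries[(zone * SLOTS_PER_ZONE) + slab];
	}
}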
4497 * finish_loading_summary() - Finish loading slab summary data.
4506 struct slab_depot *depot = completion->vdo->depot; in finish_loading_summary()
4512 vdo_submit_metadata_vio(as_vio(completion), depot->summary_origin, in finish_loading_summary()
4519 struct vio *vio = bio->bi_private; in load_summary_endio()
4520 struct vdo *vdo = vio->completion.vdo; in load_summary_endio()
4523 vdo->thread_config.admin_thread); in load_summary_endio()
4527 * load_slab_summary() - The preamble of a load operation.
4537 vdo_get_current_manager_operation(depot->action_manager); in load_slab_summary()
4539 result = create_multi_block_metadata_vio(depot->vdo, VIO_TYPE_SLAB_SUMMARY, in load_slab_summary()
4542 (char *) depot->summary_entries, &vio); in load_slab_summary()
4550 finish_loading_summary(&vio->completion); in load_slab_summary()
4554 vdo_submit_metadata_vio(vio, depot->summary_origin, load_summary_endio, in load_slab_summary()
4564 vdo_start_loading(&depot->allocators[zone_number].state, in load_allocator()
4565 vdo_get_current_manager_operation(depot->action_manager), in load_allocator()
4570 * vdo_load_slab_depot() - Asynchronously load any slab depot state that isn't included in the
4586 vdo_schedule_operation_with_context(depot->action_manager, operation, in vdo_load_slab_depot()
4596 struct block_allocator *allocator = &depot->allocators[zone_number]; in prepare_to_allocate()
4609 * vdo_prepare_slab_depot_to_allocate() - Prepare the slab depot to come online and start
4622 depot->load_type = load_type; in vdo_prepare_slab_depot_to_allocate()
4623 atomic_set(&depot->zones_to_scrub, depot->zone_count); in vdo_prepare_slab_depot_to_allocate()
4624 vdo_schedule_action(depot->action_manager, NULL, in vdo_prepare_slab_depot_to_allocate()
4629 * vdo_update_slab_depot_size() - Update the slab depot to reflect its new size in memory.
4636 depot->last_block = depot->new_last_block; in vdo_update_slab_depot_size()
4640 * vdo_prepare_to_grow_slab_depot() - Allocate new memory needed for a resize of a slab depot to
4654 if ((partition->count >> depot->slab_size_shift) <= depot->slab_count) in vdo_prepare_to_grow_slab_depot()
4658 VDO_ASSERT_LOG_ONLY(depot->first_block == partition->offset, in vdo_prepare_to_grow_slab_depot()
4660 result = vdo_configure_slab_depot(partition, depot->slab_config, in vdo_prepare_to_grow_slab_depot()
4661 depot->zone_count, &new_state); in vdo_prepare_to_grow_slab_depot()
4665 new_slab_count = vdo_compute_slab_count(depot->first_block, in vdo_prepare_to_grow_slab_depot()
4667 depot->slab_size_shift); in vdo_prepare_to_grow_slab_depot()
4668 if (new_slab_count <= depot->slab_count) in vdo_prepare_to_grow_slab_depot()
4671 if (new_slab_count == depot->new_slab_count) { in vdo_prepare_to_grow_slab_depot()
4683 depot->new_size = partition->count; in vdo_prepare_to_grow_slab_depot()
4684 depot->old_last_block = depot->last_block; in vdo_prepare_to_grow_slab_depot()
4685 depot->new_last_block = new_state.last_block; in vdo_prepare_to_grow_slab_depot()
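/*
 * Hedged sketch of the resize check above: a grow is only worth preparing
 * when the new partition is big enough to hold at least one more whole slab
 * than the depot already has, i.e. when partition_blocks >> slab_size_shift
 * exceeds the current slab count; a partial slab at the end of the
 * partition is simply ignored.  The helper name is invented.
 */
#include <stdbool.h>
#include <stdint.h>

static bool grow_is_worthwhile(uint64_t partition_blocks,
			       unsigned int slab_size_shift,
			       uint64_t current_slab_count)
{
	return (partition_blocks >> slab_size_shift) > current_slab_count;
}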
4691 * finish_registration() - Finish registering new slabs now that all of the allocators have
4700 WRITE_ONCE(depot->slab_count, depot->new_slab_count); in finish_registration()
4701 vdo_free(depot->slabs); in finish_registration()
4702 depot->slabs = depot->new_slabs; in finish_registration()
4703 depot->new_slabs = NULL; in finish_registration()
4704 depot->new_slab_count = 0; in finish_registration()
4713 struct block_allocator *allocator = &depot->allocators[zone_number]; in register_new_slabs()
4716 for (i = depot->slab_count; i < depot->new_slab_count; i++) { in register_new_slabs()
4717 struct vdo_slab *slab = depot->new_slabs[i]; in register_new_slabs()
4719 if (slab->allocator == allocator) in register_new_slabs()
4727 * vdo_use_new_slabs() - Use the new slabs allocated for resize.
4733 VDO_ASSERT_LOG_ONLY(depot->new_slabs != NULL, "Must have new slabs to use"); in vdo_use_new_slabs()
4734 vdo_schedule_operation(depot->action_manager, in vdo_use_new_slabs()
4741 * stop_scrubbing() - Tell the scrubber to stop scrubbing after it finishes the slab it is
4743 * @allocator: The block allocator owning the scrubber to stop.
4747 struct slab_scrubber *scrubber = &allocator->scrubber; in stop_scrubbing()
4749 if (vdo_is_state_quiescent(&scrubber->admin_state)) { in stop_scrubbing()
4750 vdo_finish_completion(&allocator->completion); in stop_scrubbing()
4752 vdo_start_draining(&scrubber->admin_state, in stop_scrubbing()
4754 &allocator->completion, NULL); in stop_scrubbing()
4769 vdo_prepare_completion_for_requeue(&allocator->completion, do_drain_step, in do_drain_step()
4770 handle_operation_error, allocator->thread_id, in do_drain_step()
4772 switch (++allocator->drain_step) { in do_drain_step()
4782 vdo_start_draining(&allocator->summary_state, in do_drain_step()
4783 vdo_get_admin_state_code(&allocator->state), in do_drain_step()
4788 VDO_ASSERT_LOG_ONLY(!is_vio_pool_busy(allocator->vio_pool), in do_drain_step()
4790 vdo_finish_draining_with_result(&allocator->state, completion->result); in do_drain_step()
4794 vdo_finish_draining_with_result(&allocator->state, UDS_BAD_STATE); in do_drain_step()
4804 allocator->drain_step = VDO_DRAIN_ALLOCATOR_START; in initiate_drain()
4805 do_drain_step(&allocator->completion); in initiate_drain()
4819 vdo_start_draining(&depot->allocators[zone_number].state, in drain_allocator()
4820 vdo_get_current_manager_operation(depot->action_manager), in drain_allocator()
4825 * vdo_drain_slab_depot() - Drain all slab depot I/O.
4837 vdo_schedule_operation(depot->action_manager, operation, in vdo_drain_slab_depot()
4842 * resume_scrubbing() - Tell the scrubber to resume scrubbing if it has been stopped.
4848 struct slab_scrubber *scrubber = &allocator->scrubber; in resume_scrubbing()
4850 if (!has_slabs_to_scrub(scrubber)) { in resume_scrubbing()
4851 vdo_finish_completion(&allocator->completion); in resume_scrubbing()
4855 result = vdo_resume_if_quiescent(&scrubber->admin_state); in resume_scrubbing()
4857 vdo_fail_completion(&allocator->completion, result); in resume_scrubbing()
4861 scrub_next_slab(scrubber); in resume_scrubbing()
4862 vdo_finish_completion(&allocator->completion); in resume_scrubbing()
4869 vdo_prepare_completion_for_requeue(&allocator->completion, do_resume_step, in do_resume_step()
4871 allocator->thread_id, NULL); in do_resume_step()
4872 switch (--allocator->drain_step) { in do_resume_step()
4875 vdo_resume_if_quiescent(&allocator->summary_state)); in do_resume_step()
4887 vdo_finish_resuming_with_result(&allocator->state, completion->result); in do_resume_step()
4891 vdo_finish_resuming_with_result(&allocator->state, UDS_BAD_STATE); in do_resume_step()
4901 allocator->drain_step = VDO_DRAIN_ALLOCATOR_STEP_FINISHED; in initiate_resume()
4902 do_resume_step(&allocator->completion); in initiate_resume()
4911 vdo_start_resuming(&depot->allocators[zone_number].state, in resume_allocator()
4912 vdo_get_current_manager_operation(depot->action_manager), in resume_allocator()
4917 * vdo_resume_slab_depot() - Resume a suspended slab depot.
4923 if (vdo_is_read_only(depot->vdo)) { in vdo_resume_slab_depot()
4928 vdo_schedule_operation(depot->action_manager, VDO_ADMIN_STATE_RESUMING, in vdo_resume_slab_depot()
4933 * vdo_commit_oldest_slab_journal_tail_blocks() - Commit all dirty tail blocks which are locking a
4947 depot->new_release_request = recovery_block_number; in vdo_commit_oldest_slab_journal_tail_blocks()
4948 vdo_schedule_default_action(depot->action_manager); in vdo_commit_oldest_slab_journal_tail_blocks()
4957 scrub_slabs(&depot->allocators[zone_number], NULL); in scrub_all_unrecovered_slabs()
4962 * vdo_scrub_all_unrecovered_slabs() - Scrub all unrecovered slabs.
4969 vdo_schedule_action(depot->action_manager, NULL, in vdo_scrub_all_unrecovered_slabs()
4975 * get_block_allocator_statistics() - Get the total of the statistics from all the block allocators
4989 for (zone = 0; zone < depot->zone_count; zone++) { in get_block_allocator_statistics()
4990 const struct block_allocator *allocator = &depot->allocators[zone]; in get_block_allocator_statistics()
4991 const struct block_allocator_statistics *stats = &allocator->statistics; in get_block_allocator_statistics()
4993 totals.slab_count += allocator->slab_count; in get_block_allocator_statistics()
4994 totals.slabs_opened += READ_ONCE(stats->slabs_opened); in get_block_allocator_statistics()
4995 totals.slabs_reopened += READ_ONCE(stats->slabs_reopened); in get_block_allocator_statistics()
5002 * get_ref_counts_statistics() - Get the cumulative ref_counts statistics for the depot.
5015 for (zone = 0; zone < depot->zone_count; zone++) { in get_ref_counts_statistics()
5017 READ_ONCE(depot->allocators[zone].ref_counts_statistics.blocks_written); in get_ref_counts_statistics()
5024 * get_slab_journal_statistics() - Get the aggregated slab journal statistics for the depot.
5037 for (zone = 0; zone < depot->zone_count; zone++) { in get_slab_journal_statistics()
5039 &depot->allocators[zone].slab_journal_statistics; in get_slab_journal_statistics()
5041 totals.disk_full_count += READ_ONCE(stats->disk_full_count); in get_slab_journal_statistics()
5042 totals.flush_count += READ_ONCE(stats->flush_count); in get_slab_journal_statistics()
5043 totals.blocked_count += READ_ONCE(stats->blocked_count); in get_slab_journal_statistics()
5044 totals.blocks_written += READ_ONCE(stats->blocks_written); in get_slab_journal_statistics()
5045 totals.tail_busy_count += READ_ONCE(stats->tail_busy_count); in get_slab_journal_statistics()
5052 * vdo_get_slab_depot_statistics() - Get all the vdo_statistics fields that are properties of the
5060 slab_count_t slab_count = READ_ONCE(depot->slab_count); in vdo_get_slab_depot_statistics()
5064 for (zone = 0; zone < depot->zone_count; zone++) { in vdo_get_slab_depot_statistics()
5066 unrecovered += READ_ONCE(depot->allocators[zone].scrubber.slab_count); in vdo_get_slab_depot_statistics()
5069 stats->recovery_percentage = (slab_count - unrecovered) * 100 / slab_count; in vdo_get_slab_depot_statistics()
5070 stats->allocator = get_block_allocator_statistics(depot); in vdo_get_slab_depot_statistics()
5071 stats->ref_counts = get_ref_counts_statistics(depot); in vdo_get_slab_depot_statistics()
5072 stats->slab_journal = get_slab_journal_statistics(depot); in vdo_get_slab_depot_statistics()
5073 stats->slab_summary = (struct slab_summary_statistics) { in vdo_get_slab_depot_statistics()
5074 .blocks_written = atomic64_read(&depot->summary_statistics.blocks_written), in vdo_get_slab_depot_statistics()
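/*
 * Hedged sketch of the recovery percentage computed above: it is integer
 * arithmetic, so the result rounds down.  With 96 slabs and 5 still awaiting
 * scrubbing, (96 - 5) * 100 / 96 reports 94 percent.  Values are made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t slab_count = 96, unrecovered = 5;

	printf("recovery %llu%%\n",
	       (unsigned long long) ((slab_count - unrecovered) * 100 / slab_count));
	return 0;
}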
5079 * vdo_dump_slab_depot() - Dump the slab depot, in a thread-unsafe fashion.
5086 (unsigned int) depot->zone_count, in vdo_dump_slab_depot()
5087 (unsigned int) depot->old_zone_count, READ_ONCE(depot->slab_count), in vdo_dump_slab_depot()
5088 (unsigned long long) depot->active_release_request, in vdo_dump_slab_depot()
5089 (unsigned long long) depot->new_release_request); in vdo_dump_slab_depot()