Lines Matching full:zone
23 #include "physical-zone.h"
60 struct block_map_zone *zone; member
98 struct block_map_zone *zone; member
121 * For adjusting VDO page cache statistic fields which are only mutated on the logical zone thread.
199 info->vio->completion.callback_thread_id = cache->zone->thread_id; in initialize_info()
247 VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id), in assert_on_cache_thread()
249 function_name, cache->zone->thread_id, thread_id); in assert_on_cache_thread()
255 VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state), in assert_io_allowed()
622 static void check_for_drain_complete(struct block_map_zone *zone) in check_for_drain_complete() argument
624 if (vdo_is_state_draining(&zone->state) && in check_for_drain_complete()
625 (zone->active_lookups == 0) && in check_for_drain_complete()
626 !vdo_waitq_has_waiters(&zone->flush_waiters) && in check_for_drain_complete()
627 !is_vio_pool_busy(zone->vio_pool) && in check_for_drain_complete()
628 (zone->page_cache.outstanding_reads == 0) && in check_for_drain_complete()
629 (zone->page_cache.outstanding_writes == 0)) { in check_for_drain_complete()
630 vdo_finish_draining_with_result(&zone->state, in check_for_drain_complete()
631 (vdo_is_read_only(zone->block_map->vdo) ? in check_for_drain_complete()
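The check_for_drain_complete() lines above show the drain gate: a zone's drain only finishes once lookups, flush waiters, pooled vios, and outstanding page-cache reads and writes have all quiesced. Below is a minimal userspace sketch of that condition; the struct and field names are simplified stand-ins, not the real VDO types.

/*
 * Minimal userspace sketch of the drain gate shown above: draining finishes
 * only when every class of in-flight work has quiesced. The struct and field
 * names are simplified stand-ins, not the real VDO types.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_zone {
	bool draining;                   /* vdo_is_state_draining() stand-in */
	unsigned int active_lookups;     /* block map lookups in progress */
	unsigned int flush_waiters;      /* vdo_waitq_has_waiters() stand-in */
	unsigned int busy_vios;          /* is_vio_pool_busy() stand-in */
	unsigned int outstanding_reads;  /* page cache reads in flight */
	unsigned int outstanding_writes; /* page cache writes in flight */
};

static bool drain_is_complete(const struct fake_zone *zone)
{
	return (zone->draining &&
		(zone->active_lookups == 0) &&
		(zone->flush_waiters == 0) &&
		(zone->busy_vios == 0) &&
		(zone->outstanding_reads == 0) &&
		(zone->outstanding_writes == 0));
}

int main(void)
{
	struct fake_zone zone = { .draining = true, .outstanding_writes = 1 };

	printf("complete: %d\n", drain_is_complete(&zone)); /* 0: a write is still out */
	zone.outstanding_writes = 0;
	printf("complete: %d\n", drain_is_complete(&zone)); /* 1: nothing outstanding */
	return 0;
}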
636 static void enter_zone_read_only_mode(struct block_map_zone *zone, int result) in enter_zone_read_only_mode() argument
638 vdo_enter_read_only_mode(zone->block_map->vdo, result); in enter_zone_read_only_mode()
642 * Just take all waiters off the waitq so the zone can drain. in enter_zone_read_only_mode()
644 vdo_waitq_init(&zone->flush_waiters); in enter_zone_read_only_mode()
645 check_for_drain_complete(zone); in enter_zone_read_only_mode()
657 enter_zone_read_only_mode(completion->info->cache->zone, result); in validate_completed_page_or_enter_read_only_mode()
673 vdo_enter_read_only_mode(cache->zone->block_map->vdo, result); in handle_load_error()
684 check_for_drain_complete(cache->zone); in handle_load_error()
695 nonce_t nonce = info->cache->zone->block_map->nonce; in page_is_loaded()
726 check_for_drain_complete(cache->zone); in page_is_loaded()
756 continue_vio_after_io(vio, page_is_loaded, info->cache->zone->thread_id); in load_cache_page_endio()
807 continue_vio_after_io(vio, write_pages, info->cache->zone->thread_id); in flush_endio()
1025 check_for_drain_complete(cache->zone); in handle_page_write_error()
1035 continue_vio_after_io(vio, page_is_written_out, info->cache->zone->thread_id); in write_cache_page_endio()
1060 vdo_release_recovery_journal_block_reference(cache->zone->block_map->journal, in page_is_written_out()
1063 cache->zone->zone_number); in page_is_written_out()
1081 check_for_drain_complete(cache->zone); in page_is_written_out()
1195 * @zone: The block map zone of the desired page.
1209 struct block_map_zone *zone, physical_block_number_t pbn, in vdo_get_page() argument
1213 struct vdo_page_cache *cache = &zone->page_cache; in vdo_get_page()
1229 cache->zone->thread_id, parent); in vdo_get_page()
1375 static inline struct tree_page *get_tree_page(const struct block_map_zone *zone, in get_tree_page() argument
1378 return get_tree_page_by_index(zone->block_map->forest, lock->root_index, in get_tree_page()
1430 * context of a zone's current generation range.
1431 * @zone: The zone in which to do the comparison.
1435 * Return: true if generation @a is not strictly older than generation @b in the context of @zone
1437 static bool __must_check is_not_older(struct block_map_zone *zone, u8 a, u8 b) in is_not_older() argument
1441 result = VDO_ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) && in is_not_older()
1442 in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)), in is_not_older()
1444 a, b, zone->oldest_generation, zone->generation); in is_not_older()
1446 enter_zone_read_only_mode(zone, result); in is_not_older()
1450 return in_cyclic_range(b, a, zone->generation, 1 << 8); in is_not_older()
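The is_not_older() lines above compare 8-bit page generations that wrap at 256, so ordering is only meaningful inside the window from oldest_generation to generation. The sketch below is a plausible userspace re-implementation of the cyclic-range test for illustration; it is not the kernel's in_cyclic_range() helper.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

/* Is @value inside the cyclic window [@lower, @upper] (mod @modulus)? */
static bool in_cyclic_range(unsigned int lower, unsigned int value,
			    unsigned int upper, unsigned int modulus)
{
	if (value < lower)
		value += modulus;
	if (upper < lower)
		upper += modulus;
	return value <= upper;
}

/* Generation @a is "not older" than @b if @a falls in the window [@b, current]. */
static bool is_not_older(u8 current_generation, u8 a, u8 b)
{
	return in_cyclic_range(b, a, current_generation, 1 << 8);
}

int main(void)
{
	/* With wraparound: the current generation is 2, so 254 is older than 1. */
	printf("%d\n", is_not_older(2, 254, 1)); /* 0: 254 predates 1 */
	printf("%d\n", is_not_older(2, 1, 254)); /* 1: 1 is at least as new as 254 */
	return 0;
}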
1453 static void release_generation(struct block_map_zone *zone, u8 generation) in release_generation() argument
1457 result = VDO_ASSERT((zone->dirty_page_counts[generation] > 0), in release_generation()
1460 enter_zone_read_only_mode(zone, result); in release_generation()
1464 zone->dirty_page_counts[generation]--; in release_generation()
1465 while ((zone->dirty_page_counts[zone->oldest_generation] == 0) && in release_generation()
1466 (zone->oldest_generation != zone->generation)) in release_generation()
1467 zone->oldest_generation++; in release_generation()
1470 static void set_generation(struct block_map_zone *zone, struct tree_page *page, in set_generation() argument
1482 new_count = ++zone->dirty_page_counts[new_generation]; in set_generation()
1486 enter_zone_read_only_mode(zone, result); in set_generation()
1491 release_generation(zone, old_generation); in set_generation()
1502 static void acquire_vio(struct vdo_waiter *waiter, struct block_map_zone *zone) in acquire_vio() argument
1505 acquire_vio_from_pool(zone->vio_pool, waiter); in acquire_vio()
1509 static bool attempt_increment(struct block_map_zone *zone) in attempt_increment() argument
1511 u8 generation = zone->generation + 1; in attempt_increment()
1513 if (zone->oldest_generation == generation) in attempt_increment()
1516 zone->generation = generation; in attempt_increment()
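release_generation() and attempt_increment() above manage a rolling window of dirty-page generations: a 256-slot count array, the current generation, and the oldest generation still holding dirty pages. Here is a simplified stand-alone model of that bookkeeping; the struct is a hypothetical stand-in, not the real block_map_zone.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

struct generations {
	u8 generation;                    /* generation currently being dirtied */
	u8 oldest_generation;             /* oldest generation still holding dirty pages */
	uint32_t dirty_page_counts[256];  /* dirty pages per generation */
};

/* A page of @generation was written out; advance the oldest generation past empty slots. */
static void release_generation(struct generations *g, u8 generation)
{
	g->dirty_page_counts[generation]--;
	while ((g->dirty_page_counts[g->oldest_generation] == 0) &&
	       (g->oldest_generation != g->generation))
		g->oldest_generation++; /* u8 arithmetic wraps at 256 */
}

/* Try to open a new generation; refuse if it would collide with the oldest one. */
static bool attempt_increment(struct generations *g)
{
	u8 next = g->generation + 1;

	if (g->oldest_generation == next)
		return false;

	g->generation = next;
	return true;
}

int main(void)
{
	struct generations g = { .generation = 5, .oldest_generation = 3 };

	g.dirty_page_counts[3] = 1;
	g.dirty_page_counts[5] = 2;

	release_generation(&g, 3);
	printf("oldest is now %u\n", g.oldest_generation);   /* 5: generation 4 was empty */
	printf("increment ok: %d\n", attempt_increment(&g)); /* 1 */
	return 0;
}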
1521 static void enqueue_page(struct tree_page *page, struct block_map_zone *zone) in enqueue_page() argument
1523 if ((zone->flusher == NULL) && attempt_increment(zone)) { in enqueue_page()
1524 zone->flusher = page; in enqueue_page()
1525 acquire_vio(&page->waiter, zone); in enqueue_page()
1529 vdo_waitq_enqueue_waiter(&zone->flush_waiters, &page->waiter); in enqueue_page()
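enqueue_page() above implements a single-flusher policy: at most one tree page per zone is being written at a time, and every other dirty page queues on flush_waiters until the flusher finishes or a new generation can be opened. A toy sketch of that decision, using stand-in types rather than the real structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_page {
	const char *name;
	struct fake_page *next_waiter; /* stand-in for the waitq linkage */
};

struct fake_zone {
	struct fake_page *flusher;       /* page currently being written, if any */
	struct fake_page *flush_waiters; /* pages waiting for the flusher to finish */
	bool can_open_generation;        /* attempt_increment() stand-in */
};

static void enqueue_page(struct fake_zone *zone, struct fake_page *page)
{
	if ((zone->flusher == NULL) && zone->can_open_generation) {
		/* This page becomes the flusher; the real code now acquires a vio. */
		zone->flusher = page;
		return;
	}

	/* Otherwise the page waits for the current flusher to complete. */
	page->next_waiter = zone->flush_waiters;
	zone->flush_waiters = page;
}

int main(void)
{
	struct fake_zone zone = { .can_open_generation = true };
	struct fake_page a = { .name = "A" }, b = { .name = "B" };

	enqueue_page(&zone, &a);
	enqueue_page(&zone, &b);
	printf("flusher: %s, waiting: %s\n",
	       zone.flusher->name, zone.flush_waiters->name); /* flusher: A, waiting: B */
	return 0;
}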
1538 acquire_vio(waiter, write_context->zone); in write_page_if_not_dirtied()
1542 enqueue_page(page, write_context->zone); in write_page_if_not_dirtied()
1545 static void return_to_pool(struct block_map_zone *zone, struct pooled_vio *vio) in return_to_pool() argument
1547 return_vio_to_pool(zone->vio_pool, vio); in return_to_pool()
1548 check_for_drain_complete(zone); in return_to_pool()
1558 struct block_map_zone *zone = pooled->context; in finish_page_write() local
1560 vdo_release_recovery_journal_block_reference(zone->block_map->journal, in finish_page_write()
1563 zone->zone_number); in finish_page_write()
1566 release_generation(zone, page->writing_generation); in finish_page_write()
1569 if (zone->flusher == page) { in finish_page_write()
1571 .zone = zone, in finish_page_write()
1575 vdo_waitq_notify_all_waiters(&zone->flush_waiters, in finish_page_write()
1577 if (dirty && attempt_increment(zone)) { in finish_page_write()
1582 zone->flusher = NULL; in finish_page_write()
1586 enqueue_page(page, zone); in finish_page_write()
1587 } else if ((zone->flusher == NULL) && vdo_waitq_has_waiters(&zone->flush_waiters) && in finish_page_write()
1588 attempt_increment(zone)) { in finish_page_write()
1589 zone->flusher = container_of(vdo_waitq_dequeue_waiter(&zone->flush_waiters), in finish_page_write()
1591 write_page(zone->flusher, pooled); in finish_page_write()
1595 return_to_pool(zone, pooled); in finish_page_write()
1603 struct block_map_zone *zone = pooled->context; in handle_write_error() local
1606 enter_zone_read_only_mode(zone, result); in handle_write_error()
1607 return_to_pool(zone, pooled); in handle_write_error()
1616 struct block_map_zone *zone = pooled->context; in write_initialized_page() local
1627 if (zone->flusher == tree_page) in write_initialized_page()
1638 struct block_map_zone *zone = vio->context; in write_page_endio() local
1644 zone->thread_id); in write_page_endio()
1650 struct block_map_zone *zone = vio->context; in write_page() local
1653 if ((zone->flusher != tree_page) && in write_page()
1654 is_not_older(zone, tree_page->generation, zone->generation)) { in write_page()
1659 enqueue_page(tree_page, zone); in write_page()
1660 return_to_pool(zone, vio); in write_page()
1666 completion->callback_thread_id = zone->thread_id; in write_page()
1695 struct block_map_zone *zone; in release_page_lock() local
1703 zone = data_vio->logical.zone->block_map_zone; in release_page_lock()
1704 lock_holder = vdo_int_map_remove(zone->loading_pages, lock->key); in release_page_lock()
1715 --data_vio->logical.zone->block_map_zone->active_lookups; in finish_lookup()
1740 enter_zone_read_only_mode(data_vio->logical.zone->block_map_zone, result); in abort_lookup()
1773 static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio);
1774 static void allocate_block_map_page(struct block_map_zone *zone,
1797 allocate_block_map_page(data_vio->logical.zone->block_map_zone, in continue_with_loaded_page()
1809 load_block_map_page(data_vio->logical.zone->block_map_zone, data_vio); in continue_with_loaded_page()
1829 struct block_map_zone *zone = pooled->context; in finish_block_map_page_load() local
1834 tree_page = get_tree_page(zone, tree_lock); in finish_block_map_page_load()
1836 nonce = zone->block_map->nonce; in finish_block_map_page_load()
1840 return_vio_to_pool(zone->vio_pool, pooled); in finish_block_map_page_load()
1854 struct block_map_zone *zone = pooled->context; in handle_io_error() local
1857 return_vio_to_pool(zone->vio_pool, pooled); in handle_io_error()
1867 data_vio->logical.zone->thread_id); in load_page_endio()
1886 static int attempt_page_lock(struct block_map_zone *zone, struct data_vio *data_vio) in attempt_page_lock() argument
1903 result = vdo_int_map_put(zone->loading_pages, lock->key, in attempt_page_lock()
1920 static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio) in load_block_map_page() argument
1924 result = attempt_page_lock(zone, data_vio); in load_block_map_page()
1932 acquire_vio_from_pool(zone->vio_pool, &data_vio->waiter); in load_block_map_page()
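attempt_page_lock() and load_block_map_page() above serialize loads of the same tree page: the first data_vio to register the page's key in the zone's loading_pages map becomes the lock holder and acquires a vio from the pool to issue the read, while later arrivals for the same key wait behind it. The toy fixed-size table below stands in for vdo_int_map purely to show the shape of that decision.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LOCKS 16

struct page_lock {
	uint64_t key;     /* identifies the tree page being loaded */
	int waiter_count; /* the holder plus any queued waiters */
};

struct loading_pages {
	struct page_lock locks[MAX_LOCKS];
	size_t count;
};

/* Returns true if the caller became the lock holder and should issue the load. */
static bool attempt_page_lock(struct loading_pages *map, uint64_t key)
{
	size_t i;

	for (i = 0; i < map->count; i++) {
		if (map->locks[i].key == key) {
			map->locks[i].waiter_count++; /* queue behind the holder */
			return false;
		}
	}

	if (map->count == MAX_LOCKS)
		return false; /* toy table is full; the real int map grows */

	map->locks[map->count++] = (struct page_lock) { .key = key, .waiter_count = 1 };
	return true;
}

int main(void)
{
	struct loading_pages map = { .count = 0 };

	printf("first vio issues the load: %d\n", attempt_page_lock(&map, 42));  /* 1 */
	printf("second vio issues the load: %d\n", attempt_page_lock(&map, 42)); /* 0: it waits */
	return 0;
}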
1941 data_vio->logical.zone->thread_id)) in allocation_failure()
1961 allocate_block_map_page(data_vio->logical.zone->block_map_zone, data_vio); in continue_allocation_for_waiter()
1996 static void write_expired_elements(struct block_map_zone *zone) in write_expired_elements() argument
2001 u8 generation = zone->generation; in write_expired_elements()
2003 expired = &zone->dirty_lists->expired[VDO_TREE_PAGE]; in write_expired_elements()
2012 enter_zone_read_only_mode(zone, result); in write_expired_elements()
2016 set_generation(zone, page, generation); in write_expired_elements()
2018 enqueue_page(page, zone); in write_expired_elements()
2021 expired = &zone->dirty_lists->expired[VDO_CACHE_PAGE]; in write_expired_elements()
2027 save_pages(&zone->page_cache); in write_expired_elements()
2032 * @zone: The zone in which we are operating.
2039 static void add_to_dirty_lists(struct block_map_zone *zone, in add_to_dirty_lists() argument
2045 struct dirty_lists *dirty_lists = zone->dirty_lists; in add_to_dirty_lists()
2058 write_expired_elements(zone); in add_to_dirty_lists()
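add_to_dirty_lists() and write_expired_elements() above drive age-based write-back: each dirty tree or cache page is filed under the era (journal period) in which it was dirtied, and once the current era has advanced past that period plus the configured maximum age, the page is expired and written out. Below is a conceptual sketch of the expiry test only, under simplified assumptions rather than the real dirty_lists layout.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sequence_number_t;

struct dirty_element {
	const char *name;
	sequence_number_t dirtied_period; /* era in which the page became dirty */
};

static bool is_expired(const struct dirty_element *element,
		       sequence_number_t current_period,
		       sequence_number_t maximum_age)
{
	return (element->dirtied_period + maximum_age) <= current_period;
}

int main(void)
{
	const sequence_number_t maximum_age = 5;
	struct dirty_element pages[] = {
		{ .name = "tree page A",  .dirtied_period = 10 },
		{ .name = "cache page B", .dirtied_period = 14 },
	};
	sequence_number_t current_period = 16;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		if (is_expired(&pages[i], current_period, maximum_age))
			printf("%s has expired; write it out\n", pages[i].name);
		else
			printf("%s is still young; keep it dirty\n", pages[i].name);
	}
	return 0;
}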
2072 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone; in finish_block_map_allocation() local
2078 tree_page = get_tree_page(zone, tree_lock); in finish_block_map_allocation()
2090 if (zone->flusher != tree_page) { in finish_block_map_allocation()
2095 set_generation(zone, tree_page, zone->generation); in finish_block_map_allocation()
2101 add_to_dirty_lists(zone, &tree_page->entry, VDO_TREE_PAGE, in finish_block_map_allocation()
2108 tree_page = get_tree_page(zone, tree_lock); in finish_block_map_allocation()
2110 zone->block_map->nonce, in finish_block_map_allocation()
2123 allocate_block_map_page(zone, data_vio); in finish_block_map_allocation()
2188 static void allocate_block_map_page(struct block_map_zone *zone, in allocate_block_map_page() argument
2199 result = attempt_page_lock(zone, data_vio); in allocate_block_map_page()
2225 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone; in vdo_find_block_map_slot() local
2227 zone->active_lookups++; in vdo_find_block_map_slot()
2228 if (vdo_is_state_draining(&zone->state)) { in vdo_find_block_map_slot()
2235 page_index = (lock->tree_slots[0].page_index / zone->block_map->root_count); in vdo_find_block_map_slot()
2248 page = (struct block_map_page *) (get_tree_page(zone, lock)->page_buffer); in vdo_find_block_map_slot()
2275 allocate_block_map_page(zone, data_vio); in vdo_find_block_map_slot()
2287 load_block_map_page(zone, data_vio); in vdo_find_block_map_slot()
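vdo_find_block_map_slot() above divides the looked-up page index by the block map's root_count, which reflects leaf pages being spread across the forest's root trees: plausibly, logical page n belongs to root n % root_count at position n / root_count within that root. The tiny illustration below is an assumption drawn from the division shown above, not a quote of the kernel code.

#include <stdio.h>

int main(void)
{
	unsigned int root_count = 4; /* number of block map trees (example value) */
	unsigned int page_number;

	/* Spread consecutive leaf pages across the roots round-robin. */
	for (page_number = 0; page_number < 8; page_number++) {
		unsigned int root_index = page_number % root_count; /* which tree */
		unsigned int page_index = page_number / root_count; /* index inside that tree */

		printf("page %u -> root %u, index %u\n",
		       page_number, root_index, page_index);
	}
	return 0;
}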
2321 void vdo_write_tree_page(struct tree_page *page, struct block_map_zone *zone) in vdo_write_tree_page() argument
2325 if (waiting && (zone->flusher == page)) in vdo_write_tree_page()
2328 set_generation(zone, page, zone->generation); in vdo_write_tree_page()
2332 enqueue_page(page, zone); in vdo_write_tree_page()
2537 cursor->parent->zone->block_map->nonce, in finish_traversal_load()
2548 cursor->parent->zone->thread_id); in traversal_endio()
2578 vdo_write_tree_page(tree_page, cursor->parent->zone); in traverse()
2588 vdo_write_tree_page(tree_page, cursor->parent->zone); in traverse()
2597 vdo_write_tree_page(tree_page, cursor->parent->zone); in traverse()
2634 pooled->vio.completion.callback_thread_id = cursor->parent->zone->thread_id; in launch_cursor()
2689 cursors->zone = &map->zones[0]; in vdo_traverse_forest()
2690 cursors->pool = cursors->zone->vio_pool; in vdo_traverse_forest()
2710 * initialize_block_map_zone() - Initialize the per-zone portions of the block map.
2722 struct block_map_zone *zone = &map->zones[zone_number]; in initialize_block_map_zone() local
2726 zone->zone_number = zone_number; in initialize_block_map_zone()
2727 zone->thread_id = vdo->thread_config.logical_threads[zone_number]; in initialize_block_map_zone()
2728 zone->block_map = map; in initialize_block_map_zone()
2732 &zone->dirty_lists); in initialize_block_map_zone()
2736 zone->dirty_lists->maximum_age = maximum_age; in initialize_block_map_zone()
2737 INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_TREE_PAGE]); in initialize_block_map_zone()
2738 INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_CACHE_PAGE]); in initialize_block_map_zone()
2741 INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_TREE_PAGE]); in initialize_block_map_zone()
2742 INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_CACHE_PAGE]); in initialize_block_map_zone()
2745 result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->loading_pages); in initialize_block_map_zone()
2750 zone->thread_id, VIO_TYPE_BLOCK_MAP_INTERIOR, in initialize_block_map_zone()
2751 VIO_PRIORITY_METADATA, zone, &zone->vio_pool); in initialize_block_map_zone()
2755 vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION); in initialize_block_map_zone()
2757 zone->page_cache.zone = zone; in initialize_block_map_zone()
2758 zone->page_cache.vdo = vdo; in initialize_block_map_zone()
2759 zone->page_cache.page_count = cache_size / map->zone_count; in initialize_block_map_zone()
2760 zone->page_cache.stats.free_pages = zone->page_cache.page_count; in initialize_block_map_zone()
2762 result = allocate_cache_components(&zone->page_cache); in initialize_block_map_zone()
2767 INIT_LIST_HEAD(&zone->page_cache.lru_list); in initialize_block_map_zone()
2768 INIT_LIST_HEAD(&zone->page_cache.outgoing_list); in initialize_block_map_zone()
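initialize_block_map_zone() above binds each zone to a logical-zone thread and gives it an even share of the configured page cache, with free_pages starting at that share. A trivial userspace illustration of the split, with example numbers only:

#include <stdio.h>

int main(void)
{
	unsigned int cache_size = 2048; /* total cached block map pages (example value) */
	unsigned int zone_count = 3;    /* logical zones (example value) */
	unsigned int zone;

	for (zone = 0; zone < zone_count; zone++) {
		unsigned int page_count = cache_size / zone_count;

		/* Each zone starts with its whole share free. */
		printf("zone %u: page_count=%u free_pages=%u\n",
		       zone, page_count, page_count);
	}
	return 0;
}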
2795 struct block_map_zone *zone = &map->zones[zone_number]; in advance_block_map_zone_era() local
2797 update_period(zone->dirty_lists, map->current_era_point); in advance_block_map_zone_era()
2798 write_expired_elements(zone); in advance_block_map_zone_era()
2819 static void uninitialize_block_map_zone(struct block_map_zone *zone) in uninitialize_block_map_zone() argument
2821 struct vdo_page_cache *cache = &zone->page_cache; in uninitialize_block_map_zone()
2823 vdo_free(vdo_forget(zone->dirty_lists)); in uninitialize_block_map_zone()
2824 free_vio_pool(vdo_forget(zone->vio_pool)); in uninitialize_block_map_zone()
2825 vdo_int_map_free(vdo_forget(zone->loading_pages)); in uninitialize_block_map_zone()
2840 zone_count_t zone; in vdo_free_block_map() local
2845 for (zone = 0; zone < map->zone_count; zone++) in vdo_free_block_map()
2846 uninitialize_block_map_zone(&map->zones[zone]); in vdo_free_block_map()
2863 zone_count_t zone = 0; in vdo_decode_block_map() local
2894 for (zone = 0; zone < map->zone_count; zone++) { in vdo_decode_block_map()
2895 result = initialize_block_map_zone(map, zone, cache_size, maximum_age); in vdo_decode_block_map()
2945 /* Compute the logical zone for the LBN of a data vio. */
2970 struct block_map_zone *zone = container_of(state, struct block_map_zone, state); in initiate_drain() local
2972 VDO_ASSERT_LOG_ONLY((zone->active_lookups == 0), in initiate_drain()
2976 while (zone->dirty_lists->oldest_period < zone->dirty_lists->next_period) in initiate_drain()
2977 expire_oldest_list(zone->dirty_lists); in initiate_drain()
2978 write_expired_elements(zone); in initiate_drain()
2981 check_for_drain_complete(zone); in initiate_drain()
2989 struct block_map_zone *zone = &map->zones[zone_number]; in drain_zone() local
2991 vdo_start_draining(&zone->state, in drain_zone()
3008 struct block_map_zone *zone = &map->zones[zone_number]; in resume_block_map_zone() local
3010 vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state)); in resume_block_map_zone()
3080 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone; in fetch_mapping_page() local
3082 if (vdo_is_state_draining(&zone->state)) { in fetch_mapping_page()
3087 vdo_get_page(&data_vio->page_completion, zone, in fetch_mapping_page()
3122 mapped.pbn, &data_vio->mapped.zone); in set_mapped_location()
3194 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone; in vdo_update_block_map_page() local
3195 struct block_map *block_map = zone->block_map; in vdo_update_block_map_page()
3211 zone->zone_number); in vdo_update_block_map_page()
3216 zone->zone_number); in vdo_update_block_map_page()
3256 add_to_dirty_lists(info->cache->zone, &info->state_entry, in put_mapping_in_fetched_page()
3285 zone_count_t zone = 0; in vdo_get_block_map_statistics() local
3289 for (zone = 0; zone < map->zone_count; zone++) { in vdo_get_block_map_statistics()
3291 &(map->zones[zone].page_cache.stats); in vdo_get_block_map_statistics()
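vdo_get_block_map_statistics() above walks every zone and folds its page-cache counters into the reported totals. Here is a reduced sketch of that aggregation; the stats struct is a stand-in (free_pages matches the per-zone field shown above, dirty_pages is a hypothetical second counter), not the real block map statistics structure.

#include <stdio.h>

struct cache_stats {
	unsigned long free_pages;  /* as in the per-zone field shown above */
	unsigned long dirty_pages; /* hypothetical second counter */
};

int main(void)
{
	struct cache_stats zones[3] = {
		{ .free_pages = 100, .dirty_pages = 7 },
		{ .free_pages = 90,  .dirty_pages = 12 },
		{ .free_pages = 110, .dirty_pages = 3 },
	};
	struct cache_stats totals = { 0 };
	unsigned int zone;

	for (zone = 0; zone < 3; zone++) {
		totals.free_pages += zones[zone].free_pages;
		totals.dirty_pages += zones[zone].dirty_pages;
	}

	printf("free=%lu dirty=%lu\n", totals.free_pages, totals.dirty_pages);
	return 0;
}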