Lines Matching +full:fine +full:- +full:granular
1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm_init.c - Memory initialisation verification and debugging
18 #include <linux/page-isolation.h>
62 zonelist = &pgdat->node_zonelists[listid]; in mminit_verify_zonelist()
63 zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
70 zone->name); in mminit_verify_zonelist()
74 pr_cont("%d:%s ", zone_to_nid(zone), zone->name); in mminit_verify_zonelist()
86 width = shift - NR_NON_PAGEFLAG_BITS; in mminit_verify_pageflags_layout()
112 "Node/Zone ID: %lu -> %lu\n", in mminit_verify_pageflags_layout()
116 "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n", in mminit_verify_pageflags_layout()
128 shift -= SECTIONS_WIDTH; in mminit_verify_pageflags_layout()
132 shift -= NODES_WIDTH; in mminit_verify_pageflags_layout()
136 shift -= ZONES_WIDTH; in mminit_verify_pageflags_layout()
213 return -ENOMEM; in mm_sysfs_init()
242 return -EINVAL; in cmdline_parse_core()
304 unsigned long pages = end_pfn - start_pfn; in early_calculate_totalpages()
321 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { in find_usable_zone_for_movable()
330 VM_BUG_ON(zone_index == -1); in find_usable_zone_for_movable()
437 * Round-up so that ZONE_MOVABLE is at least as large as what in find_zone_movable_pfns_for_nodes()
443 corepages = totalpages - required_movablecore; in find_zone_movable_pfns_for_nodes()
491 - start_pfn; in find_zone_movable_pfns_for_nodes()
493 kernelcore_remaining -= min(kernel_pages, in find_zone_movable_pfns_for_nodes()
495 required_kernelcore -= min(kernel_pages, in find_zone_movable_pfns_for_nodes()
515 * start_pfn->end_pfn. Calculate size_pages as the in find_zone_movable_pfns_for_nodes()
518 size_pages = end_pfn - start_pfn; in find_zone_movable_pfns_for_nodes()
528 required_kernelcore -= min(required_kernelcore, in find_zone_movable_pfns_for_nodes()
530 kernelcore_remaining -= size_pages; in find_zone_movable_pfns_for_nodes()
542 usable_nodes--; in find_zone_movable_pfns_for_nodes()
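For orientation, the restart loop that ends with the usable_nodes-- above spreads required_kernelcore evenly across the usable nodes and re-splits whatever a node could not absorb. A minimal user-space sketch of that arithmetic follows; all names and node capacities are illustrative, and the per-memblock-range bookkeeping the kernel does is omitted.

#include <stdio.h>

int main(void)
{
	/* hypothetical non-MOVABLE capacity of three nodes, in 4 KiB pages */
	unsigned long capacity[] = { 196608, 524288, 524288 };	/* 768 MiB, 2 GiB, 2 GiB */
	unsigned long granted[]  = { 0, 0, 0 };
	unsigned long required_kernelcore = 786432;		/* kernelcore=3G */
	int usable_nodes = 3;

	while (usable_nodes && required_kernelcore > (unsigned long)usable_nodes) {
		/* even split of what is still required, as in the kernel's restart loop */
		unsigned long kernelcore_node = required_kernelcore / usable_nodes;

		for (int nid = 0; nid < 3; nid++) {
			unsigned long room = capacity[nid] - granted[nid];
			unsigned long take;

			if (!room)
				continue;		/* node already exhausted */
			take = room < kernelcore_node ? room : kernelcore_node;
			granted[nid] += take;
			required_kernelcore -= take;
			if (granted[nid] == capacity[nid])
				usable_nodes--;		/* drop exhausted node from the next split */
		}
		if (!required_kernelcore)
			break;
	}

	for (int nid = 0; nid < 3; nid++)
		printf("node %d: %lu pages kept out of ZONE_MOVABLE\n", nid, granted[nid]);
	return 0;
}

With these numbers the first pass grants 196608/262144/262144 pages, node 0 runs out, and the second pass spreads the remaining 65536 pages over the other two nodes.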
570 atomic_set(&page->_mapcount, -1); in __init_single_page()
574 INIT_LIST_HEAD(&page->lru); in __init_single_page()
605 if (state->last_start <= pfn && pfn < state->last_end) in __early_pfn_to_nid()
606 return state->last_nid; in __early_pfn_to_nid()
610 state->last_start = start_pfn; in __early_pfn_to_nid()
611 state->last_end = end_pfn; in __early_pfn_to_nid()
612 state->last_nid = nid; in __early_pfn_to_nid()
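The three stores above refresh a one-entry cache (struct mminit_pfnnid_cache in the kernel) so that the next early pfn-to-nid lookup usually hits the range found last time instead of rescanning memblock. Below is a self-contained user-space model of the same caching pattern; the struct and function names are illustrative, and a hypothetical static node map stands in for memblock.

#include <stdio.h>

struct pfn_range { unsigned long start, end; int nid; };

/* hypothetical early node map; in the kernel this information comes from memblock */
static const struct pfn_range node_map[] = {
	{ 0x00000, 0x40000, 0 },	/* node 0: first 1 GiB of 4 KiB pages */
	{ 0x40000, 0x80000, 1 },	/* node 1: second 1 GiB */
};

struct pfnnid_cache { unsigned long last_start, last_end; int last_nid; };

static int cached_pfn_to_nid(unsigned long pfn, struct pfnnid_cache *state)
{
	/* hot path: the previously found range already covers this pfn */
	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	/* slow path: scan the map, then remember the whole matching range */
	for (unsigned long i = 0; i < sizeof(node_map) / sizeof(node_map[0]); i++) {
		if (node_map[i].start <= pfn && pfn < node_map[i].end) {
			state->last_start = node_map[i].start;
			state->last_end = node_map[i].end;
			state->last_nid = node_map[i].nid;
			return state->last_nid;
		}
	}
	return -1;
}

int main(void)
{
	struct pfnnid_cache cache = { 0, 0, -1 };

	printf("pfn 0x10000 -> node %d\n", cached_pfn_to_nid(0x10000, &cache));
	printf("pfn 0x10001 -> node %d (served from the cache)\n",
	       cached_pfn_to_nid(0x10001, &cache));
	return 0;
}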
655 pgdat->first_deferred_pfn = ULONG_MAX; in pgdat_set_deferred_range()
661 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) in early_page_initialised()
679 /* Always populate low zones for address-constrained allocations */ in defer_init()
683 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX) in defer_init()
701 (pfn & (PAGES_PER_SECTION - 1)) == 0) { in defer_init()
702 NODE_DATA(nid)->first_deferred_pfn = pfn; in defer_init()
719 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page()
804 * - physical memory bank size is not necessarily the exact multiple of the
806 * - early reserved memory may not be listed in memblock.memory
807 - non-memory regions covered by the contiguous flatmem mapping
808 * - memory layouts defined with memmap= kernel parameter may not align
812 * - PG_Reserved is set
813 * - zone and node links point to zone and node that span the page if the
815 * - zone and node links point to adjacent zone/node if the hole falls on
829 pfn = pageblock_end_pfn(pfn) - 1; in init_unavailable_range()
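A quick arithmetic check on the "- 1" above, assuming pageblock_order = 9 so pageblock_nr_pages = 512: if pfn = 1000 lands in a pageblock whose memmap is not valid, pageblock_end_pfn(1000) = 1024, so pfn is set to 1023 and the enclosing loop's pfn++ moves it to 1024, the first pfn of the next pageblock; the rest of the invalid block is skipped in a single step.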
843 * Initially all pages are reserved - free ones are freed
845 * done. Non-atomic initialization, single-pass.
859 if (highest_memmap_pfn < end_pfn - 1) in memmap_init_range()
860 highest_memmap_pfn = end_pfn - 1; in memmap_init_range()
874 if (start_pfn == altmap->base_pfn) in memmap_init_range()
875 start_pfn += altmap->reserve; in memmap_init_range()
876 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); in memmap_init_range()
882 * There can be holes in boot-time mem_map[]s handed to this in memmap_init_range()
923 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone_range()
924 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone_range()
933 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, in memmap_init_zone_range()
952 struct zone *zone = node->node_zones + j; in memmap_init()
991 * We can use the non-atomic __set_bit operation for setting in __init_zone_device_page()
997 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer in __init_zone_device_page()
999 * ever freed or placed on a driver-private list. in __init_zone_device_page()
1001 page->pgmap = pgmap; in __init_zone_device_page()
1002 page->zone_device_data = NULL; in __init_zone_device_page()
1008 * the address space during boot when many long-lived in __init_zone_device_page()
1023 if (pgmap->type == MEMORY_DEVICE_PRIVATE || in __init_zone_device_page()
1024 pgmap->type == MEMORY_DEVICE_COHERENT) in __init_zone_device_page()
1052 unsigned int order = pgmap->vmemmap_shift; in memmap_init_compound()
1059 prep_compound_tail(head, pfn - head_pfn); in memmap_init_compound()
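For scale on the call above: pgmap->vmemmap_shift is the compound order of the device pages, so with vmemmap_shift = 9 every 512 consecutive pfns (2 MiB with 4 KiB pages) share one head page, and for pfn = head_pfn + 3 the call becomes prep_compound_tail(head, 3), marking the page three pfns past the head as a tail that refers back to it.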
1078 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
1083 int nid = pgdat->node_id; in memmap_init_zone_device()
1094 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); in memmap_init_zone_device()
1095 nr_pages = end_pfn - start_pfn; in memmap_init_zone_device()
1111 nr_pages, jiffies_to_msecs(jiffies - start)); in memmap_init_zone_device()
1159 unsigned long nr_absent = range_end_pfn - range_start_pfn; in __absent_pages_in_range()
1166 nr_absent -= end_pfn - start_pfn; in __absent_pages_in_range()
1172 * absent_pages_in_range - Return number of page frames in holes within a range
1215 nr_absent += end_pfn - start_pfn; in zone_absent_pages_in_node()
1219 nr_absent += end_pfn - start_pfn; in zone_absent_pages_in_node()
1228 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
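As a concrete instance of the identity above: a zone spanning pfns 0x10000..0x18000 (spanned = 0x8000 pages) with a 0x2000-page hole reported absent by memblock ends up with present_pages = 0x8000 - 0x2000 = 0x6000 pages, i.e. 96 MiB of actually present memory with 4 KiB pages.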
1255 return *zone_end_pfn - *zone_start_pfn; in zone_spanned_pages_in_node()
1262 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) { in reset_memoryless_node_totalpages()
1263 z->zone_start_pfn = 0; in reset_memoryless_node_totalpages()
1264 z->spanned_pages = 0; in reset_memoryless_node_totalpages()
1265 z->present_pages = 0; in reset_memoryless_node_totalpages()
1267 z->present_early_pages = 0; in reset_memoryless_node_totalpages()
1271 pgdat->node_spanned_pages = 0; in reset_memoryless_node_totalpages()
1272 pgdat->node_present_pages = 0; in reset_memoryless_node_totalpages()
1273 pr_debug("On node %d totalpages: 0\n", pgdat->node_id); in reset_memoryless_node_totalpages()
1290 nr_all_pages += end_pfn - start_pfn; in calc_nr_kernel_pages()
1295 nr_kernel_pages += end_pfn - start_pfn; in calc_nr_kernel_pages()
1308 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages()
1313 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, in calculate_node_totalpages()
1318 absent = zone_absent_pages_in_node(pgdat->node_id, i, in calculate_node_totalpages()
1322 real_size = spanned - absent; in calculate_node_totalpages()
1325 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
1327 zone->zone_start_pfn = 0; in calculate_node_totalpages()
1328 zone->spanned_pages = spanned; in calculate_node_totalpages()
1329 zone->present_pages = real_size; in calculate_node_totalpages()
1331 zone->present_early_pages = real_size; in calculate_node_totalpages()
1338 pgdat->node_spanned_pages = totalpages; in calculate_node_totalpages()
1339 pgdat->node_present_pages = realtotalpages; in calculate_node_totalpages()
1340 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); in calculate_node_totalpages()
1346 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; in pgdat_init_split_queue()
1348 spin_lock_init(&ds_queue->split_queue_lock); in pgdat_init_split_queue()
1349 INIT_LIST_HEAD(&ds_queue->split_queue); in pgdat_init_split_queue()
1350 ds_queue->split_queue_len = 0; in pgdat_init_split_queue()
1359 init_waitqueue_head(&pgdat->kcompactd_wait); in pgdat_init_kcompactd()
1375 init_waitqueue_head(&pgdat->kswapd_wait); in pgdat_init_internals()
1376 init_waitqueue_head(&pgdat->pfmemalloc_wait); in pgdat_init_internals()
1379 init_waitqueue_head(&pgdat->reclaim_wait[i]); in pgdat_init_internals()
1382 lruvec_init(&pgdat->__lruvec); in pgdat_init_internals()
1388 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
1390 zone->name = zone_names[idx]; in zone_init_internals()
1391 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
1392 spin_lock_init(&zone->lock); in zone_init_internals()
1401 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
1402 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
1406 INIT_LIST_HEAD(&zone->unaccepted_pages); in zone_init_free_lists()
1414 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
1417 if (zone_idx > pgdat->nr_zones) in init_currently_empty_zone()
1418 pgdat->nr_zones = zone_idx; in init_currently_empty_zone()
1420 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
1423 "Initialising map node %d zone %lu pfns %lu -> %lu\n", in init_currently_empty_zone()
1424 pgdat->node_id, in init_currently_empty_zone()
1429 zone->initialized = 1; in init_currently_empty_zone()
1434 * Calculate the size of the zone->blockflags rounded to an unsigned long
1444 zonesize += zone_start_pfn & (pageblock_nr_pages-1); in usemap_size()
1455 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, in setup_usemap()
1456 zone->spanned_pages); in setup_usemap()
1457 zone->pageblock_flags = NULL; in setup_usemap()
1459 zone->pageblock_flags = in setup_usemap()
1462 if (!zone->pageblock_flags) in setup_usemap()
1464 usemapsize, zone->name, zone_to_nid(zone)); in setup_usemap()
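Rough sizing for the blockflags bitmap computed by usemap_size() above, assuming the common values pageblock_order = 9 (512-page pageblocks), NR_PAGEBLOCK_BITS = 4 and 64-bit longs: a pageblock-aligned 1 GiB zone of 262144 pages covers 512 pageblocks and needs 512 * 4 = 2048 bits, which rounds to 32 unsigned longs, i.e. a 256-byte allocation; the zone_start_pfn & (pageblock_nr_pages - 1) term folds an unaligned zone start into the count so a partial leading pageblock still gets its flag bits.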
1496 * is unused as pageblock_order is set at compile-time. See
1497 * include/linux/pageblock-flags.h for the values of pageblock_order based on
1508 * - init pgdat internals
1509 * - init all zones belonging to this node
1516 int nid = pgdat->node_id; in free_area_init_core_hotplug()
1522 if (pgdat->per_cpu_nodestats == &boot_nodestats) in free_area_init_core_hotplug()
1523 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); in free_area_init_core_hotplug()
1530 pgdat->nr_zones = 0; in free_area_init_core_hotplug()
1531 pgdat->kswapd_order = 0; in free_area_init_core_hotplug()
1532 pgdat->kswapd_highest_zoneidx = 0; in free_area_init_core_hotplug()
1533 pgdat->node_start_pfn = 0; in free_area_init_core_hotplug()
1534 pgdat->node_present_pages = 0; in free_area_init_core_hotplug()
1539 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); in free_area_init_core_hotplug()
1544 * When memory is hot-added, all the memory is in offline state. So in free_area_init_core_hotplug()
1549 struct zone *zone = pgdat->node_zones + z; in free_area_init_core_hotplug()
1551 zone->present_pages = 0; in free_area_init_core_hotplug()
1560 int nid = pgdat->node_id; in free_area_init_core()
1563 pgdat->per_cpu_nodestats = &boot_nodestats; in free_area_init_core()
1566 struct zone *zone = pgdat->node_zones + j; in free_area_init_core()
1567 unsigned long size = zone->spanned_pages; in free_area_init_core()
1570 * Initialize zone->managed_pages as 0, it will be reset in free_area_init_core()
1573 zone_init_internals(zone, j, nid, zone->present_pages); in free_area_init_core()
1579 init_currently_empty_zone(zone, zone->zone_start_pfn, size); in free_area_init_core()
1614 if (!pgdat->node_spanned_pages) in alloc_node_mem_map()
1617 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); in alloc_node_mem_map()
1618 offset = pgdat->node_start_pfn - start; in alloc_node_mem_map()
1625 size = (end - start) * sizeof(struct page); in alloc_node_mem_map()
1627 pgdat->node_id, false); in alloc_node_mem_map()
1630 size, pgdat->node_id); in alloc_node_mem_map()
1631 pgdat->node_mem_map = map + offset; in alloc_node_mem_map()
1634 __func__, pgdat->node_id, (unsigned long)pgdat, in alloc_node_mem_map()
1635 (unsigned long)pgdat->node_mem_map); in alloc_node_mem_map()
1639 mem_map = NODE_DATA(0)->node_mem_map; in alloc_node_mem_map()
1640 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) in alloc_node_mem_map()
1641 mem_map -= offset; in alloc_node_mem_map()
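To see what start and offset do here, take a hypothetical MAX_ORDER_NR_PAGES of 1024 and node_start_pfn = 0x1234: start = 0x1234 & ~0x3ff = 0x1000 and offset = 0x234, so the array is sized from the rounded-down boundary (keeping it MAX_ORDER-aligned for the buddy allocator), node_mem_map = map + offset still points at the struct page of the node's first real pfn, and on FLATMEM the same offset is subtracted back off the global mem_map when node 0 does not itself start on that boundary.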
1650 * get_pfn_range_for_nid - Return the start and end page frames for a node
1665 *start_pfn = -1UL; in get_pfn_range_for_nid()
1673 if (*start_pfn == -1UL) in get_pfn_range_for_nid()
1684 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); in free_area_init_node()
1688 pgdat->node_id = nid; in free_area_init_node()
1689 pgdat->node_start_pfn = start_pfn; in free_area_init_node()
1690 pgdat->per_cpu_nodestats = NULL; in free_area_init_node()
1693 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, in free_area_init_node()
1695 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); in free_area_init_node()
1716 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { in check_for_memory()
1717 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory()
1720 node_set_state(pgdat->node_id, N_HIGH_MEMORY); in check_for_memory()
1722 node_set_state(pgdat->node_id, N_NORMAL_MEMORY); in check_for_memory()
1751 * free_area_init - Initialise all pg_data_t and zone data
1780 zone = MAX_NR_ZONES - i - 1; in free_area_init()
1803 pr_info(" %-8s ", zone_names[i]); in free_area_init()
1808 pr_cont("[mem %#018Lx-%#018Lx]\n", in free_area_init()
1812 << PAGE_SHIFT) - 1); in free_area_init()
1825 * subsection-map relative to active online memory ranges to in free_area_init()
1826 * enable future "sub-section" extensions of the memory map. in free_area_init()
1830 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, in free_area_init()
1832 ((u64)end_pfn << PAGE_SHIFT) - 1); in free_area_init()
1833 subsection_map_init(start_pfn, end_pfn - start_pfn); in free_area_init()
1852 * for memory-less node because here it's not marked as N_MEMORY in free_area_init()
1855 * memory-less node. The pgdat will get fully initialized by in free_area_init()
1858 if (pgdat->node_present_pages) { in free_area_init()
1872 * node_map_pfn_alignment - determine the maximum internode alignment
1879 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
1883 * This is used to test whether pfn -> nid mapping of the chosen memory
1884 * model has fine enough granularity to avoid incorrect mapping for the
1905 * Start with a mask granular enough to pin-point to the in node_map_pfn_alignment()
1906 * start pfn and tick off bits one-by-one until it becomes in node_map_pfn_alignment()
1909 mask = ~((1 << __ffs(start)) - 1); in node_map_pfn_alignment()
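The mask widening above can be exercised in isolation. Below is a stand-alone user-space rendering of the same arithmetic for the comment's "shifted by 256MiB" case (start & -start stands in for 1 << __ffs(start)); with 4 KiB pages it prints an alignment of 65536 pages, i.e. 256 MiB.

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;		/* 4 KiB pages assumed */
	unsigned long last_end = ((1UL << 30) + (256UL << 20)) >> page_shift;	/* node 0 ends at 1 GiB + 256 MiB */
	unsigned long start    = ((1UL << 30) + (256UL << 20)) >> page_shift;	/* node 1 starts right there */

	/* lowest set bit of start, i.e. the 1 << __ffs(start) term */
	unsigned long mask = ~((start & -start) - 1);

	/* widen the mask while it still separates node 0's end from node 1's start */
	while (mask && last_end <= (start & (mask << 1)))
		mask <<= 1;

	/* ~mask + 1 converts the accumulated mask into an alignment in pages */
	printf("alignment: %lu pages = %lu MiB\n",
	       ~mask + 1, ((~mask + 1) << page_shift) >> 20);
	return 0;
}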
1933 /* Free a large naturally-aligned chunk if possible */ in deferred_free_pages()
1970 unsigned long nr_pages = end_pfn - pfn; in deferred_init_pages()
1980 * This function is meant to pre-load the iterator for the zone init from
2059 deferred_free_pages(spfn, t - spfn); in deferred_init_maxorder()
2098 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in deferred_init_memmap()
2111 first_init_pfn = pgdat->first_deferred_pfn; in deferred_init_memmap()
2119 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); in deferred_init_memmap()
2120 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); in deferred_init_memmap()
2121 pgdat->first_deferred_pfn = ULONG_MAX; in deferred_init_memmap()
2126 * pre-grown prior to start of deferred page initialization. in deferred_init_memmap()
2131 zone = pgdat->node_zones + pgdat->nr_zones - 1; in deferred_init_memmap()
2141 .size = first_init_pfn - spfn, in deferred_init_memmap()
2152 WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
2155 pgdat->node_id, jiffies_to_msecs(jiffies - start)); in deferred_init_memmap()
2175 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2176 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; in deferred_grow_zone()
2191 if (first_deferred_pfn != pgdat->first_deferred_pfn) { in deferred_grow_zone()
2199 pgdat->first_deferred_pfn = ULONG_MAX; in deferred_grow_zone()
2226 pgdat->first_deferred_pfn = spfn; in deferred_grow_zone()
2243 } while (++p, --i); in init_cma_reserved_pageblock()
2252 page_zone(page)->cma_pages += pageblock_nr_pages; in init_cma_reserved_pageblock()
2258 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
2275 zone->contiguous = true; in set_zone_contiguous()
2297 * on-demand struct page initialization. in page_alloc_init_late()
2331 * Because 32-bit systems cannot have large physical memory, where this scaling
2342 * - it is assumed that the hash table must contain an exact power-of-2
2344 * - limit is the number of hash buckets, not the total allocation size
2384 numentries >>= (scale - PAGE_SHIFT); in alloc_large_system_hash()
2386 numentries <<= (PAGE_SHIFT - scale); in alloc_large_system_hash()
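The two shifts above convert an amount of memory in pages into a bucket count at one entry per 2^scale bytes. For example, with PAGE_SHIFT = 12 and scale = 17 (one entry per 128 KiB of memory), a system whose low memory comes to 1048576 pages (4 GiB) gets numentries = 1048576 >> (17 - 12) = 32768, already the exact power of two the table requires; if scale were smaller than PAGE_SHIFT the second line would scale up instead.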
2424 * If bucketsize is not a power-of-two, we may free in alloc_large_system_hash()
2431 } while (!table && size > PAGE_SIZE && --log2qty); in alloc_large_system_hash()
2437 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, in alloc_large_system_hash()
2443 *_hash_mask = (1 << log2qty) - 1; in alloc_large_system_hash()
2520 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " in mem_debugging_and_hardening_init()
2542 …pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running … in mem_debugging_and_hardening_init()
2563 /* Report memory auto-initialization states for this boot. */
2581 pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", in report_meminit()
2585 pr_info("mem auto-init: clearing system memory may take some time...\n"); in report_meminit()
2594 codesize = _etext - _stext; in mem_init_print_info()
2595 datasize = _edata - _sdata; in mem_init_print_info()
2596 rosize = __end_rodata - __start_rodata; in mem_init_print_info()
2597 bss_size = __bss_stop - __bss_start; in mem_init_print_info()
2598 init_data_size = __init_end - __init_begin; in mem_init_print_info()
2599 init_code_size = _einittext - _sinittext; in mem_init_print_info()
2611 size -= adj; \ in mem_init_print_info()
2623 …(%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" in mem_init_print_info()
2631 K(physpages - totalram_pages() - totalcma_pages), in mem_init_print_info()
2674 /* Should be run before the first non-init thread is created */ in mm_core_init()