Lines Matching +full:boot +full:- +full:pages

1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm_init.c - Memory initialisation verification and debugging
18 #include <linux/page-isolation.h>
62 zonelist = &pgdat->node_zonelists[listid]; in mminit_verify_zonelist()
63 zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
70 zone->name); in mminit_verify_zonelist()
74 pr_cont("%d:%s ", zone_to_nid(zone), zone->name); in mminit_verify_zonelist()
86 width = shift - NR_NON_PAGEFLAG_BITS; in mminit_verify_pageflags_layout()
112 "Node/Zone ID: %lu -> %lu\n", in mminit_verify_pageflags_layout()
116 "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n", in mminit_verify_pageflags_layout()
128 shift -= SECTIONS_WIDTH; in mminit_verify_pageflags_layout()
132 shift -= NODES_WIDTH; in mminit_verify_pageflags_layout()
136 shift -= ZONES_WIDTH; in mminit_verify_pageflags_layout()
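
The mminit_verify_pageflags_layout() lines above carve the section, node and zone fields out of the top of page->flags by repeatedly subtracting each field's width from a running shift. A minimal userspace sketch of that bookkeeping; the widths below are invented example values, not the kernel's configuration-dependent ones.

#include <stdio.h>

#define BITS_PER_LONG	64
#define SECTIONS_WIDTH	0	/* assumed: no section field in page->flags */
#define NODES_WIDTH	10	/* assumed */
#define ZONES_WIDTH	3	/* assumed */

int main(void)
{
	unsigned long shift = BITS_PER_LONG;

	shift -= SECTIONS_WIDTH;	/* section field sits at the very top */
	printf("sections: %2d bits, shift %lu\n", SECTIONS_WIDTH, shift);
	shift -= NODES_WIDTH;		/* node id below it */
	printf("nodes:    %2d bits, shift %lu\n", NODES_WIDTH, shift);
	shift -= ZONES_WIDTH;		/* zone index below that */
	printf("zones:    %2d bits, shift %lu\n", ZONES_WIDTH, shift);
	printf("%lu low bits remain for page flags\n", shift);
	return 0;
}
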
213 return -ENOMEM; in mm_sysfs_init()
242 return -EINVAL; in cmdline_parse_core()
294 * Sum pages in active regions for movable zone.
304 unsigned long pages = end_pfn - start_pfn; in early_calculate_totalpages() local
306 totalpages += pages; in early_calculate_totalpages()
307 if (pages) in early_calculate_totalpages()
314 * This finds a zone that can be used for ZONE_MOVABLE pages. The
321 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { in find_usable_zone_for_movable()
330 VM_BUG_ON(zone_index == -1); in find_usable_zone_for_movable()
437 * Round-up so that ZONE_MOVABLE is at least as large as what in find_zone_movable_pfns_for_nodes()
443 corepages = totalpages - required_movablecore; in find_zone_movable_pfns_for_nodes()
491 - start_pfn; in find_zone_movable_pfns_for_nodes()
493 kernelcore_remaining -= min(kernel_pages, in find_zone_movable_pfns_for_nodes()
495 required_kernelcore -= min(kernel_pages, in find_zone_movable_pfns_for_nodes()
515 * start_pfn->end_pfn. Calculate size_pages as the in find_zone_movable_pfns_for_nodes()
516 * number of pages used as kernelcore in find_zone_movable_pfns_for_nodes()
518 size_pages = end_pfn - start_pfn; in find_zone_movable_pfns_for_nodes()
528 required_kernelcore -= min(required_kernelcore, in find_zone_movable_pfns_for_nodes()
530 kernelcore_remaining -= size_pages; in find_zone_movable_pfns_for_nodes()
542 usable_nodes--; in find_zone_movable_pfns_for_nodes()
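
The find_zone_movable_pfns_for_nodes() lines above account kernelcore against each node with min() clamping and decrement required_kernelcore as they go. A heavily reduced userspace sketch of that bookkeeping, with invented node sizes and without the kernel's restart/redistribution loop:

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long node_pages[] = { 0x40000, 0x10000 };	/* pfns per node, made up */
	int usable_nodes = 2;
	unsigned long required_kernelcore = 0x30000;
	/* Each usable node gets an even share of the kernelcore request. */
	unsigned long kernelcore_node = required_kernelcore / usable_nodes;

	for (int nid = 0; nid < usable_nodes; nid++) {
		/* A node can supply at most its own size as kernelcore. */
		unsigned long kernel_pages = MIN(kernelcore_node, node_pages[nid]);
		/* Whatever is left of the node becomes ZONE_MOVABLE. */
		unsigned long movable_pages = node_pages[nid] - kernel_pages;

		required_kernelcore -= MIN(required_kernelcore, kernel_pages);
		printf("node %d: %#lx kernelcore pages, %#lx movable pages\n",
		       nid, kernel_pages, movable_pages);
	}
	/* The kernel restarts and redistributes any unsatisfied remainder. */
	printf("kernelcore still unsatisfied: %#lx pages\n", required_kernelcore);
	return 0;
}
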
570 atomic_set(&page->_mapcount, -1); in __init_single_page()
574 INIT_LIST_HEAD(&page->lru); in __init_single_page()
605 if (state->last_start <= pfn && pfn < state->last_end) in __early_pfn_to_nid()
606 return state->last_nid; in __early_pfn_to_nid()
610 state->last_start = start_pfn; in __early_pfn_to_nid()
611 state->last_end = end_pfn; in __early_pfn_to_nid()
612 state->last_nid = nid; in __early_pfn_to_nid()
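
__early_pfn_to_nid() keeps a one-entry cache of the last [start, end) range and its node id so repeated lookups for nearby pfns skip the memblock walk. A minimal userspace sketch of that pattern; lookup_range_nid() and the hard-coded node layout are stand-ins for illustration, not kernel code.

#include <stdio.h>

struct pfn_nid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

/* Hypothetical slow path: pretend node 0 owns pfns below 0x80000, node 1 the rest. */
static int lookup_range_nid(unsigned long pfn, unsigned long *start, unsigned long *end)
{
	if (pfn < 0x80000) {
		*start = 0;
		*end = 0x80000;
		return 0;
	}
	*start = 0x80000;
	*end = 0x100000;
	return 1;
}

static int cached_pfn_to_nid(unsigned long pfn, struct pfn_nid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	/* Cache hit: the pfn falls inside the last range we looked up. */
	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = lookup_range_nid(pfn, &start_pfn, &end_pfn);
	state->last_start = start_pfn;	/* refill the one-entry cache */
	state->last_end = end_pfn;
	state->last_nid = nid;
	return nid;
}

int main(void)
{
	/* Start with an empty cache (last_start > last_end can never hit). */
	struct pfn_nid_cache state = { .last_start = 1, .last_end = 0, .last_nid = -1 };

	printf("pfn 0x100   -> nid %d\n", cached_pfn_to_nid(0x100, &state));
	printf("pfn 0x101   -> nid %d (cache hit)\n", cached_pfn_to_nid(0x101, &state));
	printf("pfn 0x90000 -> nid %d (cache refill)\n", cached_pfn_to_nid(0x90000, &state));
	return 0;
}
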
655 pgdat->first_deferred_pfn = ULONG_MAX; in pgdat_set_deferred_range()
661 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) in early_page_initialised()
669 * later in the boot cycle when it can be parallelised.
679 /* Always populate low zones for address-constrained allocations */ in defer_init()
683 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX) in defer_init()
688 * No need to protect because called very early in boot before smp_init. in defer_init()
696 * We start only with one section of pages, more pages are added as in defer_init()
697 * needed until the rest of deferred pages are initialized. in defer_init()
701 (pfn & (PAGES_PER_SECTION - 1)) == 0) { in defer_init()
702 NODE_DATA(nid)->first_deferred_pfn = pfn; in defer_init()
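
defer_init() above initialises only the first section of a node eagerly and records the cutoff in first_deferred_pfn. A userspace sketch of that cutoff check, with an assumed PAGES_PER_SECTION value and the low-zone and node-end checks omitted:

#include <stdio.h>
#include <stdbool.h>

#define PAGES_PER_SECTION	(1UL << 15)	/* assumed section size in pages */

static unsigned long first_deferred_pfn = ~0UL;
static unsigned long nr_initialised;

/* Return true once a whole section was initialised and a section boundary is hit. */
static bool defer_init(unsigned long pfn)
{
	if (first_deferred_pfn != ~0UL)		/* cutoff already decided */
		return true;

	nr_initialised++;
	if (nr_initialised > PAGES_PER_SECTION &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		first_deferred_pfn = pfn;	/* deferred init starts here */
		return true;
	}
	return false;
}

int main(void)
{
	unsigned long pfn, end_pfn = 4 * PAGES_PER_SECTION;

	for (pfn = 0; pfn < end_pfn; pfn++)
		if (defer_init(pfn))
			break;

	printf("eager init stops at pfn %#lx; later pfns are left for deferred init\n",
	       first_deferred_pfn);
	return 0;
}
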
719 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page()
748 * Initialised pages do not have PageReserved set. This function is
750 * marks the pages PageReserved. The remaining valid pages are later
798 * Only struct pages that correspond to ranges defined by memblock.memory
802 * But, there could be struct pages that correspond to holes in
804 * - physical memory bank size is not necessarily the exact multiple of the
806 * - early reserved memory may not be listed in memblock.memory
807 * - non-memory regions covered by the contiguous flatmem mapping
808 * - memory layouts defined with memmap= kernel parameter may not align
811 * Explicitly initialize those struct pages so that:
812 * - PG_reserved is set
813 * - zone and node links point to zone and node that span the page if the
815 * - zone and node links point to adjacent zone/node if the hole falls on
816 * the zone boundary; the pages in such holes will be prepended to the
817 * zone/node above the hole except for the trailing pages in the last
829 pfn = pageblock_end_pfn(pfn) - 1; in init_unavailable_range()
838 pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n", in init_unavailable_range()
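
The pfn = pageblock_end_pfn(pfn) - 1 line above lets the loop skip whole pageblocks of unavailable pfns at once. A small sketch of that rounding, assuming an order-9 pageblock (2 MiB with 4 KiB pages); the bitmask form below is equivalent to rounding pfn + 1 up to the next pageblock boundary.

#include <stdio.h>

#define pageblock_nr_pages	(1UL << 9)	/* assumed pageblock order 9 */

/* First pfn of the next pageblock (round pfn + 1 up to a block boundary). */
static unsigned long pageblock_end_pfn(unsigned long pfn)
{
	return (pfn + pageblock_nr_pages) & ~(pageblock_nr_pages - 1);
}

int main(void)
{
	unsigned long hole_pfn = 0x1234;

	/* The init_unavailable_range() loop jumps to the block's last pfn. */
	printf("pfn %#lx: block ends at %#lx, skip ahead to %#lx\n",
	       hole_pfn, pageblock_end_pfn(hole_pfn),
	       pageblock_end_pfn(hole_pfn) - 1);
	return 0;
}
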
843 * Initially all pages are reserved - free ones are freed
844 * up by memblock_free_all() once the early boot process is
845 * done. Non-atomic initialization, single-pass.
859 if (highest_memmap_pfn < end_pfn - 1) in memmap_init_range()
860 highest_memmap_pfn = end_pfn - 1; in memmap_init_range()
865 * memory. We limit the total number of pages to initialize to just in memmap_init_range()
874 if (start_pfn == altmap->base_pfn) in memmap_init_range()
875 start_pfn += altmap->reserve; in memmap_init_range()
876 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); in memmap_init_range()
882 * There can be holes in boot-time mem_map[]s handed to this in memmap_init_range()
908 * over the place during system boot. in memmap_init_range()
923 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone_range()
924 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone_range()
933 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, in memmap_init_zone_range()
952 struct zone *zone = node->node_zones + j; in memmap_init()
967 * Append the pages in this hole to the highest zone in the last in memmap_init()
991 * We can use the non-atomic __set_bit operation for setting in __init_zone_device_page()
992 * the flag as we are still initializing the pages. in __init_zone_device_page()
997 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer in __init_zone_device_page()
999 * ever freed or placed on a driver-private list. in __init_zone_device_page()
1001 page->pgmap = pgmap; in __init_zone_device_page()
1002 page->zone_device_data = NULL; in __init_zone_device_page()
1008 * the address space during boot when many long-lived in __init_zone_device_page()
1020 * ZONE_DEVICE pages are released directly to the driver page allocator in __init_zone_device_page()
1023 if (pgmap->type == MEMORY_DEVICE_PRIVATE || in __init_zone_device_page()
1024 pgmap->type == MEMORY_DEVICE_COHERENT) in __init_zone_device_page()
1029 * With compound page geometry and when struct pages are stored in RAM, most
1030 * tail pages are reused. Consequently, the amount of unique struct pages to
1031 * initialize is a lot smaller than the total amount of struct pages being
1033 * of how the sparse_vmemmap internals handle compound pages in the lack
1052 unsigned int order = pgmap->vmemmap_shift; in memmap_init_compound()
1059 prep_compound_tail(head, pfn - head_pfn); in memmap_init_compound()
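
memmap_init_compound() marks the first page of each 2^vmemmap_shift block as the compound head and every following page as a tail recording its offset from that head. A toy userspace sketch of that split; struct fake_page and its fields are invented for illustration only.

#include <stdio.h>

struct fake_page {
	int is_head;
	unsigned long head_offset;	/* distance from the compound head */
};

static void init_compound_block(struct fake_page *pages, unsigned long head_pfn,
				unsigned int order)
{
	unsigned long nr = 1UL << order;

	pages[0].is_head = 1;		/* analogue of marking the compound head */
	pages[0].head_offset = 0;
	for (unsigned long pfn = head_pfn + 1; pfn < head_pfn + nr; pfn++) {
		/* analogue of prep_compound_tail(head, pfn - head_pfn) */
		pages[pfn - head_pfn].is_head = 0;
		pages[pfn - head_pfn].head_offset = pfn - head_pfn;
	}
}

int main(void)
{
	struct fake_page pages[8];

	init_compound_block(pages, 0x1000, 3);	/* one order-3 compound block */
	for (int i = 0; i < 8; i++)
		printf("page %d: %s, offset %lu\n", i,
		       pages[i].is_head ? "head" : "tail", pages[i].head_offset);
	return 0;
}
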
1078 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
1083 int nid = pgdat->node_id; in memmap_init_zone_device()
1090 * of the pages reserved for the memmap, so we can just jump to in memmap_init_zone_device()
1091 * the end of that region and start processing the device pages. in memmap_init_zone_device()
1094 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); in memmap_init_zone_device()
1095 nr_pages = end_pfn - start_pfn; in memmap_init_zone_device()
1110 pr_debug("%s initialised %lu pages in %ums\n", __func__, in memmap_init_zone_device()
1111 nr_pages, jiffies_to_msecs(jiffies - start)); in memmap_init_zone_device()
1159 unsigned long nr_absent = range_end_pfn - range_start_pfn; in __absent_pages_in_range()
1166 nr_absent -= end_pfn - start_pfn; in __absent_pages_in_range()
1172 * absent_pages_in_range - Return number of page frames in holes within a range
1176 * Return: the number of page frames in memory holes within a range.
1192 /* zone is empty, we don't have any absent pages */ in zone_absent_pages_in_node()
1200 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages in zone_absent_pages_in_node()
1215 nr_absent += end_pfn - start_pfn; in zone_absent_pages_in_node()
1219 nr_absent += end_pfn - start_pfn; in zone_absent_pages_in_node()
1227 * Return the number of pages a zone spans in a node, including holes
1228 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
1246 /* Check that this node has pages within the zone's required range */ in zone_spanned_pages_in_node()
1254 /* Return the spanned pages */ in zone_spanned_pages_in_node()
1255 return *zone_end_pfn - *zone_start_pfn; in zone_spanned_pages_in_node()
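
__absent_pages_in_range() starts by assuming the whole range is absent and subtracts every clamped intersection with present memory; zone_spanned_pages_in_node() then gives present_pages = spanned - absent. A userspace sketch of that accounting with invented memory ranges:

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

struct range { unsigned long start, end; };

int main(void)
{
	/* Two present-memory ranges with a hole between them (made up). */
	struct range memory[] = { { 0x000, 0x400 }, { 0x600, 0x800 } };
	int nr_ranges = sizeof(memory) / sizeof(memory[0]);
	unsigned long zone_start = 0x100, zone_end = 0x700;
	unsigned long spanned = zone_end - zone_start;
	unsigned long nr_absent = spanned;	/* assume everything is a hole */

	for (int i = 0; i < nr_ranges; i++) {
		unsigned long start = MAX(memory[i].start, zone_start);
		unsigned long end = MIN(memory[i].end, zone_end);

		if (start < end)
			nr_absent -= end - start;	/* this much is present */
	}

	printf("spanned %#lx, absent %#lx, present %#lx\n",
	       spanned, nr_absent, spanned - nr_absent);
	return 0;
}
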
1262 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) { in reset_memoryless_node_totalpages()
1263 z->zone_start_pfn = 0; in reset_memoryless_node_totalpages()
1264 z->spanned_pages = 0; in reset_memoryless_node_totalpages()
1265 z->present_pages = 0; in reset_memoryless_node_totalpages()
1267 z->present_early_pages = 0; in reset_memoryless_node_totalpages()
1271 pgdat->node_spanned_pages = 0; in reset_memoryless_node_totalpages()
1272 pgdat->node_present_pages = 0; in reset_memoryless_node_totalpages()
1273 pr_debug("On node %d totalpages: 0\n", pgdat->node_id); in reset_memoryless_node_totalpages()
1290 nr_all_pages += end_pfn - start_pfn; in calc_nr_kernel_pages()
1295 nr_kernel_pages += end_pfn - start_pfn; in calc_nr_kernel_pages()
1308 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages()
1313 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, in calculate_node_totalpages()
1318 absent = zone_absent_pages_in_node(pgdat->node_id, i, in calculate_node_totalpages()
1322 real_size = spanned - absent; in calculate_node_totalpages()
1325 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
1327 zone->zone_start_pfn = 0; in calculate_node_totalpages()
1328 zone->spanned_pages = spanned; in calculate_node_totalpages()
1329 zone->present_pages = real_size; in calculate_node_totalpages()
1331 zone->present_early_pages = real_size; in calculate_node_totalpages()
1338 pgdat->node_spanned_pages = totalpages; in calculate_node_totalpages()
1339 pgdat->node_present_pages = realtotalpages; in calculate_node_totalpages()
1340 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); in calculate_node_totalpages()
1346 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; in pgdat_init_split_queue()
1348 spin_lock_init(&ds_queue->split_queue_lock); in pgdat_init_split_queue()
1349 INIT_LIST_HEAD(&ds_queue->split_queue); in pgdat_init_split_queue()
1350 ds_queue->split_queue_len = 0; in pgdat_init_split_queue()
1359 init_waitqueue_head(&pgdat->kcompactd_wait); in pgdat_init_kcompactd()
1375 init_waitqueue_head(&pgdat->kswapd_wait); in pgdat_init_internals()
1376 init_waitqueue_head(&pgdat->pfmemalloc_wait); in pgdat_init_internals()
1379 init_waitqueue_head(&pgdat->reclaim_wait[i]); in pgdat_init_internals()
1382 lruvec_init(&pgdat->__lruvec); in pgdat_init_internals()
1388 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
1390 zone->name = zone_names[idx]; in zone_init_internals()
1391 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
1392 spin_lock_init(&zone->lock); in zone_init_internals()
1401 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
1402 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
1406 INIT_LIST_HEAD(&zone->unaccepted_pages); in zone_init_free_lists()
1414 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
1417 if (zone_idx > pgdat->nr_zones) in init_currently_empty_zone()
1418 pgdat->nr_zones = zone_idx; in init_currently_empty_zone()
1420 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
1423 "Initialising map node %d zone %lu pfns %lu -> %lu\n", in init_currently_empty_zone()
1424 pgdat->node_id, in init_currently_empty_zone()
1429 zone->initialized = 1; in init_currently_empty_zone()
1434 * Calculate the size of the zone->pageblock_flags bitmap, rounded to an unsigned long
1444 zonesize += zone_start_pfn & (pageblock_nr_pages-1); in usemap_size()
1455 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, in setup_usemap()
1456 zone->spanned_pages); in setup_usemap()
1457 zone->pageblock_flags = NULL; in setup_usemap()
1459 zone->pageblock_flags = in setup_usemap()
1462 if (!zone->pageblock_flags) in setup_usemap()
1464 usemapsize, zone->name, zone_to_nid(zone)); in setup_usemap()
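
usemap_size() pads the zone out to pageblock boundaries, allots NR_PAGEBLOCK_BITS bits per pageblock and rounds the bitmap up to whole unsigned longs. A worked userspace version of that arithmetic, with assumed constants (order-9 pageblocks, 4 bits per block, 64-bit longs) and a stand-in function name:

#include <stdio.h>

#define pageblock_order		9
#define pageblock_nr_pages	(1UL << pageblock_order)
#define NR_PAGEBLOCK_BITS	4
#define BITS_PER_LONG		64

static unsigned long roundup_ul(unsigned long x, unsigned long to)
{
	return ((x + to - 1) / to) * to;
}

static unsigned long usemap_bytes(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	/* Pad so the bitmap also covers a partial pageblock at the zone start. */
	zonesize += zone_start_pfn & (pageblock_nr_pages - 1);
	usemapsize = roundup_ul(zonesize, pageblock_nr_pages);
	usemapsize >>= pageblock_order;		/* number of pageblocks */
	usemapsize *= NR_PAGEBLOCK_BITS;	/* bits needed */
	usemapsize = roundup_ul(usemapsize, BITS_PER_LONG);

	return usemapsize / 8;			/* bytes */
}

int main(void)
{
	/* A zone starting mid-pageblock, sizes in pfns (values made up). */
	printf("usemap needs %lu bytes\n", usemap_bytes(0x100100, 0x40000));
	return 0;
}
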
1473 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
1488 * This value may be variable depending on boot parameters on powerpc. in set_pageblock_order()
1496 * is unused as pageblock_order is set at compile-time. See
1497 * include/linux/pageblock-flags.h for the values of pageblock_order based on
1508 * - init pgdat internals
1509 * - init all zones belonging to this node
1516 int nid = pgdat->node_id; in free_area_init_core_hotplug()
1522 if (pgdat->per_cpu_nodestats == &boot_nodestats) in free_area_init_core_hotplug()
1523 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); in free_area_init_core_hotplug()
1530 pgdat->nr_zones = 0; in free_area_init_core_hotplug()
1531 pgdat->kswapd_order = 0; in free_area_init_core_hotplug()
1532 pgdat->kswapd_highest_zoneidx = 0; in free_area_init_core_hotplug()
1533 pgdat->node_start_pfn = 0; in free_area_init_core_hotplug()
1534 pgdat->node_present_pages = 0; in free_area_init_core_hotplug()
1539 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); in free_area_init_core_hotplug()
1544 * When memory is hot-added, all the memory is in offline state. So in free_area_init_core_hotplug()
1549 struct zone *zone = pgdat->node_zones + z; in free_area_init_core_hotplug()
1551 zone->present_pages = 0; in free_area_init_core_hotplug()
1560 int nid = pgdat->node_id; in free_area_init_core()
1563 pgdat->per_cpu_nodestats = &boot_nodestats; in free_area_init_core()
1566 struct zone *zone = pgdat->node_zones + j; in free_area_init_core()
1567 unsigned long size = zone->spanned_pages; in free_area_init_core()
1570 * Initialize zone->managed_pages as 0 , it will be reset in free_area_init_core()
1571 * when memblock allocator frees pages into buddy system. in free_area_init_core()
1573 zone_init_internals(zone, j, nid, zone->present_pages); in free_area_init_core()
1579 init_currently_empty_zone(zone, zone->zone_start_pfn, size); in free_area_init_core()
1614 if (!pgdat->node_spanned_pages) in alloc_node_mem_map()
1617 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); in alloc_node_mem_map()
1618 offset = pgdat->node_start_pfn - start; in alloc_node_mem_map()
1625 size = (end - start) * sizeof(struct page); in alloc_node_mem_map()
1627 pgdat->node_id, false); in alloc_node_mem_map()
1630 size, pgdat->node_id); in alloc_node_mem_map()
1631 pgdat->node_mem_map = map + offset; in alloc_node_mem_map()
1634 __func__, pgdat->node_id, (unsigned long)pgdat, in alloc_node_mem_map()
1635 (unsigned long)pgdat->node_mem_map); in alloc_node_mem_map()
1639 mem_map = NODE_DATA(0)->node_mem_map; in alloc_node_mem_map()
1640 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) in alloc_node_mem_map()
1641 mem_map -= offset; in alloc_node_mem_map()
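
alloc_node_mem_map() rounds the node's pfn range out to MAX_ORDER_NR_PAGES on both ends and then points node_mem_map 'offset' entries into the allocation so the array lines up with the rounded-down start pfn. A sketch of just that arithmetic; the constants, pfn values and struct page size are assumptions for illustration.

#include <stdio.h>

#define MAX_ORDER_NR_PAGES	(1UL << 10)	/* assumed */
#define STRUCT_PAGE_SIZE	64UL		/* assumed sizeof(struct page) */

int main(void)
{
	unsigned long node_start_pfn = 0x10180, node_end_pfn = 0x20040;	/* made up */

	unsigned long start = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	unsigned long offset = node_start_pfn - start;
	unsigned long end = (node_end_pfn + MAX_ORDER_NR_PAGES - 1) &
			    ~(MAX_ORDER_NR_PAGES - 1);
	unsigned long size = (end - start) * STRUCT_PAGE_SIZE;

	printf("memmap covers pfns [%#lx, %#lx): %lu bytes, node_mem_map = map + %lu\n",
	       start, end, size, offset);
	return 0;
}
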
1650 * get_pfn_range_for_nid - Return the start and end page frames for a node
1665 *start_pfn = -1UL; in get_pfn_range_for_nid()
1673 if (*start_pfn == -1UL) in get_pfn_range_for_nid()
1684 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); in free_area_init_node()
1688 pgdat->node_id = nid; in free_area_init_node()
1689 pgdat->node_start_pfn = start_pfn; in free_area_init_node()
1690 pgdat->per_cpu_nodestats = NULL; in free_area_init_node()
1693 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, in free_area_init_node()
1695 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); in free_area_init_node()
1716 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { in check_for_memory()
1717 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory()
1720 node_set_state(pgdat->node_id, N_HIGH_MEMORY); in check_for_memory()
1722 node_set_state(pgdat->node_id, N_NORMAL_MEMORY); in check_for_memory()
1751 * free_area_init - Initialise all pg_data_t and zone data
1759 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
1780 zone = MAX_NR_ZONES - i - 1; in free_area_init()
1803 pr_info(" %-8s ", zone_names[i]); in free_area_init()
1808 pr_cont("[mem %#018Lx-%#018Lx]\n", in free_area_init()
1812 << PAGE_SHIFT) - 1); in free_area_init()
1825 * subsection-map relative to active online memory ranges to in free_area_init()
1826 * enable future "sub-section" extensions of the memory map. in free_area_init()
1830 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, in free_area_init()
1832 ((u64)end_pfn << PAGE_SHIFT) - 1); in free_area_init()
1833 subsection_map_init(start_pfn, end_pfn - start_pfn); in free_area_init()
1852 * for memory-less node because here it's not marked as N_MEMORY in free_area_init()
1855 * memory-less node. The pgdat will get fully initialized by in free_area_init()
1858 if (pgdat->node_present_pages) { in free_area_init()
1872 * node_map_pfn_alignment - determine the maximum internode alignment
1879 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
1883 * This is used to test whether pfn -> nid mapping of the chosen memory
1905 * Start with a mask granular enough to pin-point to the in node_map_pfn_alignment()
1906 * start pfn and tick off bits one-by-one until it becomes in node_map_pfn_alignment()
1909 mask = ~((1 << __ffs(start)) - 1); in node_map_pfn_alignment()
1917 /* convert mask to number of pages */ in node_map_pfn_alignment()
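
node_map_pfn_alignment() builds its mask from the lowest set bit of a node's start pfn and finally converts the accumulated mask back into a page count. A sketch of those two bit tricks with a made-up start pfn; __builtin_ctzl() is a GCC-style stand-in for the kernel's __ffs().

#include <stdio.h>

static unsigned long __ffs_ul(unsigned long x)
{
	return (unsigned long)__builtin_ctzl(x);	/* index of lowest set bit */
}

int main(void)
{
	unsigned long start = 0x240000;	/* a node's start pfn, made up */
	/* Keep the lowest set bit of 'start' and everything above it. */
	unsigned long mask = ~((1UL << __ffs_ul(start)) - 1);

	printf("start %#lx -> mask %#lx\n", start, mask);
	/* Convert the mask back into an alignment expressed in pages. */
	printf("mask %#lx -> alignment of %lu pages\n", mask, ~mask + 1);
	return 0;
}
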
1933 /* Free a large naturally-aligned chunk if possible */ in deferred_free_pages()
1962 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
1964 * Return number of pages initialized.
1970 unsigned long nr_pages = end_pfn - pfn; in deferred_init_pages()
1980 * This function is meant to pre-load the iterator for the zone init from
2015 * Initialize and free pages. We do it in two loops: first we initialize
2017 * freeing pages we can access pages that are ahead (computing buddy
2049 /* Reset values and now loop through freeing pages as needed */ in deferred_init_maxorder()
2059 deferred_free_pages(spfn, t - spfn); in deferred_init_maxorder()
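
deferred_init_maxorder() works in two passes so that the free path can safely look at struct pages ahead of the one being freed: first initialise a MAX_ORDER-sized chunk, then free it. A toy userspace illustration of that ordering; the functions and chunk size are stand-ins, not kernel code.

#include <stdio.h>

#define CHUNK	8	/* stand-in for a MAX_PAGE_ORDER block of pfns */

static int initialised[CHUNK];

static void init_one(int i)
{
	initialised[i] = 1;
}

static void free_one(int i)
{
	/* Safe to look at the buddy: the whole chunk was initialised in pass 1. */
	int buddy = i ^ 1;

	printf("free %d (buddy %d initialised=%d)\n", i, buddy, initialised[buddy]);
}

int main(void)
{
	int i;

	for (i = 0; i < CHUNK; i++)	/* pass 1: initialise */
		init_one(i);
	for (i = 0; i < CHUNK; i++)	/* pass 2: free */
		free_one(i);
	return 0;
}
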
2079 * Initialize and free pages in MAX_PAGE_ORDER sized increments so that in deferred_init_memmap_chunk()
2098 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in deferred_init_memmap()
2111 first_init_pfn = pgdat->first_deferred_pfn; in deferred_init_memmap()
2119 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); in deferred_init_memmap()
2120 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); in deferred_init_memmap()
2121 pgdat->first_deferred_pfn = ULONG_MAX; in deferred_init_memmap()
2125 * interrupt thread must allocate this early in boot, zone must be in deferred_init_memmap()
2126 * pre-grown prior to start of deferred page initialization. in deferred_init_memmap()
2131 zone = pgdat->node_zones + pgdat->nr_zones - 1; in deferred_init_memmap()
2141 .size = first_init_pfn - spfn, in deferred_init_memmap()
2152 WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
2154 pr_info("node %d deferred pages initialised in %ums\n", in deferred_init_memmap()
2155 pgdat->node_id, jiffies_to_msecs(jiffies - start)); in deferred_init_memmap()
2162 * If this zone has deferred pages, try to grow it by initializing enough
2163 * deferred pages to satisfy the allocation specified by order, rounded up to
2165 * of SECTION_SIZE bytes by initializing struct pages in increments of
2170 * enough pages to satisfy the allocation.
2175 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2176 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; in deferred_grow_zone()
2181 /* Only the last zone may have deferred pages */ in deferred_grow_zone()
2189 * true, as there might be enough pages already. in deferred_grow_zone()
2191 if (first_deferred_pfn != pgdat->first_deferred_pfn) { in deferred_grow_zone()
2199 pgdat->first_deferred_pfn = ULONG_MAX; in deferred_grow_zone()
2206 * Initialize and free pages in MAX_PAGE_ORDER sized increments so in deferred_grow_zone()
2226 pgdat->first_deferred_pfn = spfn; in deferred_grow_zone()
2243 } while (++p, --i); in init_cma_reserved_pageblock()
2247 /* pages were reserved and not allocated */ in init_cma_reserved_pageblock()
2252 page_zone(page)->cma_pages += pageblock_nr_pages; in init_cma_reserved_pageblock()
2258 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
2275 zone->contiguous = true; in set_zone_contiguous()
2296 * We initialized the rest of the deferred pages. Permanently disable in page_alloc_init_late()
2297 * on-demand struct page initialization. in page_alloc_init_late()
2301 /* Reinit limits that are based on free pages after the kernel is up */ in page_alloc_init_late()
2318 /* Initialize page ext after all struct pages are initialized. */ in page_alloc_init_late()
2331 * Because 32-bit systems cannot have large physical memory, where this scaling
2342 * - it is assumed that the hash table must contain an exact power-of-2
2344 * - limit is the number of hash buckets, not the total allocation size
2384 numentries >>= (scale - PAGE_SHIFT); in alloc_large_system_hash()
2386 numentries <<= (PAGE_SHIFT - scale); in alloc_large_system_hash()
2424 * If bucketsize is not a power-of-two, we may free in alloc_large_system_hash()
2425 * some pages at the end of hash table which in alloc_large_system_hash()
2431 } while (!table && size > PAGE_SIZE && --log2qty); in alloc_large_system_hash()
2437 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, in alloc_large_system_hash()
2443 *_hash_mask = (1 << log2qty) - 1; in alloc_large_system_hash()
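
alloc_large_system_hash() scales the bucket count from the amount of memory ('scale' bytes of memory per bucket), rounds it to a power of two and hands back (1 << log2qty) - 1 as the hash mask. A userspace sketch of that sizing arithmetic with made-up inputs:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long nr_pages = 1UL << 20;	/* pretend 4 GiB of 4K pages */
	unsigned long bucketsize = 64;		/* bytes per bucket, made up */
	unsigned int scale = 17;		/* one bucket per 128 KiB of memory */
	unsigned long numentries = nr_pages;
	unsigned int log2qty = 0;

	if (scale > PAGE_SHIFT)
		numentries >>= (scale - PAGE_SHIFT);
	else
		numentries <<= (PAGE_SHIFT - scale);

	/* Round up to a power of two and derive the mask. */
	while ((1UL << log2qty) < numentries)
		log2qty++;

	printf("%lu entries -> 2^%u buckets, %lu bytes, mask %#lx\n",
	       numentries, log2qty, bucketsize << log2qty,
	       (1UL << log2qty) - 1);
	return 0;
}
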
2459 /* KMSAN will take care of these pages. */ in memblock_free_pages()
2463 /* pages were reserved and not allocated */ in memblock_free_pages()
2520 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " in mem_debugging_and_hardening_init()
2542 …pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running … in mem_debugging_and_hardening_init()
2556 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's in mem_debugging_and_hardening_init()
2563 /* Report memory auto-initialization states for this boot. */
2581 pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", in report_meminit()
2585 pr_info("mem auto-init: clearing system memory may take some time...\n"); in report_meminit()
2594 codesize = _etext - _stext; in mem_init_print_info()
2595 datasize = _edata - _sdata; in mem_init_print_info()
2596 rosize = __end_rodata - __start_rodata; in mem_init_print_info()
2597 bss_size = __bss_stop - __bss_start; in mem_init_print_info()
2598 init_data_size = __init_end - __init_begin; in mem_init_print_info()
2599 init_code_size = _einittext - _sinittext; in mem_init_print_info()
2611 size -= adj; \ in mem_init_print_info()
2623 …(%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" in mem_init_print_info()
2631 K(physpages - totalram_pages() - totalcma_pages), in mem_init_print_info()
2650 * page_ext requires contiguous pages, in mm_core_init()
2674 /* Should be run before the first non-init thread is created */ in mm_core_init()