Lines Matching full:zone

129 	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
702 * zone lock contention and keep cache-hot pages reusing.
791 * faulted, they come from the right zone right away. However, it is
795 * to a different zone. When migration fails - pinning fails.
816 * on different platforms may end up in a movable zone. ZERO_PAGE(0)
819 * memory to the MOVABLE zone, the vmemmap pages are also placed in
820 * such zone. Such pages cannot be really moved around as they are
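The comment excerpted above explains why long-term pinnable pages must not sit in ZONE_MOVABLE: a pinned page can never be migrated, so pin paths migrate such pages out first and fail the pin if migration fails. A minimal sketch of the zone test this implies, using page_zonenum() from linux/mm.h; the helper name is hypothetical, and the real FOLL_LONGTERM path in GUP does considerably more (CMA handling, batching, retries):

	#include <linux/mm.h>
	#include <linux/mmzone.h>

	/*
	 * Hypothetical helper: a page in ZONE_MOVABLE must be migrated to
	 * another zone before it may be pinned long-term, because a pinned
	 * page can never be moved again.
	 */
	static bool page_needs_migration_for_longterm_pin(struct page *page)
	{
		return page_zonenum(page) == ZONE_MOVABLE;
	}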
842 struct zone { struct
845 /* zone watermarks, access with *_wmark_pages(zone) macros */
855 * wasting several GB of ram we must reserve some of the lower zone argument
889 * spanned_pages is the total pages spanned by the zone, including argument
893 * present_pages is physical pages existing within the zone, which
897 * present_early_pages is present pages existing within the zone
918 * It is a seqlock because it has to be read outside of zone->lock,
922 * The span_seq lock is declared along with zone->lock because it is
923 * frequently read in proximity to zone->lock. It's good to
946 * of pageblock. Protected by zone->lock. argument
969 /* zone flags, see below */ argument
1014 /* Zone statistics */
1031 ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks. argument
1034 ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
1035 ZONE_BELOW_HIGH, /* zone is below high watermark. */
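These flag bits live in zone->flags (an unsigned long in struct zone) and are manipulated with the usual atomic bit helpers. A hedged sketch of consuming the one-shot ZONE_BOOSTED_WATERMARK flag, loosely modelled on what the kswapd wakeup path does; the helper name is made up for illustration:

	#include <linux/mmzone.h>
	#include <linux/bitops.h>

	/* Sketch: consume a one-shot watermark boost, kswapd-style. */
	static bool zone_take_watermark_boost(struct zone *zone)
	{
		if (!test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))
			return false;
		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
		return true;
	}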
1038 static inline unsigned long wmark_pages(const struct zone *z, in wmark_pages()
1044 static inline unsigned long min_wmark_pages(const struct zone *z) in min_wmark_pages()
1049 static inline unsigned long low_wmark_pages(const struct zone *z) in low_wmark_pages()
1054 static inline unsigned long high_wmark_pages(const struct zone *z) in high_wmark_pages()
1059 static inline unsigned long promo_wmark_pages(const struct zone *z) in promo_wmark_pages()
1064 static inline unsigned long zone_managed_pages(struct zone *zone) in zone_managed_pages() argument
1066 return (unsigned long)atomic_long_read(&zone->managed_pages); in zone_managed_pages()
1069 static inline unsigned long zone_cma_pages(struct zone *zone) in zone_cma_pages() argument
1072 return zone->cma_pages; in zone_cma_pages()
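The accessors above wrap the per-zone watermark array and the managed-page counter. A small sketch combining them with zone_page_state() from linux/vmstat.h to ask whether a zone's free pages currently clear its high watermark; the helper is illustrative only:

	#include <linux/mmzone.h>
	#include <linux/vmstat.h>

	/* Sketch: is this zone above its high watermark right now? */
	static bool zone_above_high_wmark(struct zone *zone)
	{
		unsigned long free = zone_page_state(zone, NR_FREE_PAGES);

		return free > high_wmark_pages(zone);
	}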
1078 static inline unsigned long zone_end_pfn(const struct zone *zone) in zone_end_pfn() argument
1080 return zone->zone_start_pfn + zone->spanned_pages; in zone_end_pfn()
1083 static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) in zone_spans_pfn() argument
1085 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); in zone_spans_pfn()
1088 static inline bool zone_is_initialized(struct zone *zone) in zone_is_initialized() argument
1090 return zone->initialized; in zone_is_initialized()
1093 static inline bool zone_is_empty(struct zone *zone) in zone_is_empty() argument
1095 return zone->spanned_pages == 0; in zone_is_empty()
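zone_end_pfn() and zone_spans_pfn() describe the zone's spanned PFN range, which may contain holes, so a walk over it still has to validate each PFN. A hedged sketch with a hypothetical helper name:

	#include <linux/mmzone.h>
	#include <linux/mm.h>

	/* Sketch: count the valid PFNs inside a zone's spanned range. */
	static unsigned long count_zone_valid_pfns(struct zone *zone)
	{
		unsigned long pfn, count = 0;

		if (zone_is_empty(zone))
			return 0;

		for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++)
			if (pfn_valid(pfn))
				count++;

		return count;
	}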
1100 * The zone field is never updated after free_area_init_core()
1104 /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
1124 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
1162 * Consecutive zone device pages should not be merged into the same sgl
1166 * both pages are not zone device pages or both pages are zone device pages
1179 extern void memmap_init_zone_device(struct zone *, unsigned long,
1211 * intersection with the given zone
1213 static inline bool zone_intersects(struct zone *zone, in zone_intersects() argument
1216 if (zone_is_empty(zone)) in zone_intersects()
1218 if (start_pfn >= zone_end_pfn(zone) || in zone_intersects()
1219 start_pfn + nr_pages <= zone->zone_start_pfn) in zone_intersects()
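zone_intersects() reports whether a PFN range overlaps a zone's span (empty zones never intersect). A hedged sketch of how memory-hotplug-style code might locate the zone a new range overlaps; the function name is hypothetical:

	#include <linux/mmzone.h>

	/* Sketch: first zone on a node whose span overlaps [start_pfn, start_pfn + nr_pages). */
	static struct zone *find_overlapping_zone(pg_data_t *pgdat,
						  unsigned long start_pfn,
						  unsigned long nr_pages)
	{
		int i;

		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = &pgdat->node_zones[i];

			if (zone_intersects(zone, start_pfn, nr_pages))
				return zone;
		}
		return NULL;
	}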
1248 * This struct contains information about a zone in a zonelist. It is stored
1252 struct zone *zone; /* Pointer to actual zone */ member
1253 int zone_idx; /* zone_idx(zoneref->zone) */
1262 * To speed the reading of the zonelist, the zonerefs contain the zone index
1266 * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
1267 * zonelist_zone_idx() - Return the index of the zone for an entry
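Because each zoneref caches the zone index next to the zone pointer, a zonelist walker can filter on highest_zoneidx without dereferencing every struct zone. The sketch below shows that idea in isolation; it is a simplified stand-in for what next_zones_zonelist() actually does, relying on the NULL-zone terminator at the end of the _zonerefs array:

	#include <linux/mmzone.h>
	#include <linux/nodemask.h>

	/* Sketch: advance a zoneref cursor to the next eligible entry. */
	static struct zoneref *scan_zonerefs(struct zoneref *z,
					     enum zone_type highest_zoneidx,
					     nodemask_t *nodes)
	{
		while (zonelist_zone(z) &&
		       (zonelist_zone_idx(z) > highest_zoneidx ||
			(nodes && !node_isset(zonelist_node_idx(z), *nodes))))
			z++;
		return z;
	}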
1320 * per-zone basis.
1328 struct zone node_zones[MAX_NR_ZONES];
1355 * Nests above zone->lock and zone->span_seqlock
1478 void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
1480 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
1483 bool zone_watermark_ok(struct zone *z, unsigned int order,
1486 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
1497 extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
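The watermark checks and wakeup_kswapd() above are typically used together: test a zone against a watermark for the requested order, and wake kswapd when it is short. A hedged sketch with a hypothetical helper; the real allocator also passes alloc_flags and picks the watermark according to the allocation context:

	#include <linux/mmzone.h>
	#include <linux/gfp.h>

	/* Sketch: check the low watermark and nudge kswapd if the zone is short. */
	static bool zone_ok_or_kick_kswapd(struct zone *zone, unsigned int order,
					   gfp_t gfp_mask,
					   enum zone_type highest_zoneidx)
	{
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
				      highest_zoneidx, 0))
			return true;

		wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
		return false;
	}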
1518 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
1520 #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) argument
1523 static inline bool zone_is_zone_device(struct zone *zone) in zone_is_zone_device() argument
1525 return zone_idx(zone) == ZONE_DEVICE; in zone_is_zone_device()
1528 static inline bool zone_is_zone_device(struct zone *zone) in zone_is_zone_device() argument
1535 * Returns true if a zone has pages managed by the buddy allocator.
1537 * populated_zone(). If the whole zone is reserved then we can easily
1540 static inline bool managed_zone(struct zone *zone) in managed_zone() argument
1542 return zone_managed_pages(zone); in managed_zone()
1545 /* Returns true if a zone has memory */
1546 static inline bool populated_zone(struct zone *zone) in populated_zone() argument
1548 return zone->present_pages; in populated_zone()
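managed_zone() and populated_zone() answer different questions: present_pages counts pages physically present in the zone, while managed pages are the subset actually handed to the buddy allocator (present minus reserved). A hedged sketch of using that distinction when walking a node's zones; the helper is illustrative:

	#include <linux/mmzone.h>

	/* Sketch: count the zones on a node that the buddy allocator actually manages. */
	static int node_managed_zone_count(pg_data_t *pgdat)
	{
		int i, nr = 0;

		for (i = 0; i < MAX_NR_ZONES; i++) {
			/* A populated but fully reserved zone is skipped here. */
			if (managed_zone(&pgdat->node_zones[i]))
				nr++;
		}
		return nr;
	}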
1552 static inline int zone_to_nid(struct zone *zone) in zone_to_nid() argument
1554 return zone->node; in zone_to_nid()
1557 static inline void zone_set_nid(struct zone *zone, int nid) in zone_set_nid() argument
1559 zone->node = nid; in zone_set_nid()
1562 static inline int zone_to_nid(struct zone *zone) in zone_to_nid() argument
1567 static inline void zone_set_nid(struct zone *zone, int nid) {} in zone_set_nid() argument
1583 * is_highmem - helper function to quickly check if a struct zone is a
1584 * highmem zone or not. This is an attempt to keep references
1586 * @zone: pointer to struct zone variable
1587 * Return: 1 for a highmem zone, 0 otherwise
1589 static inline int is_highmem(struct zone *zone) in is_highmem() argument
1591 return is_highmem_idx(zone_idx(zone)); in is_highmem()
1620 extern struct zone *next_zone(struct zone *zone);
1632 * @zone: pointer to struct zone variable
1634 * The user only needs to declare the zone variable, for_each_zone
1637 #define for_each_zone(zone) \ argument
1638 for (zone = (first_online_pgdat())->node_zones; \
1639 zone; \
1640 zone = next_zone(zone))
1642 #define for_each_populated_zone(zone) \ argument
1643 for (zone = (first_online_pgdat())->node_zones; \
1644 zone; \
1645 zone = next_zone(zone)) \
1646 if (!populated_zone(zone)) \
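for_each_zone() visits every zone of every online node, including empty ones, while for_each_populated_zone() skips zones with no present pages. A hedged sketch totalling managed pages system-wide with the populated variant:

	#include <linux/mmzone.h>

	/* Sketch: sum managed pages over every populated zone in the system. */
	static unsigned long total_managed_pages(void)
	{
		struct zone *zone;
		unsigned long total = 0;

		for_each_populated_zone(zone)
			total += zone_managed_pages(zone);

		return total;
	}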
1650 static inline struct zone *zonelist_zone(struct zoneref *zoneref) in zonelist_zone()
1652 return zoneref->zone; in zonelist_zone()
1662 return zone_to_nid(zoneref->zone); in zonelist_node_idx()
1670 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
1672 * @highest_zoneidx: The zone index of the highest zone to return
1675 * This function returns the next zone at or below a given zone index that is
1677 * search. The zoneref returned is a cursor that represents the current zone
1681 * Return: the next zone at or below highest_zoneidx within the allowed
1694 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
1695 * @zonelist: The zonelist to search for a suitable zone
1696 * @highest_zoneidx: The zone index of the highest zone to return
1699 * This function returns the first zone at or below a given zone index that is
1704 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
1708 * Return: Zoneref pointer for the first suitable zone found
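A typical caller resolves its gfp mask to a zonelist and zone index first, then takes the first eligible zoneref as its preferred zone. A hedged sketch using node_zonelist() and gfp_zone() from linux/gfp.h; the helper name is hypothetical and, as the comment above notes, the returned zoneref may carry a NULL zone when nothing qualifies:

	#include <linux/gfp.h>
	#include <linux/mmzone.h>

	/* Sketch: pick the preferred zone for an allocation on node 'nid'. */
	static struct zone *pick_preferred_zone(int nid, gfp_t gfp_mask,
						nodemask_t *nodemask)
	{
		struct zonelist *zonelist = node_zonelist(nid, gfp_mask);
		struct zoneref *z;

		z = first_zones_zonelist(zonelist, gfp_zone(gfp_mask), nodemask);

		/* NULL when no zone at or below gfp_zone(gfp_mask) is eligible. */
		return zonelist_zone(z);
	}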
1719 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
1720 * @zone: The current zone in the iterator
1723 * @highidx: The zone index of the highest zone to return
1726 * This iterator iterates though all zones at or below a given zone index and
1729 #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ argument
1730 for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
1731 zone; \
1733 zone = zonelist_zone(z))
1735 #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \ argument
1736 for (zone = zonelist_zone(z); \
1737 zone; \
1739 zone = zonelist_zone(z))
1743 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
1744 * @zone: The current zone in the iterator
1747 * @highidx: The zone index of the highest zone to return
1749 * This iterator iterates though all zones at or below a given zone index.
1751 #define for_each_zone_zonelist(zone, z, zlist, highidx) \ argument
1752 for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
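Putting the iterator together with the watermark helpers gives the classic allocator-style walk: visit the eligible zones in preference order and stop at the first one with enough free pages. The sketch below is loosely modelled on get_page_from_freelist() but heavily simplified (no alloc_flags, no dirty or fragmentation heuristics):

	#include <linux/gfp.h>
	#include <linux/mmzone.h>

	/* Sketch: first zone in the zonelist that clears its low watermark. */
	static struct zone *first_fit_zone(struct zonelist *zlist,
					   enum zone_type highidx,
					   nodemask_t *nodemask,
					   unsigned int order)
	{
		struct zoneref *z;
		struct zone *zone;

		for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) {
			if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
					      highidx, 0))
				return zone;
		}
		return NULL;
	}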
1767 * at least one zone that can satisfy kernel allocations. in movable_only_nodes()
1838 /* See declaration of similar field in struct zone */