/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/local_lock.h>
#include <linux/zswap.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
#define MAX_PAGE_ORDER 10
#else
#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)

#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)

#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)

/* Defines the order for the number of pages that have a migrate type. */
#ifndef CONFIG_PAGE_BLOCK_MAX_ORDER
#define PAGE_BLOCK_MAX_ORDER MAX_PAGE_ORDER
#else
#define PAGE_BLOCK_MAX_ORDER CONFIG_PAGE_BLOCK_MAX_ORDER
#endif /* CONFIG_PAGE_BLOCK_MAX_ORDER */

/*
 * MAX_PAGE_ORDER, which defines the max order of pages to be allocated by
 * the buddy allocator, must be greater than or equal to
 * PAGE_BLOCK_MAX_ORDER, which defines the order for the number of pages
 * that can have a migrate type.
 */
#if (PAGE_BLOCK_MAX_ORDER > MAX_PAGE_ORDER)
#error MAX_PAGE_ORDER must be >= PAGE_BLOCK_MAX_ORDER
#endif

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service. That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3
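/*
 * Illustrative sketch, not part of this header: the largest contiguous
 * allocation the buddy allocator will serve spans MAX_ORDER_NR_PAGES pages,
 * i.e. (1 << MAX_PAGE_ORDER) * PAGE_SIZE bytes; with the default
 * MAX_PAGE_ORDER of 10 and 4 KiB pages that is 4 MiB. The example_* name is
 * hypothetical.
 */
static inline unsigned long example_max_buddy_bytes(void)
{
	return (unsigned long)MAX_ORDER_NR_PAGES << PAGE_SHIFT;
}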
enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by the
	 * __free_pageblock_cma() function.
	 */
	MIGRATE_CMA,
	__MIGRATE_TYPE_END = MIGRATE_CMA,
#else
	__MIGRATE_TYPE_END = MIGRATE_HIGHATOMIC,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
/*
 * __dump_folio() in mm/debug.c passes a folio pointer to an on-stack
 * struct folio, so folio_pfn() cannot be used and the pfn is needed.
 */
# define is_migrate_cma_folio(folio, pfn)	\
	(get_pfnblock_migratetype(&folio->page, pfn) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
# define is_migrate_cma_folio(folio, pfn) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

/*
 * Check whether a migratetype can be merged with another migratetype.
 *
 * It is only mergeable when it can fall back to other migratetypes for
 * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c.
 */
static inline bool migratetype_is_mergeable(int mt)
{
	return mt < MIGRATE_PCPTYPES;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < NR_PAGE_ORDERS; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define get_pageblock_migratetype(page) \
	get_pfnblock_migratetype(page, page_to_pfn(page))

#define folio_migratetype(folio) \
	get_pageblock_migratetype(&folio->page)

struct free_area {
	struct list_head free_list[MIGRATE_TYPES];
	unsigned long nr_free;
};
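/*
 * Illustrative sketch, not part of this header: for_each_migratetype_order()
 * visits every (order, migratetype) pair, e.g. to count the blocks sitting
 * on an array of free areas such as zone->free_area[] below. The example_*
 * name is hypothetical, and a real caller would hold zone->lock.
 */
static inline unsigned long example_count_free_blocks(struct free_area *areas)
{
	unsigned int order, type;
	unsigned long blocks = 0;
	struct list_head *pos;

	for_each_migratetype_order(order, type)
		list_for_each(pos, &areas[order].free_list[type])
			blocks++;
	return blocks;
}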
struct pglist_data;

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non-intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_EVENT_ITEMS
};
#else
#define NR_VM_NUMA_EVENT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_FREE_PAGES_BLOCKS,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	/* Second 128 byte cacheline */
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
#ifdef CONFIG_UNACCEPTED_MEMORY
	NR_UNACCEPTED,
#endif
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_FILE_THPS,
	NR_FILE_PMDMAPPED,
	NR_ANON_THPS,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_THROTTLED_WRITTEN,	/* NR_WRITTEN while reclaim throttled */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_FOLL_PIN_ACQUIRED,	/* via: pin_user_page(), gup flag: FOLL_PIN */
	NR_FOLL_PIN_RELEASED,	/* pages returned via unpin_user_page() */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	NR_KERNEL_SCS_KB,	/* measured in KiB */
#endif
	NR_PAGETABLE,		/* used for pagetables */
	NR_SECONDARY_PAGETABLE,	/* secondary pagetables, KVM & IOMMU */
#ifdef CONFIG_IOMMU_SUPPORT
	NR_IOMMU_PAGES,		/* # of pages allocated by IOMMU */
#endif
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
#ifdef CONFIG_NUMA_BALANCING
	PGPROMOTE_SUCCESS,	/* promote successfully */
	/*
	 * Candidate pages for promotion based on hint fault latency. This
	 * counter is used to control the promotion rate and adjust the hot
	 * threshold.
	 */
	PGPROMOTE_CANDIDATE,
	/*
	 * Not rate-limited (NRL) candidate pages: pages that can be promoted
	 * without considering the hot threshold because there are enough free
	 * pages in the fast-tier node. These promotions bypass the regular
	 * hotness checks and do NOT influence the promotion rate-limiter or
	 * threshold-adjustment logic.
	 * This is for statistics/monitoring purposes.
	 */
	PGPROMOTE_CANDIDATE_NRL,
#endif
	/* PGDEMOTE_*: pages demoted */
	PGDEMOTE_KSWAPD,
	PGDEMOTE_DIRECT,
	PGDEMOTE_KHUGEPAGED,
	PGDEMOTE_PROACTIVE,
#ifdef CONFIG_HUGETLB_PAGE
	NR_HUGETLB,
#endif
	NR_BALLOON_PAGES,
	NR_KERNEL_FILE_PAGES,
	NR_VM_NODE_STAT_ITEMS
};

/*
 * Returns true if the item should be printed in THPs (/proc/vmstat
 * currently prints the number of anon, file and shmem THPs, but the
 * item is charged in pages).
 */
static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return false;

	return item == NR_ANON_THPS ||
	       item == NR_FILE_THPS ||
	       item == NR_SHMEM_THPS ||
	       item == NR_SHMEM_PMDMAPPED ||
	       item == NR_FILE_PMDMAPPED;
}
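/*
 * Illustrative sketch, not part of this header: when printing, mm/vmstat.c
 * divides the items selected by vmstat_item_print_in_thp() by the number of
 * pages per THP (HPAGE_PMD_NR from <linux/huge_mm.h>, which is not included
 * here). The example_* helper is hypothetical and takes that ratio as a
 * parameter to stay self-contained.
 */
static inline unsigned long example_vmstat_display(enum node_stat_item item,
						   unsigned long pages,
						   unsigned long pages_per_thp)
{
	return vmstat_item_print_in_thp(item) ? pages / pages_per_thp : pages;
}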
/*
 * Returns true if the value is measured in bytes (most vmstat values are
 * measured in pages). This defines the API part; the internal representation
 * might be different.
 */
static __always_inline bool vmstat_item_in_bytes(int idx)
{
	/*
	 * Global and per-node slab counters track slab pages.
	 * It's expected that changes are multiples of PAGE_SIZE.
	 * Internally values are stored in pages.
	 *
	 * Per-memcg and per-lruvec counters track memory, consumed
	 * by individual slab objects. These counters are actually
	 * byte-precise.
	 */
	return (idx == NR_SLAB_RECLAIMABLE_B ||
		idx == NR_SLAB_UNRECLAIMABLE_B);
}

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

enum vmscan_throttle_state {
	VMSCAN_THROTTLE_WRITEBACK,
	VMSCAN_THROTTLE_ISOLATED,
	VMSCAN_THROTTLE_NOPROGRESS,
	VMSCAN_THROTTLE_CONGESTED,
	NR_VMSCAN_THROTTLE,
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline bool is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline bool is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
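/*
 * Illustrative sketch, not part of this header: the LRU_ACTIVE/LRU_FILE
 * offsets make the list index computable from two booleans, which is exactly
 * the arithmetic the layout comment above guarantees. The example_* name is
 * hypothetical.
 */
static inline enum lru_list example_lru_index(bool file, bool active)
{
	return LRU_BASE + (file ? LRU_FILE : 0) + (active ? LRU_ACTIVE : 0);
}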
#define WORKINGSET_ANON 0
#define WORKINGSET_FILE 1
#define ANON_AND_FILE 2

enum lruvec_flags {
	/*
	 * An lruvec has many dirty pages backed by a congested BDI:
	 * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim.
	 *    It can be cleared by cgroup reclaim or kswapd.
	 * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim.
	 *    It can only be cleared by kswapd.
	 *
	 * Essentially, kswapd can unthrottle an lruvec throttled by cgroup
	 * reclaim, but not vice versa. This only applies to the root cgroup.
	 * The goal is to prevent cgroup reclaim on the root cgroup (e.g.
	 * memory.reclaim) from unthrottling an unbalanced node (that was
	 * throttled by kswapd).
	 */
	LRUVEC_CGROUP_CONGESTED,
	LRUVEC_NODE_CONGESTED,
};

#endif /* !__GENERATING_BOUNDS_H */

/*
 * Evictable folios are divided into multiple generations. The youngest and the
 * oldest generation numbers, max_seq and min_seq, are monotonically increasing.
 * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
 * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
 * corresponding generation. The gen counter in folio->flags stores gen+1 while
 * a folio is on one of lrugen->folios[]. Otherwise it stores 0.
 *
 * After a folio is faulted in, the aging needs to check the accessed bit at
 * least twice before handing this folio over to the eviction. The first check
 * clears the accessed bit from the initial fault; the second check makes sure
 * this folio hasn't been used since then. This process, AKA second chance,
 * requires a minimum of two generations, hence MIN_NR_GENS. And to maintain ABI
 * compatibility with the active/inactive LRU, e.g., /proc/vmstat, these two
 * generations are considered active; the rest of the generations, if they
 * exist, are considered inactive. See lru_gen_is_active().
 *
 * PG_active is always cleared while a folio is on one of lrugen->folios[] so
 * that the sliding window need not worry about it. And it's set again when a
 * folio considered active is isolated for non-reclaiming purposes, e.g.,
 * migration. See lru_gen_add_folio() and lru_gen_del_folio().
 *
 * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
 * number of categories of the active/inactive LRU when keeping track of
 * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
 * in folio->flags, masked by LRU_GEN_MASK.
 */
#define MIN_NR_GENS 2U
#define MAX_NR_GENS 4U

/*
 * Each generation is divided into multiple tiers. A folio accessed N times
 * through file descriptors is in tier order_base_2(N). A folio in the first
 * tier (N=0,1) is marked by PG_referenced unless it was faulted in through page
 * tables or read ahead. A folio in the last tier (MAX_NR_TIERS-1) is marked by
 * PG_workingset. A folio in any other tier (1<N<5) between the first and last
 * is marked by additional bits of LRU_REFS_WIDTH in folio->flags.
 *
 * In contrast to moving across generations, which requires the LRU lock,
 * moving across tiers only involves atomic operations on folio->flags and
 * therefore has a negligible cost in the buffered access path. In the eviction
 * path, comparisons of refaulted/(evicted+protected) from the first tier and
 * the rest infer whether folios accessed multiple times through file
 * descriptors are statistically hot and thus worth protecting.
 *
 * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
 * number of categories of the active/inactive LRU when keeping track of
 * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
 * folio->flags, masked by LRU_REFS_MASK.
 */
#define MAX_NR_TIERS 4U

#ifndef __GENERATING_BOUNDS_H

#define LRU_GEN_MASK	((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
#define LRU_REFS_MASK	((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
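/*
 * Illustrative sketch, not part of this header: a generation number seq maps
 * to its list index by modulo, so the sliding window [min_seq, max_seq]
 * reuses MAX_NR_GENS slots; mm/vmscan.c implements this as
 * lru_gen_from_seq(). The example_* name is hypothetical.
 */
static inline int example_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}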
/*
 * For folios accessed multiple times through file descriptors,
 * lru_gen_inc_refs() sets additional bits of LRU_REFS_WIDTH in folio->flags
 * after PG_referenced, then PG_workingset after LRU_REFS_WIDTH. After all its
 * bits are set, i.e., LRU_REFS_FLAGS|BIT(PG_workingset), a folio is lazily
 * promoted into the second oldest generation in the eviction path. And when
 * folio_inc_gen() does that, it clears LRU_REFS_FLAGS so that
 * lru_gen_inc_refs() can start over. Note that for this case, LRU_REFS_MASK is
 * only valid when PG_referenced is set.
 *
 * For folios accessed multiple times through page tables, folio_update_gen()
 * from a page table walk or lru_gen_set_refs() from a rmap walk sets
 * PG_referenced after the accessed bit is cleared for the first time.
 * Thereafter, those two paths set PG_workingset and promote folios to the
 * youngest generation. Like folio_inc_gen(), folio_update_gen() also clears
 * PG_referenced. Note that for this case, LRU_REFS_MASK is not used.
 *
 * For both cases above, after PG_workingset is set on a folio, it remains
 * until this folio is either reclaimed, or "deactivated" by
 * lru_gen_clear_refs(). It can be set again if lru_gen_test_recent() returns
 * true upon a refault.
 */
#define LRU_REFS_FLAGS	(LRU_REFS_MASK | BIT(PG_referenced))

struct lruvec;
struct page_vma_mapped_walk;

#ifdef CONFIG_LRU_GEN

enum {
	LRU_GEN_ANON,
	LRU_GEN_FILE,
};

enum {
	LRU_GEN_CORE,
	LRU_GEN_MM_WALK,
	LRU_GEN_NONLEAF_YOUNG,
	NR_LRU_GEN_CAPS
};

#define MIN_LRU_BATCH		BITS_PER_LONG
#define MAX_LRU_BATCH		(MIN_LRU_BATCH * 64)

/* whether to keep historical stats from evicted generations */
#ifdef CONFIG_LRU_GEN_STATS
#define NR_HIST_GENS		MAX_NR_GENS
#else
#define NR_HIST_GENS		1U
#endif

/*
 * The youngest generation number is stored in max_seq for both anon and file
 * types as they are aged on an equal footing. The oldest generation numbers are
 * stored in min_seq[] separately for anon and file types so that they can be
 * incremented independently. Ideally min_seq[] are kept in sync when both anon
 * and file types are evictable. However, to adapt to situations like extreme
 * swappiness, they are allowed to be out of sync by at most
 * MAX_NR_GENS-MIN_NR_GENS-1.
 *
 * The number of pages in each generation is eventually consistent and therefore
 * can be transiently negative when reset_batch_size() is pending.
 */
struct lru_gen_folio {
	/* the aging increments the youngest generation number */
	unsigned long max_seq;
	/* the eviction increments the oldest generation numbers */
	unsigned long min_seq[ANON_AND_FILE];
	/* the birth time of each generation in jiffies */
	unsigned long timestamps[MAX_NR_GENS];
	/* the multi-gen LRU lists, lazily sorted on eviction */
	struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
	/* the multi-gen LRU sizes, eventually consistent */
	long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
	/* the exponential moving average of refaulted */
	unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
	/* the exponential moving average of evicted+protected */
	unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
	/* can only be modified under the LRU lock */
	unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
	/* can be modified without holding the LRU lock */
	atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
	atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
	/* whether the multi-gen LRU is enabled */
	bool enabled;
	/* the memcg generation this lru_gen_folio belongs to */
	u8 gen;
	/* the list segment this lru_gen_folio belongs to */
	u8 seg;
	/* per-node lru_gen_folio list for global reclaim */
	struct hlist_nulls_node list;
};

enum {
	MM_LEAF_TOTAL,		/* total leaf entries */
	MM_LEAF_YOUNG,		/* young leaf entries */
	MM_NONLEAF_FOUND,	/* non-leaf entries found in Bloom filters */
	MM_NONLEAF_ADDED,	/* non-leaf entries added to Bloom filters */
	NR_MM_STATS
};

/* double-buffering Bloom filters */
#define NR_BLOOM_FILTERS	2

struct lru_gen_mm_state {
	/* synced with max_seq after each iteration */
	unsigned long seq;
	/* where the current iteration continues after */
	struct list_head *head;
	/* where the last iteration ended before */
	struct list_head *tail;
	/* Bloom filters flip after each iteration */
	unsigned long *filters[NR_BLOOM_FILTERS];
	/* the mm stats for debugging */
	unsigned long stats[NR_HIST_GENS][NR_MM_STATS];
};
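/*
 * Illustrative sketch, not part of this header: double buffering selects the
 * active Bloom filter by the parity of the generation number, roughly what
 * mm/vmscan.c does via filter_gen_from_seq(). The example_* name is
 * hypothetical.
 */
static inline unsigned long *example_active_filter(struct lru_gen_mm_state *state,
						   unsigned long seq)
{
	return state->filters[seq % NR_BLOOM_FILTERS];
}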
struct lru_gen_mm_walk {
	/* the lruvec under reclaim */
	struct lruvec *lruvec;
	/* max_seq from lru_gen_folio: can be out of date */
	unsigned long seq;
	/* the next address within an mm to scan */
	unsigned long next_addr;
	/* to batch promoted pages */
	int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
	/* to batch the mm stats */
	int mm_stats[NR_MM_STATS];
	/* total batched items */
	int batched;
	int swappiness;
	bool force_scan;
};

/*
 * For each node, memcgs are divided into two generations: the old and the
 * young. For each generation, memcgs are randomly sharded into multiple bins
 * to improve scalability. For each bin, the hlist_nulls is virtually divided
 * into three segments: the head, the tail and the default.
 *
 * An onlining memcg is added to the tail of a random bin in the old generation.
 * The eviction starts at the head of a random bin in the old generation. The
 * per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS)
 * indexes the old generation, is incremented when all its bins become empty.
 *
 * There are four operations:
 * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
 *    current generation (old or young) and updates its "seg" to "head";
 * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
 *    current generation (old or young) and updates its "seg" to "tail";
 * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
 *    generation, updates its "gen" to "old" and resets its "seg" to "default";
 * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
 *    young generation, updates its "gen" to "young" and resets its "seg" to
 *    "default".
 *
 * The events that trigger the above operations are:
 * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
 * 2. The first attempt to reclaim a memcg below low, which triggers
 *    MEMCG_LRU_TAIL;
 * 3. The first attempt to reclaim a memcg offlined or below reclaimable size
 *    threshold, which triggers MEMCG_LRU_TAIL;
 * 4. The second attempt to reclaim a memcg offlined or below reclaimable size
 *    threshold, which triggers MEMCG_LRU_YOUNG;
 * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
 * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
 * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
 *
 * Notes:
 * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
 *    of their max_seq counters ensures the eventual fairness to all eligible
 *    memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
 * 2. There are only two valid generations: old (seq) and young (seq+1).
 *    MEMCG_NR_GENS is set to three so that when reading the generation counter
 *    locklessly, a stale value (seq-1) does not wrap around to young.
 */
#define MEMCG_NR_GENS	3
#define MEMCG_NR_BINS	8
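/*
 * Illustrative sketch, not part of this header: with the scheme above, the
 * old generation's index into the bins is the counter modulo MEMCG_NR_GENS,
 * and the young generation (seq+1) is the next slot. The example_* names are
 * hypothetical.
 */
static inline int example_memcg_old_gen(unsigned long seq)
{
	return seq % MEMCG_NR_GENS;
}

static inline int example_memcg_young_gen(unsigned long seq)
{
	return (seq + 1) % MEMCG_NR_GENS;
}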
struct lru_gen_memcg {
	/* the per-node memcg generation counter */
	unsigned long seq;
	/* each memcg has one lru_gen_folio per node */
	unsigned long nr_memcgs[MEMCG_NR_GENS];
	/* per-node lru_gen_folio list for global reclaim */
	struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS];
	/* protects the above */
	spinlock_t lock;
};

void lru_gen_init_pgdat(struct pglist_data *pgdat);
void lru_gen_init_lruvec(struct lruvec *lruvec);
bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);

void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
void lru_gen_online_memcg(struct mem_cgroup *memcg);
void lru_gen_offline_memcg(struct mem_cgroup *memcg);
void lru_gen_release_memcg(struct mem_cgroup *memcg);
void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);

#else /* !CONFIG_LRU_GEN */

static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
{
}

static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
{
}

static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
	return false;
}

static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
}

static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
{
}

static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
{
}

static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
{
}

static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
{
}

static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
{
}

#endif /* CONFIG_LRU_GEN */

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	/* per lruvec lru_lock for memcg */
	spinlock_t			lru_lock;
	/*
	 * These track the cost of reclaiming one LRU - file or anon -
	 * over the other. As the observed cost of reclaiming one LRU
	 * increases, the reclaim scan balance tips toward the other.
	 */
	unsigned long			anon_cost;
	unsigned long			file_cost;
	/* Non-resident age, driven by LRU movement */
	atomic_long_t			nonresident_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults[ANON_AND_FILE];
	/* Various lruvec state flags (enum lruvec_flags) */
	unsigned long			flags;
#ifdef CONFIG_LRU_GEN
	/* evictable pages divided into generations */
	struct lru_gen_folio		lrugen;
#ifdef CONFIG_LRU_GEN_WALKS_MMU
	/* to concurrently iterate lru_gen_mm_list */
	struct lru_gen_mm_state		mm_state;
#endif
#endif /* CONFIG_LRU_GEN */
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
	struct zswap_lruvec_state zswap_lruvec_state;
};

/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	WMARK_PROMO,
	NR_WMARK
};
/*
 * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists
 * are added for THP: one PCP list is used by GFP_MOVABLE, and the other PCP
 * list is used by GFP_UNMOVABLE and GFP_RECLAIMABLE.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_PCP_THP 2
#else
#define NR_PCP_THP 0
#endif
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)

/*
 * Flags used in pcp->flags field.
 *
 * PCPF_PREV_FREE_HIGH_ORDER: a high-order page was freed in the previous
 * page freeing. Used to avoid draining the PCP for a one-off high-order
 * page freeing.
 *
 * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in the PCP before
 * draining it for consecutive high-order page freeing without allocation,
 * if the CPU's data cache slice is large enough. This reduces zone lock
 * contention and keeps cache-hot pages reusable.
 */
#define	PCPF_PREV_FREE_HIGH_ORDER	BIT(0)
#define	PCPF_FREE_HIGH_BATCH		BIT(1)

struct per_cpu_pages {
	spinlock_t lock;	/* Protects lists field */
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int high_min;		/* min high watermark */
	int high_max;		/* max high watermark */
	int batch;		/* chunk size for buddy add/remove */
	u8 flags;		/* protected by pcp->lock */
	u8 alloc_factor;	/* batch scaling factor during allocate */
#ifdef CONFIG_NUMA
	u8 expire;		/* When 0, remote pagesets are drained */
#endif
	short free_count;	/* consecutive free count */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[NR_PCP_LISTS];
} ____cacheline_aligned_in_smp;
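/*
 * Illustrative sketch, not part of this header: mm/page_alloc.c maps an
 * (order, migratetype) pair to a pcp->lists[] slot for the low orders
 * (order_to_pindex() is the real helper); any THP lists follow at the end.
 * The example_* name is hypothetical.
 */
static inline unsigned int example_pcp_list_index(int migratetype,
						  unsigned int order)
{
	/* only valid for order <= PAGE_ALLOC_COSTLY_ORDER */
	return order * MIGRATE_PCPTYPES + migratetype;
}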
struct per_cpu_zonestat {
#ifdef CONFIG_SMP
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
	s8 stat_threshold;
#endif
#ifdef CONFIG_NUMA
	/*
	 * Low priority inaccurate counters that are only folded
	 * on demand. Use a large type to avoid the overhead of
	 * folding during refresh_cpu_vm_stats.
	 */
	unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS.H */

enum zone_type {
	/*
	 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
	 * to DMA to all of the addressable memory (ZONE_NORMAL).
	 * On architectures where this area covers the whole 32 bit address
	 * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
	 * DMA addressing constraints. This distinction is important as a 32bit
	 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
	 * platforms may need both zones as they support peripherals with
	 * different DMA addressing limitations.
	 */
#ifdef CONFIG_ZONE_DMA
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	/*
	 * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
	 * movable pages with few exceptional cases described below. Main use
	 * cases for ZONE_MOVABLE are to make memory offlining/unplug more
	 * likely to succeed, and to locally limit unmovable allocations - e.g.,
	 * to increase the number of THP/huge pages. Notable special cases are:
	 *
	 * 1. Pinned pages: (long-term) pinning of movable pages might
	 *    essentially turn such pages unmovable. Therefore, we do not allow
	 *    pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
	 *    faulted, they come from the right zone right away. However, it is
	 *    still possible that the address space already has pages in
	 *    ZONE_MOVABLE at the time the pages are pinned (i.e. the user
	 *    touched that memory before pinning). In such a case we migrate
	 *    them to a different zone. If migration fails, pinning fails.
	 * 2. memblock allocations: kernelcore/movablecore setups might create
	 *    situations where ZONE_MOVABLE contains unmovable allocations
	 *    after boot. Memory offlining and allocations fail early.
	 * 3. Memory holes: kernelcore/movablecore setups might create very rare
	 *    situations where ZONE_MOVABLE contains memory holes after boot,
	 *    for example, if we have sections that are only partially
	 *    populated. Memory offlining and allocations fail early.
	 * 4. PG_hwpoison pages: while poisoned pages can be skipped during
	 *    memory offlining, such pages cannot be allocated.
	 * 5. Unmovable PG_offline pages: in paravirtualized environments,
	 *    hotplugged memory blocks might only partially be managed by the
	 *    buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
	 *    parts not managed by the buddy are unmovable PG_offline pages. In
	 *    some cases (virtio-mem), such pages can be skipped during
	 *    memory offlining, however, they cannot be moved/allocated. These
	 *    techniques might use alloc_contig_range() to hide previously
	 *    exposed pages from the buddy again (e.g., to implement some sort
	 *    of memory unplug in virtio-mem).
	 * 6. ZERO_PAGE(0): kernelcore/movablecore setups might create
	 *    situations where ZERO_PAGE(0), which is allocated differently
	 *    on different platforms, ends up in a movable zone. ZERO_PAGE(0)
	 *    cannot be migrated.
	 * 7. Memory-hotplug: when using memmap_on_memory and onlining the
	 *    memory to the MOVABLE zone, the vmemmap pages are also placed in
	 *    such zone. Such pages cannot really be moved around as they are
	 *    self-stored in the range, but they are treated as movable when
	 *    the range they describe is about to be offlined.
	 *
	 * In general, no unmovable allocations that degrade memory offlining
	 * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
	 * have to expect that migrating pages in ZONE_MOVABLE can fail (even
	 * if has_unmovable_pages() states that there are no unmovable pages,
	 * there can be false negatives).
	 */
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

#define ASYNC_AND_SYNC 2

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;
	unsigned long nr_free_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or released eventually, so to avoid totally wasting
	 * several GB of ram we must reserve some of the lower zone memory
	 * (otherwise we risk running OOM on the lower zones despite there
	 * being tons of freeable ram on the higher zones). This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pages	__percpu *per_cpu_pageset;
	struct per_cpu_zonestat	__percpu *per_cpu_zonestats;
	/*
	 * the high and batch values are copied to individual pagesets for
	 * faster access
	 */
	int pageset_high_min;
	int pageset_high_max;
	int pageset_batch;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * present_early_pages is present pages existing within the zone
	 * located on memory available since early boot, excluding hotplugged
	 * memory.
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * cma_pages is the number of present pages that are assigned for
	 * CMA use (MIGRATE_CMA).
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path. But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock. It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/done(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;
#if defined(CONFIG_MEMORY_HOTPLUG)
	unsigned long		present_early_pages;
#endif
#ifdef CONFIG_CMA
	unsigned long		cma_pages;
#endif

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the
	 * migratetype of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	CACHELINE_PADDING(_pad1_);

	/* free areas of different sizes */
	struct free_area	free_area[NR_PAGE_ORDERS];

#ifdef CONFIG_UNACCEPTED_MEMORY
	/* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
	struct list_head	unaccepted_pages;

	/* To be called once the last page in the zone is accepted */
	struct work_struct	unaccepted_cleanup;
#endif

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Pages to be freed when next trylock succeeds */
	struct llist_head	trylock_free_pages;

	/* Write-intensive fields used by compaction and vmstats. */
	CACHELINE_PADDING(_pad2_);

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[ASYNC_AND_SYNC];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 * compact_order_failed is the minimum compaction failed order.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	CACHELINE_PADDING(_pad3_);
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
	ZONE_RECLAIM_ACTIVE,		/* kswapd may be scanning the zone. */
	ZONE_BELOW_HIGH,		/* zone is below high watermark. */
};

static inline unsigned long wmark_pages(const struct zone *z,
					enum zone_watermarks w)
{
	return z->_watermark[w] + z->watermark_boost;
}

static inline unsigned long min_wmark_pages(const struct zone *z)
{
	return wmark_pages(z, WMARK_MIN);
}

static inline unsigned long low_wmark_pages(const struct zone *z)
{
	return wmark_pages(z, WMARK_LOW);
}

static inline unsigned long high_wmark_pages(const struct zone *z)
{
	return wmark_pages(z, WMARK_HIGH);
}

static inline unsigned long promo_wmark_pages(const struct zone *z)
{
	return wmark_pages(z, WMARK_PROMO);
}
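/*
 * Illustrative sketch, not part of this header: a minimal watermark check
 * against a free-page count. The real check, __zone_watermark_ok() declared
 * further below, also accounts for lowmem_reserve[], the requested order and
 * allocation flags. The example_* name is hypothetical.
 */
static inline bool example_above_min_wmark(const struct zone *z,
					   unsigned long free_pages)
{
	return free_pages > min_wmark_pages(z);
}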
static inline unsigned long zone_managed_pages(const struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_cma_pages(struct zone *zone)
{
#ifdef CONFIG_CMA
	return zone->cma_pages;
#else
	return 0;
#endif
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(const struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(const struct zone *zone)
{
	return zone->spanned_pages == 0;
}

#ifndef BUILD_VDSO32_64
/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
#define LRU_GEN_PGOFF		(KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
#define LRU_REFS_PGOFF		(LRU_GEN_PGOFF - LRU_REFS_WIDTH)

/*
 * Define the bit shifts to access each section. For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away references to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF) ? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF) ? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type memdesc_zonenum(memdesc_flags_t flags)
{
	ASSERT_EXCLUSIVE_BITS(flags.f, ZONES_MASK << ZONES_PGSHIFT);
	return (flags.f >> ZONES_PGSHIFT) & ZONES_MASK;
}

static inline enum zone_type page_zonenum(const struct page *page)
{
	return memdesc_zonenum(page->flags);
}

static inline enum zone_type folio_zonenum(const struct folio *folio)
{
	return memdesc_zonenum(folio->flags);
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool memdesc_is_zone_device(memdesc_flags_t mdf)
{
	return memdesc_zonenum(mdf) == ZONE_DEVICE;
}

static inline struct dev_pagemap *page_pgmap(const struct page *page)
{
	VM_WARN_ON_ONCE_PAGE(!memdesc_is_zone_device(page->flags), page);
	return page_folio(page)->pgmap;
}

/*
 * Consecutive zone device pages should not be merged into the same sgl
 * or bvec segment with other types of pages or if they belong to different
 * pgmaps. Otherwise getting the pgmap of a given segment is not possible
 * without scanning the entire segment. This helper returns true if either
 * both pages are not zone device pages or both pages are zone device pages
 * with the same pgmap.
 */
static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
						     const struct page *b)
{
	if (memdesc_is_zone_device(a->flags) != memdesc_is_zone_device(b->flags))
		return false;
	if (!memdesc_is_zone_device(a->flags))
		return true;
	return page_pgmap(a) == page_pgmap(b);
}

extern void memmap_init_zone_device(struct zone *, unsigned long,
				    unsigned long, struct dev_pagemap *);
#else
static inline bool memdesc_is_zone_device(memdesc_flags_t mdf)
{
	return false;
}
static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
						     const struct page *b)
{
	return true;
}
static inline struct dev_pagemap *page_pgmap(const struct page *page)
{
	return NULL;
}
#endif

static inline bool is_zone_device_page(const struct page *page)
{
	return memdesc_is_zone_device(page->flags);
}

static inline bool folio_is_zone_device(const struct folio *folio)
{
	return memdesc_is_zone_device(folio->flags);
}

static inline bool is_zone_movable_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_MOVABLE;
}

static inline bool folio_is_zone_movable(const struct folio *folio)
{
	return folio_zonenum(folio) == ZONE_MOVABLE;
}
#endif
/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(const struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12
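/*
 * Illustrative sketch, not part of this header: at priority p, reclaim scans
 * roughly lru_size >> p pages, so the first pass at DEF_PRIORITY looks at
 * 1/4096th of a list and each lower priority doubles that. The example_*
 * name is hypothetical.
 */
static inline unsigned long example_scan_target(unsigned long lru_size,
						int priority)
{
	return lru_size >> priority;
}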
/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

/*
 * The array of struct pages for flatmem.
 * It must be declared for SPARSEMEM as well because there are configurations
 * that rely on that.
 */
extern struct page *mem_map;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
};
#endif

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Per NUMA node memory failure handling statistics.
 */
struct memory_failure_stats {
	/*
	 * Number of raw pages poisoned.
	 * Cases not accounted: memory outside kernel control, offline page,
	 * arch-specific memory_failure (SGX), hwpoison_filter() filtered
	 * error events, and unpoison actions from hwpoison_unpoison.
	 */
	unsigned long total;
	/*
	 * Recovery results of poisoned raw pages handled by memory_failure,
	 * in sync with mf_result.
	 * total = ignored + failed + delayed + recovered.
	 * total * PAGE_SIZE * #nodes = /proc/meminfo/HardwareCorrupted.
	 */
	unsigned long ignored;
	unsigned long failed;
	unsigned long delayed;
	unsigned long recovered;
};
#endif

/*
 * On NUMA machines, each NUMA node has a pg_data_t to describe its memory
 * layout. On UMA machines there is a single pglist_data which describes the
 * whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
typedef struct pglist_data {
	/*
	 * node_zones contains just the zones for THIS node. Not all of the
	 * zones may be populated, but it is the full list. It is referenced by
	 * this node's node_zonelists as well as other node's node_zonelists.
	 */
	struct zone node_zones[MAX_NR_ZONES];

	/*
	 * node_zonelists contains references to all zones in all nodes.
	 * Generally the first zones will be references to this node's
	 * node_zones.
	 */
	struct zonelist node_zonelists[MAX_ZONELISTS];

	int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLATMEM	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
	 * Also synchronizes pgdat->first_deferred_pfn during deferred page
	 * init.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;

	/* workqueues for throttling reclaim for different reasons. */
	wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];

	atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
	unsigned long nr_reclaim_start;	/* nr pages written while throttled
					 * when throttling started. */
#ifdef CONFIG_MEMORY_HOTPLUG
	struct mutex kswapd_lock;
#endif
	struct task_struct *kswapd;	/* Protected by kswapd_lock */
	int kswapd_order;
	enum zone_type kswapd_highest_zoneidx;

	atomic_t kswapd_failures;	/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_highest_zoneidx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
	bool proactive_compact_trigger;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * node reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	CACHELINE_PADDING(_pad1_);

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_NUMA_BALANCING
	/* start time in ms of current promote rate limit period */
	unsigned int nbp_rl_start;
	/* number of promote candidate pages at start time of current rate limit period */
	unsigned long nbp_rl_nr_cand;
	/* promote threshold in ms */
	unsigned int nbp_threshold;
	/* start time in ms of current promote threshold adjustment period */
	unsigned int nbp_th_start;
	/*
	 * number of promote candidate pages at start time of current promote
	 * threshold adjustment period
	 */
	unsigned long nbp_th_nr_cand;
#endif
	/* Fields commonly accessed by the page reclaim scanner */

	/*
	 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
	 *
	 * Use mem_cgroup_lruvec() to look up lruvecs.
	 */
	struct lruvec		__lruvec;

	unsigned long		flags;

#ifdef CONFIG_LRU_GEN
	/* kswap mm walk data */
	struct lru_gen_mm_walk mm_walk;
	/* lru_gen_folio list */
	struct lru_gen_memcg memcg_lru;
#endif

	CACHELINE_PADDING(_pad2_);

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
#ifdef CONFIG_NUMA
	struct memory_tier __rcu *memtier;
#endif
#ifdef CONFIG_MEMORY_FAILURE
	struct memory_failure_stats mf_stats;
#endif
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}
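/*
 * Illustrative sketch, not part of this header: a pfn lies in a node's span
 * iff it falls in [node_start_pfn, pgdat_end_pfn), mirroring zone_spans_pfn()
 * above (spans may contain holes, so this does not imply the pfn is present).
 * The example_* name is hypothetical.
 */
static inline bool example_pgdat_spans_pfn(pg_data_t *pgdat, unsigned long pfn)
{
	return pfn >= pgdat->node_start_pfn && pfn < pgdat_end_pfn(pgdat);
}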
1561  */
1562 enum meminit_context {
1563 	MEMINIT_EARLY,
1564 	MEMINIT_HOTPLUG,
1565 };
1566
1567 extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
1568 				      unsigned long size);
1569
1570 extern void lruvec_init(struct lruvec *lruvec);
1571
1572 static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
1573 {
1574 #ifdef CONFIG_MEMCG
1575 	return lruvec->pgdat;
1576 #else
1577 	return container_of(lruvec, struct pglist_data, __lruvec);
1578 #endif
1579 }
1580
1581 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
1582 int local_memory_node(int node_id);
1583 #else
1584 static inline int local_memory_node(int node_id) { return node_id; }
1585 #endif
1586
1587 /*
1588  * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
1589  */
1590 #define zone_idx(zone)	((zone) - (zone)->zone_pgdat->node_zones)
1591
1592 #ifdef CONFIG_ZONE_DEVICE
1593 static inline bool zone_is_zone_device(const struct zone *zone)
1594 {
1595 	return zone_idx(zone) == ZONE_DEVICE;
1596 }
1597 #else
1598 static inline bool zone_is_zone_device(const struct zone *zone)
1599 {
1600 	return false;
1601 }
1602 #endif
1603
1604 /*
1605  * Returns true if a zone has pages managed by the buddy allocator.
1606  * All the reclaim decisions have to use this function rather than
1607  * populated_zone(). If the whole zone is reserved then we can easily
1608  * end up with populated_zone() && !managed_zone().
1609  */
1610 static inline bool managed_zone(const struct zone *zone)
1611 {
1612 	return zone_managed_pages(zone);
1613 }
1614
1615 /* Returns true if a zone has memory */
1616 static inline bool populated_zone(const struct zone *zone)
1617 {
1618 	return zone->present_pages;
1619 }
1620
1621 #ifdef CONFIG_NUMA
1622 static inline int zone_to_nid(const struct zone *zone)
1623 {
1624 	return zone->node;
1625 }
1626
1627 static inline void zone_set_nid(struct zone *zone, int nid)
1628 {
1629 	zone->node = nid;
1630 }
1631 #else
1632 static inline int zone_to_nid(const struct zone *zone)
1633 {
1634 	return 0;
1635 }
1636
1637 static inline void zone_set_nid(struct zone *zone, int nid) {}
1638 #endif
1639
1640 extern int movable_zone;
1641
1642 static inline int is_highmem_idx(enum zone_type idx)
1643 {
1644 #ifdef CONFIG_HIGHMEM
1645 	return (idx == ZONE_HIGHMEM ||
1646 		(idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
1647 #else
1648 	return 0;
1649 #endif
1650 }
1651
1652 /**
1653  * is_highmem - helper function to quickly check if a struct zone is a
1654  *		highmem zone or not. This is an attempt to keep references
1655  *		to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
1656 * @zone: pointer to struct zone variable 1657 * Return: 1 for a highmem zone, 0 otherwise 1658 */ 1659 static inline int is_highmem(const struct zone *zone) 1660 { 1661 return is_highmem_idx(zone_idx(zone)); 1662 } 1663 1664 bool has_managed_zone(enum zone_type zone); 1665 static inline bool has_managed_dma(void) 1666 { 1667 #ifdef CONFIG_ZONE_DMA 1668 return has_managed_zone(ZONE_DMA); 1669 #else 1670 return false; 1671 #endif 1672 } 1673 1674 1675 #ifndef CONFIG_NUMA 1676 1677 extern struct pglist_data contig_page_data; 1678 static inline struct pglist_data *NODE_DATA(int nid) 1679 { 1680 return &contig_page_data; 1681 } 1682 1683 #else /* CONFIG_NUMA */ 1684 1685 #include <asm/mmzone.h> 1686 1687 #endif /* !CONFIG_NUMA */ 1688 1689 extern struct pglist_data *first_online_pgdat(void); 1690 extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); 1691 extern struct zone *next_zone(struct zone *zone); 1692 1693 /** 1694 * for_each_online_pgdat - helper macro to iterate over all online nodes 1695 * @pgdat: pointer to a pg_data_t variable 1696 */ 1697 #define for_each_online_pgdat(pgdat) \ 1698 for (pgdat = first_online_pgdat(); \ 1699 pgdat; \ 1700 pgdat = next_online_pgdat(pgdat)) 1701 /** 1702 * for_each_zone - helper macro to iterate over all memory zones 1703 * @zone: pointer to struct zone variable 1704 * 1705 * The user only needs to declare the zone variable, for_each_zone 1706 * fills it in. 1707 */ 1708 #define for_each_zone(zone) \ 1709 for (zone = (first_online_pgdat())->node_zones; \ 1710 zone; \ 1711 zone = next_zone(zone)) 1712 1713 #define for_each_populated_zone(zone) \ 1714 for (zone = (first_online_pgdat())->node_zones; \ 1715 zone; \ 1716 zone = next_zone(zone)) \ 1717 if (!populated_zone(zone)) \ 1718 ; /* do nothing */ \ 1719 else 1720 1721 static inline struct zone *zonelist_zone(struct zoneref *zoneref) 1722 { 1723 return zoneref->zone; 1724 } 1725 1726 static inline int zonelist_zone_idx(const struct zoneref *zoneref) 1727 { 1728 return zoneref->zone_idx; 1729 } 1730 1731 static inline int zonelist_node_idx(const struct zoneref *zoneref) 1732 { 1733 return zone_to_nid(zoneref->zone); 1734 } 1735 1736 struct zoneref *__next_zones_zonelist(struct zoneref *z, 1737 enum zone_type highest_zoneidx, 1738 nodemask_t *nodes); 1739 1740 /** 1741 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point 1742 * @z: The cursor used as a starting point for the search 1743 * @highest_zoneidx: The zone index of the highest zone to return 1744 * @nodes: An optional nodemask to filter the zonelist with 1745 * 1746 * This function returns the next zone at or below a given zone index that is 1747 * within the allowed nodemask using a cursor as the starting point for the 1748 * search. The zoneref returned is a cursor that represents the current zone 1749 * being examined. It should be advanced by one before calling 1750 * next_zones_zonelist again. 
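 *
 * Example (an illustrative sketch of the cursor pattern; examine() is a
 * hypothetical consumer):
 *
 *	struct zoneref *z = first_zones_zonelist(zonelist, highidx, NULL);
 *
 *	while (zonelist_zone(z)) {
 *		examine(zonelist_zone(z));
 *		z = next_zones_zonelist(++z, highidx, NULL);
 *	}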
1751 * 1752 * Return: the next zone at or below highest_zoneidx within the allowed 1753 * nodemask using a cursor within a zonelist as a starting point 1754 */ 1755 static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, 1756 enum zone_type highest_zoneidx, 1757 nodemask_t *nodes) 1758 { 1759 if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx)) 1760 return z; 1761 return __next_zones_zonelist(z, highest_zoneidx, nodes); 1762 } 1763 1764 /** 1765 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist 1766 * @zonelist: The zonelist to search for a suitable zone 1767 * @highest_zoneidx: The zone index of the highest zone to return 1768 * @nodes: An optional nodemask to filter the zonelist with 1769 * 1770 * This function returns the first zone at or below a given zone index that is 1771 * within the allowed nodemask. The zoneref returned is a cursor that can be 1772 * used to iterate the zonelist with next_zones_zonelist by advancing it by 1773 * one before calling. 1774 * 1775 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is 1776 * never NULL). This may happen either genuinely, or due to concurrent nodemask 1777 * update due to cpuset modification. 1778 * 1779 * Return: Zoneref pointer for the first suitable zone found 1780 */ 1781 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, 1782 enum zone_type highest_zoneidx, 1783 nodemask_t *nodes) 1784 { 1785 return next_zones_zonelist(zonelist->_zonerefs, 1786 highest_zoneidx, nodes); 1787 } 1788 1789 /** 1790 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask 1791 * @zone: The current zone in the iterator 1792 * @z: The current pointer within zonelist->_zonerefs being iterated 1793 * @zlist: The zonelist being iterated 1794 * @highidx: The zone index of the highest zone to return 1795 * @nodemask: Nodemask allowed by the allocator 1796 * 1797 * This iterator iterates though all zones at or below a given zone index and 1798 * within a given nodemask 1799 */ 1800 #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ 1801 for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \ 1802 zone; \ 1803 z = next_zones_zonelist(++z, highidx, nodemask), \ 1804 zone = zonelist_zone(z)) 1805 1806 #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \ 1807 for (zone = zonelist_zone(z); \ 1808 zone; \ 1809 z = next_zones_zonelist(++z, highidx, nodemask), \ 1810 zone = zonelist_zone(z)) 1811 1812 1813 /** 1814 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index 1815 * @zone: The current zone in the iterator 1816 * @z: The current pointer within zonelist->zones being iterated 1817 * @zlist: The zonelist being iterated 1818 * @highidx: The zone index of the highest zone to return 1819 * 1820 * This iterator iterates though all zones at or below a given zone index. 
1821  */
1822 #define for_each_zone_zonelist(zone, z, zlist, highidx) \
1823 	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
1824
1825 /* Whether the 'nodes' are all movable nodes */
1826 static inline bool movable_only_nodes(nodemask_t *nodes)
1827 {
1828 	struct zonelist *zonelist;
1829 	struct zoneref *z;
1830 	int nid;
1831
1832 	if (nodes_empty(*nodes))
1833 		return false;
1834
1835 	/*
1836 	 * We can choose an arbitrary node from the nodemask to get a
1837 	 * zonelist as they are interlinked. We just need to find
1838 	 * at least one zone that can satisfy kernel allocations.
1839 	 */
1840 	nid = first_node(*nodes);
1841 	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
1842 	z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
1843 	return !zonelist_zone(z);
1844 }
1845
1846
1847 #ifdef CONFIG_SPARSEMEM
1848 #include <asm/sparsemem.h>
1849 #endif
1850
1851 #ifdef CONFIG_FLATMEM
1852 #define pfn_to_nid(pfn)		(0)
1853 #endif
1854
1855 #ifdef CONFIG_SPARSEMEM
1856
1857 /*
1858  * PA_SECTION_SHIFT		physical address to/from section number
1859  * PFN_SECTION_SHIFT		pfn to/from section number
1860  */
1861 #define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
1862 #define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
1863
1864 #define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)
1865
1866 #define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
1867 #define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))
1868
1869 #define SECTION_BLOCKFLAGS_BITS \
1870 	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
1871
1872 #if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
1873 #error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE
1874 #endif
1875
1876 static inline unsigned long pfn_to_section_nr(unsigned long pfn)
1877 {
1878 	return pfn >> PFN_SECTION_SHIFT;
1879 }
1880 static inline unsigned long section_nr_to_pfn(unsigned long sec)
1881 {
1882 	return sec << PFN_SECTION_SHIFT;
1883 }
1884
1885 #define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
1886 #define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
1887
1888 #define SUBSECTION_SHIFT	21
1889 #define SUBSECTION_SIZE		(1UL << SUBSECTION_SHIFT)
1890
1891 #define PFN_SUBSECTION_SHIFT	(SUBSECTION_SHIFT - PAGE_SHIFT)
1892 #define PAGES_PER_SUBSECTION	(1UL << PFN_SUBSECTION_SHIFT)
1893 #define PAGE_SUBSECTION_MASK	(~(PAGES_PER_SUBSECTION-1))
1894
1895 #if SUBSECTION_SHIFT > SECTION_SIZE_BITS
1896 #error Subsection size exceeds section size
1897 #else
1898 #define SUBSECTIONS_PER_SECTION	(1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
1899 #endif
1900
1901 #define SUBSECTION_ALIGN_UP(pfn)	ALIGN((pfn), PAGES_PER_SUBSECTION)
1902 #define SUBSECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SUBSECTION_MASK)
1903
1904 struct mem_section_usage {
1905 	struct rcu_head rcu;
1906 #ifdef CONFIG_SPARSEMEM_VMEMMAP
1907 	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
1908 #endif
1909 	/* See declaration of similar field in struct zone */
1910 	unsigned long pageblock_flags[0];
1911 };
1912
1913 void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
1914
1915 struct page;
1916 struct page_ext;
1917 struct mem_section {
1918 	/*
1919 	 * This is, logically, a pointer to an array of struct
1920 	 * pages. However, it is stored with some other magic.
1921 	 * (see sparse.c::sparse_init_one_section())
1922 	 *
1923 	 * Additionally, during early boot we encode the node id of the
1924 	 * section here to guide allocation.
1925 	 * (see sparse.c::memory_present())
1926 	 *
1927 	 * Making it an unsigned long at least forces a cast before it
1928 	 * can be used incorrectly.
1929 	 */
1930 	unsigned long section_mem_map;
1931
1932 	struct mem_section_usage *usage;
1933 #ifdef CONFIG_PAGE_EXTENSION
1934 	/*
1935 	 * With SPARSEMEM, pgdat doesn't have a page_ext pointer; the
1936 	 * section carries it instead. (see page_ext.h about this.)
1937 	 */
1938 	struct page_ext *page_ext;
1939 	unsigned long pad;
1940 #endif
1941 	/*
1942 	 * WARNING: mem_section must be a power-of-2 in size for the
1943 	 * calculation and use of SECTION_ROOT_MASK to make sense.
1944 	 */
1945 };
1946
1947 #ifdef CONFIG_SPARSEMEM_EXTREME
1948 #define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof(struct mem_section))
1949 #else
1950 #define SECTIONS_PER_ROOT	1
1951 #endif
1952
1953 #define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
1954 #define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
1955 #define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
1956
1957 #ifdef CONFIG_SPARSEMEM_EXTREME
1958 extern struct mem_section **mem_section;
1959 #else
1960 extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
1961 #endif
1962
1963 static inline unsigned long *section_to_usemap(struct mem_section *ms)
1964 {
1965 	return ms->usage->pageblock_flags;
1966 }
1967
1968 static inline struct mem_section *__nr_to_section(unsigned long nr)
1969 {
1970 	unsigned long root = SECTION_NR_TO_ROOT(nr);
1971
1972 	if (unlikely(root >= NR_SECTION_ROOTS))
1973 		return NULL;
1974
1975 #ifdef CONFIG_SPARSEMEM_EXTREME
1976 	if (!mem_section || !mem_section[root])
1977 		return NULL;
1978 #endif
1979 	return &mem_section[root][nr & SECTION_ROOT_MASK];
1980 }
1981 extern size_t mem_section_usage_size(void);
1982
1983 /*
1984  * We use the lower bits of the mem_map pointer to store
1985  * a little bit of information. The pointer is calculated
1986  * as mem_map - section_nr_to_pfn(pnum). The result is
1987  * aligned to the minimum alignment of the two values:
1988  *   1. All mem_map arrays are page-aligned.
1989  *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
1990  *      lowest bits. PFN_SECTION_SHIFT is arch-specific
1991  *      (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
1992  *      worst combination is powerpc with 256k pages,
1993  *      which results in PFN_SECTION_SHIFT equal to 6.
1994  * To sum it up, at least 6 bits are available on all architectures.
1995  * However, more than 6 bits may be available on architectures other
1996  * than powerpc (e.g. 15 bits are available on x86_64, 13 bits with
1997  * the worst case of 64K pages on arm64), so the extra bits may be
1998  * used as long as their use is never required on powerpc.
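 *
 * Sketch of the two encodings of section_mem_map (illustrative only):
 *
 *	early boot:  section_mem_map = nid << SECTION_NID_SHIFT
 *	after init:  section_mem_map = (unsigned long)(mem_map -
 *	             section_nr_to_pfn(pnum)) | flag bits below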
1999 */ 2000 enum { 2001 SECTION_MARKED_PRESENT_BIT, 2002 SECTION_HAS_MEM_MAP_BIT, 2003 SECTION_IS_ONLINE_BIT, 2004 SECTION_IS_EARLY_BIT, 2005 #ifdef CONFIG_ZONE_DEVICE 2006 SECTION_TAINT_ZONE_DEVICE_BIT, 2007 #endif 2008 #ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT 2009 SECTION_IS_VMEMMAP_PREINIT_BIT, 2010 #endif 2011 SECTION_MAP_LAST_BIT, 2012 }; 2013 2014 #define SECTION_MARKED_PRESENT BIT(SECTION_MARKED_PRESENT_BIT) 2015 #define SECTION_HAS_MEM_MAP BIT(SECTION_HAS_MEM_MAP_BIT) 2016 #define SECTION_IS_ONLINE BIT(SECTION_IS_ONLINE_BIT) 2017 #define SECTION_IS_EARLY BIT(SECTION_IS_EARLY_BIT) 2018 #ifdef CONFIG_ZONE_DEVICE 2019 #define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT) 2020 #endif 2021 #ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT 2022 #define SECTION_IS_VMEMMAP_PREINIT BIT(SECTION_IS_VMEMMAP_PREINIT_BIT) 2023 #endif 2024 #define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1)) 2025 #define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT 2026 2027 static inline struct page *__section_mem_map_addr(struct mem_section *section) 2028 { 2029 unsigned long map = section->section_mem_map; 2030 map &= SECTION_MAP_MASK; 2031 return (struct page *)map; 2032 } 2033 2034 static inline int present_section(const struct mem_section *section) 2035 { 2036 return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); 2037 } 2038 2039 static inline int present_section_nr(unsigned long nr) 2040 { 2041 return present_section(__nr_to_section(nr)); 2042 } 2043 2044 static inline int valid_section(const struct mem_section *section) 2045 { 2046 return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); 2047 } 2048 2049 static inline int early_section(const struct mem_section *section) 2050 { 2051 return (section && (section->section_mem_map & SECTION_IS_EARLY)); 2052 } 2053 2054 static inline int valid_section_nr(unsigned long nr) 2055 { 2056 return valid_section(__nr_to_section(nr)); 2057 } 2058 2059 static inline int online_section(const struct mem_section *section) 2060 { 2061 return (section && (section->section_mem_map & SECTION_IS_ONLINE)); 2062 } 2063 2064 #ifdef CONFIG_ZONE_DEVICE 2065 static inline int online_device_section(const struct mem_section *section) 2066 { 2067 unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE; 2068 2069 return section && ((section->section_mem_map & flags) == flags); 2070 } 2071 #else 2072 static inline int online_device_section(const struct mem_section *section) 2073 { 2074 return 0; 2075 } 2076 #endif 2077 2078 #ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT 2079 static inline int preinited_vmemmap_section(const struct mem_section *section) 2080 { 2081 return (section && 2082 (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT)); 2083 } 2084 2085 void sparse_vmemmap_init_nid_early(int nid); 2086 void sparse_vmemmap_init_nid_late(int nid); 2087 2088 #else 2089 static inline int preinited_vmemmap_section(const struct mem_section *section) 2090 { 2091 return 0; 2092 } 2093 static inline void sparse_vmemmap_init_nid_early(int nid) 2094 { 2095 } 2096 2097 static inline void sparse_vmemmap_init_nid_late(int nid) 2098 { 2099 } 2100 #endif 2101 2102 static inline int online_section_nr(unsigned long nr) 2103 { 2104 return online_section(__nr_to_section(nr)); 2105 } 2106 2107 #ifdef CONFIG_MEMORY_HOTPLUG 2108 void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn); 2109 void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn); 2110 #endif 2111 2112 static inline struct mem_section *__pfn_to_section(unsigned 
long pfn)
2113 {
2114 	return __nr_to_section(pfn_to_section_nr(pfn));
2115 }
2116
2117 extern unsigned long __highest_present_section_nr;
2118
2119 static inline int subsection_map_index(unsigned long pfn)
2120 {
2121 	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
2122 }
2123
2124 #ifdef CONFIG_SPARSEMEM_VMEMMAP
2125 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
2126 {
2127 	int idx = subsection_map_index(pfn);
2128 	struct mem_section_usage *usage = READ_ONCE(ms->usage);
2129
2130 	return usage ? test_bit(idx, usage->subsection_map) : 0;
2131 }
2132
2133 static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
2134 {
2135 	struct mem_section_usage *usage = READ_ONCE(ms->usage);
2136 	int idx = subsection_map_index(*pfn);
2137 	unsigned long bit;
2138
2139 	if (!usage)
2140 		return false;
2141
2142 	if (test_bit(idx, usage->subsection_map))
2143 		return true;
2144
2145 	/* Find the next subsection that exists */
2146 	bit = find_next_bit(usage->subsection_map, SUBSECTIONS_PER_SECTION, idx);
2147 	if (bit == SUBSECTIONS_PER_SECTION)
2148 		return false;
2149
2150 	*pfn = (*pfn & PAGE_SECTION_MASK) + (bit * PAGES_PER_SUBSECTION);
2151 	return true;
2152 }
2153 #else
2154 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
2155 {
2156 	return 1;
2157 }
2158
2159 static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
2160 {
2161 	return true;
2162 }
2163 #endif
2164
2165 void sparse_init_early_section(int nid, struct page *map, unsigned long pnum,
2166 			       unsigned long flags);
2167
2168 #ifndef CONFIG_HAVE_ARCH_PFN_VALID
2169 /**
2170  * pfn_valid - check if there is a valid memory map entry for a PFN
2171  * @pfn: the page frame number to check
2172  *
2173  * Check if there is a valid memory map entry aka struct page for the @pfn.
2174  * Note that the availability of the memory map entry does not imply that
2175  * there is actual usable memory at that @pfn. The struct page may
2176  * represent a hole or an unusable page frame.
2177  *
2178  * Return: 1 for PFNs that have memory map entries and 0 otherwise
2179  */
2180 static inline int pfn_valid(unsigned long pfn)
2181 {
2182 	struct mem_section *ms;
2183 	int ret;
2184
2185 	/*
2186 	 * Ensure the upper PAGE_SHIFT bits are clear in the
2187 	 * pfn. Else it might lead to false positives when
2188 	 * some of the upper bits are set, but the lower bits
2189 	 * match a valid pfn.
2190 	 */
2191 	if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
2192 		return 0;
2193
2194 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
2195 		return 0;
2196 	ms = __pfn_to_section(pfn);
2197 	rcu_read_lock_sched();
2198 	if (!valid_section(ms)) {
2199 		rcu_read_unlock_sched();
2200 		return 0;
2201 	}
2202 	/*
2203 	 * Traditionally early sections always returned pfn_valid() for
2204 	 * the entire section-sized span.
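	 * Without CONFIG_SPARSEMEM_VMEMMAP there is no subsection map and
	 * pfn_section_valid() above is trivially true, so the subsection
	 * check below only matters for VMEMMAP-backed, hot-added memory.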
2205 	 */
2206 	ret = early_section(ms) || pfn_section_valid(ms, pfn);
2207 	rcu_read_unlock_sched();
2208
2209 	return ret;
2210 }
2211
2212 /* Returns end_pfn or higher if no valid PFN remains in the range */
2213 static inline unsigned long first_valid_pfn(unsigned long pfn, unsigned long end_pfn)
2214 {
2215 	unsigned long nr = pfn_to_section_nr(pfn);
2216
2217 	rcu_read_lock_sched();
2218
2219 	while (nr <= __highest_present_section_nr && pfn < end_pfn) {
2220 		struct mem_section *ms = __pfn_to_section(pfn);
2221
2222 		if (valid_section(ms) &&
2223 		    (early_section(ms) || pfn_section_first_valid(ms, &pfn))) {
2224 			rcu_read_unlock_sched();
2225 			return pfn;
2226 		}
2227
2228 		/* Nothing left in this section? Skip to the next section. */
2229 		nr++;
2230 		pfn = section_nr_to_pfn(nr);
2231 	}
2232
2233 	rcu_read_unlock_sched();
2234 	return end_pfn;
2235 }
2236
2237 static inline unsigned long next_valid_pfn(unsigned long pfn, unsigned long end_pfn)
2238 {
2239 	pfn++;
2240
2241 	if (pfn >= end_pfn)
2242 		return end_pfn;
2243
2244 	/*
2245 	 * Either every PFN within the section (or subsection for VMEMMAP) is
2246 	 * valid, or none of them are. So there's no point repeating the check
2247 	 * for every PFN; only call first_valid_pfn() again when crossing a
2248 	 * (sub)section boundary (i.e. !(pfn & ~PAGE_{SUB,}SECTION_MASK)).
2249 	 */
2250 	if (pfn & ~(IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP) ?
2251 		    PAGE_SUBSECTION_MASK : PAGE_SECTION_MASK))
2252 		return pfn;
2253
2254 	return first_valid_pfn(pfn, end_pfn);
2255 }
2256
2257
2258 #define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn)			\
2259 	for ((_pfn) = first_valid_pfn((_start_pfn), (_end_pfn));	\
2260 	     (_pfn) < (_end_pfn);					\
2261 	     (_pfn) = next_valid_pfn((_pfn), (_end_pfn)))
2262
2263 #endif /* !CONFIG_HAVE_ARCH_PFN_VALID */
2264
2265 static inline int pfn_in_present_section(unsigned long pfn)
2266 {
2267 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
2268 		return 0;
2269 	return present_section(__pfn_to_section(pfn));
2270 }
2271
2272 static inline unsigned long next_present_section_nr(unsigned long section_nr)
2273 {
2274 	while (++section_nr <= __highest_present_section_nr) {
2275 		if (present_section_nr(section_nr))
2276 			return section_nr;
2277 	}
2278
2279 	return -1;
2280 }
2281
2282 #define for_each_present_section_nr(start, section_nr)		\
2283 	for (section_nr = next_present_section_nr(start - 1);	\
2284 	     section_nr != -1;					\
2285 	     section_nr = next_present_section_nr(section_nr))
2286
2287 /*
2288  * These are _only_ used during initialisation, therefore they
2289  * can use __initdata ... They could have names to indicate
2290  * this restriction.
2291  */
2292 #ifdef CONFIG_NUMA
2293 #define pfn_to_nid(pfn)						\
2294 ({								\
2295 	unsigned long __pfn_to_nid_pfn = (pfn);			\
2296 	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));		\
2297 })
2298 #else
2299 #define pfn_to_nid(pfn)		(0)
2300 #endif
2301
2302 #else /* !CONFIG_SPARSEMEM */
2303 #define sparse_index_init(_sec, _nid)	do {} while (0)
2304 #define sparse_vmemmap_init_nid_early(_nid)	do {} while (0)
2305 #define sparse_vmemmap_init_nid_late(_nid)	do {} while (0)
2306 #define pfn_in_present_section pfn_valid
2307 #define subsection_map_init(_pfn, _nr_pages)	do {} while (0)
2308 #endif /* CONFIG_SPARSEMEM */
2309
2310 /*
2311  * Fallback case for when the architecture provides its own pfn_valid() but
2312  * not a corresponding for_each_valid_pfn().
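 *
 * Example (illustrative; do_something() is a hypothetical consumer):
 *
 *	unsigned long pfn;
 *
 *	for_each_valid_pfn(pfn, start_pfn, end_pfn)
 *		do_something(pfn_to_page(pfn));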
2313 */ 2314 #ifndef for_each_valid_pfn 2315 #define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn) \ 2316 for ((_pfn) = (_start_pfn); (_pfn) < (_end_pfn); (_pfn)++) \ 2317 if (pfn_valid(_pfn)) 2318 #endif 2319 2320 #endif /* !__GENERATING_BOUNDS.H */ 2321 #endif /* !__ASSEMBLY__ */ 2322 #endif /* _LINUX_MMZONE_H */ 2323