1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 *
4 * Manages the free list; the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/highmem.h>
20 #include <linux/interrupt.h>
21 #include <linux/jiffies.h>
22 #include <linux/compiler.h>
23 #include <linux/kernel.h>
24 #include <linux/kasan.h>
25 #include <linux/kmsan.h>
26 #include <linux/module.h>
27 #include <linux/suspend.h>
28 #include <linux/ratelimit.h>
29 #include <linux/oom.h>
30 #include <linux/topology.h>
31 #include <linux/sysctl.h>
32 #include <linux/cpu.h>
33 #include <linux/cpuset.h>
34 #include <linux/folio_batch.h>
35 #include <linux/memory_hotplug.h>
36 #include <linux/nodemask.h>
37 #include <linux/vmstat.h>
38 #include <linux/fault-inject.h>
39 #include <linux/compaction.h>
40 #include <trace/events/kmem.h>
41 #include <trace/events/oom.h>
42 #include <linux/prefetch.h>
43 #include <linux/mm_inline.h>
44 #include <linux/mmu_notifier.h>
45 #include <linux/migrate.h>
46 #include <linux/sched/mm.h>
47 #include <linux/page_owner.h>
48 #include <linux/page_table_check.h>
49 #include <linux/memcontrol.h>
50 #include <linux/ftrace.h>
51 #include <linux/lockdep.h>
52 #include <linux/psi.h>
53 #include <linux/khugepaged.h>
54 #include <linux/delayacct.h>
55 #include <linux/cacheinfo.h>
56 #include <linux/pgalloc_tag.h>
57 #include <asm/div64.h>
58 #include "internal.h"
59 #include "shuffle.h"
60 #include "page_reporting.h"
61
62 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
63 typedef int __bitwise fpi_t;
64
65 /* No special request */
66 #define FPI_NONE ((__force fpi_t)0)
67
68 /*
69 * Skip free page reporting notification for the (possibly merged) page.
70 * This does not hinder free page reporting from grabbing the page,
71 * reporting it and marking it "reported" - it only skips notifying
72 * the free page reporting infrastructure about a newly freed page. For
73 * example, used when temporarily pulling a page from a freelist and
74 * putting it back unmodified.
75 */
76 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0))
77
78 /*
79 * Place the (possibly merged) page to the tail of the freelist. Will ignore
80 * page shuffling (relevant code - e.g., memory onlining - is expected to
81 * shuffle the whole zone).
82 *
83 * Note: No code should rely on this flag for correctness - it's purely
84 * to allow for optimizations when handing back either fresh pages
85 * (memory onlining) or untouched pages (page isolation, free page
86 * reporting).
87 */
88 #define FPI_TO_TAIL ((__force fpi_t)BIT(1))
89
90 /* Free the page without taking locks. Rely on trylock only. */
91 #define FPI_TRYLOCK ((__force fpi_t)BIT(2))
92
93 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
94 static DEFINE_MUTEX(pcp_batch_high_lock);
95 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
96
97 /*
98 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
99 * a migration causing the wrong PCP to be locked and remote memory being
100 * potentially allocated, pin the task to the CPU for the lookup+lock.
101 * preempt_disable is used on !RT because it is faster than migrate_disable.
102 * migrate_disable is used on RT because otherwise RT spinlock usage is
103 * interfered with and a high priority task cannot preempt the allocator.
104 */
105 #ifndef CONFIG_PREEMPT_RT
106 #define pcpu_task_pin() preempt_disable()
107 #define pcpu_task_unpin() preempt_enable()
108 #else
109 #define pcpu_task_pin() migrate_disable()
110 #define pcpu_task_unpin() migrate_enable()
111 #endif
112
113 /*
114 * A helper to lookup and trylock pcp with embedded spinlock.
115 * The return value should be used with the unlock helper.
116 * NULL return value means the trylock failed.
117 */
118 #ifdef CONFIG_SMP
119 #define pcp_spin_trylock(ptr) \
120 ({ \
121 struct per_cpu_pages *_ret; \
122 pcpu_task_pin(); \
123 _ret = this_cpu_ptr(ptr); \
124 if (!spin_trylock(&_ret->lock)) { \
125 pcpu_task_unpin(); \
126 _ret = NULL; \
127 } \
128 _ret; \
129 })
130
131 #define pcp_spin_unlock(ptr) \
132 ({ \
133 spin_unlock(&ptr->lock); \
134 pcpu_task_unpin(); \
135 })
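/*
 * Illustrative sketch (not part of the upstream source): the intended usage
 * pattern for the trylock/unlock pair above, assuming the caller holds a
 * zone pointer and reaches the per-CPU pageset through zone->per_cpu_pageset
 * as the allocator fast paths do:
 *
 *	struct per_cpu_pages *pcp;
 *
 *	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 *	if (!pcp)
 *		return NULL;	(fall back to the buddy slow path)
 *	... operate on pcp->lists and pcp->count ...
 *	pcp_spin_unlock(pcp);
 *
 * The task stays pinned between the two calls, so the pcp pointer cannot go
 * stale due to migration.
 */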
136
137 /*
138 * On CONFIG_SMP=n the UP implementation of spin_trylock() never fails and thus
139 * is not compatible with our locking scheme. However, we do not need the pcp lists for
140 * scalability in the first place, so just make all the trylocks fail and take
141 * the slow path unconditionally.
142 */
143 #else
144 #define pcp_spin_trylock(ptr) \
145 NULL
146
147 #define pcp_spin_unlock(ptr) \
148 BUG_ON(1)
149 #endif
150
151 /*
152 * In some cases we do not need to pin the task to the CPU because we are
153 * already given a specific cpu's pcp pointer.
154 */
155 #define pcp_spin_lock_nopin(ptr) \
156 spin_lock(&(ptr)->lock)
157 #define pcp_spin_unlock_nopin(ptr) \
158 spin_unlock(&(ptr)->lock)
159
160 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
161 DEFINE_PER_CPU(int, numa_node);
162 EXPORT_PER_CPU_SYMBOL(numa_node);
163 #endif
164
165 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
166
167 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
168 /*
169 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
170 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
171 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
172 * defined in <linux/topology.h>.
173 */
174 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
175 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
176 #endif
177
178 static DEFINE_MUTEX(pcpu_drain_mutex);
179
180 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
181 volatile unsigned long latent_entropy __latent_entropy;
182 EXPORT_SYMBOL(latent_entropy);
183 #endif
184
185 /*
186 * Array of node states.
187 */
188 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
189 [N_POSSIBLE] = NODE_MASK_ALL,
190 [N_ONLINE] = { { [0] = 1UL } },
191 #ifndef CONFIG_NUMA
192 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
193 #ifdef CONFIG_HIGHMEM
194 [N_HIGH_MEMORY] = { { [0] = 1UL } },
195 #endif
196 [N_MEMORY] = { { [0] = 1UL } },
197 [N_CPU] = { { [0] = 1UL } },
198 #endif /* NUMA */
199 };
200 EXPORT_SYMBOL(node_states);
201
202 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
203
204 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
205 unsigned int pageblock_order __read_mostly;
206 #endif
207
208 static void __free_pages_ok(struct page *page, unsigned int order,
209 fpi_t fpi_flags);
210 static void reserve_highatomic_pageblock(struct page *page, int order,
211 struct zone *zone);
212
213 /*
214 * results with 256, 32 in the lowmem_reserve sysctl:
215 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
216 * 1G machine -> (16M dma, 784M normal, 224M high)
217 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
218 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
219 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
220 *
221 * TBD: should special case ZONE_DMA32 machines here - in those we normally
222 * don't need any ZONE_NORMAL reservation
223 */
224 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
225 #ifdef CONFIG_ZONE_DMA
226 [ZONE_DMA] = 256,
227 #endif
228 #ifdef CONFIG_ZONE_DMA32
229 [ZONE_DMA32] = 256,
230 #endif
231 [ZONE_NORMAL] = 32,
232 #ifdef CONFIG_HIGHMEM
233 [ZONE_HIGHMEM] = 0,
234 #endif
235 [ZONE_MOVABLE] = 0,
236 };
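/*
 * Worked example (illustrative only) for the 1G split quoted above with the
 * default ratios: a ZONE_NORMAL allocation reserves 784M / 256 ~= 3M in
 * ZONE_DMA, while a ZONE_HIGHMEM allocation reserves 224M / 32 = 7M in
 * ZONE_NORMAL and (224M + 784M) / 256 ~= 3.9M in ZONE_DMA. In general, the
 * reserve kept in a lower zone against allocations that could have used a
 * higher zone is the number of pages managed by the higher zones divided by
 * the lower zone's ratio.
 */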
237
238 char * const zone_names[MAX_NR_ZONES] = {
239 #ifdef CONFIG_ZONE_DMA
240 "DMA",
241 #endif
242 #ifdef CONFIG_ZONE_DMA32
243 "DMA32",
244 #endif
245 "Normal",
246 #ifdef CONFIG_HIGHMEM
247 "HighMem",
248 #endif
249 "Movable",
250 #ifdef CONFIG_ZONE_DEVICE
251 "Device",
252 #endif
253 };
254
255 const char * const migratetype_names[MIGRATE_TYPES] = {
256 "Unmovable",
257 "Movable",
258 "Reclaimable",
259 "HighAtomic",
260 #ifdef CONFIG_CMA
261 "CMA",
262 #endif
263 #ifdef CONFIG_MEMORY_ISOLATION
264 "Isolate",
265 #endif
266 };
267
268 int min_free_kbytes = 1024;
269 int user_min_free_kbytes = -1;
270 static int watermark_boost_factor __read_mostly = 15000;
271 static int watermark_scale_factor = 10;
272 int defrag_mode;
273
274 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
275 int movable_zone;
276 EXPORT_SYMBOL(movable_zone);
277
278 #if MAX_NUMNODES > 1
279 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
280 unsigned int nr_online_nodes __read_mostly = 1;
281 EXPORT_SYMBOL(nr_node_ids);
282 EXPORT_SYMBOL(nr_online_nodes);
283 #endif
284
285 static bool page_contains_unaccepted(struct page *page, unsigned int order);
286 static bool cond_accept_memory(struct zone *zone, unsigned int order,
287 int alloc_flags);
288 static bool __free_unaccepted(struct page *page);
289
290 int page_group_by_mobility_disabled __read_mostly;
291
292 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
293 /*
294 * During boot we initialize deferred pages on-demand, as needed, but once
295 * page_alloc_init_late() has finished, the deferred pages are all initialized,
296 * and we can permanently disable that path.
297 */
298 DEFINE_STATIC_KEY_TRUE(deferred_pages);
299
300 /*
301 * deferred_grow_zone() is __init, but it is called from
302 * get_page_from_freelist() during early boot until deferred_pages permanently
303 * disables this call. This is why we have a refdata wrapper to avoid the warning,
304 * and to ensure that the function body gets unloaded.
305 */
306 static bool __ref
307 _deferred_grow_zone(struct zone *zone, unsigned int order)
308 {
309 return deferred_grow_zone(zone, order);
310 }
311 #else
312 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
313 {
314 return false;
315 }
316 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
317
318 /* Return a pointer to the bitmap storing bits affecting a block of pages */
319 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
320 unsigned long pfn)
321 {
322 #ifdef CONFIG_SPARSEMEM
323 return section_to_usemap(__pfn_to_section(pfn));
324 #else
325 return page_zone(page)->pageblock_flags;
326 #endif /* CONFIG_SPARSEMEM */
327 }
328
329 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
330 {
331 #ifdef CONFIG_SPARSEMEM
332 pfn &= (PAGES_PER_SECTION-1);
333 #else
334 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
335 #endif /* CONFIG_SPARSEMEM */
336 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
337 }
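/*
 * Worked example (illustrative only, assuming CONFIG_SPARSEMEM, a pageblock
 * order of 9 and NR_PAGEBLOCK_BITS == 4): for a pfn at offset 0x2600 within
 * its section, the pageblock index is 0x2600 >> 9 = 19, so that block's
 * flags start at bit 19 * 4 = 76 of the section's usemap, i.e. bit 12 of
 * the second unsigned long on a 64-bit kernel.
 */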
338
339 static __always_inline bool is_standalone_pb_bit(enum pageblock_bits pb_bit)
340 {
341 return pb_bit >= PB_compact_skip && pb_bit < __NR_PAGEBLOCK_BITS;
342 }
343
344 static __always_inline void
345 get_pfnblock_bitmap_bitidx(const struct page *page, unsigned long pfn,
346 unsigned long **bitmap_word, unsigned long *bitidx)
347 {
348 unsigned long *bitmap;
349 unsigned long word_bitidx;
350
351 #ifdef CONFIG_MEMORY_ISOLATION
352 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8);
353 #else
354 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
355 #endif
356 BUILD_BUG_ON(__MIGRATE_TYPE_END > MIGRATETYPE_MASK);
357 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
358
359 bitmap = get_pageblock_bitmap(page, pfn);
360 *bitidx = pfn_to_bitidx(page, pfn);
361 word_bitidx = *bitidx / BITS_PER_LONG;
362 *bitidx &= (BITS_PER_LONG - 1);
363 *bitmap_word = &bitmap[word_bitidx];
364 }
365
366
367 /**
368 * __get_pfnblock_flags_mask - Return the requested group of flags for
369 * a pageblock_nr_pages block of pages
370 * @page: The page within the block of interest
371 * @pfn: The target page frame number
372 * @mask: mask of bits that the caller is interested in
373 *
374 * Return: pageblock_bits flags
375 */
376 static unsigned long __get_pfnblock_flags_mask(const struct page *page,
377 unsigned long pfn,
378 unsigned long mask)
379 {
380 unsigned long *bitmap_word;
381 unsigned long bitidx;
382 unsigned long word;
383
384 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
385 /*
386 * This races, without locks, with set_pfnblock_migratetype(). Ensure
387 * a consistent read of the memory array, so that results, even though
388 * racy, are not corrupted.
389 */
390 word = READ_ONCE(*bitmap_word);
391 return (word >> bitidx) & mask;
392 }
393
394 /**
395 * get_pfnblock_bit - Check if a standalone bit of a pageblock is set
396 * @page: The page within the block of interest
397 * @pfn: The target page frame number
398 * @pb_bit: pageblock bit to check
399 *
400 * Return: true if the bit is set, otherwise false
401 */
402 bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
403 enum pageblock_bits pb_bit)
404 {
405 unsigned long *bitmap_word;
406 unsigned long bitidx;
407
408 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
409 return false;
410
411 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
412
413 return test_bit(bitidx + pb_bit, bitmap_word);
414 }
415
416 /**
417 * get_pfnblock_migratetype - Return the migratetype of a pageblock
418 * @page: The page within the block of interest
419 * @pfn: The target page frame number
420 *
421 * Return: The migratetype of the pageblock
422 *
423 * Use get_pfnblock_migratetype() if caller already has both @page and @pfn
424 * to save a call to page_to_pfn().
425 */
426 __always_inline enum migratetype
427 get_pfnblock_migratetype(const struct page *page, unsigned long pfn)
428 {
429 unsigned long mask = MIGRATETYPE_AND_ISO_MASK;
430 unsigned long flags;
431
432 flags = __get_pfnblock_flags_mask(page, pfn, mask);
433
434 #ifdef CONFIG_MEMORY_ISOLATION
435 if (flags & BIT(PB_migrate_isolate))
436 return MIGRATE_ISOLATE;
437 #endif
438 return flags & MIGRATETYPE_MASK;
439 }
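/*
 * Example (illustrative sketch, not from the upstream source): a caller that
 * already knows the pfn can query the block's migratetype without paying for
 * another page_to_pfn() conversion; with CONFIG_CMA enabled:
 *
 *	unsigned long pfn = page_to_pfn(page);
 *	...
 *	if (get_pfnblock_migratetype(page, pfn) == MIGRATE_CMA)
 *		... treat the block as CMA-owned ...
 *
 * Isolated blocks are reported as MIGRATE_ISOLATE regardless of the
 * migratetype stored in the low bits.
 */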
440
441 /**
442 * __set_pfnblock_flags_mask - Set the requested group of flags for
443 * a pageblock_nr_pages block of pages
444 * @page: The page within the block of interest
445 * @pfn: The target page frame number
446 * @flags: The flags to set
447 * @mask: mask of bits that the caller is interested in
448 */
449 static void __set_pfnblock_flags_mask(struct page *page, unsigned long pfn,
450 unsigned long flags, unsigned long mask)
451 {
452 unsigned long *bitmap_word;
453 unsigned long bitidx;
454 unsigned long word;
455
456 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
457
458 mask <<= bitidx;
459 flags <<= bitidx;
460
461 word = READ_ONCE(*bitmap_word);
462 do {
463 } while (!try_cmpxchg(bitmap_word, &word, (word & ~mask) | flags));
464 }
465
466 /**
467 * set_pfnblock_bit - Set a standalone bit of a pageblock
468 * @page: The page within the block of interest
469 * @pfn: The target page frame number
470 * @pb_bit: pageblock bit to set
471 */
472 void set_pfnblock_bit(const struct page *page, unsigned long pfn,
473 enum pageblock_bits pb_bit)
474 {
475 unsigned long *bitmap_word;
476 unsigned long bitidx;
477
478 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
479 return;
480
481 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
482
483 set_bit(bitidx + pb_bit, bitmap_word);
484 }
485
486 /**
487 * clear_pfnblock_bit - Clear a standalone bit of a pageblock
488 * @page: The page within the block of interest
489 * @pfn: The target page frame number
490 * @pb_bit: pageblock bit to clear
491 */
492 void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
493 enum pageblock_bits pb_bit)
494 {
495 unsigned long *bitmap_word;
496 unsigned long bitidx;
497
498 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
499 return;
500
501 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
502
503 clear_bit(bitidx + pb_bit, bitmap_word);
504 }
505
506 /**
507 * set_pageblock_migratetype - Set the migratetype of a pageblock
508 * @page: The page within the block of interest
509 * @migratetype: migratetype to set
510 */
511 static void set_pageblock_migratetype(struct page *page,
512 enum migratetype migratetype)
513 {
514 if (unlikely(page_group_by_mobility_disabled &&
515 migratetype < MIGRATE_PCPTYPES))
516 migratetype = MIGRATE_UNMOVABLE;
517
518 #ifdef CONFIG_MEMORY_ISOLATION
519 if (migratetype == MIGRATE_ISOLATE) {
520 VM_WARN_ONCE(1,
521 "Use set_pageblock_isolate() for pageblock isolation");
522 return;
523 }
524 VM_WARN_ONCE(get_pageblock_isolate(page),
525 "Use clear_pageblock_isolate() to unisolate pageblock");
526 /* MIGRATETYPE_AND_ISO_MASK clears PB_migrate_isolate if it is set */
527 #endif
528 __set_pfnblock_flags_mask(page, page_to_pfn(page),
529 (unsigned long)migratetype,
530 MIGRATETYPE_AND_ISO_MASK);
531 }
532
533 void __meminit init_pageblock_migratetype(struct page *page,
534 enum migratetype migratetype,
535 bool isolate)
536 {
537 unsigned long flags;
538
539 if (unlikely(page_group_by_mobility_disabled &&
540 migratetype < MIGRATE_PCPTYPES))
541 migratetype = MIGRATE_UNMOVABLE;
542
543 flags = migratetype;
544
545 #ifdef CONFIG_MEMORY_ISOLATION
546 if (migratetype == MIGRATE_ISOLATE) {
547 VM_WARN_ONCE(
548 1,
549 "Set isolate=true to isolate pageblock with a migratetype");
550 return;
551 }
552 if (isolate)
553 flags |= BIT(PB_migrate_isolate);
554 #endif
555 __set_pfnblock_flags_mask(page, page_to_pfn(page), flags,
556 MIGRATETYPE_AND_ISO_MASK);
557 }
558
559 #ifdef CONFIG_DEBUG_VM
560 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
561 {
562 int ret;
563 unsigned seq;
564 unsigned long pfn = page_to_pfn(page);
565 unsigned long sp, start_pfn;
566
567 do {
568 seq = zone_span_seqbegin(zone);
569 start_pfn = zone->zone_start_pfn;
570 sp = zone->spanned_pages;
571 ret = !zone_spans_pfn(zone, pfn);
572 } while (zone_span_seqretry(zone, seq));
573
574 if (ret)
575 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
576 pfn, zone_to_nid(zone), zone->name,
577 start_pfn, start_pfn + sp);
578
579 return ret;
580 }
581
582 /*
583 * Temporary debugging check for pages not lying within a given zone.
584 */
585 static bool __maybe_unused bad_range(struct zone *zone, struct page *page)
586 {
587 if (page_outside_zone_boundaries(zone, page))
588 return true;
589 if (zone != page_zone(page))
590 return true;
591
592 return false;
593 }
594 #else
595 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page)
596 {
597 return false;
598 }
599 #endif
600
601 static void bad_page(struct page *page, const char *reason)
602 {
603 static unsigned long resume;
604 static unsigned long nr_shown;
605 static unsigned long nr_unshown;
606
607 /*
608 * Allow a burst of 60 reports, then keep quiet for that minute;
609 * or allow a steady drip of one report per second.
610 */
611 if (nr_shown == 60) {
612 if (time_before(jiffies, resume)) {
613 nr_unshown++;
614 goto out;
615 }
616 if (nr_unshown) {
617 pr_alert(
618 "BUG: Bad page state: %lu messages suppressed\n",
619 nr_unshown);
620 nr_unshown = 0;
621 }
622 nr_shown = 0;
623 }
624 if (nr_shown++ == 0)
625 resume = jiffies + 60 * HZ;
626
627 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
628 current->comm, page_to_pfn(page));
629 dump_page(page, reason);
630
631 print_modules();
632 dump_stack();
633 out:
634 /* Leave bad fields for debug, except PageBuddy could make trouble */
635 if (PageBuddy(page))
636 __ClearPageBuddy(page);
637 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
638 }
639
640 static inline unsigned int order_to_pindex(int migratetype, int order)
641 {
642
643 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
644 bool movable;
645 if (order > PAGE_ALLOC_COSTLY_ORDER) {
646 VM_BUG_ON(!is_pmd_order(order));
647
648 movable = migratetype == MIGRATE_MOVABLE;
649
650 return NR_LOWORDER_PCP_LISTS + movable;
651 }
652 #else
653 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
654 #endif
655
656 return (MIGRATE_PCPTYPES * order) + migratetype;
657 }
658
659 static inline int pindex_to_order(unsigned int pindex)
660 {
661 int order = pindex / MIGRATE_PCPTYPES;
662
663 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
664 if (pindex >= NR_LOWORDER_PCP_LISTS)
665 order = HPAGE_PMD_ORDER;
666 #else
667 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
668 #endif
669
670 return order;
671 }
672
673 static inline bool pcp_allowed_order(unsigned int order)
674 {
675 if (order <= PAGE_ALLOC_COSTLY_ORDER)
676 return true;
677 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
678 if (is_pmd_order(order))
679 return true;
680 #endif
681 return false;
682 }
683
684 /*
685 * Higher-order pages are called "compound pages". They are structured thusly:
686 *
687 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
688 *
689 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
690 * in bit 0 of page->compound_info. The rest of the bits are a pointer to the head page.
691 *
692 * The first tail page's ->compound_order holds the order of allocation.
693 * This usage means that zero-order pages may not be compound.
694 */
695
696 void prep_compound_page(struct page *page, unsigned int order)
697 {
698 int i;
699 int nr_pages = 1 << order;
700
701 __SetPageHead(page);
702 for (i = 1; i < nr_pages; i++)
703 prep_compound_tail(page + i, page, order);
704
705 prep_compound_head(page, order);
706 }
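/*
 * Layout sketch (illustrative only): after prep_compound_page(page, 2) the
 * four struct pages describe one compound page:
 *
 *	page[0]  head page, PG_head set
 *	page[1]  tail, encodes the head pointer with bit 0 set; the first
 *	         tail also carries the compound order (here, 2)
 *	page[2]  tail, encodes the head pointer with bit 0 set
 *	page[3]  tail, encodes the head pointer with bit 0 set
 *
 * compound_head() on any of the tails resolves back to page[0].
 */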
707
708 static inline void set_buddy_order(struct page *page, unsigned int order)
709 {
710 set_page_private(page, order);
711 __SetPageBuddy(page);
712 }
713
714 #ifdef CONFIG_COMPACTION
715 static inline struct capture_control *task_capc(struct zone *zone)
716 {
717 struct capture_control *capc = current->capture_control;
718
719 return unlikely(capc) &&
720 !(current->flags & PF_KTHREAD) &&
721 !capc->page &&
722 capc->cc->zone == zone ? capc : NULL;
723 }
724
725 static inline bool
726 compaction_capture(struct capture_control *capc, struct page *page,
727 int order, int migratetype)
728 {
729 if (!capc || order != capc->cc->order)
730 return false;
731
732 /* Do not accidentally pollute CMA or isolated regions */
733 if (is_migrate_cma(migratetype) ||
734 is_migrate_isolate(migratetype))
735 return false;
736
737 /*
738 * Do not let lower order allocations pollute a movable pageblock
739 * unless compaction is also requesting movable pages.
740 * This might let an unmovable request use a reclaimable pageblock
741 * and vice-versa but no more than normal fallback logic which can
742 * have trouble finding a high-order free page.
743 */
744 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE &&
745 capc->cc->migratetype != MIGRATE_MOVABLE)
746 return false;
747
748 if (migratetype != capc->cc->migratetype)
749 trace_mm_page_alloc_extfrag(page, capc->cc->order, order,
750 capc->cc->migratetype, migratetype);
751
752 capc->page = page;
753 return true;
754 }
755
756 #else
757 static inline struct capture_control *task_capc(struct zone *zone)
758 {
759 return NULL;
760 }
761
762 static inline bool
763 compaction_capture(struct capture_control *capc, struct page *page,
764 int order, int migratetype)
765 {
766 return false;
767 }
768 #endif /* CONFIG_COMPACTION */
769
770 static inline void account_freepages(struct zone *zone, int nr_pages,
771 int migratetype)
772 {
773 lockdep_assert_held(&zone->lock);
774
775 if (is_migrate_isolate(migratetype))
776 return;
777
778 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
779
780 if (is_migrate_cma(migratetype))
781 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
782 else if (migratetype == MIGRATE_HIGHATOMIC)
783 WRITE_ONCE(zone->nr_free_highatomic,
784 zone->nr_free_highatomic + nr_pages);
785 }
786
787 /* Used for pages not on another list */
788 static inline void __add_to_free_list(struct page *page, struct zone *zone,
789 unsigned int order, int migratetype,
790 bool tail)
791 {
792 struct free_area *area = &zone->free_area[order];
793 int nr_pages = 1 << order;
794
795 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
796 "page type is %d, passed migratetype is %d (nr=%d)\n",
797 get_pageblock_migratetype(page), migratetype, nr_pages);
798
799 if (tail)
800 list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
801 else
802 list_add(&page->buddy_list, &area->free_list[migratetype]);
803 area->nr_free++;
804
805 if (order >= pageblock_order && !is_migrate_isolate(migratetype))
806 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
807 }
808
809 /*
810 * Used for pages which are on another list. Move the pages to the tail
811 * of the list - so the moved pages won't immediately be considered for
812 * allocation again (e.g., optimization for memory onlining).
813 */
814 static inline void move_to_free_list(struct page *page, struct zone *zone,
815 unsigned int order, int old_mt, int new_mt)
816 {
817 struct free_area *area = &zone->free_area[order];
818 int nr_pages = 1 << order;
819
820 /* Free page moving can fail, so it happens before the type update */
821 VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt,
822 "page type is %d, passed migratetype is %d (nr=%d)\n",
823 get_pageblock_migratetype(page), old_mt, nr_pages);
824
825 list_move_tail(&page->buddy_list, &area->free_list[new_mt]);
826
827 account_freepages(zone, -nr_pages, old_mt);
828 account_freepages(zone, nr_pages, new_mt);
829
830 if (order >= pageblock_order &&
831 is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) {
832 if (!is_migrate_isolate(old_mt))
833 nr_pages = -nr_pages;
834 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
835 }
836 }
837
838 static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
839 unsigned int order, int migratetype)
840 {
841 int nr_pages = 1 << order;
842
843 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
844 "page type is %d, passed migratetype is %d (nr=%d)\n",
845 get_pageblock_migratetype(page), migratetype, nr_pages);
846
847 /* clear reported state and update reported page count */
848 if (page_reported(page))
849 __ClearPageReported(page);
850
851 list_del(&page->buddy_list);
852 __ClearPageBuddy(page);
853 set_page_private(page, 0);
854 zone->free_area[order].nr_free--;
855
856 if (order >= pageblock_order && !is_migrate_isolate(migratetype))
857 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages);
858 }
859
860 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
861 unsigned int order, int migratetype)
862 {
863 __del_page_from_free_list(page, zone, order, migratetype);
864 account_freepages(zone, -(1 << order), migratetype);
865 }
866
867 static inline struct page *get_page_from_free_area(struct free_area *area,
868 int migratetype)
869 {
870 return list_first_entry_or_null(&area->free_list[migratetype],
871 struct page, buddy_list);
872 }
873
874 /*
875 * If this is less than the 2nd largest possible page, check if the buddy
876 * of the next-higher order is free. If it is, it's possible
877 * that pages are being freed that will coalesce soon. If
878 * that is happening, add the free page to the tail of the list
879 * so it's less likely to be used soon and more likely to be merged
880 * as a 2-level higher order page
881 */
882 static inline bool
883 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
884 struct page *page, unsigned int order)
885 {
886 unsigned long higher_page_pfn;
887 struct page *higher_page;
888
889 if (order >= MAX_PAGE_ORDER - 1)
890 return false;
891
892 higher_page_pfn = buddy_pfn & pfn;
893 higher_page = page + (higher_page_pfn - pfn);
894
895 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
896 NULL) != NULL;
897 }
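/*
 * Worked example (illustrative only): freeing an order-3 page at pfn 0x1008
 * whose order-3 buddy is at pfn 0x1000. The combined order-4 block starts at
 * buddy_pfn & pfn = 0x1000, and the check above asks whether that block's
 * own order-4 buddy (pfn 0x1010) is already free, i.e. whether another merge
 * is likely to follow soon.
 */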
898
899 static void change_pageblock_range(struct page *pageblock_page,
900 int start_order, int migratetype)
901 {
902 int nr_pageblocks = 1 << (start_order - pageblock_order);
903
904 while (nr_pageblocks--) {
905 set_pageblock_migratetype(pageblock_page, migratetype);
906 pageblock_page += pageblock_nr_pages;
907 }
908 }
909
910 /*
911 * Freeing function for a buddy system allocator.
912 *
913 * The concept of a buddy system is to maintain direct-mapped table
914 * (containing bit values) for memory blocks of various "orders".
915 * The bottom level table contains the map for the smallest allocatable
916 * units of memory (here, pages), and each level above it describes
917 * pairs of units from the levels below, hence, "buddies".
918 * At a high level, all that happens here is marking the table entry
919 * at the bottom level available, and propagating the changes upward
920 * as necessary, plus some accounting needed to play nicely with other
921 * parts of the VM system.
922 * At each level, we keep a list of pages, which are heads of contiguous
923 * runs of free pages of length (1 << order), marked with PageBuddy.
924 * Page's order is recorded in page_private(page) field.
925 * So when we are allocating or freeing one, we can derive the state of the
926 * other. That is, if we allocate a small block, and both were
927 * free, the remainder of the region must be split into blocks.
928 * If a block is freed, and its buddy is also free, then this
929 * triggers coalescing into a block of larger size.
930 *
931 * -- nyc
932 */
933
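/*
 * Concrete sketch of the merge loop below (illustrative only): a buddy at a
 * given order is found by flipping one pfn bit, buddy_pfn = pfn ^ (1 << order).
 * Freeing an order-0 page at pfn 21 first checks pfn 20; if that is free the
 * pair becomes an order-1 block at pfn 20, whose buddy is pfn 22; if that
 * order-1 block is free too, the result is an order-2 block at pfn 20 with
 * buddy pfn 16, and so on up to MAX_PAGE_ORDER.
 */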
934 static inline void __free_one_page(struct page *page,
935 unsigned long pfn,
936 struct zone *zone, unsigned int order,
937 int migratetype, fpi_t fpi_flags)
938 {
939 struct capture_control *capc = task_capc(zone);
940 unsigned long buddy_pfn = 0;
941 unsigned long combined_pfn;
942 struct page *buddy;
943 bool to_tail;
944
945 VM_BUG_ON(!zone_is_initialized(zone));
946 VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page);
947
948 VM_BUG_ON(migratetype == -1);
949 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
950 VM_BUG_ON_PAGE(bad_range(zone, page), page);
951
952 account_freepages(zone, 1 << order, migratetype);
953
954 while (order < MAX_PAGE_ORDER) {
955 int buddy_mt = migratetype;
956
957 if (compaction_capture(capc, page, order, migratetype)) {
958 account_freepages(zone, -(1 << order), migratetype);
959 return;
960 }
961
962 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
963 if (!buddy)
964 goto done_merging;
965
966 if (unlikely(order >= pageblock_order)) {
967 /*
968 * We want to prevent merge between freepages on pageblock
969 * without fallbacks and normal pageblock. Without this,
970 * pageblock isolation could cause incorrect freepage or CMA
971 * accounting or HIGHATOMIC accounting.
972 */
973 buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);
974
975 if (migratetype != buddy_mt &&
976 (!migratetype_is_mergeable(migratetype) ||
977 !migratetype_is_mergeable(buddy_mt)))
978 goto done_merging;
979 }
980
981 /*
982 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
983 * merge with it and move up one order.
984 */
985 if (page_is_guard(buddy))
986 clear_page_guard(zone, buddy, order);
987 else
988 __del_page_from_free_list(buddy, zone, order, buddy_mt);
989
990 if (unlikely(buddy_mt != migratetype)) {
991 /*
992 * Match buddy type. This ensures that an
993 * expand() down the line puts the sub-blocks
994 * on the right freelists.
995 */
996 change_pageblock_range(buddy, order, migratetype);
997 }
998
999 combined_pfn = buddy_pfn & pfn;
1000 page = page + (combined_pfn - pfn);
1001 pfn = combined_pfn;
1002 order++;
1003 }
1004
1005 done_merging:
1006 set_buddy_order(page, order);
1007
1008 if (fpi_flags & FPI_TO_TAIL)
1009 to_tail = true;
1010 else if (is_shuffle_order(order))
1011 to_tail = shuffle_pick_tail();
1012 else
1013 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1014
1015 __add_to_free_list(page, zone, order, migratetype, to_tail);
1016
1017 /* Notify page reporting subsystem of freed page */
1018 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1019 page_reporting_notify_free(order);
1020 }
1021
1022 /*
1023 * A bad page could be due to a number of fields. Instead of multiple branches,
1024 * try and check multiple fields with one check. The caller must do a detailed
1025 * check if necessary.
1026 */
1027 static inline bool page_expected_state(struct page *page,
1028 unsigned long check_flags)
1029 {
1030 if (unlikely(atomic_read(&page->_mapcount) != -1))
1031 return false;
1032
1033 if (unlikely((unsigned long)page->mapping |
1034 page_ref_count(page) |
1035 #ifdef CONFIG_MEMCG
1036 page->memcg_data |
1037 #endif
1038 (page->flags.f & check_flags)))
1039 return false;
1040
1041 return true;
1042 }
1043
1044 static const char *page_bad_reason(struct page *page, unsigned long flags)
1045 {
1046 const char *bad_reason = NULL;
1047
1048 if (unlikely(atomic_read(&page->_mapcount) != -1))
1049 bad_reason = "nonzero mapcount";
1050 if (unlikely(page->mapping != NULL))
1051 bad_reason = "non-NULL mapping";
1052 if (unlikely(page_ref_count(page) != 0))
1053 bad_reason = "nonzero _refcount";
1054 if (unlikely(page->flags.f & flags)) {
1055 if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1056 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1057 else
1058 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1059 }
1060 #ifdef CONFIG_MEMCG
1061 if (unlikely(page->memcg_data))
1062 bad_reason = "page still charged to cgroup";
1063 #endif
1064 return bad_reason;
1065 }
1066
1067 static inline bool free_page_is_bad(struct page *page)
1068 {
1069 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1070 return false;
1071
1072 /* Something has gone sideways, find it */
1073 bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1074 return true;
1075 }
1076
1077 static inline bool is_check_pages_enabled(void)
1078 {
1079 return static_branch_unlikely(&check_pages_enabled);
1080 }
1081
1082 static int free_tail_page_prepare(struct page *head_page, struct page *page)
1083 {
1084 struct folio *folio = (struct folio *)head_page;
1085 int ret = 1;
1086
1087 /*
1088 * We rely on page->lru.next never having bit 0 set, unless the page
1089 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1090 */
1091 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1092
1093 if (!is_check_pages_enabled()) {
1094 ret = 0;
1095 goto out;
1096 }
1097 switch (page - head_page) {
1098 case 1:
1099 /* the first tail page: these may be in place of ->mapping */
1100 if (unlikely(folio_large_mapcount(folio))) {
1101 bad_page(page, "nonzero large_mapcount");
1102 goto out;
1103 }
1104 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) &&
1105 unlikely(atomic_read(&folio->_nr_pages_mapped))) {
1106 bad_page(page, "nonzero nr_pages_mapped");
1107 goto out;
1108 }
1109 if (IS_ENABLED(CONFIG_MM_ID)) {
1110 if (unlikely(folio->_mm_id_mapcount[0] != -1)) {
1111 bad_page(page, "nonzero mm mapcount 0");
1112 goto out;
1113 }
1114 if (unlikely(folio->_mm_id_mapcount[1] != -1)) {
1115 bad_page(page, "nonzero mm mapcount 1");
1116 goto out;
1117 }
1118 }
1119 if (IS_ENABLED(CONFIG_64BIT)) {
1120 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
1121 bad_page(page, "nonzero entire_mapcount");
1122 goto out;
1123 }
1124 if (unlikely(atomic_read(&folio->_pincount))) {
1125 bad_page(page, "nonzero pincount");
1126 goto out;
1127 }
1128 }
1129 break;
1130 case 2:
1131 /* the second tail page: deferred_list overlaps ->mapping */
1132 if (unlikely(!list_empty(&folio->_deferred_list))) {
1133 bad_page(page, "on deferred list");
1134 goto out;
1135 }
1136 if (!IS_ENABLED(CONFIG_64BIT)) {
1137 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
1138 bad_page(page, "nonzero entire_mapcount");
1139 goto out;
1140 }
1141 if (unlikely(atomic_read(&folio->_pincount))) {
1142 bad_page(page, "nonzero pincount");
1143 goto out;
1144 }
1145 }
1146 break;
1147 case 3:
1148 /* the third tail page: hugetlb specifics overlap ->mappings */
1149 if (IS_ENABLED(CONFIG_HUGETLB_PAGE))
1150 break;
1151 fallthrough;
1152 default:
1153 if (page->mapping != TAIL_MAPPING) {
1154 bad_page(page, "corrupted mapping in tail page");
1155 goto out;
1156 }
1157 break;
1158 }
1159 if (unlikely(!PageTail(page))) {
1160 bad_page(page, "PageTail not set");
1161 goto out;
1162 }
1163 if (unlikely(compound_head(page) != head_page)) {
1164 bad_page(page, "compound_head not consistent");
1165 goto out;
1166 }
1167 ret = 0;
1168 out:
1169 page->mapping = NULL;
1170 clear_compound_head(page);
1171 return ret;
1172 }
1173
1174 /*
1175 * Skip KASAN memory poisoning when either:
1176 *
1177 * 1. For generic KASAN: deferred memory initialization has not yet completed.
1178 * Tag-based KASAN modes skip pages freed via deferred memory initialization
1179 * using page tags instead (see below).
1180 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
1181 * that error detection is disabled for accesses via the page address.
1182 *
1183 * Pages will have match-all tags in the following circumstances:
1184 *
1185 * 1. Pages are being initialized for the first time, including during deferred
1186 * memory init; see the call to page_kasan_tag_reset in __init_single_page.
1187 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
1188 * exception of pages unpoisoned by kasan_unpoison_vmalloc.
1189 * 3. The allocation was excluded from being checked due to sampling,
1190 * see the call to kasan_unpoison_pages.
1191 *
1192 * Poisoning pages during deferred memory init will greatly lengthen the
1193 * process and cause problems on large memory systems, as the deferred page
1194 * initialization is done with interrupts disabled.
1195 *
1196 * Assuming that there will be no reference to those newly initialized
1197 * pages before they are ever allocated, this should have no effect on
1198 * KASAN memory tracking as the poison will be properly inserted at page
1199 * allocation time. The only corner case is when pages are allocated by
1200 * on-demand allocation and then freed again before the deferred pages
1201 * initialization is done, but this is not likely to happen.
1202 */
1203 static inline bool should_skip_kasan_poison(struct page *page)
1204 {
1205 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1206 return deferred_pages_enabled();
1207
1208 return page_kasan_tag(page) == KASAN_TAG_KERNEL;
1209 }
1210
1211 static void kernel_init_pages(struct page *page, int numpages)
1212 {
1213 int i;
1214
1215 /* s390's use of memset() could override KASAN redzones. */
1216 kasan_disable_current();
1217 for (i = 0; i < numpages; i++)
1218 clear_highpage_kasan_tagged(page + i);
1219 kasan_enable_current();
1220 }
1221
1222 #ifdef CONFIG_MEM_ALLOC_PROFILING
1223
1224 /* Should be called only if mem_alloc_profiling_enabled() */
1225 void __clear_page_tag_ref(struct page *page)
1226 {
1227 union pgtag_ref_handle handle;
1228 union codetag_ref ref;
1229
1230 if (get_page_tag_ref(page, &ref, &handle)) {
1231 set_codetag_empty(&ref);
1232 update_page_tag_ref(handle, &ref);
1233 put_page_tag_ref(handle);
1234 }
1235 }
1236
1237 /* Should be called only if mem_alloc_profiling_enabled() */
1238 static noinline
1239 void __pgalloc_tag_add(struct page *page, struct task_struct *task,
1240 unsigned int nr)
1241 {
1242 union pgtag_ref_handle handle;
1243 union codetag_ref ref;
1244
1245 if (likely(get_page_tag_ref(page, &ref, &handle))) {
1246 alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
1247 update_page_tag_ref(handle, &ref);
1248 put_page_tag_ref(handle);
1249 } else {
1250 /*
1251 * page_ext is not available yet, record the pfn so we can
1252 * clear the tag ref later when page_ext is initialized.
1253 */
1254 alloc_tag_add_early_pfn(page_to_pfn(page));
1255 if (task->alloc_tag)
1256 alloc_tag_set_inaccurate(task->alloc_tag);
1257 }
1258 }
1259
1260 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
1261 unsigned int nr)
1262 {
1263 if (mem_alloc_profiling_enabled())
1264 __pgalloc_tag_add(page, task, nr);
1265 }
1266
1267 /* Should be called only if mem_alloc_profiling_enabled() */
1268 static noinline
1269 void __pgalloc_tag_sub(struct page *page, unsigned int nr)
1270 {
1271 union pgtag_ref_handle handle;
1272 union codetag_ref ref;
1273
1274 if (get_page_tag_ref(page, &ref, &handle)) {
1275 alloc_tag_sub(&ref, PAGE_SIZE * nr);
1276 update_page_tag_ref(handle, &ref);
1277 put_page_tag_ref(handle);
1278 }
1279 }
1280
1281 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
1282 {
1283 if (mem_alloc_profiling_enabled())
1284 __pgalloc_tag_sub(page, nr);
1285 }
1286
1287 /* When tag is not NULL, assuming mem_alloc_profiling_enabled */
1288 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
1289 {
1290 if (tag)
1291 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
1292 }
1293
1294 #else /* CONFIG_MEM_ALLOC_PROFILING */
1295
1296 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
1297 unsigned int nr) {}
1298 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
1299 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
1300
1301 #endif /* CONFIG_MEM_ALLOC_PROFILING */
1302
1303 __always_inline bool __free_pages_prepare(struct page *page,
1304 unsigned int order, fpi_t fpi_flags)
1305 {
1306 int bad = 0;
1307 bool skip_kasan_poison = should_skip_kasan_poison(page);
1308 bool init = want_init_on_free();
1309 bool compound = PageCompound(page);
1310 struct folio *folio = page_folio(page);
1311
1312 VM_BUG_ON_PAGE(PageTail(page), page);
1313
1314 trace_mm_page_free(page, order);
1315 kmsan_free_page(page, order);
1316
1317 if (memcg_kmem_online() && PageMemcgKmem(page))
1318 __memcg_kmem_uncharge_page(page, order);
1319
1320 /*
1321 * In rare cases, when truncation or holepunching raced with
1322 * munlock after VM_LOCKED was cleared, Mlocked may still be
1323 * found set here. This does not indicate a problem, unless
1324 * "unevictable_pgs_cleared" appears worryingly large.
1325 */
1326 if (unlikely(folio_test_mlocked(folio))) {
1327 long nr_pages = folio_nr_pages(folio);
1328
1329 __folio_clear_mlocked(folio);
1330 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
1331 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
1332 }
1333
1334 if (unlikely(PageHWPoison(page)) && !order) {
1335 /* Do not let hwpoison pages hit pcplists/buddy */
1336 reset_page_owner(page, order);
1337 page_table_check_free(page, order);
1338 pgalloc_tag_sub(page, 1 << order);
1339
1340 /*
1341 * The page is isolated and accounted for.
1342 * Mark the codetag as empty to avoid accounting error
1343 * when the page is freed by unpoison_memory().
1344 */
1345 clear_page_tag_ref(page);
1346 return false;
1347 }
1348
1349 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1350
1351 /*
1352 * Check tail pages before head page information is cleared to
1353 * avoid checking PageCompound for order-0 pages.
1354 */
1355 if (unlikely(order)) {
1356 int i;
1357
1358 if (compound) {
1359 page[1].flags.f &= ~PAGE_FLAGS_SECOND;
1360 #ifdef NR_PAGES_IN_LARGE_FOLIO
1361 folio->_nr_pages = 0;
1362 #endif
1363 }
1364 for (i = 1; i < (1 << order); i++) {
1365 if (compound)
1366 bad += free_tail_page_prepare(page, page + i);
1367 if (is_check_pages_enabled()) {
1368 if (free_page_is_bad(page + i)) {
1369 bad++;
1370 continue;
1371 }
1372 }
1373 (page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
1374 }
1375 }
1376 if (folio_test_anon(folio)) {
1377 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
1378 folio->mapping = NULL;
1379 }
1380 if (unlikely(page_has_type(page))) {
1381 /* networking expects to clear its page type before releasing */
1382 if (is_check_pages_enabled()) {
1383 if (unlikely(PageNetpp(page))) {
1384 bad_page(page, "page_pool leak");
1385 return false;
1386 }
1387 }
1388 /* Reset the page_type (which overlays _mapcount) */
1389 page->page_type = UINT_MAX;
1390 }
1391
1392 if (is_check_pages_enabled()) {
1393 if (free_page_is_bad(page))
1394 bad++;
1395 if (bad)
1396 return false;
1397 }
1398
1399 page_cpupid_reset_last(page);
1400 page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
1401 page->private = 0;
1402 reset_page_owner(page, order);
1403 page_table_check_free(page, order);
1404 pgalloc_tag_sub(page, 1 << order);
1405
1406 if (!PageHighMem(page) && !(fpi_flags & FPI_TRYLOCK)) {
1407 debug_check_no_locks_freed(page_address(page),
1408 PAGE_SIZE << order);
1409 debug_check_no_obj_freed(page_address(page),
1410 PAGE_SIZE << order);
1411 }
1412
1413 kernel_poison_pages(page, 1 << order);
1414
1415 /*
1416 * As memory initialization might be integrated into KASAN,
1417 * KASAN poisoning and memory initialization code must be
1418 * kept together to avoid discrepancies in behavior.
1419 *
1420 * With hardware tag-based KASAN, memory tags must be set before the
1421 * page becomes unavailable via debug_pagealloc or arch_free_page.
1422 */
1423 if (!skip_kasan_poison) {
1424 kasan_poison_pages(page, order, init);
1425
1426 /* Memory is already initialized if KASAN did it internally. */
1427 if (kasan_has_integrated_init())
1428 init = false;
1429 }
1430 if (init)
1431 kernel_init_pages(page, 1 << order);
1432
1433 /*
1434 * arch_free_page() can make the page's contents inaccessible. s390
1435 * does this. So nothing which can access the page's contents should
1436 * happen after this.
1437 */
1438 arch_free_page(page, order);
1439
1440 debug_pagealloc_unmap_pages(page, 1 << order);
1441
1442 return true;
1443 }
1444
1445 bool free_pages_prepare(struct page *page, unsigned int order)
1446 {
1447 return __free_pages_prepare(page, order, FPI_NONE);
1448 }
1449
1450 /*
1451 * Frees a number of pages from the PCP lists
1452 * Assumes all pages on list are in same zone.
1453 * count is the number of pages to free.
1454 */
1455 static void free_pcppages_bulk(struct zone *zone, int count,
1456 struct per_cpu_pages *pcp,
1457 int pindex)
1458 {
1459 unsigned long flags;
1460 unsigned int order;
1461 struct page *page;
1462
1463 /*
1464 * Ensure a proper count is passed; otherwise we would get stuck in the
1465 * while (list_empty(list)) loop below.
1466 */
1467 count = min(pcp->count, count);
1468
1469 /* Ensure requested pindex is drained first. */
1470 pindex = pindex - 1;
1471
1472 spin_lock_irqsave(&zone->lock, flags);
1473
1474 while (count > 0) {
1475 struct list_head *list;
1476 int nr_pages;
1477
1478 /* Remove pages from lists in a round-robin fashion. */
1479 do {
1480 if (++pindex > NR_PCP_LISTS - 1)
1481 pindex = 0;
1482 list = &pcp->lists[pindex];
1483 } while (list_empty(list));
1484
1485 order = pindex_to_order(pindex);
1486 nr_pages = 1 << order;
1487 do {
1488 unsigned long pfn;
1489 int mt;
1490
1491 page = list_last_entry(list, struct page, pcp_list);
1492 pfn = page_to_pfn(page);
1493 mt = get_pfnblock_migratetype(page, pfn);
1494
1495 /* must delete to avoid corrupting pcp list */
1496 list_del(&page->pcp_list);
1497 count -= nr_pages;
1498 pcp->count -= nr_pages;
1499
1500 __free_one_page(page, pfn, zone, order, mt, FPI_NONE);
1501 trace_mm_page_pcpu_drain(page, order, mt);
1502 } while (count > 0 && !list_empty(list));
1503 }
1504
1505 spin_unlock_irqrestore(&zone->lock, flags);
1506 }
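/*
 * Behaviour sketch (illustrative only): asked to free 12 pages, the loop
 * above starts with the requested pindex, frees entries from the tail of
 * that list until it is empty or the count is used up, then advances to the
 * next non-empty list, and so on, returning 12 pages worth of pcp entries to
 * the buddy lists under a single zone->lock hold.
 */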
1507
1508 /* Split a multi-block free page into its individual pageblocks. */
1509 static void split_large_buddy(struct zone *zone, struct page *page,
1510 unsigned long pfn, int order, fpi_t fpi)
1511 {
1512 unsigned long end = pfn + (1 << order);
1513
1514 VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order));
1515 /* Caller removed page from freelist, buddy info cleared! */
1516 VM_WARN_ON_ONCE(PageBuddy(page));
1517
1518 if (order > pageblock_order)
1519 order = pageblock_order;
1520
1521 do {
1522 int mt = get_pfnblock_migratetype(page, pfn);
1523
1524 __free_one_page(page, pfn, zone, order, mt, fpi);
1525 pfn += 1 << order;
1526 if (pfn == end)
1527 break;
1528 page = pfn_to_page(pfn);
1529 } while (1);
1530 }
1531
1532 static void add_page_to_zone_llist(struct zone *zone, struct page *page,
1533 unsigned int order)
1534 {
1535 /* Remember the order */
1536 page->private = order;
1537 /* Add the page to the free list */
1538 llist_add(&page->pcp_llist, &zone->trylock_free_pages);
1539 }
1540
1541 static void free_one_page(struct zone *zone, struct page *page,
1542 unsigned long pfn, unsigned int order,
1543 fpi_t fpi_flags)
1544 {
1545 struct llist_head *llhead;
1546 unsigned long flags;
1547
1548 if (unlikely(fpi_flags & FPI_TRYLOCK)) {
1549 if (!spin_trylock_irqsave(&zone->lock, flags)) {
1550 add_page_to_zone_llist(zone, page, order);
1551 return;
1552 }
1553 } else {
1554 spin_lock_irqsave(&zone->lock, flags);
1555 }
1556
1557 /* The lock succeeded. Process deferred pages. */
1558 llhead = &zone->trylock_free_pages;
1559 if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK))) {
1560 struct llist_node *llnode;
1561 struct page *p, *tmp;
1562
1563 llnode = llist_del_all(llhead);
1564 llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) {
1565 unsigned int p_order = p->private;
1566
1567 split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags);
1568 __count_vm_events(PGFREE, 1 << p_order);
1569 }
1570 }
1571 split_large_buddy(zone, page, pfn, order, fpi_flags);
1572 spin_unlock_irqrestore(&zone->lock, flags);
1573
1574 __count_vm_events(PGFREE, 1 << order);
1575 }
1576
1577 static void __free_pages_ok(struct page *page, unsigned int order,
1578 fpi_t fpi_flags)
1579 {
1580 unsigned long pfn = page_to_pfn(page);
1581 struct zone *zone = page_zone(page);
1582
1583 if (__free_pages_prepare(page, order, fpi_flags))
1584 free_one_page(zone, page, pfn, order, fpi_flags);
1585 }
1586
1587 void __meminit __free_pages_core(struct page *page, unsigned int order,
1588 enum meminit_context context)
1589 {
1590 unsigned int nr_pages = 1 << order;
1591 struct page *p = page;
1592 unsigned int loop;
1593
1594 /*
1595 * When initializing the memmap, __init_single_page() sets the refcount
1596 * of all pages to 1 ("allocated"/"not free"). We have to set the
1597 * refcount of all involved pages to 0.
1598 *
1599 * Note that hotplugged memory pages are initialized to PageOffline().
1600 * Pages freed from memblock might be marked as reserved.
1601 */
1602 if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
1603 unlikely(context == MEMINIT_HOTPLUG)) {
1604 for (loop = 0; loop < nr_pages; loop++, p++) {
1605 VM_WARN_ON_ONCE(PageReserved(p));
1606 __ClearPageOffline(p);
1607 set_page_count(p, 0);
1608 }
1609
1610 adjust_managed_page_count(page, nr_pages);
1611 } else {
1612 for (loop = 0; loop < nr_pages; loop++, p++) {
1613 __ClearPageReserved(p);
1614 set_page_count(p, 0);
1615 }
1616
1617 /* memblock adjusts totalram_pages() manually. */
1618 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1619 }
1620
1621 if (page_contains_unaccepted(page, order)) {
1622 if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
1623 return;
1624
1625 accept_memory(page_to_phys(page), PAGE_SIZE << order);
1626 }
1627
1628 /*
1629 * Bypass PCP and place fresh pages right to the tail, primarily
1630 * relevant for memory onlining.
1631 */
1632 __free_pages_ok(page, order, FPI_TO_TAIL);
1633 }
1634
1635 /*
1636 * Check that the whole (or subset of) a pageblock given by the interval of
1637 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1638 * with the migration of free compaction scanner.
1639 *
1640 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1641 *
1642 * It's possible on some configurations to have a setup like node0 node1 node0
1643 * i.e. it's possible that not all pages within a zone's range of pages
1644 * belong to a single zone. We assume that a border between node0 and node1
1645 * can occur within a single pageblock, but not a node0 node1 node0
1646 * interleaving within a single pageblock. It is therefore sufficient to check
1647 * the first and last page of a pageblock and avoid checking each individual
1648 * page in a pageblock.
1649 *
1650 * Note: the function may return non-NULL struct page even for a page block
1651 * which contains a memory hole (i.e. there is no physical memory for a subset
1652 * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which
1653 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole
1654 * even though the start pfn is online and valid. This should be safe most of
1655 * the time because struct pages are still initialized via init_unavailable_range()
1656 * and pfn walkers shouldn't touch any physical memory range for which they do
1657 * not recognize any specific metadata in struct pages.
1658 */
1659 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1660 unsigned long end_pfn, struct zone *zone)
1661 {
1662 struct page *start_page;
1663 struct page *end_page;
1664
1665 /* end_pfn is one past the range we are checking */
1666 end_pfn--;
1667
1668 if (!pfn_valid(end_pfn))
1669 return NULL;
1670
1671 start_page = pfn_to_online_page(start_pfn);
1672 if (!start_page)
1673 return NULL;
1674
1675 if (page_zone(start_page) != zone)
1676 return NULL;
1677
1678 end_page = pfn_to_page(end_pfn);
1679
1680 /* This gives shorter code than deriving page_zone(end_page) */
1681 if (page_zone_id(start_page) != page_zone_id(end_page))
1682 return NULL;
1683
1684 return start_page;
1685 }
1686
1687 /*
1688 * The order of subdivision here is critical for the IO subsystem.
1689 * Please do not alter this order without good reasons and regression
1690 * testing. Specifically, as large blocks of memory are subdivided,
1691 * the order in which smaller blocks are delivered depends on the order
1692 * they're subdivided in this function. This is the primary factor
1693 * influencing the order in which pages are delivered to the IO
1694 * subsystem according to empirical testing, and this is also justified
1695 * by considering the behavior of a buddy system containing a single
1696 * large block of memory acted on by a series of small allocations.
1697 * This behavior is a critical factor in sglist merging's success.
1698 *
1699 * -- nyc
1700 */
1701 static inline unsigned int expand(struct zone *zone, struct page *page, int low,
1702 int high, int migratetype)
1703 {
1704 unsigned int size = 1 << high;
1705 unsigned int nr_added = 0;
1706
1707 while (high > low) {
1708 high--;
1709 size >>= 1;
1710 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1711
1712 /*
1713 * Mark as guard page(s); this allows merging back into the
1714 * allocator when the buddy is eventually freed. The
1715 * corresponding page table entries are not touched, so the
1716 * pages stay not-present in the virtual address space.
1717 */
1718 if (set_page_guard(zone, &page[size], high))
1719 continue;
1720
1721 __add_to_free_list(&page[size], zone, high, migratetype, false);
1722 set_buddy_order(&page[size], high);
1723 nr_added += size;
1724 }
1725
1726 return nr_added;
1727 }
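/*
 * Example (illustrative): assume an order-3 block is taken from the free
 * list to satisfy an order-0 request, i.e. expand() is called with
 * high = 3 and low = 0. The loop hands the unused halves back:
 *
 *	high = 2, size = 4: page[4] becomes an order-2 buddy
 *	high = 1, size = 2: page[2] becomes an order-1 buddy
 *	high = 0, size = 1: page[1] becomes an order-0 buddy
 *
 * page[0] is left for the caller and nr_added = 4 + 2 + 1 = 7 pages are
 * returned to the free lists (fewer if some become guard pages).
 */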
1728
1729 static __always_inline void page_del_and_expand(struct zone *zone,
1730 struct page *page, int low,
1731 int high, int migratetype)
1732 {
1733 int nr_pages = 1 << high;
1734
1735 __del_page_from_free_list(page, zone, high, migratetype);
1736 nr_pages -= expand(zone, page, low, high, migratetype);
1737 account_freepages(zone, -nr_pages, migratetype);
1738 }
1739
1740 static void check_new_page_bad(struct page *page)
1741 {
1742 if (unlikely(PageHWPoison(page))) {
1743 /* Don't complain about hwpoisoned pages */
1744 if (PageBuddy(page))
1745 __ClearPageBuddy(page);
1746 return;
1747 }
1748
1749 bad_page(page,
1750 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
1751 }
1752
1753 /*
1754 * This page is about to be returned from the page allocator
1755 */
1756 static bool check_new_page(struct page *page)
1757 {
1758 if (likely(page_expected_state(page,
1759 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1760 return false;
1761
1762 check_new_page_bad(page);
1763 return true;
1764 }
1765
1766 static inline bool check_new_pages(struct page *page, unsigned int order)
1767 {
1768 if (is_check_pages_enabled()) {
1769 for (int i = 0; i < (1 << order); i++) {
1770 struct page *p = page + i;
1771
1772 if (check_new_page(p))
1773 return true;
1774 }
1775 }
1776
1777 return false;
1778 }
1779
1780 static inline bool should_skip_kasan_unpoison(gfp_t flags)
1781 {
1782 /* Don't skip if a software KASAN mode is enabled. */
1783 if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
1784 IS_ENABLED(CONFIG_KASAN_SW_TAGS))
1785 return false;
1786
1787 /* Skip, if hardware tag-based KASAN is not enabled. */
1788 if (!kasan_hw_tags_enabled())
1789 return true;
1790
1791 /*
1792 * With hardware tag-based KASAN enabled, skip if this has been
1793 * requested via __GFP_SKIP_KASAN.
1794 */
1795 return flags & __GFP_SKIP_KASAN;
1796 }
1797
1798 static inline bool should_skip_init(gfp_t flags)
1799 {
1800 /* Don't skip, if hardware tag-based KASAN is not enabled. */
1801 if (!kasan_hw_tags_enabled())
1802 return false;
1803
1804 /* For hardware tag-based KASAN, skip if requested. */
1805 return (flags & __GFP_SKIP_ZERO);
1806 }
1807
1808 inline void post_alloc_hook(struct page *page, unsigned int order,
1809 gfp_t gfp_flags)
1810 {
1811 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
1812 !should_skip_init(gfp_flags);
1813 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
1814 int i;
1815
1816 set_page_private(page, 0);
1817
1818 arch_alloc_page(page, order);
1819 debug_pagealloc_map_pages(page, 1 << order);
1820
1821 /*
1822 * Page unpoisoning must happen before memory initialization.
1823 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
1824 * allocations and the page unpoisoning code will complain.
1825 */
1826 kernel_unpoison_pages(page, 1 << order);
1827
1828 /*
1829 * As memory initialization might be integrated into KASAN,
1830 * KASAN unpoisoning and memory initialization code must be
1831 * kept together to avoid discrepancies in behavior.
1832 */
1833
1834 /*
1835 * If memory tags should be zeroed (which happens only when memory
1836 * should be initialized as well), do so now via tag_clear_highpages().
1837 */
1838 if (zero_tags)
1839 init = !tag_clear_highpages(page, 1 << order);
1840
1841 if (!should_skip_kasan_unpoison(gfp_flags) &&
1842 kasan_unpoison_pages(page, order, init)) {
1843 /* Take note that memory was initialized by KASAN. */
1844 if (kasan_has_integrated_init())
1845 init = false;
1846 } else {
1847 /*
1848 * If memory tags have not been set by KASAN, reset the page
1849 * tags to ensure page_address() dereferencing does not fault.
1850 */
1851 for (i = 0; i != 1 << order; ++i)
1852 page_kasan_tag_reset(page + i);
1853 }
1854 /* If memory is still not initialized, initialize it now. */
1855 if (init)
1856 kernel_init_pages(page, 1 << order);
1857
1858 set_page_owner(page, order, gfp_flags);
1859 page_table_check_alloc(page, order);
1860 pgalloc_tag_add(page, current, 1 << order);
1861 }
1862
1863 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1864 unsigned int alloc_flags)
1865 {
1866 post_alloc_hook(page, order, gfp_flags);
1867
1868 if (order && (gfp_flags & __GFP_COMP))
1869 prep_compound_page(page, order);
1870
1871 /*
1872 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1873 * allocate the page. The expectation is that the caller is taking
1874 * steps that will free more memory. The caller should avoid the page
1875 * being used for !PFMEMALLOC purposes.
1876 */
1877 if (alloc_flags & ALLOC_NO_WATERMARKS)
1878 set_page_pfmemalloc(page);
1879 else
1880 clear_page_pfmemalloc(page);
1881 }
1882
1883 /*
1884 * Go through the free lists for the given migratetype and remove
1885 * the smallest available page from the freelists
1886 */
1887 static __always_inline
1888 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1889 int migratetype)
1890 {
1891 unsigned int current_order;
1892 struct free_area *area;
1893 struct page *page;
1894
1895 /* Find a page of the appropriate size in the preferred list */
1896 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
1897 area = &(zone->free_area[current_order]);
1898 page = get_page_from_free_area(area, migratetype);
1899 if (!page)
1900 continue;
1901
1902 page_del_and_expand(zone, page, order, current_order,
1903 migratetype);
1904 trace_mm_page_alloc_zone_locked(page, order, migratetype,
1905 pcp_allowed_order(order) &&
1906 migratetype < MIGRATE_PCPTYPES);
1907 return page;
1908 }
1909
1910 return NULL;
1911 }
1912
1913
1914 /*
1915 * This array describes the order in which free lists are fallen back to
1916 * when the free lists for the desired migratetype are depleted.
1917 *
1918 * The other migratetypes do not have fallbacks.
1919 */
1920 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = {
1921 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
1922 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
1923 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
1924 };
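/*
 * Example (illustrative): a MIGRATE_UNMOVABLE request whose own free lists
 * are empty is first satisfied from MIGRATE_RECLAIMABLE and only then from
 * MIGRATE_MOVABLE. A caller such as find_suitable_fallback() simply walks
 * the row for the starting migratetype, e.g.:
 *
 *	for (i = 0; i < MIGRATE_PCPTYPES - 1; i++)
 *		if (!free_area_empty(area, fallbacks[start_mt][i]))
 *			return fallbacks[start_mt][i];
 */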
1925
1926 #ifdef CONFIG_CMA
1927 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1928 unsigned int order)
1929 {
1930 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1931 }
1932 #else
1933 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1934 unsigned int order) { return NULL; }
1935 #endif
1936
1937 /*
1938 * Move all free pages of a block to new type's freelist. Caller needs to
1939 * change the block type.
1940 */
1941 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
1942 int old_mt, int new_mt)
1943 {
1944 struct page *page;
1945 unsigned long pfn, end_pfn;
1946 unsigned int order;
1947 int pages_moved = 0;
1948
1949 VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
1950 end_pfn = pageblock_end_pfn(start_pfn);
1951
1952 for (pfn = start_pfn; pfn < end_pfn;) {
1953 page = pfn_to_page(pfn);
1954 if (!PageBuddy(page)) {
1955 pfn++;
1956 continue;
1957 }
1958
1959 /* Make sure we are not inadvertently changing nodes */
1960 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1961 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
1962
1963 order = buddy_order(page);
1964
1965 move_to_free_list(page, zone, order, old_mt, new_mt);
1966
1967 pfn += 1 << order;
1968 pages_moved += 1 << order;
1969 }
1970
1971 return pages_moved;
1972 }
1973
1974 static bool prep_move_freepages_block(struct zone *zone, struct page *page,
1975 unsigned long *start_pfn,
1976 int *num_free, int *num_movable)
1977 {
1978 unsigned long pfn, start, end;
1979
1980 pfn = page_to_pfn(page);
1981 start = pageblock_start_pfn(pfn);
1982 end = pageblock_end_pfn(pfn);
1983
1984 /*
1985 * The caller only has the lock for @zone, don't touch ranges
1986 * that straddle into other zones. While we could move part of
1987 * the range that's inside the zone, this call is usually
1988 * accompanied by other operations such as migratetype updates
1989 * which also should be locked.
1990 */
1991 if (!zone_spans_pfn(zone, start))
1992 return false;
1993 if (!zone_spans_pfn(zone, end - 1))
1994 return false;
1995
1996 *start_pfn = start;
1997
1998 if (num_free) {
1999 *num_free = 0;
2000 *num_movable = 0;
2001 for (pfn = start; pfn < end;) {
2002 page = pfn_to_page(pfn);
2003 if (PageBuddy(page)) {
2004 int nr = 1 << buddy_order(page);
2005
2006 *num_free += nr;
2007 pfn += nr;
2008 continue;
2009 }
2010 /*
2011 * We assume that pages that could be isolated for
2012 * migration are movable. But we don't actually try
2013 * isolating, as that would be expensive.
2014 */
2015 if (PageLRU(page) || page_has_movable_ops(page))
2016 (*num_movable)++;
2017 pfn++;
2018 }
2019 }
2020
2021 return true;
2022 }
2023
2024 static int move_freepages_block(struct zone *zone, struct page *page,
2025 int old_mt, int new_mt)
2026 {
2027 unsigned long start_pfn;
2028 int res;
2029
2030 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
2031 return -1;
2032
2033 res = __move_freepages_block(zone, start_pfn, old_mt, new_mt);
2034 set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
2035
2036 return res;
2037
2038 }
2039
2040 #ifdef CONFIG_MEMORY_ISOLATION
2041 /* Look for a buddy that straddles start_pfn */
2042 static unsigned long find_large_buddy(unsigned long start_pfn)
2043 {
2044 /*
2045 * If start_pfn is not an order-0 PageBuddy, the next PageBuddy that
2046 * contains start_pfn has a minimal order of __ffs(start_pfn) + 1, so
2047 * start checking orders at __ffs(start_pfn). If start_pfn is an
2048 * order-0 PageBuddy, the starting order does not matter.
2049 */
2050 int order = start_pfn ? __ffs(start_pfn) : MAX_PAGE_ORDER;
2051 struct page *page;
2052 unsigned long pfn = start_pfn;
2053
2054 while (!PageBuddy(page = pfn_to_page(pfn))) {
2055 /* Nothing found */
2056 if (++order > MAX_PAGE_ORDER)
2057 return start_pfn;
2058 pfn &= ~0UL << order;
2059 }
2060
2061 /*
2062 * Found a preceding buddy, but does it straddle?
2063 */
2064 if (pfn + (1 << buddy_order(page)) > start_pfn)
2065 return pfn;
2066
2067 /* Nothing found */
2068 return start_pfn;
2069 }
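/*
 * Example (illustrative, with made-up numbers): start_pfn = 704 has
 * __ffs(704) = 6, so the search starts at order 6. If pfn 704 itself is
 * not PageBuddy, order becomes 7 and pfn is masked down to 640. If a free
 * buddy of order 8 starts at pfn 640, it spans pfns 640..895 and therefore
 * straddles 704, so 640 is returned; otherwise the search continues up to
 * MAX_PAGE_ORDER and finally falls back to returning start_pfn.
 */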
2070
2071 static inline void toggle_pageblock_isolate(struct page *page, bool isolate)
2072 {
2073 if (isolate)
2074 set_pageblock_isolate(page);
2075 else
2076 clear_pageblock_isolate(page);
2077 }
2078
2079 /**
2080 * __move_freepages_block_isolate - move free pages in block for page isolation
2081 * @zone: the zone
2082 * @page: the pageblock page
2083 * @isolate: to isolate the given pageblock or unisolate it
2084 *
2085 * This is similar to move_freepages_block(), but handles the special
2086 * case encountered in page isolation, where the block of interest
2087 * might be part of a larger buddy spanning multiple pageblocks.
2088 *
2089 * Unlike the regular page allocator path, which moves pages while
2090 * stealing buddies off the freelist, page isolation is interested in
2091 * arbitrary pfn ranges that may have overlapping buddies on both ends.
2092 *
2093 * This function handles that. Straddling buddies are split into
2094 * individual pageblocks. Only the block of interest is moved.
2095 *
2096 * Returns %true if pages could be moved, %false otherwise.
2097 */
2098 static bool __move_freepages_block_isolate(struct zone *zone,
2099 struct page *page, bool isolate)
2100 {
2101 unsigned long start_pfn, buddy_pfn;
2102 int from_mt;
2103 int to_mt;
2104 struct page *buddy;
2105
2106 if (isolate == get_pageblock_isolate(page)) {
2107 VM_WARN_ONCE(1, "%s a pageblock that is already in that state",
2108 isolate ? "Isolate" : "Unisolate");
2109 return false;
2110 }
2111
2112 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
2113 return false;
2114
2115 /* No splits needed if buddies can't span multiple blocks */
2116 if (pageblock_order == MAX_PAGE_ORDER)
2117 goto move;
2118
2119 buddy_pfn = find_large_buddy(start_pfn);
2120 buddy = pfn_to_page(buddy_pfn);
2121 /* We're a part of a larger buddy */
2122 if (PageBuddy(buddy) && buddy_order(buddy) > pageblock_order) {
2123 int order = buddy_order(buddy);
2124
2125 del_page_from_free_list(buddy, zone, order,
2126 get_pfnblock_migratetype(buddy, buddy_pfn));
2127 toggle_pageblock_isolate(page, isolate);
2128 split_large_buddy(zone, buddy, buddy_pfn, order, FPI_NONE);
2129 return true;
2130 }
2131
2132 move:
2133 /* Use MIGRATETYPE_MASK to get non-isolate migratetype */
2134 if (isolate) {
2135 from_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page),
2136 MIGRATETYPE_MASK);
2137 to_mt = MIGRATE_ISOLATE;
2138 } else {
2139 from_mt = MIGRATE_ISOLATE;
2140 to_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page),
2141 MIGRATETYPE_MASK);
2142 }
2143
2144 __move_freepages_block(zone, start_pfn, from_mt, to_mt);
2145 toggle_pageblock_isolate(pfn_to_page(start_pfn), isolate);
2146
2147 return true;
2148 }
2149
2150 bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page)
2151 {
2152 return __move_freepages_block_isolate(zone, page, true);
2153 }
2154
2155 bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page)
2156 {
2157 return __move_freepages_block_isolate(zone, page, false);
2158 }
2159
2160 #endif /* CONFIG_MEMORY_ISOLATION */
2161
2162 static inline bool boost_watermark(struct zone *zone)
2163 {
2164 unsigned long max_boost;
2165
2166 if (!watermark_boost_factor)
2167 return false;
2168 /*
2169 * Don't bother in zones that are unlikely to produce results.
2170 * On small machines, including kdump capture kernels running
2171 * in a small area, boosting the watermark can cause an out of
2172 * memory situation immediately.
2173 */
2174 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2175 return false;
2176
2177 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2178 watermark_boost_factor, 10000);
2179
2180 /*
2181 * The high watermark may be uninitialised if fragmentation occurs
2182 * very early in boot, so do not boost. We also do not fall
2183 * through and boost by pageblock_nr_pages, as failing
2184 * allocations that early means that reclaim is not going
2185 * to help, and it may even be impossible to reclaim up to the
2186 * boosted watermark, resulting in a hang.
2187 */
2188 if (!max_boost)
2189 return false;
2190
2191 max_boost = max(pageblock_nr_pages, max_boost);
2192
2193 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2194 max_boost);
2195
2196 return true;
2197 }
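/*
 * Example (illustrative): with the default watermark_boost_factor of 15000,
 * max_boost works out to 150% of the zone's high watermark. Each call then
 * bumps zone->watermark_boost by one pageblock worth of pages (e.g. 512
 * pages with 2MB pageblocks on x86-64) until that cap is reached, raising
 * reclaim pressure after a fallback event.
 */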
2198
2199 /*
2200 * When we are falling back to another migratetype during allocation, should we
2201 * try to claim an entire block to satisfy further allocations, instead of
2202 * polluting multiple pageblocks?
2203 */
2204 static bool should_try_claim_block(unsigned int order, int start_mt)
2205 {
2206 /*
2207 * This order check is intentional even though the next check
2208 * uses a more relaxed threshold. The reason is that if this
2209 * condition is met we can claim the whole pageblock for sure,
2210 * whereas the check below is only a heuristic that does not
2211 * guarantee it and could be changed at any time.
2212 */
2213 if (order >= pageblock_order)
2214 return true;
2215
2216 /*
2217 * Above a certain threshold, always try to claim, as it's likely there
2218 * will be more free pages in the pageblock.
2219 */
2220 if (order >= pageblock_order / 2)
2221 return true;
2222
2223 /*
2224 * Unmovable/reclaimable allocations would cause permanent
2225 * fragmentation if they fell back to allocating from a movable block
2226 * (polluting it), so we try to claim the whole block regardless of the
2227 * allocation size. Later movable allocations can always steal from this
2228 * block, which is less problematic.
2229 */
2230 if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE)
2231 return true;
2232
2233 if (page_group_by_mobility_disabled)
2234 return true;
2235
2236 /*
2237 * Movable pages won't cause permanent fragmentation, so for small
2238 * movable allocations we only need to temporarily steal unmovable or
2239 * reclaimable pages that are closest to the request size. After a
2240 * while, memory compaction may occur to form large contiguous pages,
2241 * and the next movable allocation may not need to steal.
2242 */
2243 return false;
2244 }
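/*
 * Example (illustrative): with pageblock_order == 9, any request of order
 * 4 or higher tries to claim a whole block, as do MIGRATE_UNMOVABLE and
 * MIGRATE_RECLAIMABLE requests of any order. Only small MIGRATE_MOVABLE
 * requests (order 0..3 here) fall through and steal a single page instead.
 */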
2245
2246 /*
2247 * Check whether there is a suitable fallback freepage with requested order.
2248 * If claimable is true, this function returns fallback_mt only if
2249 * we would do this whole-block claiming. This would help to reduce
2250 * fragmentation due to mixed migratetype pages in one pageblock.
2251 */
2252 int find_suitable_fallback(struct free_area *area, unsigned int order,
2253 int migratetype, bool claimable)
2254 {
2255 int i;
2256
2257 if (claimable && !should_try_claim_block(order, migratetype))
2258 return -2;
2259
2260 if (area->nr_free == 0)
2261 return -1;
2262
2263 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
2264 int fallback_mt = fallbacks[migratetype][i];
2265
2266 if (!free_area_empty(area, fallback_mt))
2267 return fallback_mt;
2268 }
2269
2270 return -1;
2271 }
2272
2273 /*
2274 * This function implements actual block claiming behaviour. If order is large
2275 * enough, we can claim the whole pageblock for the requested migratetype. If
2276 * not, we check the pageblock for constituent pages; if at least half of the
2277 * pages are free or compatible, we can still claim the whole block, so pages
2278 * freed in the future will be put on the correct free list.
2279 */
2280 static struct page *
2281 try_to_claim_block(struct zone *zone, struct page *page,
2282 int current_order, int order, int start_type,
2283 int block_type, unsigned int alloc_flags)
2284 {
2285 int free_pages, movable_pages, alike_pages;
2286 unsigned long start_pfn;
2287
2288 /* Take ownership for orders >= pageblock_order */
2289 if (current_order >= pageblock_order) {
2290 unsigned int nr_added;
2291
2292 del_page_from_free_list(page, zone, current_order, block_type);
2293 change_pageblock_range(page, current_order, start_type);
2294 nr_added = expand(zone, page, order, current_order, start_type);
2295 account_freepages(zone, nr_added, start_type);
2296 return page;
2297 }
2298
2299 /*
2300 * Boost watermarks to increase reclaim pressure to reduce the
2301 * likelihood of future fallbacks. Wake kswapd now as the node
2302 * may be balanced overall and kswapd will not wake naturally.
2303 */
2304 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2305 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2306
2307 /* moving whole block can fail due to zone boundary conditions */
2308 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
2309 &movable_pages))
2310 return NULL;
2311
2312 /*
2313 * Determine how many pages are compatible with our allocation.
2314 * For movable allocation, it's the number of movable pages which
2315 * we just obtained. For other types it's a bit more tricky.
2316 */
2317 if (start_type == MIGRATE_MOVABLE) {
2318 alike_pages = movable_pages;
2319 } else {
2320 /*
2321 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2322 * to MOVABLE pageblock, consider all non-movable pages as
2323 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2324 * vice versa, be conservative since we can't distinguish the
2325 * exact migratetype of non-movable pages.
2326 */
2327 if (block_type == MIGRATE_MOVABLE)
2328 alike_pages = pageblock_nr_pages
2329 - (free_pages + movable_pages);
2330 else
2331 alike_pages = 0;
2332 }
2333 /*
2334 * If a sufficient number of pages in the block are either free or of
2335 * a migratability compatible with our allocation, claim the whole block.
2336 */
2337 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2338 page_group_by_mobility_disabled) {
2339 __move_freepages_block(zone, start_pfn, block_type, start_type);
2340 set_pageblock_migratetype(pfn_to_page(start_pfn), start_type);
2341 return __rmqueue_smallest(zone, order, start_type);
2342 }
2343
2344 return NULL;
2345 }
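/*
 * Example (illustrative, with made-up numbers): with pageblock_order == 9
 * the threshold above is 1 << 8 = 256 pages. If an unmovable allocation
 * falls back into a movable block that still has free_pages == 180 and
 * alike_pages == 100, then 180 + 100 >= 256 and the whole block is
 * converted to MIGRATE_UNMOVABLE; with only 60 free and 40 alike pages
 * the fallback stays a single-page steal handled elsewhere.
 */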
2346
2347 /*
2348 * Try to allocate from some fallback migratetype by claiming the entire block,
2349 * i.e. converting it to the allocation's start migratetype.
2350 *
2351 * The use of signed ints for order and current_order is a deliberate
2352 * deviation from the rest of this file, to make the for loop
2353 * condition simpler.
2354 */
2355 static __always_inline struct page *
2356 __rmqueue_claim(struct zone *zone, int order, int start_migratetype,
2357 unsigned int alloc_flags)
2358 {
2359 struct free_area *area;
2360 int current_order;
2361 int min_order = order;
2362 struct page *page;
2363 int fallback_mt;
2364
2365 /*
2366 * Do not steal pages from freelists belonging to other pageblocks
2367 * i.e. orders < pageblock_order. If there are no local zones free,
2368 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2369 */
2370 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
2371 min_order = pageblock_order;
2372
2373 /*
2374 * Find the largest available free page in the other list. This roughly
2375 * approximates finding the pageblock with the most free pages, which
2376 * would be too costly to do exactly.
2377 */
2378 for (current_order = MAX_PAGE_ORDER; current_order >= min_order;
2379 --current_order) {
2380 area = &(zone->free_area[current_order]);
2381 fallback_mt = find_suitable_fallback(area, current_order,
2382 start_migratetype, true);
2383
2384 /* No block in that order */
2385 if (fallback_mt == -1)
2386 continue;
2387
2388 /* Advanced into orders too low to claim, abort */
2389 if (fallback_mt == -2)
2390 break;
2391
2392 page = get_page_from_free_area(area, fallback_mt);
2393 page = try_to_claim_block(zone, page, current_order, order,
2394 start_migratetype, fallback_mt,
2395 alloc_flags);
2396 if (page) {
2397 trace_mm_page_alloc_extfrag(page, order, current_order,
2398 start_migratetype, fallback_mt);
2399 return page;
2400 }
2401 }
2402
2403 return NULL;
2404 }
2405
2406 /*
2407 * Try to steal a single page from some fallback migratetype. Leave the rest of
2408 * the block as its current migratetype, potentially causing fragmentation.
2409 */
2410 static __always_inline struct page *
2411 __rmqueue_steal(struct zone *zone, int order, int start_migratetype)
2412 {
2413 struct free_area *area;
2414 int current_order;
2415 struct page *page;
2416 int fallback_mt;
2417
2418 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
2419 area = &(zone->free_area[current_order]);
2420 fallback_mt = find_suitable_fallback(area, current_order,
2421 start_migratetype, false);
2422 if (fallback_mt == -1)
2423 continue;
2424
2425 page = get_page_from_free_area(area, fallback_mt);
2426 page_del_and_expand(zone, page, order, current_order, fallback_mt);
2427 trace_mm_page_alloc_extfrag(page, order, current_order,
2428 start_migratetype, fallback_mt);
2429 return page;
2430 }
2431
2432 return NULL;
2433 }
2434
2435 enum rmqueue_mode {
2436 RMQUEUE_NORMAL,
2437 RMQUEUE_CMA,
2438 RMQUEUE_CLAIM,
2439 RMQUEUE_STEAL,
2440 };
2441
2442 /*
2443 * Do the hard work of removing an element from the buddy allocator.
2444 * Call me with the zone->lock already held.
2445 */
2446 static __always_inline struct page *
2447 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2448 unsigned int alloc_flags, enum rmqueue_mode *mode)
2449 {
2450 struct page *page;
2451
2452 if (IS_ENABLED(CONFIG_CMA)) {
2453 /*
2454 * Balance movable allocations between regular and CMA areas by
2455 * allocating from CMA when over half of the zone's free memory
2456 * is in the CMA area.
2457 */
2458 if (alloc_flags & ALLOC_CMA &&
2459 zone_page_state(zone, NR_FREE_CMA_PAGES) >
2460 zone_page_state(zone, NR_FREE_PAGES) / 2) {
2461 page = __rmqueue_cma_fallback(zone, order);
2462 if (page)
2463 return page;
2464 }
2465 }
2466
2467 /*
2468 * First try the freelists of the requested migratetype, then try
2469 * fallbacks modes with increasing levels of fragmentation risk.
2470 *
2471 * The fallback logic is expensive and rmqueue_bulk() calls this
2472 * in a loop with the zone->lock held, meaning the freelists are
2473 * not subject to any outside changes. Remember in *mode where
2474 * we found pay dirt, to save us the search on the next call.
2475 */
2476 switch (*mode) {
2477 case RMQUEUE_NORMAL:
2478 page = __rmqueue_smallest(zone, order, migratetype);
2479 if (page)
2480 return page;
2481 fallthrough;
2482 case RMQUEUE_CMA:
2483 if (alloc_flags & ALLOC_CMA) {
2484 page = __rmqueue_cma_fallback(zone, order);
2485 if (page) {
2486 *mode = RMQUEUE_CMA;
2487 return page;
2488 }
2489 }
2490 fallthrough;
2491 case RMQUEUE_CLAIM:
2492 page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
2493 if (page) {
2494 /* Replenished preferred freelist, back to normal mode. */
2495 *mode = RMQUEUE_NORMAL;
2496 return page;
2497 }
2498 fallthrough;
2499 case RMQUEUE_STEAL:
2500 if (!(alloc_flags & ALLOC_NOFRAGMENT)) {
2501 page = __rmqueue_steal(zone, order, migratetype);
2502 if (page) {
2503 *mode = RMQUEUE_STEAL;
2504 return page;
2505 }
2506 }
2507 }
2508 return NULL;
2509 }
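/*
 * Example (illustrative): when rmqueue_bulk() asks for a batch of movable
 * order-0 pages and the MIGRATE_MOVABLE lists are empty, the first call
 * may end up in RMQUEUE_CLAIM and convert a whole fallback block; *mode is
 * then reset to RMQUEUE_NORMAL, so the remaining iterations of the batch
 * are served straight from the freshly replenished movable free lists
 * without repeating the expensive fallback search.
 */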
2510
2511 /*
2512 * Obtain a specified number of elements from the buddy allocator, all under
2513 * a single hold of the lock, for efficiency. Add them to the supplied list.
2514 * Returns the number of new pages which were placed at *list.
2515 */
2516 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2517 unsigned long count, struct list_head *list,
2518 int migratetype, unsigned int alloc_flags)
2519 {
2520 enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
2521 unsigned long flags;
2522 int i;
2523
2524 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
2525 if (!spin_trylock_irqsave(&zone->lock, flags))
2526 return 0;
2527 } else {
2528 spin_lock_irqsave(&zone->lock, flags);
2529 }
2530 for (i = 0; i < count; ++i) {
2531 struct page *page = __rmqueue(zone, order, migratetype,
2532 alloc_flags, &rmqm);
2533 if (unlikely(page == NULL))
2534 break;
2535
2536 /*
2537 * Split buddy pages returned by expand() are received here in
2538 * physical page order. Each page is added to the tail of the
2539 * caller's list, so from the caller's perspective the linked
2540 * list is ordered by page number under some conditions. This
2541 * is useful for IO devices that forward requests from the
2542 * head of the list, thus also in physical page order, and for
2543 * IO devices that can merge IO requests if the physical
2544 * pages are ordered properly.
2545 */
2546 list_add_tail(&page->pcp_list, list);
2547 }
2548 spin_unlock_irqrestore(&zone->lock, flags);
2549
2550 return i;
2551 }
2552
2553 /*
2554 * Called from the vmstat counter updater to decay the PCP high.
2555 * Return whether there is additional work to do.
2556 */
2557 bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
2558 {
2559 int high_min, to_drain, to_drain_batched, batch;
2560 bool todo = false;
2561
2562 high_min = READ_ONCE(pcp->high_min);
2563 batch = READ_ONCE(pcp->batch);
2564 /*
2565 * Periodically decrease pcp->high to try to free possibly
2566 * idle PCP pages, while avoiding freeing too many pages at
2567 * once to control latency. This also caps the pcp->high decrement.
2568 */
2569 if (pcp->high > high_min) {
2570 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2571 pcp->high - (pcp->high >> 3), high_min);
2572 if (pcp->high > high_min)
2573 todo = true;
2574 }
2575
2576 to_drain = pcp->count - pcp->high;
2577 while (to_drain > 0) {
2578 to_drain_batched = min(to_drain, batch);
2579 pcp_spin_lock_nopin(pcp);
2580 free_pcppages_bulk(zone, to_drain_batched, pcp, 0);
2581 pcp_spin_unlock_nopin(pcp);
2582 todo = true;
2583
2584 to_drain -= to_drain_batched;
2585 }
2586
2587 return todo;
2588 }
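/*
 * Example (illustrative, with made-up numbers): with high_min = 64,
 * pcp->high = 1000 and pcp->count = 900, the decay step lowers pcp->high
 * to max(900 - (batch << CONFIG_PCP_BATCH_SCALE_MAX), 1000 - 125, 64).
 * For batch = 64 and a scale of 5 that is max(-1148, 875, 64) = 875, so
 * 900 - 875 = 25 pages are then drained back to the zone in chunks of at
 * most batch pages.
 */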
2589
2590 #ifdef CONFIG_NUMA
2591 /*
2592 * Called from the vmstat counter updater to drain pagesets of this
2593 * currently executing processor on remote nodes after they have
2594 * expired.
2595 */
2596 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2597 {
2598 int to_drain, batch;
2599
2600 batch = READ_ONCE(pcp->batch);
2601 to_drain = min(pcp->count, batch);
2602 if (to_drain > 0) {
2603 pcp_spin_lock_nopin(pcp);
2604 free_pcppages_bulk(zone, to_drain, pcp, 0);
2605 pcp_spin_unlock_nopin(pcp);
2606 }
2607 }
2608 #endif
2609
2610 /*
2611 * Drain pcplists of the indicated processor and zone.
2612 */
2613 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2614 {
2615 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2616 int count;
2617
2618 do {
2619 pcp_spin_lock_nopin(pcp);
2620 count = pcp->count;
2621 if (count) {
2622 int to_drain = min(count,
2623 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
2624
2625 free_pcppages_bulk(zone, to_drain, pcp, 0);
2626 count -= to_drain;
2627 }
2628 pcp_spin_unlock_nopin(pcp);
2629 } while (count);
2630 }
2631
2632 /*
2633 * Drain pcplists of all zones on the indicated processor.
2634 */
2635 static void drain_pages(unsigned int cpu)
2636 {
2637 struct zone *zone;
2638
2639 for_each_populated_zone(zone) {
2640 drain_pages_zone(cpu, zone);
2641 }
2642 }
2643
2644 /*
2645 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2646 */
2647 void drain_local_pages(struct zone *zone)
2648 {
2649 int cpu = smp_processor_id();
2650
2651 if (zone)
2652 drain_pages_zone(cpu, zone);
2653 else
2654 drain_pages(cpu);
2655 }
2656
2657 /*
2658 * The implementation of drain_all_pages(), exposing an extra parameter to
2659 * drain on all cpus.
2660 *
2661 * drain_all_pages() is optimized to only execute on cpus where pcplists are
2662 * not empty. The check for non-emptiness can however race with a free to
2663 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
2664 * that need the guarantee that every CPU has drained can disable the
2665 * optimizing racy check.
2666 */
2667 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
2668 {
2669 int cpu;
2670
2671 /*
2672 * Allocate in the BSS so we won't require allocation in
2673 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2674 */
2675 static cpumask_t cpus_with_pcps;
2676
2677 /*
2678 * Do not drain if one is already in progress unless it's specific to
2679 * a zone. Such callers are primarily CMA and memory hotplug and need
2680 * the drain to be complete when the call returns.
2681 */
2682 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2683 if (!zone)
2684 return;
2685 mutex_lock(&pcpu_drain_mutex);
2686 }
2687
2688 /*
2689 * We don't care about racing with CPU hotplug event
2690 * as offline notification will cause the notified
2691 * cpu to drain that CPU pcps and on_each_cpu_mask
2692 * disables preemption as part of its processing
2693 */
2694 for_each_online_cpu(cpu) {
2695 struct per_cpu_pages *pcp;
2696 struct zone *z;
2697 bool has_pcps = false;
2698
2699 if (force_all_cpus) {
2700 /*
2701 * The pcp.count check is racy, some callers need a
2702 * guarantee that no cpu is missed.
2703 */
2704 has_pcps = true;
2705 } else if (zone) {
2706 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2707 if (pcp->count)
2708 has_pcps = true;
2709 } else {
2710 for_each_populated_zone(z) {
2711 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
2712 if (pcp->count) {
2713 has_pcps = true;
2714 break;
2715 }
2716 }
2717 }
2718
2719 if (has_pcps)
2720 cpumask_set_cpu(cpu, &cpus_with_pcps);
2721 else
2722 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2723 }
2724
2725 for_each_cpu(cpu, &cpus_with_pcps) {
2726 if (zone)
2727 drain_pages_zone(cpu, zone);
2728 else
2729 drain_pages(cpu);
2730 }
2731
2732 mutex_unlock(&pcpu_drain_mutex);
2733 }
2734
2735 /*
2736 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2737 *
2738 * When zone parameter is non-NULL, spill just the single zone's pages.
2739 */
2740 void drain_all_pages(struct zone *zone)
2741 {
2742 __drain_all_pages(zone, false);
2743 }
2744
2745 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high)
2746 {
2747 int min_nr_free, max_nr_free;
2748
2749 /* Free as much as possible if batch freeing high-order pages. */
2750 if (unlikely(free_high))
2751 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX);
2752
2753 /* Check for PCP disabled or boot pageset */
2754 if (unlikely(high < batch))
2755 return 1;
2756
2757 /* Leave at least pcp->batch pages on the list */
2758 min_nr_free = batch;
2759 max_nr_free = high - batch;
2760
2761 /*
2762 * Increase the batch number to the number of the consecutive
2763 * freed pages to reduce zone lock contention.
2764 */
2765 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free);
2766
2767 return batch;
2768 }
2769
2770 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
2771 int batch, bool free_high)
2772 {
2773 int high, high_min, high_max;
2774
2775 high_min = READ_ONCE(pcp->high_min);
2776 high_max = READ_ONCE(pcp->high_max);
2777 high = pcp->high = clamp(pcp->high, high_min, high_max);
2778
2779 if (unlikely(!high))
2780 return 0;
2781
2782 if (unlikely(free_high)) {
2783 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2784 high_min);
2785 return 0;
2786 }
2787
2788 /*
2789 * If reclaim is active, limit the number of pages that can be
2790 * stored on pcp lists
2791 */
2792 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) {
2793 int free_count = max_t(int, pcp->free_count, batch);
2794
2795 pcp->high = max(high - free_count, high_min);
2796 return min(batch << 2, pcp->high);
2797 }
2798
2799 if (high_min == high_max)
2800 return high;
2801
2802 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) {
2803 int free_count = max_t(int, pcp->free_count, batch);
2804
2805 pcp->high = max(high - free_count, high_min);
2806 high = max(pcp->count, high_min);
2807 } else if (pcp->count >= high) {
2808 int need_high = pcp->free_count + batch;
2809
2810 /* pcp->high should be large enough to hold batch freed pages */
2811 if (pcp->high < need_high)
2812 pcp->high = clamp(need_high, high_min, high_max);
2813 }
2814
2815 return high;
2816 }
2817
2818 /*
2819 * Tune the pcp alloc factor and adjust count & free_count. Free pages to bring
2820 * the pcp's count back below its high watermark.
2821 *
2822 * May return with the pcp unlocked if the pcp spinlock cannot be reacquired
2823 * during page freeing. Return true if the pcp is still locked, false otherwise.
2824 */
2825 static bool free_frozen_page_commit(struct zone *zone,
2826 struct per_cpu_pages *pcp, struct page *page, int migratetype,
2827 unsigned int order, fpi_t fpi_flags)
2828 {
2829 int high, batch;
2830 int to_free, to_free_batched;
2831 int pindex;
2832 int cpu = smp_processor_id();
2833 int ret = true;
2834 bool free_high = false;
2835
2836 /*
2837 * On freeing, reduce the number of pages that are batch allocated.
2838 * See nr_pcp_alloc() where alloc_factor is increased for subsequent
2839 * allocations.
2840 */
2841 pcp->alloc_factor >>= 1;
2842 __count_vm_events(PGFREE, 1 << order);
2843 pindex = order_to_pindex(migratetype, order);
2844 list_add(&page->pcp_list, &pcp->lists[pindex]);
2845 pcp->count += 1 << order;
2846
2847 batch = READ_ONCE(pcp->batch);
2848 /*
2849 * As high-order pages other than THPs stored on the PCP can contribute
2850 * to fragmentation, limit the number stored when PCP is heavily
2851 * freeing without allocation. The remainder after bulk freeing
2852 * stops will be drained from vmstat refresh context.
2853 */
2854 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) {
2855 free_high = (pcp->free_count >= (batch + pcp->high_min / 2) &&
2856 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) &&
2857 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) ||
2858 pcp->count >= batch));
2859 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER;
2860 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) {
2861 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER;
2862 }
2863 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX))
2864 pcp->free_count += (1 << order);
2865
2866 if (unlikely(fpi_flags & FPI_TRYLOCK)) {
2867 /*
2868 * Do not attempt to take a zone lock. Let pcp->count get
2869 * over high mark temporarily.
2870 */
2871 return true;
2872 }
2873
2874 high = nr_pcp_high(pcp, zone, batch, free_high);
2875 if (pcp->count < high)
2876 return true;
2877
2878 to_free = nr_pcp_free(pcp, batch, high, free_high);
2879 while (to_free > 0 && pcp->count > 0) {
2880 to_free_batched = min(to_free, batch);
2881 free_pcppages_bulk(zone, to_free_batched, pcp, pindex);
2882 to_free -= to_free_batched;
2883
2884 if (to_free == 0 || pcp->count == 0)
2885 break;
2886
2887 pcp_spin_unlock(pcp);
2888
2889 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2890 if (!pcp) {
2891 ret = false;
2892 break;
2893 }
2894
2895 /*
2896 * Check if this thread has been migrated to a different CPU.
2897 * If that is the case, give up and indicate that the pcp is
2898 * returned in an unlocked state.
2899 */
2900 if (smp_processor_id() != cpu) {
2901 pcp_spin_unlock(pcp);
2902 ret = false;
2903 break;
2904 }
2905 }
2906
2907 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) &&
2908 zone_watermark_ok(zone, 0, high_wmark_pages(zone),
2909 ZONE_MOVABLE, 0)) {
2910 struct pglist_data *pgdat = zone->zone_pgdat;
2911 clear_bit(ZONE_BELOW_HIGH, &zone->flags);
2912
2913 /*
2914 * Assume that memory pressure on this node is gone and may be
2915 * in a reclaimable state. If a memory fallback node exists,
2916 * direct reclaim may not have been triggered, causing a
2917 * 'hopeless node' to stay in that state for a while. Let
2918 * kswapd work again by resetting kswapd_failures.
2919 */
2920 if (kswapd_test_hopeless(pgdat) &&
2921 next_memory_node(pgdat->node_id) < MAX_NUMNODES)
2922 kswapd_clear_hopeless(pgdat, KSWAPD_CLEAR_HOPELESS_PCP);
2923 }
2924 return ret;
2925 }
2926
2927 /*
2928 * Free a pcp page
2929 */
2930 static void __free_frozen_pages(struct page *page, unsigned int order,
2931 fpi_t fpi_flags)
2932 {
2933 struct per_cpu_pages *pcp;
2934 struct zone *zone;
2935 unsigned long pfn = page_to_pfn(page);
2936 int migratetype;
2937
2938 if (!pcp_allowed_order(order)) {
2939 __free_pages_ok(page, order, fpi_flags);
2940 return;
2941 }
2942
2943 if (!__free_pages_prepare(page, order, fpi_flags))
2944 return;
2945
2946 /*
2947 * We only track unmovable, reclaimable and movable on pcp lists.
2948 * Place ISOLATE pages on the isolated list because they are being
2949 * offlined, but treat HIGHATOMIC and CMA as movable pages so we can
2950 * get those areas back if necessary. Otherwise, we may have to free
2951 * excessively into the page allocator.
2952 */
2953 zone = page_zone(page);
2954 migratetype = get_pfnblock_migratetype(page, pfn);
2955 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
2956 if (unlikely(is_migrate_isolate(migratetype))) {
2957 free_one_page(zone, page, pfn, order, fpi_flags);
2958 return;
2959 }
2960 migratetype = MIGRATE_MOVABLE;
2961 }
2962
2963 if (unlikely((fpi_flags & FPI_TRYLOCK) && IS_ENABLED(CONFIG_PREEMPT_RT)
2964 && (in_nmi() || in_hardirq()))) {
2965 add_page_to_zone_llist(zone, page, order);
2966 return;
2967 }
2968 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2969 if (pcp) {
2970 if (!free_frozen_page_commit(zone, pcp, page, migratetype,
2971 order, fpi_flags))
2972 return;
2973 pcp_spin_unlock(pcp);
2974 } else {
2975 free_one_page(zone, page, pfn, order, fpi_flags);
2976 }
2977 }
2978
2979 void free_frozen_pages(struct page *page, unsigned int order)
2980 {
2981 __free_frozen_pages(page, order, FPI_NONE);
2982 }
2983
2984 void free_frozen_pages_nolock(struct page *page, unsigned int order)
2985 {
2986 __free_frozen_pages(page, order, FPI_TRYLOCK);
2987 }
2988
2989 /*
2990 * Free a batch of folios
2991 */
2992 void free_unref_folios(struct folio_batch *folios)
2993 {
2994 struct per_cpu_pages *pcp = NULL;
2995 struct zone *locked_zone = NULL;
2996 int i, j;
2997
2998 /* Prepare folios for freeing */
2999 for (i = 0, j = 0; i < folios->nr; i++) {
3000 struct folio *folio = folios->folios[i];
3001 unsigned long pfn = folio_pfn(folio);
3002 unsigned int order = folio_order(folio);
3003
3004 if (!__free_pages_prepare(&folio->page, order, FPI_NONE))
3005 continue;
3006 /*
3007 * Free orders not handled on the PCP directly to the
3008 * allocator.
3009 */
3010 if (!pcp_allowed_order(order)) {
3011 free_one_page(folio_zone(folio), &folio->page,
3012 pfn, order, FPI_NONE);
3013 continue;
3014 }
3015 folio->private = (void *)(unsigned long)order;
3016 if (j != i)
3017 folios->folios[j] = folio;
3018 j++;
3019 }
3020 folios->nr = j;
3021
3022 for (i = 0; i < folios->nr; i++) {
3023 struct folio *folio = folios->folios[i];
3024 struct zone *zone = folio_zone(folio);
3025 unsigned long pfn = folio_pfn(folio);
3026 unsigned int order = (unsigned long)folio->private;
3027 int migratetype;
3028
3029 folio->private = NULL;
3030 migratetype = get_pfnblock_migratetype(&folio->page, pfn);
3031
3032 /* Different zone requires a different pcp lock */
3033 if (zone != locked_zone ||
3034 is_migrate_isolate(migratetype)) {
3035 if (pcp) {
3036 pcp_spin_unlock(pcp);
3037 locked_zone = NULL;
3038 pcp = NULL;
3039 }
3040
3041 /*
3042 * Free isolated pages directly to the
3043 * allocator, see comment in free_frozen_pages.
3044 */
3045 if (is_migrate_isolate(migratetype)) {
3046 free_one_page(zone, &folio->page, pfn,
3047 order, FPI_NONE);
3048 continue;
3049 }
3050
3051 /*
3052 * trylock is necessary as folios may be getting freed
3053 * from IRQ or SoftIRQ context after an IO completion.
3054 */
3055 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
3056 if (unlikely(!pcp)) {
3057 free_one_page(zone, &folio->page, pfn,
3058 order, FPI_NONE);
3059 continue;
3060 }
3061 locked_zone = zone;
3062 }
3063
3064 /*
3065 * Non-isolated types over MIGRATE_PCPTYPES get added
3066 * to the MIGRATE_MOVABLE pcp list.
3067 */
3068 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
3069 migratetype = MIGRATE_MOVABLE;
3070
3071 trace_mm_page_free_batched(&folio->page);
3072 if (!free_frozen_page_commit(zone, pcp, &folio->page,
3073 migratetype, order, FPI_NONE)) {
3074 pcp = NULL;
3075 locked_zone = NULL;
3076 }
3077 }
3078
3079 if (pcp)
3080 pcp_spin_unlock(pcp);
3081 folio_batch_reinit(folios);
3082 }
3083
3084 static void __split_page(struct page *page, unsigned int order)
3085 {
3086 VM_WARN_ON_PAGE(PageCompound(page), page);
3087
3088 split_page_owner(page, order, 0);
3089 pgalloc_tag_split(page_folio(page), order, 0);
3090 split_page_memcg(page, order);
3091 }
3092
3093 /*
3094 * split_page takes a non-compound higher-order page, and splits it into
3095 * n (1 << order) sub-pages: page[0..n-1].
3096 * Each sub-page must be freed individually.
3097 *
3098 * Note: this is probably too low level an operation for use in drivers.
3099 * Please consult with lkml before using this in your driver.
3100 */
3101 void split_page(struct page *page, unsigned int order)
3102 {
3103 int i;
3104
3105 VM_WARN_ON_PAGE(!page_count(page), page);
3106
3107 for (i = 1; i < (1 << order); i++)
3108 set_page_refcounted(page + i);
3109
3110 __split_page(page, order);
3111 }
3112 EXPORT_SYMBOL_GPL(split_page);
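/*
 * Example usage (illustrative sketch, error handling elided): a caller
 * that needs four individually freeable pages from one physically
 * contiguous allocation might do:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		split_page(page, 2);
 *		...
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}
 *
 * The allocation must not use __GFP_COMP, since split_page() only works
 * on non-compound pages.
 */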
3113
3114 int __isolate_free_page(struct page *page, unsigned int order)
3115 {
3116 struct zone *zone = page_zone(page);
3117 int mt = get_pageblock_migratetype(page);
3118
3119 if (!is_migrate_isolate(mt)) {
3120 unsigned long watermark;
3121 /*
3122 * Obey watermarks as if the page was being allocated. We can
3123 * emulate a high-order watermark check with a raised order-0
3124 * watermark, because we already know our high-order page
3125 * exists.
3126 */
3127 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3128 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3129 return 0;
3130 }
3131
3132 del_page_from_free_list(page, zone, order, mt);
3133
3134 /*
3135 * Set the pageblock's migratetype if the isolated page covers at
3136 * least half of a pageblock.
3137 */
3138 if (order >= pageblock_order - 1) {
3139 struct page *endpage = page + (1 << order) - 1;
3140 for (; page < endpage; page += pageblock_nr_pages) {
3141 int mt = get_pageblock_migratetype(page);
3142 /*
3143 * Only change normal pageblocks (i.e., they can merge
3144 * with others)
3145 */
3146 if (migratetype_is_mergeable(mt))
3147 move_freepages_block(zone, page, mt,
3148 MIGRATE_MOVABLE);
3149 }
3150 }
3151
3152 return 1UL << order;
3153 }
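/*
 * Example (illustrative, with made-up numbers): isolating an order-3 free
 * page from a zone whose min watermark is 1024 pages requires at least
 * 1024 + 8 free pages (watermark = WMARK_MIN + (1 << 3)), so that pulling
 * the page out cannot by itself push the zone below its min watermark.
 */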
3154
3155 /**
3156 * __putback_isolated_page - Return a now-isolated page back where we got it
3157 * @page: Page that was isolated
3158 * @order: Order of the isolated page
3159 * @mt: The page's pageblock's migratetype
3160 *
3161 * This function is meant to return a page pulled from the free lists via
3162 * __isolate_free_page back to the free list it was pulled from.
3163 */
3164 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3165 {
3166 struct zone *zone = page_zone(page);
3167
3168 /* zone lock should be held when this function is called */
3169 lockdep_assert_held(&zone->lock);
3170
3171 /* Return isolated page to tail of freelist. */
3172 __free_one_page(page, page_to_pfn(page), zone, order, mt,
3173 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3174 }
3175
3176 /*
3177 * Update NUMA hit/miss statistics
3178 */
3179 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3180 long nr_account)
3181 {
3182 #ifdef CONFIG_NUMA
3183 enum numa_stat_item local_stat = NUMA_LOCAL;
3184
3185 /* skip numa counters update if numa stats is disabled */
3186 if (!static_branch_likely(&vm_numa_stat_key))
3187 return;
3188
3189 if (zone_to_nid(z) != numa_node_id())
3190 local_stat = NUMA_OTHER;
3191
3192 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3193 __count_numa_events(z, NUMA_HIT, nr_account);
3194 else {
3195 __count_numa_events(z, NUMA_MISS, nr_account);
3196 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3197 }
3198 __count_numa_events(z, local_stat, nr_account);
3199 #endif
3200 }
3201
3202 static __always_inline
3203 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
3204 unsigned int order, unsigned int alloc_flags,
3205 int migratetype)
3206 {
3207 struct page *page;
3208 unsigned long flags;
3209
3210 do {
3211 page = NULL;
3212 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
3213 if (!spin_trylock_irqsave(&zone->lock, flags))
3214 return NULL;
3215 } else {
3216 spin_lock_irqsave(&zone->lock, flags);
3217 }
3218 if (alloc_flags & ALLOC_HIGHATOMIC)
3219 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3220 if (!page) {
3221 enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
3222
3223 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
3224
3225 /*
3226 * If the allocation fails, allow OOM handling and
3227 * order-0 (atomic) allocs access to HIGHATOMIC
3228 * reserves as failing now is worse than failing a
3229 * high-order atomic allocation in the future.
3230 */
3231 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK)))
3232 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3233
3234 if (!page) {
3235 spin_unlock_irqrestore(&zone->lock, flags);
3236 return NULL;
3237 }
3238 }
3239 spin_unlock_irqrestore(&zone->lock, flags);
3240 } while (check_new_pages(page, order));
3241
3242 /*
3243 * If this is a high-order atomic allocation then check
3244 * if the pageblock should be reserved for the future
3245 */
3246 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
3247 reserve_highatomic_pageblock(page, order, zone);
3248
3249 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3250 zone_statistics(preferred_zone, zone, 1);
3251
3252 return page;
3253 }
3254
3255 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order)
3256 {
3257 int high, base_batch, batch, max_nr_alloc;
3258 int high_max, high_min;
3259
3260 base_batch = READ_ONCE(pcp->batch);
3261 high_min = READ_ONCE(pcp->high_min);
3262 high_max = READ_ONCE(pcp->high_max);
3263 high = pcp->high = clamp(pcp->high, high_min, high_max);
3264
3265 /* Check for PCP disabled or boot pageset */
3266 if (unlikely(high < base_batch))
3267 return 1;
3268
3269 if (order)
3270 batch = base_batch;
3271 else
3272 batch = (base_batch << pcp->alloc_factor);
3273
3274 /*
3275 * With a larger pcp->high, we could avoid allocating from the
3276 * zone.
3277 */
3278 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags))
3279 high = pcp->high = min(high + batch, high_max);
3280
3281 if (!order) {
3282 max_nr_alloc = max(high - pcp->count - base_batch, base_batch);
3283 /*
3284 * Double the number of pages allocated each time there is
3285 * subsequent allocation of order-0 pages without any freeing.
3286 */
3287 if (batch <= max_nr_alloc &&
3288 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX)
3289 pcp->alloc_factor++;
3290 batch = min(batch, max_nr_alloc);
3291 }
3292
3293 /*
3294 * Scale batch relative to order if batch implies free pages
3295 * can be stored on the PCP. Batch can be 1 for small zones or
3296 * for boot pagesets which should never store free pages as
3297 * the pages may belong to arbitrary zones.
3298 */
3299 if (batch > 1)
3300 batch = max(batch >> order, 2);
3301
3302 return batch;
3303 }
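/*
 * Example (illustrative): for order-0 allocations with base_batch = 64,
 * alloc_factor doubles the refill size on each consecutive allocation
 * burst without intervening frees: the PCP list is refilled with 64, then
 * 128, then 256, ... pages, capped by CONFIG_PCP_BATCH_SCALE_MAX and by
 * max_nr_alloc so that the refill does not grossly overshoot pcp->high.
 */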
3304
3305 /* Remove page from the per-cpu list, caller must protect the list */
3306 static inline
3307 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3308 int migratetype,
3309 unsigned int alloc_flags,
3310 struct per_cpu_pages *pcp,
3311 struct list_head *list)
3312 {
3313 struct page *page;
3314
3315 do {
3316 if (list_empty(list)) {
3317 int batch = nr_pcp_alloc(pcp, zone, order);
3318 int alloced;
3319
3320 /*
3321 * Don't refill the list for a higher order atomic
3322 * allocation under memory pressure, as this would
3323 * not build up any HIGHATOMIC reserves, which
3324 * might be needed soon.
3325 *
3326 * Instead, direct it towards the reserves by
3327 * returning NULL, which will make the caller fall
3328 * back to rmqueue_buddy. This will try to use the
3329 * reserves first and grow them if needed.
3330 */
3331 if (alloc_flags & ALLOC_HIGHATOMIC)
3332 return NULL;
3333
3334 alloced = rmqueue_bulk(zone, order,
3335 batch, list,
3336 migratetype, alloc_flags);
3337
3338 pcp->count += alloced << order;
3339 if (unlikely(list_empty(list)))
3340 return NULL;
3341 }
3342
3343 page = list_first_entry(list, struct page, pcp_list);
3344 list_del(&page->pcp_list);
3345 pcp->count -= 1 << order;
3346 } while (check_new_pages(page, order));
3347
3348 return page;
3349 }
3350
3351 /* Lock and remove page from the per-cpu list */
3352 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3353 struct zone *zone, unsigned int order,
3354 int migratetype, unsigned int alloc_flags)
3355 {
3356 struct per_cpu_pages *pcp;
3357 struct list_head *list;
3358 struct page *page;
3359
3360 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
3361 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
3362 if (!pcp)
3363 return NULL;
3364
3365 /*
3366 * On allocation, reduce the number of pages that are batch freed.
3367 * See nr_pcp_free() where free_factor is increased for subsequent
3368 * frees.
3369 */
3370 pcp->free_count >>= 1;
3371 list = &pcp->lists[order_to_pindex(migratetype, order)];
3372 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3373 pcp_spin_unlock(pcp);
3374 if (page) {
3375 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3376 zone_statistics(preferred_zone, zone, 1);
3377 }
3378 return page;
3379 }
3380
3381 /*
3382 * Allocate a page from the given zone.
3383 * Use pcplists for THP or "cheap" high-order allocations.
3384 */
3385
3386 /*
3387 * Do not instrument rmqueue() with KMSAN. This function may call
3388 * __msan_poison_alloca() through a call to set_pfnblock_migratetype().
3389 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
3390 * may call rmqueue() again, which will result in a deadlock.
3391 */
3392 __no_sanitize_memory
3393 static inline
3394 struct page *rmqueue(struct zone *preferred_zone,
3395 struct zone *zone, unsigned int order,
3396 gfp_t gfp_flags, unsigned int alloc_flags,
3397 int migratetype)
3398 {
3399 struct page *page;
3400
3401 if (likely(pcp_allowed_order(order))) {
3402 page = rmqueue_pcplist(preferred_zone, zone, order,
3403 migratetype, alloc_flags);
3404 if (likely(page))
3405 goto out;
3406 }
3407
3408 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
3409 migratetype);
3410
3411 out:
3412 /* Separate test+clear to avoid unnecessary atomics */
3413 if ((alloc_flags & ALLOC_KSWAPD) &&
3414 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
3415 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3416 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3417 }
3418
3419 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3420 return page;
3421 }
3422
3423 /*
3424 * Reserve the pageblock(s) surrounding an allocation request for
3425 * exclusive use of high-order atomic allocations if there are no
3426 * empty page blocks that contain a page with a suitable order
3427 */
3428 static void reserve_highatomic_pageblock(struct page *page, int order,
3429 struct zone *zone)
3430 {
3431 int mt;
3432 unsigned long max_managed, flags;
3433
3434 /*
3435 * Reserve a minimum of one pageblock and a maximum of roughly
3436 * 1% of the zone. If 1% of the zone is smaller than one
3437 * pageblock, don't reserve any pageblocks.
3438 * The check is race-prone but harmless.
3439 */
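/*
 * Illustrative sizing, assuming pageblock_nr_pages == 512 (2MB pageblocks
 * on x86-64): a zone managing 1M pages caps the reservation at
 * ALIGN(10485, 512) == 10752 pages (21 pageblocks), while a zone managing
 * fewer than 51200 pages reserves nothing.
 */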
3440 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
3441 return;
3442 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
3443 if (zone->nr_reserved_highatomic >= max_managed)
3444 return;
3445
3446 spin_lock_irqsave(&zone->lock, flags);
3447
3448 /* Recheck the nr_reserved_highatomic limit under the lock */
3449 if (zone->nr_reserved_highatomic >= max_managed)
3450 goto out_unlock;
3451
3452 /* Yoink! */
3453 mt = get_pageblock_migratetype(page);
3454 /* Only reserve normal pageblocks (i.e., they can merge with others) */
3455 if (!migratetype_is_mergeable(mt))
3456 goto out_unlock;
3457
3458 if (order < pageblock_order) {
3459 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
3460 goto out_unlock;
3461 zone->nr_reserved_highatomic += pageblock_nr_pages;
3462 } else {
3463 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
3464 zone->nr_reserved_highatomic += 1 << order;
3465 }
3466
3467 out_unlock:
3468 spin_unlock_irqrestore(&zone->lock, flags);
3469 }
3470
3471 /*
3472 * Used when an allocation is about to fail under memory pressure. This
3473 * potentially hurts the reliability of high-order allocations when under
3474 * intense memory pressure but failed atomic allocations should be easier
3475 * to recover from than an OOM.
3476 *
3477 * If @force is true, try to unreserve pageblocks even though highatomic
3478 * pageblock is exhausted.
3479 */
3480 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
3481 bool force)
3482 {
3483 struct zonelist *zonelist = ac->zonelist;
3484 unsigned long flags;
3485 struct zoneref *z;
3486 struct zone *zone;
3487 struct page *page;
3488 int order;
3489 int ret;
3490
3491 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
3492 ac->nodemask) {
3493 /*
3494 * Preserve at least one pageblock unless memory pressure
3495 * is really high.
3496 */
3497 if (!force && zone->nr_reserved_highatomic <=
3498 pageblock_nr_pages)
3499 continue;
3500
3501 spin_lock_irqsave(&zone->lock, flags);
3502 for (order = 0; order < NR_PAGE_ORDERS; order++) {
3503 struct free_area *area = &(zone->free_area[order]);
3504 unsigned long size;
3505
3506 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
3507 if (!page)
3508 continue;
3509
3510 size = max(pageblock_nr_pages, 1UL << order);
3511 /*
3512 * It should never happen but changes to
3513 * locking could inadvertently allow a per-cpu
3514 * drain to add pages to MIGRATE_HIGHATOMIC
3515 * while unreserving so be safe and watch for
3516 * underflows.
3517 */
3518 if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic))
3519 size = zone->nr_reserved_highatomic;
3520 zone->nr_reserved_highatomic -= size;
3521
3522 /*
3523 * Convert to ac->migratetype and avoid the normal
3524 * pageblock stealing heuristics. Minimally, the caller
3525 * is doing the work and needs the pages. More
3526 * importantly, if the block was always converted to
3527 * MIGRATE_UNMOVABLE or another type then the number
3528 * of pageblocks that cannot be completely freed
3529 * may increase.
3530 */
3531 if (order < pageblock_order)
3532 ret = move_freepages_block(zone, page,
3533 MIGRATE_HIGHATOMIC,
3534 ac->migratetype);
3535 else {
3536 move_to_free_list(page, zone, order,
3537 MIGRATE_HIGHATOMIC,
3538 ac->migratetype);
3539 change_pageblock_range(page, order,
3540 ac->migratetype);
3541 ret = 1;
3542 }
3543 /*
3544 * Reserving the block(s) already succeeded,
3545 * so this should not fail on zone boundaries.
3546 */
3547 WARN_ON_ONCE(ret == -1);
3548 if (ret > 0) {
3549 spin_unlock_irqrestore(&zone->lock, flags);
3550 return ret;
3551 }
3552 }
3553 spin_unlock_irqrestore(&zone->lock, flags);
3554 }
3555
3556 return false;
3557 }
3558
3559 static inline long __zone_watermark_unusable_free(struct zone *z,
3560 unsigned int order, unsigned int alloc_flags)
3561 {
3562 long unusable_free = (1 << order) - 1;
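/*
 * Discounting (1 << order) - 1 pages means the order-0 watermark checks
 * that consume this value effectively ask whether the watermark would
 * still hold after this request was satisfied in full.
 */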
3563
3564 /*
3565 * If the caller does not have rights to reserves below the min
3566 * watermark then subtract the free pages reserved for highatomic.
3567 */
3568 if (likely(!(alloc_flags & ALLOC_RESERVES)))
3569 unusable_free += READ_ONCE(z->nr_free_highatomic);
3570
3571 #ifdef CONFIG_CMA
3572 /* If allocation can't use CMA areas don't use free CMA pages */
3573 if (!(alloc_flags & ALLOC_CMA))
3574 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3575 #endif
3576
3577 return unusable_free;
3578 }
3579
3580 /*
3581 * Return true if free base pages are above 'mark'. For high-order checks it
3582 * will return true if the order-0 watermark is reached and there is at least
3583 * one free page of a suitable size. Checking now avoids taking the zone lock
3584 * to check in the allocation paths if no pages are free.
3585 */
3586 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3587 int highest_zoneidx, unsigned int alloc_flags,
3588 long free_pages)
3589 {
3590 long min = mark;
3591 int o;
3592
3593 /* free_pages may go negative - that's OK */
3594 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3595
3596 if (unlikely(alloc_flags & ALLOC_RESERVES)) {
3597 /*
3598 * __GFP_HIGH allows access to 50% of the min reserve as well
3599 * as OOM.
3600 */
3601 if (alloc_flags & ALLOC_MIN_RESERVE) {
3602 min -= min / 2;
3603
3604 /*
3605 * Non-blocking allocations (e.g. GFP_ATOMIC) can
3606 * access more reserves than just __GFP_HIGH. Other
3607 * non-blocking allocation requests such as GFP_NOWAIT
3608 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get
3609 * access to the min reserve.
3610 */
3611 if (alloc_flags & ALLOC_NON_BLOCK)
3612 min -= min / 4;
3613 }
3614
3615 /*
3616 * OOM victims can try even harder than the normal reserve
3617 * users on the grounds that it's definitely going to be in
3618 * the exit path shortly and free memory. Any allocation it
3619 * makes during the free path will be small and short-lived.
3620 */
3621 if (alloc_flags & ALLOC_OOM)
3622 min -= min / 2;
3623 }
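/*
 * Illustrative arithmetic: with mark == 1024, an ALLOC_MIN_RESERVE
 * request checks against 512, adding ALLOC_NON_BLOCK lowers that to 384,
 * and an ALLOC_OOM victim halves the result once more (192 if all of the
 * above applied, 512 for ALLOC_OOM alone).
 */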
3624
3625 /*
3626 * Check watermarks for an order-0 allocation request. If these
3627 * are not met, then a high-order request also cannot go ahead
3628 * even if a suitable page happened to be free.
3629 */
3630 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3631 return false;
3632
3633 /* If this is an order-0 request then the watermark is fine */
3634 if (!order)
3635 return true;
3636
3637 /* For a high-order request, check at least one suitable page is free */
3638 for (o = order; o < NR_PAGE_ORDERS; o++) {
3639 struct free_area *area = &z->free_area[o];
3640 int mt;
3641
3642 if (!area->nr_free)
3643 continue;
3644
3645 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3646 if (!free_area_empty(area, mt))
3647 return true;
3648 }
3649
3650 #ifdef CONFIG_CMA
3651 if ((alloc_flags & ALLOC_CMA) &&
3652 !free_area_empty(area, MIGRATE_CMA)) {
3653 return true;
3654 }
3655 #endif
3656 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
3657 !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
3658 return true;
3659 }
3660 }
3661 return false;
3662 }
3663
3664 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3665 int highest_zoneidx, unsigned int alloc_flags)
3666 {
3667 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3668 zone_page_state(z, NR_FREE_PAGES));
3669 }
3670
3671 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3672 unsigned long mark, int highest_zoneidx,
3673 unsigned int alloc_flags, gfp_t gfp_mask)
3674 {
3675 long free_pages;
3676
3677 free_pages = zone_page_state(z, NR_FREE_PAGES);
3678
3679 /*
3680 * Fast check for order-0 only. If this fails then the reserves
3681 * need to be calculated.
3682 */
3683 if (!order) {
3684 long usable_free;
3685 long reserved;
3686
3687 usable_free = free_pages;
3688 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
3689
3690 /* reserved may overestimate high-atomic reserves. */
3691 usable_free -= min(usable_free, reserved);
3692 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
3693 return true;
3694 }
3695
3696 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3697 free_pages))
3698 return true;
3699
3700 /*
3701 * Ignore watermark boosting for __GFP_HIGH order-0 allocations
3702 * when checking the min watermark. The min watermark is the
3703 * point where boosting is ignored so that kswapd is woken up
3704 * when below the low watermark.
3705 */
3706 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost
3707 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3708 mark = z->_watermark[WMARK_MIN];
3709 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3710 alloc_flags, free_pages);
3711 }
3712
3713 return false;
3714 }
3715
3716 #ifdef CONFIG_NUMA
3717 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
3718
3719 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3720 {
3721 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3722 node_reclaim_distance;
3723 }
3724 #else /* CONFIG_NUMA */
3725 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3726 {
3727 return true;
3728 }
3729 #endif /* CONFIG_NUMA */
3730
3731 /*
3732 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3733 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3734 * premature use of a lower zone may cause lowmem pressure problems that
3735 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3736 * probably too small. It only makes sense to spread allocations to avoid
3737 * fragmentation between the Normal and DMA32 zones.
3738 */
3739 static inline unsigned int
3740 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3741 {
3742 unsigned int alloc_flags;
3743
3744 /*
3745 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3746 * to save a branch.
3747 */
3748 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3749
3750 if (defrag_mode) {
3751 alloc_flags |= ALLOC_NOFRAGMENT;
3752 return alloc_flags;
3753 }
3754
3755 #ifdef CONFIG_ZONE_DMA32
3756 if (!zone)
3757 return alloc_flags;
3758
3759 if (zone_idx(zone) != ZONE_NORMAL)
3760 return alloc_flags;
3761
3762 /*
3763 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3764 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3765 * on UMA that if Normal is populated then so is DMA32.
3766 */
3767 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3768 if (nr_online_nodes > 1 && !populated_zone(--zone))
3769 return alloc_flags;
3770
3771 alloc_flags |= ALLOC_NOFRAGMENT;
3772 #endif /* CONFIG_ZONE_DMA32 */
3773 return alloc_flags;
3774 }
3775
3776 /* Must be called after current_gfp_context() which can change gfp_mask */
3777 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3778 unsigned int alloc_flags)
3779 {
3780 #ifdef CONFIG_CMA
3781 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3782 alloc_flags |= ALLOC_CMA;
3783 #endif
3784 return alloc_flags;
3785 }
3786
3787 /*
3788 * get_page_from_freelist goes through the zonelist trying to allocate
3789 * a page.
3790 */
3791 static struct page *
3792 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3793 const struct alloc_context *ac)
3794 {
3795 struct zoneref *z;
3796 struct zone *zone;
3797 struct pglist_data *last_pgdat = NULL;
3798 bool last_pgdat_dirty_ok = false;
3799 bool no_fallback;
3800 bool skip_kswapd_nodes = nr_online_nodes > 1;
3801 bool skipped_kswapd_nodes = false;
3802
3803 retry:
3804 /*
3805 * Scan zonelist, looking for a zone with enough free.
3806 * See also cpuset_current_node_allowed() comment in kernel/cgroup/cpuset.c.
3807 */
3808 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3809 z = ac->preferred_zoneref;
3810 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3811 ac->nodemask) {
3812 struct page *page;
3813 unsigned long mark;
3814
3815 if (cpusets_enabled() &&
3816 (alloc_flags & ALLOC_CPUSET) &&
3817 !__cpuset_zone_allowed(zone, gfp_mask))
3818 continue;
3819 /*
3820 * When allocating a page cache page for writing, we
3821 * want to get it from a node that is within its dirty
3822 * limit, such that no single node holds more than its
3823 * proportional share of globally allowed dirty pages.
3824 * The dirty limits take into account the node's
3825 * lowmem reserves and high watermark so that kswapd
3826 * should be able to balance it without having to
3827 * write pages from its LRU list.
3828 *
3829 * XXX: For now, allow allocations to potentially
3830 * exceed the per-node dirty limit in the slowpath
3831 * (spread_dirty_pages unset) before going into reclaim,
3832 * which is important when on a NUMA setup the allowed
3833 * nodes are together not big enough to reach the
3834 * global limit. The proper fix for these situations
3835 * will require awareness of nodes in the
3836 * dirty-throttling and the flusher threads.
3837 */
3838 if (ac->spread_dirty_pages) {
3839 if (last_pgdat != zone->zone_pgdat) {
3840 last_pgdat = zone->zone_pgdat;
3841 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
3842 }
3843
3844 if (!last_pgdat_dirty_ok)
3845 continue;
3846 }
3847
3848 if (no_fallback && !defrag_mode && nr_online_nodes > 1 &&
3849 zone != zonelist_zone(ac->preferred_zoneref)) {
3850 int local_nid;
3851
3852 /*
3853 * If moving to a remote node, retry but allow
3854 * fragmenting fallbacks. Locality is more important
3855 * than fragmentation avoidance.
3856 */
3857 local_nid = zonelist_node_idx(ac->preferred_zoneref);
3858 if (zone_to_nid(zone) != local_nid) {
3859 alloc_flags &= ~ALLOC_NOFRAGMENT;
3860 goto retry;
3861 }
3862 }
3863
3864 /*
3865 * If kswapd is already active on a node, keep looking
3866 * for other nodes that might be idle. This can happen
3867 * if another process has NUMA bindings and is causing
3868 * kswapd wakeups on only some nodes. Avoid accidental
3869 * "node_reclaim_mode"-like behavior in this case.
3870 */
3871 if (skip_kswapd_nodes &&
3872 !waitqueue_active(&zone->zone_pgdat->kswapd_wait)) {
3873 skipped_kswapd_nodes = true;
3874 continue;
3875 }
3876
3877 cond_accept_memory(zone, order, alloc_flags);
3878
3879 /*
3880 * Detect whether the number of free pages is below the high
3881 * watermark. If so, we will decrease pcp->high and free
3882 * PCP pages in the free path to reduce the possibility of
3883 * premature page reclaim. Detection is done here to avoid
3884 * doing it in the hotter free path.
3885 */
3886 if (test_bit(ZONE_BELOW_HIGH, &zone->flags))
3887 goto check_alloc_wmark;
3888
3889 mark = high_wmark_pages(zone);
3890 if (zone_watermark_fast(zone, order, mark,
3891 ac->highest_zoneidx, alloc_flags,
3892 gfp_mask))
3893 goto try_this_zone;
3894 else
3895 set_bit(ZONE_BELOW_HIGH, &zone->flags);
3896
3897 check_alloc_wmark:
3898 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3899 if (!zone_watermark_fast(zone, order, mark,
3900 ac->highest_zoneidx, alloc_flags,
3901 gfp_mask)) {
3902 int ret;
3903
3904 if (cond_accept_memory(zone, order, alloc_flags))
3905 goto try_this_zone;
3906
3907 /*
3908 * Watermark failed for this zone, but see if we can
3909 * grow this zone if it contains deferred pages.
3910 */
3911 if (deferred_pages_enabled()) {
3912 if (_deferred_grow_zone(zone, order))
3913 goto try_this_zone;
3914 }
3915 /* Checked here to keep the fast path fast */
3916 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3917 if (alloc_flags & ALLOC_NO_WATERMARKS)
3918 goto try_this_zone;
3919
3920 if (!node_reclaim_enabled() ||
3921 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
3922 continue;
3923
3924 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3925 switch (ret) {
3926 case NODE_RECLAIM_NOSCAN:
3927 /* did not scan */
3928 continue;
3929 case NODE_RECLAIM_FULL:
3930 /* scanned but unreclaimable */
3931 continue;
3932 default:
3933 /* did we reclaim enough */
3934 if (zone_watermark_ok(zone, order, mark,
3935 ac->highest_zoneidx, alloc_flags))
3936 goto try_this_zone;
3937
3938 continue;
3939 }
3940 }
3941
3942 try_this_zone:
3943 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
3944 gfp_mask, alloc_flags, ac->migratetype);
3945 if (page) {
3946 prep_new_page(page, order, gfp_mask, alloc_flags);
3947
3948 return page;
3949 } else {
3950 if (cond_accept_memory(zone, order, alloc_flags))
3951 goto try_this_zone;
3952
3953 /* Try again if zone has deferred pages */
3954 if (deferred_pages_enabled()) {
3955 if (_deferred_grow_zone(zone, order))
3956 goto try_this_zone;
3957 }
3958 }
3959 }
3960
3961 /*
3962 * If we skipped over nodes with active kswapds and found no
3963 * idle nodes, retry and place anywhere the watermarks permit.
3964 */
3965 if (skip_kswapd_nodes && skipped_kswapd_nodes) {
3966 skip_kswapd_nodes = false;
3967 goto retry;
3968 }
3969
3970 /*
3971 * It's possible on a UMA machine to get through all zones that are
3972 * fragmented. If avoiding fragmentation, reset and try again.
3973 */
3974 if (no_fallback && !defrag_mode) {
3975 alloc_flags &= ~ALLOC_NOFRAGMENT;
3976 goto retry;
3977 }
3978
3979 return NULL;
3980 }
3981
3982 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3983 {
3984 unsigned int filter = SHOW_MEM_FILTER_NODES;
3985
3986 /*
3987 * This documents exceptions given to allocations in certain
3988 * contexts that are allowed to allocate outside current's set
3989 * of allowed nodes.
3990 */
3991 if (!(gfp_mask & __GFP_NOMEMALLOC))
3992 if (tsk_is_oom_victim(current) ||
3993 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3994 filter &= ~SHOW_MEM_FILTER_NODES;
3995 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3996 filter &= ~SHOW_MEM_FILTER_NODES;
3997
3998 __show_mem(filter, nodemask, gfp_zone(gfp_mask));
3999 mem_cgroup_show_protected_memory(NULL);
4000 }
4001
4002 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4003 {
4004 struct va_format vaf;
4005 va_list args;
4006 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4007
4008 if ((gfp_mask & __GFP_NOWARN) ||
4009 !__ratelimit(&nopage_rs) ||
4010 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
4011 return;
4012
4013 va_start(args, fmt);
4014 vaf.fmt = fmt;
4015 vaf.va = &args;
4016 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4017 current->comm, &vaf, gfp_mask, &gfp_mask,
4018 nodemask_pr_args(nodemask));
4019 va_end(args);
4020
4021 cpuset_print_current_mems_allowed();
4022 pr_cont("\n");
4023 dump_stack();
4024 warn_alloc_show_mem(gfp_mask, nodemask);
4025 }
4026
4027 static inline struct page *
4028 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4029 unsigned int alloc_flags,
4030 const struct alloc_context *ac)
4031 {
4032 struct page *page;
4033
4034 page = get_page_from_freelist(gfp_mask, order,
4035 alloc_flags|ALLOC_CPUSET, ac);
4036 /*
4037 * Fall back to ignoring the cpuset restriction if our nodes
4038 * are depleted.
4039 */
4040 if (!page)
4041 page = get_page_from_freelist(gfp_mask, order,
4042 alloc_flags, ac);
4043 return page;
4044 }
4045
4046 static inline struct page *
4047 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4048 const struct alloc_context *ac, unsigned long *did_some_progress)
4049 {
4050 struct oom_control oc = {
4051 .zonelist = ac->zonelist,
4052 .nodemask = ac->nodemask,
4053 .memcg = NULL,
4054 .gfp_mask = gfp_mask,
4055 .order = order,
4056 };
4057 struct page *page;
4058
4059 *did_some_progress = 0;
4060
4061 /*
4062 * Acquire the oom lock. If that fails, somebody else is
4063 * making progress for us.
4064 */
4065 if (!mutex_trylock(&oom_lock)) {
4066 *did_some_progress = 1;
4067 schedule_timeout_uninterruptible(1);
4068 return NULL;
4069 }
4070
4071 /*
4072 * Go through the zonelist yet one more time, keeping a very high
4073 * watermark here; this is only to catch a parallel oom killing, and we
4074 * must fail if we're still under heavy pressure. But make sure that this
4075 * reclaim attempt does not itself rely on a __GFP_DIRECT_RECLAIM &&
4076 * !__GFP_NORETRY allocation, which would never fail while oom_lock is held.
4077 */
4078 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4079 ~__GFP_DIRECT_RECLAIM, order,
4080 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4081 if (page)
4082 goto out;
4083
4084 /* Coredumps can quickly deplete all memory reserves */
4085 if (current->flags & PF_DUMPCORE)
4086 goto out;
4087 /* The OOM killer will not help higher order allocs */
4088 if (order > PAGE_ALLOC_COSTLY_ORDER)
4089 goto out;
4090 /*
4091 * We have already exhausted all our reclaim opportunities without any
4092 * success so it is time to admit defeat. We will skip the OOM killer
4093 * because it is very likely that the caller has a more reasonable
4094 * fallback than shooting a random task.
4095 *
4096 * The OOM killer may not free memory on a specific node.
4097 */
4098 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4099 goto out;
4100 /* The OOM killer does not needlessly kill tasks for lowmem */
4101 if (ac->highest_zoneidx < ZONE_NORMAL)
4102 goto out;
4103 if (pm_suspended_storage())
4104 goto out;
4105 /*
4106 * XXX: GFP_NOFS allocations should rather fail than rely on
4107 * other request to make a forward progress.
4108 * We are in an unfortunate situation where out_of_memory cannot
4109 * do much for this context but let's try it to at least get
4110 * access to memory reserved if the current task is killed (see
4111 * out_of_memory). Once filesystems are ready to handle allocation
4112 * failures more gracefully we should just bail out here.
4113 */
4114
4115 /* Exhausted what can be done so it's blame time */
4116 if (out_of_memory(&oc) ||
4117 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
4118 *did_some_progress = 1;
4119
4120 /*
4121 * Help non-failing allocations by giving them access to memory
4122 * reserves
4123 */
4124 if (gfp_mask & __GFP_NOFAIL)
4125 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4126 ALLOC_NO_WATERMARKS, ac);
4127 }
4128 out:
4129 mutex_unlock(&oom_lock);
4130 return page;
4131 }
4132
4133 /*
4134 * Maximum number of compaction retries with progress before the OOM
4135 * killer is considered the only way to move forward.
4136 */
4137 #define MAX_COMPACT_RETRIES 16
4138
4139 #ifdef CONFIG_COMPACTION
4140 /* Try memory compaction for high-order allocations before reclaim */
4141 static struct page *
4142 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4143 unsigned int alloc_flags, const struct alloc_context *ac,
4144 enum compact_priority prio, enum compact_result *compact_result)
4145 {
4146 struct page *page = NULL;
4147 unsigned long pflags;
4148 unsigned int noreclaim_flag;
4149
4150 if (!order)
4151 return NULL;
4152
4153 psi_memstall_enter(&pflags);
4154 delayacct_compact_start();
4155 noreclaim_flag = memalloc_noreclaim_save();
4156
4157 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4158 prio, &page);
4159
4160 memalloc_noreclaim_restore(noreclaim_flag);
4161 psi_memstall_leave(&pflags);
4162 delayacct_compact_end();
4163
4164 if (*compact_result == COMPACT_SKIPPED)
4165 return NULL;
4166 /*
4167 * At least in one zone compaction wasn't deferred or skipped, so let's
4168 * count a compaction stall
4169 */
4170 count_vm_event(COMPACTSTALL);
4171
4172 /* Prep a captured page if available */
4173 if (page)
4174 prep_new_page(page, order, gfp_mask, alloc_flags);
4175
4176 /* Try get a page from the freelist if available */
4177 if (!page)
4178 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4179
4180 if (page) {
4181 struct zone *zone = page_zone(page);
4182
4183 zone->compact_blockskip_flush = false;
4184 compaction_defer_reset(zone, order, true);
4185 count_vm_event(COMPACTSUCCESS);
4186 return page;
4187 }
4188
4189 /*
4190 * It's bad if a compaction run occurs and fails. The most likely reason
4191 * is that pages exist, but not enough to satisfy watermarks.
4192 */
4193 count_vm_event(COMPACTFAIL);
4194
4195 cond_resched();
4196
4197 return NULL;
4198 }
4199
4200 static inline bool
4201 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4202 enum compact_result compact_result,
4203 enum compact_priority *compact_priority,
4204 int *compaction_retries)
4205 {
4206 int max_retries = MAX_COMPACT_RETRIES;
4207 int min_priority;
4208 bool ret = false;
4209 int retries = *compaction_retries;
4210 enum compact_priority priority = *compact_priority;
4211
4212 if (!order)
4213 return false;
4214
4215 if (fatal_signal_pending(current))
4216 return false;
4217
4218 /*
4219 * Compaction was skipped due to a lack of free order-0
4220 * migration targets. Continue if reclaim can help.
4221 */
4222 if (compact_result == COMPACT_SKIPPED) {
4223 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4224 goto out;
4225 }
4226
4227 /*
4228 * Compaction managed to coalesce some page blocks, but the
4229 * allocation failed presumably due to a race. Retry some.
4230 */
4231 if (compact_result == COMPACT_SUCCESS) {
4232 /*
4233 * !costly requests are much more important than
4234 * __GFP_RETRY_MAYFAIL costly ones because they are de
4235 * facto nofail and invoke OOM killer to move on while
4236 * costly can fail and users are ready to cope with
4237 * that. 1/4 retries is rather arbitrary but we would
4238 * need much more detailed feedback from compaction to
4239 * make a better decision.
4240 */
4241 if (order > PAGE_ALLOC_COSTLY_ORDER)
4242 max_retries /= 4;
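/* e.g. MAX_COMPACT_RETRIES of 16 becomes 4 retries for costly orders */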
4243
4244 if (++(*compaction_retries) <= max_retries) {
4245 ret = true;
4246 goto out;
4247 }
4248 }
4249
4250 /*
4251 * Compaction failed. Retry with increasing priority.
4252 */
4253 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4254 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4255
4256 if (*compact_priority > min_priority) {
4257 (*compact_priority)--;
4258 *compaction_retries = 0;
4259 ret = true;
4260 }
4261 out:
4262 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4263 return ret;
4264 }
4265 #else
4266 static inline struct page *
4267 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4268 unsigned int alloc_flags, const struct alloc_context *ac,
4269 enum compact_priority prio, enum compact_result *compact_result)
4270 {
4271 *compact_result = COMPACT_SKIPPED;
4272 return NULL;
4273 }
4274
4275 static inline bool
4276 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4277 enum compact_result compact_result,
4278 enum compact_priority *compact_priority,
4279 int *compaction_retries)
4280 {
4281 struct zone *zone;
4282 struct zoneref *z;
4283
4284 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4285 return false;
4286
4287 /*
4288 * There are setups with compaction disabled which would prefer to loop
4289 * inside the allocator rather than hit the oom killer prematurely.
4290 * Let's give them a good hope and keep retrying while the order-0
4291 * watermarks are OK.
4292 */
4293 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4294 ac->highest_zoneidx, ac->nodemask) {
4295 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4296 ac->highest_zoneidx, alloc_flags))
4297 return true;
4298 }
4299 return false;
4300 }
4301 #endif /* CONFIG_COMPACTION */
4302
4303 #ifdef CONFIG_LOCKDEP
4304 static struct lockdep_map __fs_reclaim_map =
4305 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4306
4307 static bool __need_reclaim(gfp_t gfp_mask)
4308 {
4309 /* no reclaim without waiting on it */
4310 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4311 return false;
4312
4313 /* this guy won't enter reclaim */
4314 if (current->flags & PF_MEMALLOC)
4315 return false;
4316
4317 if (gfp_mask & __GFP_NOLOCKDEP)
4318 return false;
4319
4320 return true;
4321 }
4322
4323 void __fs_reclaim_acquire(unsigned long ip)
4324 {
4325 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
4326 }
4327
4328 void __fs_reclaim_release(unsigned long ip)
4329 {
4330 lock_release(&__fs_reclaim_map, ip);
4331 }
4332
4333 void fs_reclaim_acquire(gfp_t gfp_mask)
4334 {
4335 gfp_mask = current_gfp_context(gfp_mask);
4336
4337 if (__need_reclaim(gfp_mask)) {
4338 if (gfp_mask & __GFP_FS)
4339 __fs_reclaim_acquire(_RET_IP_);
4340
4341 #ifdef CONFIG_MMU_NOTIFIER
4342 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4343 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4344 #endif
4345
4346 }
4347 }
4348 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4349
4350 void fs_reclaim_release(gfp_t gfp_mask)
4351 {
4352 gfp_mask = current_gfp_context(gfp_mask);
4353
4354 if (__need_reclaim(gfp_mask)) {
4355 if (gfp_mask & __GFP_FS)
4356 __fs_reclaim_release(_RET_IP_);
4357 }
4358 }
4359 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4360 #endif
4361
4362 /*
4363 * Zonelists may change due to hotplug during allocation. Detect when zonelists
4364 * have been rebuilt so the allocation can be retried. Reader side does not lock and
4365 * retries the allocation if zonelist changes. Writer side is protected by the
4366 * embedded spin_lock.
4367 */
4368 static DEFINE_SEQLOCK(zonelist_update_seq);
4369
4370 static unsigned int zonelist_iter_begin(void)
4371 {
4372 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4373 return read_seqbegin(&zonelist_update_seq);
4374
4375 return 0;
4376 }
4377
4378 static unsigned int check_retry_zonelist(unsigned int seq)
4379 {
4380 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4381 return read_seqretry(&zonelist_update_seq, seq);
4382
4383 return seq;
4384 }
4385
4386 /* Perform direct synchronous page reclaim */
4387 static unsigned long
4388 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4389 const struct alloc_context *ac)
4390 {
4391 unsigned int noreclaim_flag;
4392 unsigned long progress;
4393
4394 cond_resched();
4395
4396 /* We now go into synchronous reclaim */
4397 cpuset_memory_pressure_bump();
4398 fs_reclaim_acquire(gfp_mask);
4399 noreclaim_flag = memalloc_noreclaim_save();
4400
4401 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4402 ac->nodemask);
4403
4404 memalloc_noreclaim_restore(noreclaim_flag);
4405 fs_reclaim_release(gfp_mask);
4406
4407 cond_resched();
4408
4409 return progress;
4410 }
4411
4412 /* The really slow allocator path where we enter direct reclaim */
4413 static inline struct page *
4414 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4415 unsigned int alloc_flags, const struct alloc_context *ac,
4416 unsigned long *did_some_progress)
4417 {
4418 struct page *page = NULL;
4419 unsigned long pflags;
4420 bool drained = false;
4421
4422 psi_memstall_enter(&pflags);
4423 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4424 if (unlikely(!(*did_some_progress)))
4425 goto out;
4426
4427 retry:
4428 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4429
4430 /*
4431 * If an allocation failed after direct reclaim, it could be because
4432 * pages are pinned on the per-cpu lists or in high alloc reserves.
4433 * Shrink them and try again
4434 */
4435 if (!page && !drained) {
4436 unreserve_highatomic_pageblock(ac, false);
4437 drain_all_pages(NULL);
4438 drained = true;
4439 goto retry;
4440 }
4441 out:
4442 psi_memstall_leave(&pflags);
4443
4444 return page;
4445 }
4446
4447 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4448 const struct alloc_context *ac)
4449 {
4450 struct zoneref *z;
4451 struct zone *zone;
4452 pg_data_t *last_pgdat = NULL;
4453 enum zone_type highest_zoneidx = ac->highest_zoneidx;
4454 unsigned int reclaim_order;
4455
4456 if (defrag_mode)
4457 reclaim_order = max(order, pageblock_order);
4458 else
4459 reclaim_order = order;
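/*
 * e.g. under defrag_mode even an order-2 request wakes kswapd for
 * pageblock_order (typically 9), so reclaim targets whole pageblocks.
 */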
4460
4461 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4462 ac->nodemask) {
4463 if (!managed_zone(zone))
4464 continue;
4465 if (last_pgdat == zone->zone_pgdat)
4466 continue;
4467 wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx);
4468 last_pgdat = zone->zone_pgdat;
4469 }
4470 }
4471
4472 static inline unsigned int
4473 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
4474 {
4475 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4476
4477 /*
4478 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE
4479 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4480 * to save two branches.
4481 */
4482 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
4483 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4484
4485 /*
4486 * The caller may dip into page reserves a bit more if the caller
4487 * cannot run direct reclaim, or if the caller has realtime scheduling
4488 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
4489 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
4490 */
4491 alloc_flags |= (__force int)
4492 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4493
4494 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
4495 /*
4496 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4497 * if it can't schedule.
4498 */
4499 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
4500 alloc_flags |= ALLOC_NON_BLOCK;
4501
4502 if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE))
4503 alloc_flags |= ALLOC_HIGHATOMIC;
4504 }
4505
4506 /*
4507 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
4508 * GFP_ATOMIC) rather than fail, see the comment for
4509 * cpuset_current_node_allowed().
4510 */
4511 if (alloc_flags & ALLOC_MIN_RESERVE)
4512 alloc_flags &= ~ALLOC_CPUSET;
4513 } else if (unlikely(rt_or_dl_task(current)) && in_task())
4514 alloc_flags |= ALLOC_MIN_RESERVE;
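/*
 * Illustration: a GFP_ATOMIC request (__GFP_HIGH | __GFP_KSWAPD_RECLAIM,
 * no __GFP_DIRECT_RECLAIM) ends up with ALLOC_MIN_RESERVE | ALLOC_KSWAPD |
 * ALLOC_NON_BLOCK (plus ALLOC_HIGHATOMIC for order > 0) on top of
 * ALLOC_WMARK_MIN, and has ALLOC_CPUSET cleared.
 */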
4515
4516 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4517
4518 if (defrag_mode)
4519 alloc_flags |= ALLOC_NOFRAGMENT;
4520
4521 return alloc_flags;
4522 }
4523
4524 static bool oom_reserves_allowed(struct task_struct *tsk)
4525 {
4526 if (!tsk_is_oom_victim(tsk))
4527 return false;
4528
4529 /*
4530 * !MMU doesn't have oom reaper so give access to memory reserves
4531 * only to the thread with TIF_MEMDIE set
4532 */
4533 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4534 return false;
4535
4536 return true;
4537 }
4538
4539 /*
4540 * Distinguish requests which really need access to full memory
4541 * reserves from oom victims which can live with a portion of it
4542 */
4543 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4544 {
4545 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4546 return 0;
4547 if (gfp_mask & __GFP_MEMALLOC)
4548 return ALLOC_NO_WATERMARKS;
4549 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4550 return ALLOC_NO_WATERMARKS;
4551 if (!in_interrupt()) {
4552 if (current->flags & PF_MEMALLOC)
4553 return ALLOC_NO_WATERMARKS;
4554 else if (oom_reserves_allowed(current))
4555 return ALLOC_OOM;
4556 }
4557
4558 return 0;
4559 }
4560
4561 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4562 {
4563 return !!__gfp_pfmemalloc_flags(gfp_mask);
4564 }
4565
4566 /*
4567 * Checks whether it makes sense to retry the reclaim to make a forward progress
4568 * for the given allocation request.
4569 *
4570 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4571 * without success, or when we couldn't even meet the watermark if we
4572 * reclaimed all remaining pages on the LRU lists.
4573 *
4574 * Returns true if a retry is viable or false to enter the oom path.
4575 */
4576 static inline bool
4577 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4578 struct alloc_context *ac, int alloc_flags,
4579 bool did_some_progress, int *no_progress_loops)
4580 {
4581 struct zone *zone;
4582 struct zoneref *z;
4583 bool ret = false;
4584
4585 /*
4586 * Costly allocations might have made progress, but this doesn't mean
4587 * their order will become available due to high fragmentation, so
4588 * always increment the no-progress counter for them.
4589 */
4590 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4591 *no_progress_loops = 0;
4592 else
4593 (*no_progress_loops)++;
4594
4595 if (*no_progress_loops > MAX_RECLAIM_RETRIES)
4596 goto out;
4597
4598
4599 /*
4600 * Keep reclaiming pages while there is a chance this will lead
4601 * somewhere. If none of the target zones can satisfy our allocation
4602 * request even if all reclaimable pages are considered then we are
4603 * screwed and have to go OOM.
4604 */
4605 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4606 ac->highest_zoneidx, ac->nodemask) {
4607 unsigned long available;
4608 unsigned long reclaimable;
4609 unsigned long min_wmark = min_wmark_pages(zone);
4610 bool wmark;
4611
4612 if (cpusets_enabled() &&
4613 (alloc_flags & ALLOC_CPUSET) &&
4614 !__cpuset_zone_allowed(zone, gfp_mask))
4615 continue;
4616
4617 available = reclaimable = zone_reclaimable_pages(zone);
4618 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4619
4620 /*
4621 * Would the allocation succeed if we reclaimed all
4622 * reclaimable pages?
4623 */
4624 wmark = __zone_watermark_ok(zone, order, min_wmark,
4625 ac->highest_zoneidx, alloc_flags, available);
4626 trace_reclaim_retry_zone(z, order, reclaimable,
4627 available, min_wmark, *no_progress_loops, wmark);
4628 if (wmark) {
4629 ret = true;
4630 break;
4631 }
4632 }
4633
4634 /*
4635 * Memory allocation/reclaim might be called from a WQ context and the
4636 * current implementation of the WQ concurrency control doesn't
4637 * recognize that a particular WQ is congested if the worker thread is
4638 * looping without ever sleeping. Therefore we have to do a short sleep
4639 * here rather than calling cond_resched().
4640 */
4641 if (current->flags & PF_WQ_WORKER)
4642 schedule_timeout_uninterruptible(1);
4643 else
4644 cond_resched();
4645 out:
4646 /* Before OOM, exhaust highatomic_reserve */
4647 if (!ret)
4648 return unreserve_highatomic_pageblock(ac, true);
4649
4650 return ret;
4651 }
4652
4653 static inline bool
4654 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4655 {
4656 /*
4657 * It's possible that cpuset's mems_allowed and the nodemask from
4658 * mempolicy don't intersect. This should be normally dealt with by
4659 * policy_nodemask(), but it's possible to race with cpuset update in
4660 * such a way the check therein was true, and then it became false
4661 * before we got our cpuset_mems_cookie here.
4662 * This assumes that for all allocations, ac->nodemask can come only
4663 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4664 * when it does not intersect with the cpuset restrictions) or the
4665 * caller can deal with a violated nodemask.
4666 */
4667 if (cpusets_enabled() && ac->nodemask &&
4668 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4669 ac->nodemask = NULL;
4670 return true;
4671 }
4672
4673 /*
4674 * When updating a task's mems_allowed or mempolicy nodemask, it is
4675 * possible to race with parallel threads in such a way that our
4676 * allocation can fail while the mask is being updated. If we are about
4677 * to fail, check if the cpuset changed during allocation and if so,
4678 * retry.
4679 */
4680 if (read_mems_allowed_retry(cpuset_mems_cookie))
4681 return true;
4682
4683 return false;
4684 }
4685
4686 static inline struct page *
4687 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4688 struct alloc_context *ac)
4689 {
4690 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4691 bool can_compact = can_direct_reclaim && gfp_compaction_allowed(gfp_mask);
4692 bool nofail = gfp_mask & __GFP_NOFAIL;
4693 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4694 struct page *page = NULL;
4695 unsigned int alloc_flags;
4696 unsigned long did_some_progress;
4697 enum compact_priority compact_priority;
4698 enum compact_result compact_result;
4699 int compaction_retries;
4700 int no_progress_loops;
4701 unsigned int cpuset_mems_cookie;
4702 unsigned int zonelist_iter_cookie;
4703 int reserve_flags;
4704 bool compact_first = false;
4705 bool can_retry_reserves = true;
4706
4707 if (unlikely(nofail)) {
4708 /*
4709 * We don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM;
4710 * otherwise we may end up in a lockup.
4711 */
4712 WARN_ON_ONCE(!can_direct_reclaim);
4713 /*
4714 * PF_MEMALLOC request from this context is rather bizarre
4715 * because we cannot reclaim anything and only can loop waiting
4716 * for somebody to do a work for us.
4717 */
4718 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4719 }
4720
4721 restart:
4722 compaction_retries = 0;
4723 no_progress_loops = 0;
4724 compact_result = COMPACT_SKIPPED;
4725 compact_priority = DEF_COMPACT_PRIORITY;
4726 cpuset_mems_cookie = read_mems_allowed_begin();
4727 zonelist_iter_cookie = zonelist_iter_begin();
4728
4729 /*
4730 * For costly allocations, try direct compaction first, as it's likely
4731 * that we have enough base pages and don't need to reclaim. For non-
4732 * movable high-order allocations, do that as well, as compaction will
4733 * try to prevent permanent fragmentation by migrating from blocks of the
4734 * same migratetype.
4735 */
4736 if (can_compact && (costly_order || (order > 0 &&
4737 ac->migratetype != MIGRATE_MOVABLE))) {
4738 compact_first = true;
4739 compact_priority = INIT_COMPACT_PRIORITY;
4740 }
4741
4742 /*
4743 * The fast path uses conservative alloc_flags to succeed only until
4744 * kswapd needs to be woken up, and to avoid the cost of setting up
4745 * alloc_flags precisely. So we do that now.
4746 */
4747 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
4748
4749 /*
4750 * We need to recalculate the starting point for the zonelist iterator
4751 * because we might have used different nodemask in the fast path, or
4752 * there was a cpuset modification and we are retrying - otherwise we
4753 * could end up iterating over non-eligible zones endlessly.
4754 */
4755 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4756 ac->highest_zoneidx, ac->nodemask);
4757 if (!zonelist_zone(ac->preferred_zoneref))
4758 goto nopage;
4759
4760 /*
4761 * Check for insane configurations where the cpuset doesn't contain
4762 * any suitable zone to satisfy the request - e.g. non-movable
4763 * GFP_HIGHUSER allocations from MOVABLE nodes only.
4764 */
4765 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
4766 struct zoneref *z = first_zones_zonelist(ac->zonelist,
4767 ac->highest_zoneidx,
4768 &cpuset_current_mems_allowed);
4769 if (!zonelist_zone(z))
4770 goto nopage;
4771 }
4772
4773 retry:
4774 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4775 if (alloc_flags & ALLOC_KSWAPD)
4776 wake_all_kswapds(order, gfp_mask, ac);
4777
4778 /*
4779 * The adjusted alloc_flags might result in immediate success, so try
4780 * that first
4781 */
4782 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4783 if (page)
4784 goto got_pg;
4785
4786 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4787 if (reserve_flags)
4788 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4789 (alloc_flags & ALLOC_KSWAPD);
4790
4791 /*
4792 * Reset the nodemask and zonelist iterators if memory policies can be
4793 * ignored. These allocations are high priority and system rather than
4794 * user oriented.
4795 */
4796 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4797 ac->nodemask = NULL;
4798 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4799 ac->highest_zoneidx, ac->nodemask);
4800
4801 /*
4802 * The first time we adjust anything due to being allowed to
4803 * ignore memory policies or watermarks, retry immediately. This
4804 * allows us to keep the first allocation attempt optimistic so
4805 * it can succeed in a zone that is still above watermarks.
4806 */
4807 if (can_retry_reserves) {
4808 can_retry_reserves = false;
4809 goto retry;
4810 }
4811 }
4812
4813 /* Caller is not willing to reclaim, we can't balance anything */
4814 if (!can_direct_reclaim)
4815 goto nopage;
4816
4817 /* Avoid recursion of direct reclaim */
4818 if (current->flags & PF_MEMALLOC)
4819 goto nopage;
4820
4821 /* Try direct reclaim and then allocating */
4822 if (!compact_first) {
4823 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags,
4824 ac, &did_some_progress);
4825 if (page)
4826 goto got_pg;
4827 }
4828
4829 /* Try direct compaction and then allocating */
4830 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4831 compact_priority, &compact_result);
4832 if (page)
4833 goto got_pg;
4834
4835 if (compact_first) {
4836 /*
4837 * THP page faults may attempt local node only first, but are
4838 * then allowed to only compact, not reclaim, see
4839 * alloc_pages_mpol().
4840 *
4841 * Compaction has failed above and we don't want such THP
4842 * allocations to put reclaim pressure on a single node in a
4843 * situation where other nodes might have plenty of available
4844 * memory.
4845 */
4846 if (gfp_has_flags(gfp_mask, __GFP_NORETRY | __GFP_THISNODE))
4847 goto nopage;
4848
4849 /*
4850 * For the initial compaction attempt we have lowered its
4851 * priority. Restore it for further retries, if those are
4852 * allowed. With __GFP_NORETRY there will be a single round of
4853 * reclaim and compaction with the lowered priority.
4854 */
4855 if (!(gfp_mask & __GFP_NORETRY))
4856 compact_priority = DEF_COMPACT_PRIORITY;
4857
4858 compact_first = false;
4859 goto retry;
4860 }
4861
4862 /* Do not loop if specifically requested */
4863 if (gfp_mask & __GFP_NORETRY)
4864 goto nopage;
4865
4866 /*
4867 * Do not retry costly high order allocations unless they are
4868 * __GFP_RETRY_MAYFAIL and we can compact
4869 */
4870 if (costly_order && (!can_compact ||
4871 !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4872 goto nopage;
4873
4874 /*
4875 * Deal with possible cpuset update races or zonelist updates to avoid
4876 * infinite retries. No "goto retry;" can be placed above this check
4877 * unless it can execute just once.
4878 */
4879 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4880 check_retry_zonelist(zonelist_iter_cookie))
4881 goto restart;
4882
4883 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4884 did_some_progress > 0, &no_progress_loops))
4885 goto retry;
4886
4887 /*
4888 * It doesn't make any sense to retry for the compaction if the order-0
4889 * reclaim is not able to make any progress because the current
4890 * implementation of the compaction depends on the sufficient amount
4891 * of free memory (see __compaction_suitable)
4892 */
4893 if (did_some_progress > 0 && can_compact &&
4894 should_compact_retry(ac, order, alloc_flags,
4895 compact_result, &compact_priority,
4896 &compaction_retries))
4897 goto retry;
4898
4899 /* Reclaim/compaction failed to prevent the fallback */
4900 if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) {
4901 alloc_flags &= ~ALLOC_NOFRAGMENT;
4902 goto retry;
4903 }
4904
4905 /*
4906 * Deal with possible cpuset update races or zonelist updates to avoid
4907 * an unnecessary OOM kill.
4908 */
4909 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4910 check_retry_zonelist(zonelist_iter_cookie))
4911 goto restart;
4912
4913 /* Reclaim has failed us, start killing things */
4914 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4915 if (page)
4916 goto got_pg;
4917
4918 /* Avoid allocations with no watermarks from looping endlessly */
4919 if (tsk_is_oom_victim(current) &&
4920 (alloc_flags & ALLOC_OOM ||
4921 (gfp_mask & __GFP_NOMEMALLOC)))
4922 goto nopage;
4923
4924 /* Retry as long as the OOM killer is making progress */
4925 if (did_some_progress) {
4926 no_progress_loops = 0;
4927 goto retry;
4928 }
4929
4930 nopage:
4931 /*
4932 * Deal with possible cpuset update races or zonelist updates to avoid
4933 * an unnecessary OOM kill.
4934 */
4935 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4936 check_retry_zonelist(zonelist_iter_cookie))
4937 goto restart;
4938
4939 /*
4940 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4941 * we always retry
4942 */
4943 if (unlikely(nofail)) {
4944 /*
4945 * Without __GFP_DIRECT_RECLAIM we can't do anything to reclaim
4946 * memory, so we disregard these unreasonable nofail requests and
4947 * still return NULL
4948 */
4949 if (!can_direct_reclaim)
4950 goto fail;
4951
4952 /*
4953 * Help non-failing allocations by giving some access to memory
4954 * reserves normally used for high priority non-blocking
4955 * allocations but do not use ALLOC_NO_WATERMARKS because this
4956 * could deplete whole memory reserves which would just make
4957 * the situation worse.
4958 */
4959 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4960 if (page)
4961 goto got_pg;
4962
4963 cond_resched();
4964 goto retry;
4965 }
4966 fail:
4967 warn_alloc(gfp_mask, ac->nodemask,
4968 "page allocation failure: order:%u", order);
4969 got_pg:
4970 return page;
4971 }
4972
4973 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4974 int preferred_nid, nodemask_t *nodemask,
4975 struct alloc_context *ac, gfp_t *alloc_gfp,
4976 unsigned int *alloc_flags)
4977 {
4978 ac->highest_zoneidx = gfp_zone(gfp_mask);
4979 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4980 ac->nodemask = nodemask;
4981 ac->migratetype = gfp_migratetype(gfp_mask);
4982
4983 if (cpusets_enabled()) {
4984 *alloc_gfp |= __GFP_HARDWALL;
4985 /*
4986 * When in interrupt context, the allocation is unrelated to the
4987 * current task's context, so any node is ok.
4988 */
4989 if (in_task() && !ac->nodemask)
4990 ac->nodemask = &cpuset_current_mems_allowed;
4991 else
4992 *alloc_flags |= ALLOC_CPUSET;
4993 }
4994
4995 might_alloc(gfp_mask);
4996
4997 /*
4998 * Don't invoke should_fail logic, since it may call
4999 * get_random_u32() and printk() which need to spin_lock.
5000 */
5001 if (!(*alloc_flags & ALLOC_TRYLOCK) &&
5002 should_fail_alloc_page(gfp_mask, order))
5003 return false;
5004
5005 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5006
5007 /* Dirty zone balancing only done in the fast path */
5008 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5009
5010 /*
5011 * The preferred zone is used for statistics but crucially it is
5012 * also used as the starting point for the zonelist iterator. It
5013 * may get reset for allocations that ignore memory policies.
5014 */
5015 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5016 ac->highest_zoneidx, ac->nodemask);
5017
5018 return true;
5019 }
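/*
 * Usage sketch (illustrative only; callers normally go through the
 * alloc_pages_bulk*() wrappers rather than the _noprof variant defined
 * below):
 *
 *	struct page *pages[16] = { NULL };
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk_noprof(GFP_KERNEL, numa_mem_id(), NULL,
 *					 ARRAY_SIZE(pages), pages);
 *	// slots [0, filled) now hold order-0 pages, the rest stay NULL
 */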
5020
5021 /*
5022 * __alloc_pages_bulk - Allocate a number of order-0 pages to an array
5023 * @gfp: GFP flags for the allocation
5024 * @preferred_nid: The preferred NUMA node ID to allocate from
5025 * @nodemask: Set of nodes to allocate from, may be NULL
5026 * @nr_pages: The number of pages desired in the array
5027 * @page_array: Array to store the pages
5028 *
5029 * This is a batched version of the page allocator that attempts to allocate
5030 * @nr_pages quickly. Pages are added to @page_array.
5031 *
5032 * Note that only the elements in @page_array that were cleared to %NULL on
5033 * entry are populated with newly allocated pages. @nr_pages is the maximum
5034 * number of pages that will be stored in the array.
5035 *
5036 * Returns the number of pages in @page_array, including ones already
5037 * allocated on entry. This can be less than the number requested in @nr_pages,
5038 * but all empty slots are filled from the beginning. I.e., if all slots in
5039 * @page_array were set to %NULL on entry, the slots from 0 to the return value
5040 * - 1 will be filled.
5041 */
5042 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
5043 nodemask_t *nodemask, int nr_pages,
5044 struct page **page_array)
5045 {
5046 struct page *page;
5047 struct zone *zone;
5048 struct zoneref *z;
5049 struct per_cpu_pages *pcp;
5050 struct list_head *pcp_list;
5051 struct alloc_context ac;
5052 gfp_t alloc_gfp;
5053 unsigned int alloc_flags = ALLOC_WMARK_LOW;
5054 int nr_populated = 0, nr_account = 0;
5055
5056 /*
5057 * Skip populated array elements to determine if any pages need
5058 * to be allocated before disabling IRQs.
5059 */
5060 while (nr_populated < nr_pages && page_array[nr_populated])
5061 nr_populated++;
5062
5063 /* No pages requested? */
5064 if (unlikely(nr_pages <= 0))
5065 goto out;
5066
5067 /* Already populated array? */
5068 if (unlikely(nr_pages - nr_populated == 0))
5069 goto out;
5070
5071 /* Bulk allocator does not support memcg accounting. */
5072 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
5073 goto failed;
5074
5075 /* Use the single page allocator for one page. */
5076 if (nr_pages - nr_populated == 1)
5077 goto failed;
5078
5079 #ifdef CONFIG_PAGE_OWNER
5080 /*
5081 * PAGE_OWNER may recurse into the allocator to allocate space to
5082 * save the stack with pagesets.lock held. Releasing/reacquiring
5083 * removes much of the performance benefit of bulk allocation so
5084 * force the caller to allocate one page at a time, which performs
5085 * similarly without adding complexity to the bulk allocator.
5086 */
5087 if (static_branch_unlikely(&page_owner_inited))
5088 goto failed;
5089 #endif
5090
5091 /* May set ALLOC_NOFRAGMENT; if avoiding fragmentation fails, fall back to a single page. */
5092 gfp &= gfp_allowed_mask;
5093 alloc_gfp = gfp;
5094 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5095 goto out;
5096 gfp = alloc_gfp;
5097
5098 /* Find an allowed local zone that meets the low watermark. */
5099 z = ac.preferred_zoneref;
5100 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
5101 unsigned long mark;
5102
5103 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5104 !__cpuset_zone_allowed(zone, gfp)) {
5105 continue;
5106 }
5107
5108 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
5109 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
5110 goto failed;
5111 }
5112
5113 cond_accept_memory(zone, 0, alloc_flags);
5114 retry_this_zone:
5115 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages - nr_populated;
5116 if (zone_watermark_fast(zone, 0, mark,
5117 zonelist_zone_idx(ac.preferred_zoneref),
5118 alloc_flags, gfp)) {
5119 break;
5120 }
5121
5122 if (cond_accept_memory(zone, 0, alloc_flags))
5123 goto retry_this_zone;
5124
5125 /* Try again if zone has deferred pages */
5126 if (deferred_pages_enabled()) {
5127 if (_deferred_grow_zone(zone, 0))
5128 goto retry_this_zone;
5129 }
5130 }
5131
5132 /*
5133 * If there are no allowed local zones that meet the watermarks then
5134 * try to allocate a single page and reclaim if necessary.
5135 */
5136 if (unlikely(!zone))
5137 goto failed;
5138
5139 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
5140 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
5141 if (!pcp)
5142 goto failed;
5143
5144 /* Attempt the batch allocation */
5145 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
5146 while (nr_populated < nr_pages) {
5147
5148 /* Skip existing pages */
5149 if (page_array[nr_populated]) {
5150 nr_populated++;
5151 continue;
5152 }
5153
5154 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
5155 pcp, pcp_list);
5156 if (unlikely(!page)) {
5157 /* Try and allocate at least one page */
5158 if (!nr_account) {
5159 pcp_spin_unlock(pcp);
5160 goto failed;
5161 }
5162 break;
5163 }
5164 nr_account++;
5165
5166 prep_new_page(page, 0, gfp, 0);
5167 set_page_refcounted(page);
5168 page_array[nr_populated++] = page;
5169 }
5170
5171 pcp_spin_unlock(pcp);
5172
5173 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5174 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
5175
5176 out:
5177 return nr_populated;
5178
5179 failed:
5180 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
5181 if (page)
5182 page_array[nr_populated++] = page;
5183 goto out;
5184 }
5185 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
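/*
 * Illustrative sketch, not part of this file: a hypothetical caller wanting
 * up to 8 order-0 pages in one trip through the allocator. The array must be
 * NULL-initialised because already-populated slots are skipped, and a
 * sleepable GFP_KERNEL context is assumed. Real users typically go through a
 * wrapper such as alloc_pages_bulk() rather than the _noprof variant.
 *
 *	struct page *pages[8] = { NULL };
 *	unsigned long nr;
 *
 *	nr = alloc_pages_bulk_noprof(GFP_KERNEL, numa_node_id(), NULL,
 *				     ARRAY_SIZE(pages), pages);
 *	(slots [0, nr) now hold pages; release them one by one)
 *	while (nr)
 *		__free_page(pages[--nr]);
 */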
5186
5187 /*
5188 * This is the 'heart' of the zoned buddy allocator.
5189 */
5190 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
5191 int preferred_nid, nodemask_t *nodemask)
5192 {
5193 struct page *page;
5194 unsigned int alloc_flags = ALLOC_WMARK_LOW;
5195 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
5196 struct alloc_context ac = { };
5197
5198 /*
5199 * There are several places where we assume that the order value is sane
5200 * so bail out early if the request is out of bounds.
5201 */
5202 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
5203 return NULL;
5204
5205 gfp &= gfp_allowed_mask;
5206 /*
5207 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5208 * and GFP_NOIO, which have to be inherited for all allocation requests
5209 * from a particular context which has been marked by
5210 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
5211 * movable zones are not used during allocation.
5212 */
5213 gfp = current_gfp_context(gfp);
5214 alloc_gfp = gfp;
5215 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5216 &alloc_gfp, &alloc_flags))
5217 return NULL;
5218
5219 /*
5220 * Forbid the first pass from falling back to types that fragment
5221 * memory until all local zones are considered.
5222 */
5223 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
5224
5225 /* First allocation attempt */
5226 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5227 if (likely(page))
5228 goto out;
5229
5230 alloc_gfp = gfp;
5231 ac.spread_dirty_pages = false;
5232
5233 /*
5234 * Restore the original nodemask if it was potentially replaced with
5235 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5236 */
5237 ac.nodemask = nodemask;
5238
5239 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5240
5241 out:
5242 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
5243 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5244 free_frozen_pages(page, order);
5245 page = NULL;
5246 }
5247
5248 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5249 kmsan_alloc_page(page, order, alloc_gfp);
5250
5251 return page;
5252 }
5253 EXPORT_SYMBOL(__alloc_frozen_pages_noprof);
5254
5255 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
5256 int preferred_nid, nodemask_t *nodemask)
5257 {
5258 struct page *page;
5259
5260 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask);
5261 if (page)
5262 set_page_refcounted(page);
5263 return page;
5264 }
5265 EXPORT_SYMBOL(__alloc_pages_noprof);
5266
5267 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
5268 nodemask_t *nodemask)
5269 {
5270 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order,
5271 preferred_nid, nodemask);
5272 return page_rmappable_folio(page);
5273 }
5274 EXPORT_SYMBOL(__folio_alloc_noprof);
5275
5276 /*
5277 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5278 * address cannot represent highmem pages. Use alloc_pages and then kmap if
5279 * you need to access high mem.
5280 */
5281 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order)
5282 {
5283 struct page *page;
5284
5285 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order);
5286 if (!page)
5287 return 0;
5288 return (unsigned long) page_address(page);
5289 }
5290 EXPORT_SYMBOL(get_free_pages_noprof);
5291
5292 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask)
5293 {
5294 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0);
5295 }
5296 EXPORT_SYMBOL(get_zeroed_page_noprof);
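/*
 * Illustrative sketch, not part of this file: these helpers return a kernel
 * virtual address rather than a struct page, so they pair with free_page()
 * and free_pages() rather than __free_pages(). A GFP_KERNEL context is
 * assumed.
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);	(two pages)
 *	unsigned long zero = get_zeroed_page(GFP_KERNEL);	(one zeroed page)
 *
 *	if (addr)
 *		free_pages(addr, 1);
 *	if (zero)
 *		free_page(zero);
 */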
5297
5298 static void ___free_pages(struct page *page, unsigned int order,
5299 fpi_t fpi_flags)
5300 {
5301 /* get PageHead before we drop reference */
5302 int head = PageHead(page);
5303 /* get alloc tag in case the page is released by others */
5304 struct alloc_tag *tag = pgalloc_tag_get(page);
5305
5306 if (put_page_testzero(page))
5307 __free_frozen_pages(page, order, fpi_flags);
5308 else if (!head) {
5309 pgalloc_tag_sub_pages(tag, (1 << order) - 1);
5310 while (order-- > 0) {
5311 /*
5312 * The "tail" pages of this non-compound high-order
5313 * page will have no code tags, so to avoid warnings
5314 * mark them as empty.
5315 */
5316 clear_page_tag_ref(page + (1 << order));
5317 __free_frozen_pages(page + (1 << order), order,
5318 fpi_flags);
5319 }
5320 }
5321 }
5322
5323 /**
5324 * __free_pages - Free pages allocated with alloc_pages().
5325 * @page: The page pointer returned from alloc_pages().
5326 * @order: The order of the allocation.
5327 *
5328 * This function can free multi-page allocations that are not compound
5329 * pages. It does not check that the @order passed in matches that of
5330 * the allocation, so it is easy to leak memory. Freeing more memory
5331 * than was allocated will probably emit a warning.
5332 *
5333 * If the last reference to this page is speculative, it will be released
5334 * by put_page() which only frees the first page of a non-compound
5335 * allocation. To prevent the remaining pages from being leaked, we free
5336 * the subsequent pages here. If you want to use the page's reference
5337 * count to decide when to free the allocation, you should allocate a
5338 * compound page, and use put_page() instead of __free_pages().
5339 *
5340 * Context: May be called in interrupt context or while holding a normal
5341 * spinlock, but not in NMI context or while holding a raw spinlock.
5342 */
5343 void __free_pages(struct page *page, unsigned int order)
5344 {
5345 ___free_pages(page, order, FPI_NONE);
5346 }
5347 EXPORT_SYMBOL(__free_pages);
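/*
 * Illustrative sketch, not part of this file: as the kernel-doc above notes,
 * a non-compound high-order allocation must be freed with the same order it
 * was allocated with, because __free_pages() cannot check it. Assumes a
 * GFP_KERNEL context.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);	(4 contiguous pages)
 *
 *	if (page)
 *		__free_pages(page, 2);			(order must match)
 */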
5348
5349 /*
5350 * Can be called while holding a raw_spin_lock or from IRQ and NMI context,
5351 * for any page type (not only those that came from alloc_pages_nolock)
5352 */
5353 void free_pages_nolock(struct page *page, unsigned int order)
5354 {
5355 ___free_pages(page, order, FPI_TRYLOCK);
5356 }
5357
5358 /**
5359 * free_pages - Free pages allocated with __get_free_pages().
5360 * @addr: The virtual address tied to a page returned from __get_free_pages().
5361 * @order: The order of the allocation.
5362 *
5363 * This function behaves the same as __free_pages(). Use this function
5364 * to free pages when you only have a valid virtual address. If you have
5365 * the page, call __free_pages() instead.
5366 */
5367 void free_pages(unsigned long addr, unsigned int order)
5368 {
5369 if (addr != 0) {
5370 VM_BUG_ON(!virt_addr_valid((void *)addr));
5371 __free_pages(virt_to_page((void *)addr), order);
5372 }
5373 }
5374
5375 EXPORT_SYMBOL(free_pages);
5376
5377 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5378 size_t size)
5379 {
5380 if (addr) {
5381 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
5382 struct page *page = virt_to_page((void *)addr);
5383 struct page *last = page + nr;
5384
5385 __split_page(page, order);
5386 while (page < --last)
5387 set_page_refcounted(last);
5388
5389 last = page + (1UL << order);
5390 for (page += nr; page < last; page++)
5391 __free_pages_ok(page, 0, FPI_TO_TAIL);
5392 }
5393 return (void *)addr;
5394 }
5395
5396 /**
5397 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5398 * @size: the number of bytes to allocate
5399 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5400 *
5401 * This function is similar to alloc_pages(), except that it allocates the
5402 * minimum number of pages to satisfy the request. alloc_pages() can only
5403 * allocate memory in power-of-two numbers of pages.
5404 *
5405 * This function is also limited by MAX_PAGE_ORDER.
5406 *
5407 * Memory allocated by this function must be released by free_pages_exact().
5408 *
5409 * Return: pointer to the allocated area or %NULL in case of error.
5410 */
5411 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask)
5412 {
5413 unsigned int order = get_order(size);
5414 unsigned long addr;
5415
5416 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5417 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5418
5419 addr = get_free_pages_noprof(gfp_mask, order);
5420 return make_alloc_exact(addr, order, size);
5421 }
5422 EXPORT_SYMBOL(alloc_pages_exact_noprof);
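/*
 * Illustrative sketch, not part of this file: a 12KiB request rounds up to an
 * order-2 (16KiB) buddy allocation internally, and make_alloc_exact() hands
 * the trailing page back, so only three pages stay allocated. Assumes 4KiB
 * pages and a GFP_KERNEL context.
 *
 *	void *buf = alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL);
 *
 *	if (buf)
 *		free_pages_exact(buf, 3 * PAGE_SIZE);
 */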
5423
5424 /**
5425 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5426 * pages on a node.
5427 * @nid: the preferred node ID where memory should be allocated
5428 * @size: the number of bytes to allocate
5429 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5430 *
5431 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5432 * back.
5433 *
5434 * Return: pointer to the allocated area or %NULL in case of error.
5435 */
5436 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask)
5437 {
5438 unsigned int order = get_order(size);
5439 struct page *p;
5440
5441 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5442 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5443
5444 p = alloc_pages_node_noprof(nid, gfp_mask, order);
5445 if (!p)
5446 return NULL;
5447 return make_alloc_exact((unsigned long)page_address(p), order, size);
5448 }
5449
5450 /**
5451 * free_pages_exact - release memory allocated via alloc_pages_exact()
5452 * @virt: the value returned by alloc_pages_exact.
5453 * @size: size of allocation, same value as passed to alloc_pages_exact().
5454 *
5455 * Release the memory allocated by a previous call to alloc_pages_exact.
5456 */
5457 void free_pages_exact(void *virt, size_t size)
5458 {
5459 unsigned long addr = (unsigned long)virt;
5460 unsigned long end = addr + PAGE_ALIGN(size);
5461
5462 while (addr < end) {
5463 free_page(addr);
5464 addr += PAGE_SIZE;
5465 }
5466 }
5467 EXPORT_SYMBOL(free_pages_exact);
5468
5469 /**
5470 * nr_free_zone_pages - count number of pages beyond high watermark
5471 * @offset: The zone index of the highest zone
5472 *
5473 * nr_free_zone_pages() counts the number of pages which are beyond the
5474 * high watermark within all zones at or below a given zone index. For each
5475 * zone, the number of pages is calculated as:
5476 *
5477 * nr_free_zone_pages = managed_pages - high_pages
5478 *
5479 * Return: number of pages beyond high watermark.
5480 */
5481 static unsigned long nr_free_zone_pages(int offset)
5482 {
5483 struct zoneref *z;
5484 struct zone *zone;
5485
5486 /* Just pick one node, since fallback list is circular */
5487 unsigned long sum = 0;
5488
5489 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5490
5491 for_each_zone_zonelist(zone, z, zonelist, offset) {
5492 unsigned long size = zone_managed_pages(zone);
5493 unsigned long high = high_wmark_pages(zone);
5494 if (size > high)
5495 sum += size - high;
5496 }
5497
5498 return sum;
5499 }
5500
5501 /**
5502 * nr_free_buffer_pages - count number of pages beyond high watermark
5503 *
5504 * nr_free_buffer_pages() counts the number of pages which are beyond the high
5505 * watermark within ZONE_DMA and ZONE_NORMAL.
5506 *
5507 * Return: number of pages beyond high watermark within ZONE_DMA and
5508 * ZONE_NORMAL.
5509 */
5510 unsigned long nr_free_buffer_pages(void)
5511 {
5512 return nr_free_zone_pages(gfp_zone(GFP_USER));
5513 }
5514 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5515
5516 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5517 {
5518 zoneref->zone = zone;
5519 zoneref->zone_idx = zone_idx(zone);
5520 }
5521
5522 /*
5523 * Builds allocation fallback zone lists.
5524 *
5525 * Add all populated zones of a node to the zonelist.
5526 */
5527 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
5528 {
5529 struct zone *zone;
5530 enum zone_type zone_type = MAX_NR_ZONES;
5531 int nr_zones = 0;
5532
5533 do {
5534 zone_type--;
5535 zone = pgdat->node_zones + zone_type;
5536 if (populated_zone(zone)) {
5537 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
5538 check_highest_zone(zone_type);
5539 }
5540 } while (zone_type);
5541
5542 return nr_zones;
5543 }
5544
5545 #ifdef CONFIG_NUMA
5546
5547 static int __parse_numa_zonelist_order(char *s)
5548 {
5549 /*
5550 * We used to support different zonelist modes but they turned
5551 * out to be just not useful. Let's keep the warning in place
5552 * if somebody still uses the command line parameter so that we do
5553 * not fail it silently.
5554 */
5555 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5556 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
5557 return -EINVAL;
5558 }
5559 return 0;
5560 }
5561
5562 static char numa_zonelist_order[] = "Node";
5563 #define NUMA_ZONELIST_ORDER_LEN 16
5564 /*
5565 * sysctl handler for numa_zonelist_order
5566 */
5567 static int numa_zonelist_order_handler(const struct ctl_table *table, int write,
5568 void *buffer, size_t *length, loff_t *ppos)
5569 {
5570 if (write)
5571 return __parse_numa_zonelist_order(buffer);
5572 return proc_dostring(table, write, buffer, length, ppos);
5573 }
5574
5575 static int node_load[MAX_NUMNODES];
5576
5577 /**
5578 * find_next_best_node - find the next node that should appear in a given node's fallback list
5579 * @node: node whose fallback list we're appending
5580 * @used_node_mask: nodemask_t of already used nodes
5581 *
5582 * We use a number of factors to determine which is the next node that should
5583 * appear on a given node's fallback list. The node should not have appeared
5584 * already in @node's fallback list, and it should be the next closest node
5585 * according to the distance array (which contains arbitrary distance values
5586 * from each node to each node in the system), and should also prefer nodes
5587 * with no CPUs, since presumably they'll have very little allocation pressure
5588 * on them otherwise.
5589 *
5590 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5591 */
5592 int find_next_best_node(int node, nodemask_t *used_node_mask)
5593 {
5594 int n, val;
5595 int min_val = INT_MAX;
5596 int best_node = NUMA_NO_NODE;
5597
5598 /*
5599 * Use the local node if we haven't already, but for memoryless local
5600 * node, we should skip it and fall back to other nodes.
5601 */
5602 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) {
5603 node_set(node, *used_node_mask);
5604 return node;
5605 }
5606
5607 for_each_node_state(n, N_MEMORY) {
5608
5609 /* Don't want a node to appear more than once */
5610 if (node_isset(n, *used_node_mask))
5611 continue;
5612
5613 /* Use the distance array to find the distance */
5614 val = node_distance(node, n);
5615
5616 /* Penalize nodes under us ("prefer the next node") */
5617 val += (n < node);
5618
5619 /* Give preference to headless and unused nodes */
5620 if (!cpumask_empty(cpumask_of_node(n)))
5621 val += PENALTY_FOR_NODE_WITH_CPUS;
5622
5623 /* Slight preference for less loaded node */
5624 val *= MAX_NUMNODES;
5625 val += node_load[n];
5626
5627 if (val < min_val) {
5628 min_val = val;
5629 best_node = n;
5630 }
5631 }
5632
5633 if (best_node >= 0)
5634 node_set(best_node, *used_node_mask);
5635
5636 return best_node;
5637 }
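/*
 * Worked example with hypothetical distances: appending to node 0's fallback
 * list with nodes 1 and 2 still unused, node_distance(0, 1) == 20,
 * node_distance(0, 2) == 40, both nodes holding CPUs and carrying equal
 * node_load. Node 1 scores (20 + 0 + CPU penalty) * MAX_NUMNODES + load and
 * node 2 scores (40 + 0 + CPU penalty) * MAX_NUMNODES + load, so the closer
 * node 1 wins. A CPU-less node at the same distance as node 1 would beat it,
 * since it skips the CPU penalty.
 */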
5638
5639
5640 /*
5641 * Build zonelists ordered by node and zones within node.
5642 * This results in maximum locality--normal zone overflows into local
5643 * DMA zone, if any--but risks exhausting DMA zone.
5644 */
5645 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5646 unsigned nr_nodes)
5647 {
5648 struct zoneref *zonerefs;
5649 int i;
5650
5651 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5652
5653 for (i = 0; i < nr_nodes; i++) {
5654 int nr_zones;
5655
5656 pg_data_t *node = NODE_DATA(node_order[i]);
5657
5658 nr_zones = build_zonerefs_node(node, zonerefs);
5659 zonerefs += nr_zones;
5660 }
5661 zonerefs->zone = NULL;
5662 zonerefs->zone_idx = 0;
5663 }
5664
5665 /*
5666 * Build __GFP_THISNODE zonelists
5667 */
5668 static void build_thisnode_zonelists(pg_data_t *pgdat)
5669 {
5670 struct zoneref *zonerefs;
5671 int nr_zones;
5672
5673 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5674 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5675 zonerefs += nr_zones;
5676 zonerefs->zone = NULL;
5677 zonerefs->zone_idx = 0;
5678 }
5679
5680 static void build_zonelists(pg_data_t *pgdat)
5681 {
5682 static int node_order[MAX_NUMNODES];
5683 int node, nr_nodes = 0;
5684 nodemask_t used_mask = NODE_MASK_NONE;
5685 int local_node, prev_node;
5686
5687 /* NUMA-aware ordering of nodes */
5688 local_node = pgdat->node_id;
5689 prev_node = local_node;
5690
5691 memset(node_order, 0, sizeof(node_order));
5692 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5693 /*
5694 * We don't want to pressure a particular node,
5695 * so add a penalty to the first node in the same
5696 * distance group to make it round-robin.
5697 */
5698 if (node_distance(local_node, node) !=
5699 node_distance(local_node, prev_node))
5700 node_load[node] += 1;
5701
5702 node_order[nr_nodes++] = node;
5703 prev_node = node;
5704 }
5705
5706 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5707 build_thisnode_zonelists(pgdat);
5708 pr_info("Fallback order for Node %d: ", local_node);
5709 for (node = 0; node < nr_nodes; node++)
5710 pr_cont("%d ", node_order[node]);
5711 pr_cont("\n");
5712 }
5713
5714 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5715 /*
5716 * Return node id of node used for "local" allocations.
5717 * I.e., first node id of first zone in arg node's generic zonelist.
5718 * Used for initializing percpu 'numa_mem', which is used primarily
5719 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5720 */
5721 int local_memory_node(int node)
5722 {
5723 struct zoneref *z;
5724
5725 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5726 gfp_zone(GFP_KERNEL),
5727 NULL);
5728 return zonelist_node_idx(z);
5729 }
5730 #endif
5731
5732 static void setup_min_unmapped_ratio(void);
5733 static void setup_min_slab_ratio(void);
5734 #else /* CONFIG_NUMA */
5735
5736 static void build_zonelists(pg_data_t *pgdat)
5737 {
5738 struct zoneref *zonerefs;
5739 int nr_zones;
5740
5741 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5742 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5743 zonerefs += nr_zones;
5744
5745 zonerefs->zone = NULL;
5746 zonerefs->zone_idx = 0;
5747 }
5748
5749 #endif /* CONFIG_NUMA */
5750
5751 /*
5752 * Boot pageset table. One per cpu which is going to be used for all
5753 * zones and all nodes. The parameters will be set in such a way
5754 * that an item put on a list will immediately be handed over to
5755 * the buddy list. This is safe since pageset manipulation is done
5756 * with interrupts disabled.
5757 *
5758 * The boot_pagesets must be kept even after bootup is complete for
5759 * unused processors and/or zones. They do play a role for bootstrapping
5760 * hotplugged processors.
5761 *
5762 * zoneinfo_show() and maybe other functions do
5763 * not check if the processor is online before following the pageset pointer.
5764 * Other parts of the kernel may not check if the zone is available.
5765 */
5766 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
5767 /* These effectively disable the pcplists in the boot pageset completely */
5768 #define BOOT_PAGESET_HIGH 0
5769 #define BOOT_PAGESET_BATCH 1
5770 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
5771 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
5772
5773 static void __build_all_zonelists(void *data)
5774 {
5775 int nid;
5776 int __maybe_unused cpu;
5777 pg_data_t *self = data;
5778 unsigned long flags;
5779
5780 /*
5781 * The zonelist_update_seq must be acquired with irqsave because the
5782 * reader can be invoked from IRQ with GFP_ATOMIC.
5783 */
5784 write_seqlock_irqsave(&zonelist_update_seq, flags);
5785 /*
5786 * Also disable synchronous printk() to prevent any printk() from
5787 * trying to hold port->lock, because
5788 * tty_insert_flip_string_and_push_buffer() on another CPU might be
5789 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
5790 */
5791 printk_deferred_enter();
5792
5793 #ifdef CONFIG_NUMA
5794 memset(node_load, 0, sizeof(node_load));
5795 #endif
5796
5797 /*
5798 * This node is hotadded and no memory is yet present. So just
5799 * building zonelists is fine - no need to touch other nodes.
5800 */
5801 if (self && !node_online(self->node_id)) {
5802 build_zonelists(self);
5803 } else {
5804 /*
5805 * All possible nodes have pgdat preallocated
5806 * in free_area_init
5807 */
5808 for_each_node(nid) {
5809 pg_data_t *pgdat = NODE_DATA(nid);
5810
5811 build_zonelists(pgdat);
5812 }
5813
5814 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5815 /*
5816 * We now know the "local memory node" for each node--
5817 * i.e., the node of the first zone in the generic zonelist.
5818 * Set up numa_mem percpu variable for on-line cpus. During
5819 * boot, only the boot cpu should be on-line; we'll init the
5820 * secondary cpus' numa_mem as they come on-line. During
5821 * node/memory hotplug, we'll fixup all on-line cpus.
5822 */
5823 for_each_online_cpu(cpu)
5824 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5825 #endif
5826 }
5827
5828 printk_deferred_exit();
5829 write_sequnlock_irqrestore(&zonelist_update_seq, flags);
5830 }
5831
5832 static noinline void __init
5833 build_all_zonelists_init(void)
5834 {
5835 int cpu;
5836
5837 __build_all_zonelists(NULL);
5838
5839 /*
5840 * Initialize the boot_pagesets that are going to be used
5841 * for bootstrapping processors. The real pagesets for
5842 * each zone will be allocated later when the per cpu
5843 * allocator is available.
5844 *
5845 * boot_pagesets are used also for bootstrapping offline
5846 * cpus if the system is already booted because the pagesets
5847 * are needed to initialize allocators on a specific cpu too.
5848 * F.e. the percpu allocator needs the page allocator which
5849 * needs the percpu allocator in order to allocate its pagesets
5850 * (a chicken-egg dilemma).
5851 */
5852 for_each_possible_cpu(cpu)
5853 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
5854
5855 mminit_verify_zonelist();
5856 cpuset_init_current_mems_allowed();
5857 }
5858
5859 /*
5860 * Called both at early boot and on memory/node hotplug.
5861 *
5862 * __ref due to call of __init annotated helper build_all_zonelists_init
5863 * [protected by SYSTEM_BOOTING].
5864 */
5865 void __ref build_all_zonelists(pg_data_t *pgdat)
5866 {
5867 unsigned long vm_total_pages;
5868
5869 if (system_state == SYSTEM_BOOTING) {
5870 build_all_zonelists_init();
5871 } else {
5872 __build_all_zonelists(pgdat);
5873 /* cpuset refresh routine should be here */
5874 }
5875 /* Get the number of free pages beyond high watermark in all zones. */
5876 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
5877 /*
5878 * Disable grouping by mobility if the number of pages in the
5879 * system is too low to allow the mechanism to work. It would be
5880 * more accurate, but expensive to check per-zone. This check is
5881 * made on memory-hotadd so a system can start with mobility
5882 * disabled and enable it later
5883 */
5884 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5885 page_group_by_mobility_disabled = 1;
5886 else
5887 page_group_by_mobility_disabled = 0;
5888
5889 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
5890 nr_online_nodes,
5891 str_off_on(page_group_by_mobility_disabled),
5892 vm_total_pages);
5893 #ifdef CONFIG_NUMA
5894 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5895 #endif
5896 }
5897
5898 static int zone_batchsize(struct zone *zone)
5899 {
5900 #ifdef CONFIG_MMU
5901 int batch;
5902
5903 /*
5904 * The number of pages to batch allocate is either ~0.025%
5905 * of the zone or 256KB, whichever is smaller. The batch
5906 * size is striking a balance between allocation latency
5907 * and zone lock contention.
5908 */
5909 batch = min(zone_managed_pages(zone) >> 12, SZ_256K / PAGE_SIZE);
5910 if (batch <= 1)
5911 return 1;
5912
5913 /*
5914 * Clamp the batch to a 2^n - 1 value. Having a power
5915 * of 2 value was found to be more likely to have
5916 * suboptimal cache aliasing properties in some cases.
5917 *
5918 * For example if 2 tasks are alternately allocating
5919 * batches of pages, one task can end up with a lot
5920 * of pages of one half of the possible page colors
5921 * and the other with pages of the other colors.
5922 */
5923 batch = rounddown_pow_of_two(batch + batch/2) - 1;
5924
5925 return batch;
5926
5927 #else
5928 /* The deferral and batching of frees should be suppressed under NOMMU
5929 * conditions.
5930 *
5931 * The problem is that NOMMU needs to be able to allocate large chunks
5932 * of contiguous memory as there's no hardware page translation to
5933 * assemble apparent contiguous memory from discontiguous pages.
5934 *
5935 * Queueing large contiguous runs of pages for batching, however,
5936 * causes the pages to actually be freed in smaller chunks. As there
5937 * can be a significant delay between the individual batches being
5938 * recycled, this leads to the once large chunks of space being
5939 * fragmented and becoming unavailable for high-order allocations.
5940 */
5941 return 1;
5942 #endif
5943 }
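/*
 * Worked example, assuming 4KiB pages: for a 1GiB zone, managed_pages is
 * 262144, so 262144 >> 12 == 64 and SZ_256K / PAGE_SIZE == 64, giving
 * batch == 64 before clamping. rounddown_pow_of_two(64 + 32) - 1 == 63,
 * so such a zone ends up with the familiar pcp batch of 63 pages.
 */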
5944
5945 static int percpu_pagelist_high_fraction;
5946 static int zone_highsize(struct zone *zone, int batch, int cpu_online,
5947 int high_fraction)
5948 {
5949 #ifdef CONFIG_MMU
5950 int high;
5951 int nr_split_cpus;
5952 unsigned long total_pages;
5953
5954 if (!high_fraction) {
5955 /*
5956 * By default, the high value of the pcp is based on the zone
5957 * low watermark so that if they are full then background
5958 * reclaim will not be started prematurely.
5959 */
5960 total_pages = low_wmark_pages(zone);
5961 } else {
5962 /*
5963 * If percpu_pagelist_high_fraction is configured, the high
5964 * value is based on a fraction of the managed pages in the
5965 * zone.
5966 */
5967 total_pages = zone_managed_pages(zone) / high_fraction;
5968 }
5969
5970 /*
5971 * Split the high value across all online CPUs local to the zone. Note
5972 * that early in boot CPUs may not be online yet and that during
5973 * CPU hotplug the cpumask is not yet updated when a CPU is being
5974 * onlined. For memory nodes that have no CPUs, split the high value
5975 * across all online CPUs to mitigate the risk that reclaim is triggered
5976 * prematurely due to pages stored on pcp lists.
5977 */
5978 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
5979 if (!nr_split_cpus)
5980 nr_split_cpus = num_online_cpus();
5981 high = total_pages / nr_split_cpus;
5982
5983 /*
5984 * Ensure high is at least batch*4. The multiple is based on the
5985 * historical relationship between high and batch.
5986 */
5987 high = max(high, batch << 2);
5988
5989 return high;
5990 #else
5991 return 0;
5992 #endif
5993 }
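/*
 * Worked example, assuming the defaults (percpu_pagelist_high_fraction not
 * set) and a hypothetical low watermark of 16384 pages on a node with 8
 * online CPUs: high == 16384 / 8 == 2048 pages per CPU, which is then raised
 * to at least batch * 4 (252 pages for a batch of 63), so the 2048 value
 * stands.
 */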
5994
5995 /*
5996 * pcp->high and pcp->batch values are related and generally batch is lower
5997 * than high. They are also related to pcp->count such that count is lower
5998 * than high, and as soon as it reaches high, the pcplist is flushed.
5999 *
6000 * However, guaranteeing these relations at all times would require e.g. write
6001 * barriers here but also careful usage of read barriers at the read side, and
6002 * thus be prone to error and bad for performance. Thus the update only prevents
6003 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max
6004 * should ensure they can cope with those fields changing asynchronously, and
6005 * fully trust only the pcp->count field on the local CPU with interrupts
6006 * disabled.
6007 *
6008 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
6009 * outside of boot time (or some other assurance that no concurrent updaters
6010 * exist).
6011 */
6012 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min,
6013 unsigned long high_max, unsigned long batch)
6014 {
6015 WRITE_ONCE(pcp->batch, batch);
6016 WRITE_ONCE(pcp->high_min, high_min);
6017 WRITE_ONCE(pcp->high_max, high_max);
6018 }
6019
6020 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
6021 {
6022 int pindex;
6023
6024 memset(pcp, 0, sizeof(*pcp));
6025 memset(pzstats, 0, sizeof(*pzstats));
6026
6027 spin_lock_init(&pcp->lock);
6028 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
6029 INIT_LIST_HEAD(&pcp->lists[pindex]);
6030
6031 /*
6032 * Set batch and high values safe for a boot pageset. A true percpu
6033 * pageset's initialization will update them subsequently. Here we don't
6034 * need to be as careful as pageset_update() as nobody can access the
6035 * pageset yet.
6036 */
6037 pcp->high_min = BOOT_PAGESET_HIGH;
6038 pcp->high_max = BOOT_PAGESET_HIGH;
6039 pcp->batch = BOOT_PAGESET_BATCH;
6040 }
6041
6042 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min,
6043 unsigned long high_max, unsigned long batch)
6044 {
6045 struct per_cpu_pages *pcp;
6046 int cpu;
6047
6048 for_each_possible_cpu(cpu) {
6049 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6050 pageset_update(pcp, high_min, high_max, batch);
6051 }
6052 }
6053
6054 /*
6055 * Calculate and set new high and batch values for all per-cpu pagesets of a
6056 * zone based on the zone's size.
6057 */
6058 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
6059 {
6060 int new_high_min, new_high_max, new_batch;
6061
6062 new_batch = zone_batchsize(zone);
6063 if (percpu_pagelist_high_fraction) {
6064 new_high_min = zone_highsize(zone, new_batch, cpu_online,
6065 percpu_pagelist_high_fraction);
6066 /*
6067 * PCP high is tuned manually, disable auto-tuning via
6068 * setting high_min and high_max to the manual value.
6069 */
6070 new_high_max = new_high_min;
6071 } else {
6072 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0);
6073 new_high_max = zone_highsize(zone, new_batch, cpu_online,
6074 MIN_PERCPU_PAGELIST_HIGH_FRACTION);
6075 }
6076
6077 if (zone->pageset_high_min == new_high_min &&
6078 zone->pageset_high_max == new_high_max &&
6079 zone->pageset_batch == new_batch)
6080 return;
6081
6082 zone->pageset_high_min = new_high_min;
6083 zone->pageset_high_max = new_high_max;
6084 zone->pageset_batch = new_batch;
6085
6086 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max,
6087 new_batch);
6088 }
6089
6090 void __meminit setup_zone_pageset(struct zone *zone)
6091 {
6092 int cpu;
6093
6094 /* Size may be 0 on !SMP && !NUMA */
6095 if (sizeof(struct per_cpu_zonestat) > 0)
6096 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
6097
6098 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
6099 for_each_possible_cpu(cpu) {
6100 struct per_cpu_pages *pcp;
6101 struct per_cpu_zonestat *pzstats;
6102
6103 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6104 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6105 per_cpu_pages_init(pcp, pzstats);
6106 }
6107
6108 zone_set_pageset_high_and_batch(zone, 0);
6109 }
6110
6111 /*
6112 * The zone indicated has a new number of managed_pages; batch sizes and percpu
6113 * page high values need to be recalculated.
6114 */
6115 static void zone_pcp_update(struct zone *zone, int cpu_online)
6116 {
6117 mutex_lock(&pcp_batch_high_lock);
6118 zone_set_pageset_high_and_batch(zone, cpu_online);
6119 mutex_unlock(&pcp_batch_high_lock);
6120 }
6121
6122 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
6123 {
6124 struct per_cpu_pages *pcp;
6125 struct cpu_cacheinfo *cci;
6126
6127 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6128 cci = get_cpu_cacheinfo(cpu);
6129 /*
6130 * If the CPU's data cache slice is large enough, "pcp->batch"
6131 * pages can be preserved in the PCP before the PCP is drained
6132 * when high-order pages are freed consecutively without allocation.
6133 * This can reduce zone lock contention without hurting the
6134 * sharing of cache-hot pages.
6135 */
6136 pcp_spin_lock_nopin(pcp);
6137 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
6138 pcp->flags |= PCPF_FREE_HIGH_BATCH;
6139 else
6140 pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
6141 pcp_spin_unlock_nopin(pcp);
6142 }
6143
6144 void setup_pcp_cacheinfo(unsigned int cpu)
6145 {
6146 struct zone *zone;
6147
6148 for_each_populated_zone(zone)
6149 zone_pcp_update_cacheinfo(zone, cpu);
6150 }
6151
6152 /*
6153 * Allocate per cpu pagesets and initialize them.
6154 * Before this call only boot pagesets were available.
6155 */
6156 void __init setup_per_cpu_pageset(void)
6157 {
6158 struct pglist_data *pgdat;
6159 struct zone *zone;
6160 int __maybe_unused cpu;
6161
6162 for_each_populated_zone(zone)
6163 setup_zone_pageset(zone);
6164
6165 #ifdef CONFIG_NUMA
6166 /*
6167 * Unpopulated zones continue using the boot pagesets.
6168 * The numa stats for these pagesets need to be reset.
6169 * Otherwise, they will end up skewing the stats of
6170 * the nodes these zones are associated with.
6171 */
6172 for_each_possible_cpu(cpu) {
6173 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
6174 memset(pzstats->vm_numa_event, 0,
6175 sizeof(pzstats->vm_numa_event));
6176 }
6177 #endif
6178
6179 for_each_online_pgdat(pgdat)
6180 pgdat->per_cpu_nodestats =
6181 alloc_percpu(struct per_cpu_nodestat);
6182 }
6183
6184 __meminit void zone_pcp_init(struct zone *zone)
6185 {
6186 /*
6187 * per cpu subsystem is not up at this point. The following code
6188 * relies on the ability of the linker to provide the
6189 * offset of a (static) per cpu variable into the per cpu area.
6190 */
6191 zone->per_cpu_pageset = &boot_pageset;
6192 zone->per_cpu_zonestats = &boot_zonestats;
6193 zone->pageset_high_min = BOOT_PAGESET_HIGH;
6194 zone->pageset_high_max = BOOT_PAGESET_HIGH;
6195 zone->pageset_batch = BOOT_PAGESET_BATCH;
6196
6197 if (populated_zone(zone))
6198 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
6199 zone->present_pages, zone_batchsize(zone));
6200 }
6201
6202 static void setup_per_zone_lowmem_reserve(void);
6203
6204 void adjust_managed_page_count(struct page *page, long count)
6205 {
6206 atomic_long_add(count, &page_zone(page)->managed_pages);
6207 totalram_pages_add(count);
6208 setup_per_zone_lowmem_reserve();
6209 }
6210 EXPORT_SYMBOL(adjust_managed_page_count);
6211
6212 void free_reserved_page(struct page *page)
6213 {
6214 clear_page_tag_ref(page);
6215 ClearPageReserved(page);
6216 init_page_count(page);
6217 __free_page(page);
6218 adjust_managed_page_count(page, 1);
6219 }
6220 EXPORT_SYMBOL(free_reserved_page);
6221
6222 static int page_alloc_cpu_dead(unsigned int cpu)
6223 {
6224 struct zone *zone;
6225
6226 lru_add_drain_cpu(cpu);
6227 mlock_drain_remote(cpu);
6228 drain_pages(cpu);
6229
6230 /*
6231 * Spill the event counters of the dead processor
6232 * into the current processors event counters.
6233 * This artificially elevates the count of the current
6234 * processor.
6235 */
6236 vm_events_fold_cpu(cpu);
6237
6238 /*
6239 * Zero the differential counters of the dead processor
6240 * so that the vm statistics are consistent.
6241 *
6242 * This is only okay since the processor is dead and cannot
6243 * race with what we are doing.
6244 */
6245 cpu_vm_stats_fold(cpu);
6246
6247 for_each_populated_zone(zone)
6248 zone_pcp_update(zone, 0);
6249
6250 return 0;
6251 }
6252
6253 static int page_alloc_cpu_online(unsigned int cpu)
6254 {
6255 struct zone *zone;
6256
6257 for_each_populated_zone(zone)
6258 zone_pcp_update(zone, 1);
6259 return 0;
6260 }
6261
6262 void __init page_alloc_init_cpuhp(void)
6263 {
6264 int ret;
6265
6266 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
6267 "mm/page_alloc:pcp",
6268 page_alloc_cpu_online,
6269 page_alloc_cpu_dead);
6270 WARN_ON(ret < 0);
6271 }
6272
6273 /*
6274 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
6275 * or min_free_kbytes changes.
6276 */
6277 static void calculate_totalreserve_pages(void)
6278 {
6279 struct pglist_data *pgdat;
6280 unsigned long reserve_pages = 0;
6281 enum zone_type i, j;
6282
6283 for_each_online_pgdat(pgdat) {
6284
6285 pgdat->totalreserve_pages = 0;
6286
6287 for (i = 0; i < MAX_NR_ZONES; i++) {
6288 struct zone *zone = pgdat->node_zones + i;
6289 long max = 0;
6290 unsigned long managed_pages = zone_managed_pages(zone);
6291
6292 /*
6293 * lowmem_reserve[j] is monotonically non-decreasing
6294 * in j for a given zone (see
6295 * setup_per_zone_lowmem_reserve()). The maximum
6296 * valid reserve lives at the highest index with a
6297 * non-zero value, so scan backwards and stop at the
6298 * first hit.
6299 */
6300 for (j = MAX_NR_ZONES - 1; j > i; j--) {
6301 if (!zone->lowmem_reserve[j])
6302 continue;
6303
6304 max = zone->lowmem_reserve[j];
6305 break;
6306 }
6307 /* we treat the high watermark as reserved pages. */
6308 max += high_wmark_pages(zone);
6309
6310 max = min_t(unsigned long, max, managed_pages);
6311
6312 pgdat->totalreserve_pages += max;
6313
6314 reserve_pages += max;
6315 }
6316 }
6317 totalreserve_pages = reserve_pages;
6318 trace_mm_calculate_totalreserve_pages(totalreserve_pages);
6319 }
6320
6321 /*
6322 * setup_per_zone_lowmem_reserve - called whenever
6323 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
6324 * has a correct lowmem_reserve[] value, so an adequate number of
6325 * pages are left in the zone after a successful __alloc_pages().
6326 */
6327 static void setup_per_zone_lowmem_reserve(void)
6328 {
6329 struct pglist_data *pgdat;
6330 enum zone_type i, j;
6331 /*
6332 * For a given zone node_zones[i], lowmem_reserve[j] (j > i)
6333 * represents how many pages in zone i must effectively be kept
6334 * in reserve when deciding whether an allocation class that is
6335 * allowed to allocate from zones up to j may fall back into
6336 * zone i.
6337 *
6338 * As j increases, the allocation class can use a strictly larger
6339 * set of fallback zones and therefore must not be allowed to
6340 * deplete low zones more aggressively than a less flexible one.
6341 * As a result, lowmem_reserve[j] is required to be monotonically
6342 * non-decreasing in j for each zone i. Callers such as
6343 * calculate_totalreserve_pages() rely on this monotonicity when
6344 * selecting the maximum reserve entry.
6345 */
6346 for_each_online_pgdat(pgdat) {
6347 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
6348 struct zone *zone = &pgdat->node_zones[i];
6349 int ratio = sysctl_lowmem_reserve_ratio[i];
6350 bool clear = !ratio || !zone_managed_pages(zone);
6351 unsigned long managed_pages = 0;
6352
6353 for (j = i + 1; j < MAX_NR_ZONES; j++) {
6354 struct zone *upper_zone = &pgdat->node_zones[j];
6355
6356 managed_pages += zone_managed_pages(upper_zone);
6357
6358 if (clear)
6359 zone->lowmem_reserve[j] = 0;
6360 else
6361 zone->lowmem_reserve[j] = managed_pages / ratio;
6362 trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone,
6363 zone->lowmem_reserve[j]);
6364 }
6365 }
6366 }
6367
6368 /* update totalreserve_pages */
6369 calculate_totalreserve_pages();
6370 }
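/*
 * Worked example with a hypothetical layout: with the ratio for DMA32 set to
 * 256 and a single higher ZONE_NORMAL of 1048576 managed pages (4GiB with
 * 4KiB pages), DMA32 gets lowmem_reserve[ZONE_NORMAL] = 1048576 / 256 == 4096
 * pages, i.e. allocations that could have used ZONE_NORMAL may only fall back
 * into DMA32 while it still has 4096 pages beyond its ordinary watermark.
 */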
6371
6372 static void __setup_per_zone_wmarks(void)
6373 {
6374 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6375 unsigned long lowmem_pages = 0;
6376 struct zone *zone;
6377 unsigned long flags;
6378
6379 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */
6380 for_each_zone(zone) {
6381 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
6382 lowmem_pages += zone_managed_pages(zone);
6383 }
6384
6385 for_each_zone(zone) {
6386 u64 tmp;
6387
6388 spin_lock_irqsave(&zone->lock, flags);
6389 tmp = (u64)pages_min * zone_managed_pages(zone);
6390 tmp = div64_ul(tmp, lowmem_pages);
6391 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
6392 /*
6393 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6394 * need highmem and movable zones pages, so cap pages_min
6395 * to a small value here.
6396 *
6397 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
6398 * deltas control async page reclaim, and so should
6399 * not be capped for highmem and movable zones.
6400 */
6401 unsigned long min_pages;
6402
6403 min_pages = zone_managed_pages(zone) / 1024;
6404 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
6405 zone->_watermark[WMARK_MIN] = min_pages;
6406 } else {
6407 /*
6408 * If it's a lowmem zone, reserve a number of pages
6409 * proportionate to the zone's size.
6410 */
6411 zone->_watermark[WMARK_MIN] = tmp;
6412 }
6413
6414 /*
6415 * Set the kswapd watermarks distance according to the
6416 * scale factor in proportion to available memory, but
6417 * ensure a minimum size on small systems.
6418 */
6419 tmp = max_t(u64, tmp >> 2,
6420 mult_frac(zone_managed_pages(zone),
6421 watermark_scale_factor, 10000));
6422
6423 zone->watermark_boost = 0;
6424 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
6425 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
6426 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
6427 trace_mm_setup_per_zone_wmarks(zone);
6428
6429 spin_unlock_irqrestore(&zone->lock, flags);
6430 }
6431
6432 /* update totalreserve_pages */
6433 calculate_totalreserve_pages();
6434 }
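/*
 * Worked example under the assumption of min_free_kbytes == 4096 on a machine
 * whose only lowmem zone manages 262144 pages (4KiB page size): pages_min is
 * 4096 >> 2 == 1024, so WMARK_MIN == 1024. With the default
 * watermark_scale_factor of 10, tmp becomes
 * max(1024 >> 2, 262144 * 10 / 10000) == max(256, 262) == 262, giving
 * WMARK_LOW == 1286 and WMARK_HIGH == 1548 pages.
 */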
6435
6436 /**
6437 * setup_per_zone_wmarks - called when min_free_kbytes changes
6438 * or when memory is hot-{added|removed}
6439 *
6440 * Ensures that the watermark[min,low,high] values for each zone are set
6441 * correctly with respect to min_free_kbytes.
6442 */
6443 void setup_per_zone_wmarks(void)
6444 {
6445 struct zone *zone;
6446 static DEFINE_SPINLOCK(lock);
6447
6448 spin_lock(&lock);
6449 __setup_per_zone_wmarks();
6450 spin_unlock(&lock);
6451
6452 /*
6453 * The watermark sizes have changed, so update the pcpu batch
6454 * and high limits or the limits may be inappropriate.
6455 */
6456 for_each_zone(zone)
6457 zone_pcp_update(zone, 0);
6458 }
6459
6460 /*
6461 * Initialise min_free_kbytes.
6462 *
6463 * For small machines we want it small (128k min). For large machines
6464 * we want it large (256MB max). But it is not linear, because network
6465 * bandwidth does not increase linearly with machine size. We use
6466 *
6467 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
6468 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
6469 *
6470 * which yields
6471 *
6472 * 16MB: 512k
6473 * 32MB: 724k
6474 * 64MB: 1024k
6475 * 128MB: 1448k
6476 * 256MB: 2048k
6477 * 512MB: 2896k
6478 * 1024MB: 4096k
6479 * 2048MB: 5792k
6480 * 4096MB: 8192k
6481 * 8192MB: 11584k
6482 * 16384MB: 16384k
6483 */
6484 void calculate_min_free_kbytes(void)
6485 {
6486 unsigned long lowmem_kbytes;
6487 int new_min_free_kbytes;
6488
6489 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
6490 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
6491
6492 if (new_min_free_kbytes > user_min_free_kbytes)
6493 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
6494 else
6495 pr_warn_ratelimited("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6496 new_min_free_kbytes, user_min_free_kbytes);
6497
6498 }
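/*
 * Worked example matching the table above: with roughly 1GiB of lowmem,
 * nr_free_buffer_pages() is about 262144, so lowmem_kbytes is about 1048576
 * and int_sqrt(1048576 * 16) == int_sqrt(16777216) == 4096, i.e.
 * min_free_kbytes defaults to about 4096k before clamping to [128, 262144].
 */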
6499
6500 int __meminit init_per_zone_wmark_min(void)
6501 {
6502 calculate_min_free_kbytes();
6503 setup_per_zone_wmarks();
6504 refresh_zone_stat_thresholds();
6505 setup_per_zone_lowmem_reserve();
6506
6507 #ifdef CONFIG_NUMA
6508 setup_min_unmapped_ratio();
6509 setup_min_slab_ratio();
6510 #endif
6511
6512 khugepaged_min_free_kbytes_update();
6513
6514 return 0;
6515 }
6516 postcore_initcall(init_per_zone_wmark_min)
6517
6518 /*
6519 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
6520 * that we can call two helper functions whenever min_free_kbytes
6521 * changes.
6522 */
6523 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write,
6524 void *buffer, size_t *length, loff_t *ppos)
6525 {
6526 int rc;
6527
6528 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6529 if (rc)
6530 return rc;
6531
6532 if (write) {
6533 user_min_free_kbytes = min_free_kbytes;
6534 setup_per_zone_wmarks();
6535 }
6536 return 0;
6537 }
6538
6539 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write,
6540 void *buffer, size_t *length, loff_t *ppos)
6541 {
6542 int rc;
6543
6544 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6545 if (rc)
6546 return rc;
6547
6548 if (write)
6549 setup_per_zone_wmarks();
6550
6551 return 0;
6552 }
6553
6554 #ifdef CONFIG_NUMA
6555 static void setup_min_unmapped_ratio(void)
6556 {
6557 pg_data_t *pgdat;
6558 struct zone *zone;
6559
6560 for_each_online_pgdat(pgdat)
6561 pgdat->min_unmapped_pages = 0;
6562
6563 for_each_zone(zone)
6564 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
6565 sysctl_min_unmapped_ratio) / 100;
6566 }
6567
6568
6569 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write,
6570 void *buffer, size_t *length, loff_t *ppos)
6571 {
6572 int rc;
6573
6574 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6575 if (rc)
6576 return rc;
6577
6578 setup_min_unmapped_ratio();
6579
6580 return 0;
6581 }
6582
6583 static void setup_min_slab_ratio(void)
6584 {
6585 pg_data_t *pgdat;
6586 struct zone *zone;
6587
6588 for_each_online_pgdat(pgdat)
6589 pgdat->min_slab_pages = 0;
6590
6591 for_each_zone(zone)
6592 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
6593 sysctl_min_slab_ratio) / 100;
6594 }
6595
6596 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write,
6597 void *buffer, size_t *length, loff_t *ppos)
6598 {
6599 int rc;
6600
6601 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6602 if (rc)
6603 return rc;
6604
6605 setup_min_slab_ratio();
6606
6607 return 0;
6608 }
6609 #endif
6610
6611 /*
6612 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6613 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6614 * whenever sysctl_lowmem_reserve_ratio changes.
6615 *
6616 * The reserve ratio obviously has absolutely no relation with the
6617 * minimum watermarks. The lowmem reserve ratio only makes sense
6618 * in terms of the boot time zone sizes.
6619 */
6620 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table,
6621 int write, void *buffer, size_t *length, loff_t *ppos)
6622 {
6623 int i;
6624
6625 proc_dointvec_minmax(table, write, buffer, length, ppos);
6626
6627 for (i = 0; i < MAX_NR_ZONES; i++) {
6628 if (sysctl_lowmem_reserve_ratio[i] < 1)
6629 sysctl_lowmem_reserve_ratio[i] = 0;
6630 }
6631
6632 setup_per_zone_lowmem_reserve();
6633 return 0;
6634 }
6635
6636 /*
6637 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
6638 * cpu. It is the fraction of total pages in each zone that a hot per cpu
6639 * pagelist can have before it gets flushed back to the buddy allocator.
6640 */
6641 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table,
6642 int write, void *buffer, size_t *length, loff_t *ppos)
6643 {
6644 struct zone *zone;
6645 int old_percpu_pagelist_high_fraction;
6646 int ret;
6647
6648 /*
6649 * Avoid using pcp_batch_high_lock for reads as the value is read
6650 * atomically and a race with offlining is harmless.
6651 */
6652
6653 if (!write)
6654 return proc_dointvec_minmax(table, write, buffer, length, ppos);
6655
6656 mutex_lock(&pcp_batch_high_lock);
6657 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
6658
6659 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
6660 if (ret < 0)
6661 goto out;
6662
6663 /* Sanity checking to avoid pcp imbalance */
6664 if (percpu_pagelist_high_fraction &&
6665 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
6666 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
6667 ret = -EINVAL;
6668 goto out;
6669 }
6670
6671 /* No change? */
6672 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
6673 goto out;
6674
6675 for_each_populated_zone(zone)
6676 zone_set_pageset_high_and_batch(zone, 0);
6677 out:
6678 mutex_unlock(&pcp_batch_high_lock);
6679 return ret;
6680 }
6681
6682 static const struct ctl_table page_alloc_sysctl_table[] = {
6683 {
6684 .procname = "min_free_kbytes",
6685 .data = &min_free_kbytes,
6686 .maxlen = sizeof(min_free_kbytes),
6687 .mode = 0644,
6688 .proc_handler = min_free_kbytes_sysctl_handler,
6689 .extra1 = SYSCTL_ZERO,
6690 },
6691 {
6692 .procname = "watermark_boost_factor",
6693 .data = &watermark_boost_factor,
6694 .maxlen = sizeof(watermark_boost_factor),
6695 .mode = 0644,
6696 .proc_handler = proc_dointvec_minmax,
6697 .extra1 = SYSCTL_ZERO,
6698 },
6699 {
6700 .procname = "watermark_scale_factor",
6701 .data = &watermark_scale_factor,
6702 .maxlen = sizeof(watermark_scale_factor),
6703 .mode = 0644,
6704 .proc_handler = watermark_scale_factor_sysctl_handler,
6705 .extra1 = SYSCTL_ONE,
6706 .extra2 = SYSCTL_THREE_THOUSAND,
6707 },
6708 {
6709 .procname = "defrag_mode",
6710 .data = &defrag_mode,
6711 .maxlen = sizeof(defrag_mode),
6712 .mode = 0644,
6713 .proc_handler = proc_dointvec_minmax,
6714 .extra1 = SYSCTL_ZERO,
6715 .extra2 = SYSCTL_ONE,
6716 },
6717 {
6718 .procname = "percpu_pagelist_high_fraction",
6719 .data = &percpu_pagelist_high_fraction,
6720 .maxlen = sizeof(percpu_pagelist_high_fraction),
6721 .mode = 0644,
6722 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler,
6723 .extra1 = SYSCTL_ZERO,
6724 },
6725 {
6726 .procname = "lowmem_reserve_ratio",
6727 .data = &sysctl_lowmem_reserve_ratio,
6728 .maxlen = sizeof(sysctl_lowmem_reserve_ratio),
6729 .mode = 0644,
6730 .proc_handler = lowmem_reserve_ratio_sysctl_handler,
6731 },
6732 #ifdef CONFIG_NUMA
6733 {
6734 .procname = "numa_zonelist_order",
6735 .data = &numa_zonelist_order,
6736 .maxlen = NUMA_ZONELIST_ORDER_LEN,
6737 .mode = 0644,
6738 .proc_handler = numa_zonelist_order_handler,
6739 },
6740 {
6741 .procname = "min_unmapped_ratio",
6742 .data = &sysctl_min_unmapped_ratio,
6743 .maxlen = sizeof(sysctl_min_unmapped_ratio),
6744 .mode = 0644,
6745 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler,
6746 .extra1 = SYSCTL_ZERO,
6747 .extra2 = SYSCTL_ONE_HUNDRED,
6748 },
6749 {
6750 .procname = "min_slab_ratio",
6751 .data = &sysctl_min_slab_ratio,
6752 .maxlen = sizeof(sysctl_min_slab_ratio),
6753 .mode = 0644,
6754 .proc_handler = sysctl_min_slab_ratio_sysctl_handler,
6755 .extra1 = SYSCTL_ZERO,
6756 .extra2 = SYSCTL_ONE_HUNDRED,
6757 },
6758 #endif
6759 };
6760
6761 void __init page_alloc_sysctl_init(void)
6762 {
6763 register_sysctl_init("vm", page_alloc_sysctl_table);
6764 }
6765
6766 #ifdef CONFIG_CONTIG_ALLOC
6767 /* Usage: See admin-guide/dynamic-debug-howto.rst */
6768 static void alloc_contig_dump_pages(struct list_head *page_list)
6769 {
6770 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
6771
6772 if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
6773 struct page *page;
6774
6775 dump_stack();
6776 list_for_each_entry(page, page_list, lru)
6777 dump_page(page, "migration failure");
6778 }
6779 }
6780
6781 /* [start, end) must belong to a single zone. */
6782 static int __alloc_contig_migrate_range(struct compact_control *cc,
6783 unsigned long start, unsigned long end)
6784 {
6785 /* This function is based on compact_zone() from compaction.c. */
6786 unsigned int nr_reclaimed;
6787 unsigned long pfn = start;
6788 unsigned int tries = 0;
6789 int ret = 0;
6790 struct migration_target_control mtc = {
6791 .nid = zone_to_nid(cc->zone),
6792 .gfp_mask = cc->gfp_mask,
6793 .reason = MR_CONTIG_RANGE,
6794 };
6795
6796 lru_cache_disable();
6797
6798 while (pfn < end || !list_empty(&cc->migratepages)) {
6799 if (fatal_signal_pending(current)) {
6800 ret = -EINTR;
6801 break;
6802 }
6803
6804 if (list_empty(&cc->migratepages)) {
6805 cc->nr_migratepages = 0;
6806 ret = isolate_migratepages_range(cc, pfn, end);
6807 if (ret && ret != -EAGAIN)
6808 break;
6809 pfn = cc->migrate_pfn;
6810 tries = 0;
6811 } else if (++tries == 5) {
6812 ret = -EBUSY;
6813 break;
6814 }
6815
6816 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6817 &cc->migratepages);
6818 cc->nr_migratepages -= nr_reclaimed;
6819
6820 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
6821 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
6822
6823 /*
6824 * On -ENOMEM, migrate_pages() bails out right away. There is no
6825 * point in retrying on this error, so do the same here.
6826 */
6827 if (ret == -ENOMEM)
6828 break;
6829 }
6830
6831 lru_cache_enable();
6832 if (ret < 0) {
6833 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
6834 alloc_contig_dump_pages(&cc->migratepages);
6835 putback_movable_pages(&cc->migratepages);
6836 }
6837
6838 return (ret < 0) ? ret : 0;
6839 }
6840
6841 static void split_free_frozen_pages(struct list_head *list, gfp_t gfp_mask)
6842 {
6843 int order;
6844
6845 for (order = 0; order < NR_PAGE_ORDERS; order++) {
6846 struct page *page, *next;
6847 int nr_pages = 1 << order;
6848
6849 list_for_each_entry_safe(page, next, &list[order], lru) {
6850 int i;
6851
6852 post_alloc_hook(page, order, gfp_mask);
6853 if (!order)
6854 continue;
6855
6856 __split_page(page, order);
6857
6858 /* Add all sub-pages to the order-0 list head, in sequence. */
6859 list_del(&page->lru);
6860 for (i = 0; i < nr_pages; i++)
6861 list_add_tail(&page[i].lru, &list[0]);
6862 }
6863 }
6864 }
6865
6866 static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
6867 {
6868 const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
6869 const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
6870 __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO |
6871 __GFP_SKIP_KASAN;
6872 const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
6873
6874 /*
6875 * We are given the range to allocate; node, mobility and placement
6876 * hints are irrelevant at this point. We'll simply ignore them.
6877 */
6878 gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE |
6879 __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE);
6880
6881 /*
6882 * We only support most reclaim flags (but not NOFAIL/NORETRY), and
6883 * selected action flags.
6884 */
6885 if (gfp_mask & ~(reclaim_mask | action_mask))
6886 return -EINVAL;
6887
6888 /*
6889 * Flags to control page compaction/migration/reclaim, to free up our
6890 * page range. Migratable pages are movable, __GFP_MOVABLE is implied
6891 * for them.
6892 *
6893 * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that
6894 * to not degrade callers.
6895 */
6896 *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) |
6897 __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
6898 return 0;
6899 }
6900
6901 static void __free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages)
6902 {
6903 for (; nr_pages--; pfn++)
6904 free_frozen_pages(pfn_to_page(pfn), 0);
6905 }
6906
6907 /**
6908 * alloc_contig_frozen_range() -- tries to allocate given range of frozen pages
6909 * @start: start PFN to allocate
6910 * @end: one-past-the-last PFN to allocate
6911 * @alloc_flags: allocation information
6912 * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some
6913 * action and reclaim modifiers are supported. Reclaim modifiers
6914 * control allocation behavior during compaction/migration/reclaim.
6915 *
6916 * The PFN range does not have to be pageblock aligned. The PFN range must
6917 * belong to a single zone.
6918 *
6919 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
6920 * pageblocks in the range. Once isolated, the pageblocks should not
6921 * be modified by others.
6922 *
6923 * All frozen pages whose PFN is in [start, end) are allocated for the
6924 * caller and can be freed with free_contig_frozen_range();
6925 * free_frozen_pages() can also be used to free compound frozen pages
6926 * directly.
6927 *
6928 * Return: zero on success or negative error code.
6929 */
6930 int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
6931 acr_flags_t alloc_flags, gfp_t gfp_mask)
6932 {
6933 const unsigned int order = ilog2(end - start);
6934 unsigned long outer_start, outer_end;
6935 int ret = 0;
6936
6937 struct compact_control cc = {
6938 .nr_migratepages = 0,
6939 .order = -1,
6940 .zone = page_zone(pfn_to_page(start)),
6941 .mode = MIGRATE_SYNC,
6942 .ignore_skip_hint = true,
6943 .no_set_skip_hint = true,
6944 .alloc_contig = true,
6945 };
6946 INIT_LIST_HEAD(&cc.migratepages);
6947 enum pb_isolate_mode mode = (alloc_flags & ACR_FLAGS_CMA) ?
6948 PB_ISOLATE_MODE_CMA_ALLOC :
6949 PB_ISOLATE_MODE_OTHER;
6950
6951 /*
6952 * In contrast to the buddy, we allow for orders here that exceed
6953 * MAX_PAGE_ORDER, so we must manually make sure that we are not
6954 * exceeding the maximum folio order.
6955 */
6956 if (WARN_ON_ONCE((gfp_mask & __GFP_COMP) && order > MAX_FOLIO_ORDER))
6957 return -EINVAL;
6958
6959 gfp_mask = current_gfp_context(gfp_mask);
6960 if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
6961 return -EINVAL;
6962
6963 /*
6964 * What we do here is mark all pageblocks in the range as
6965 * MIGRATE_ISOLATE. Because pageblocks and max order pages may
6966 * have different sizes, and due to the way the page allocator
6967 * works, start_isolate_page_range() has special handling for this.
6968 *
6969 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6970 * migrate the pages from the unaligned range (i.e. the pages that
6971 * we are interested in). This puts all the pages in the
6972 * range back into the page allocator as MIGRATE_ISOLATE.
6973 *
6974 * When this is done, we take the pages in the range from the page
6975 * allocator, removing them from the buddy system. This way the
6976 * page allocator will never consider using them.
6977 *
6978 * This lets us mark the pageblocks back as
6979 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6980 * aligned range but not in the unaligned, original range are
6981 * put back into the page allocator so that the buddy can use them.
6982 */
6983
6984 ret = start_isolate_page_range(start, end, mode);
6985 if (ret)
6986 goto done;
6987
6988 drain_all_pages(cc.zone);
6989
6990 /*
6991 * In case of -EBUSY, we'd like to know which page causes the problem.
6992 * So, just fall through. test_pages_isolated() has a tracepoint
6993 * which will report the busy page.
6994 *
6995 * It is possible that busy pages could become available before
6996 * the call to test_pages_isolated(), and the range will actually be
6997 * allocated. So, if we fall through, be sure to clear ret so that
6998 * -EBUSY is not accidentally used or returned to the caller.
6999 */
7000 ret = __alloc_contig_migrate_range(&cc, start, end);
7001 if (ret && ret != -EBUSY)
7002 goto done;
7003
7004 /*
7005 * When in-use hugetlb pages are migrated, they may simply be released
7006 * back into the free hugepage pool instead of being returned to the
7007 * buddy system. After the migration of in-use huge pages is completed,
7008 * we will invoke replace_free_hugepage_folios() to ensure that these
7009 * hugepages are properly released to the buddy system.
7010 */
7011 ret = replace_free_hugepage_folios(start, end);
7012 if (ret)
7013 goto done;
7014
7015 /*
7016 * Pages from [start, end) are within pageblock_nr_pages
7017 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
7018 * more, all pages in [start, end) are free in the page allocator.
7019 * What we are going to do is allocate all pages from
7020 * [start, end) (that is, remove them from the page allocator).
7021 *
7022 * The only problem is that pages at the beginning and at the
7023 * end of the range of interest may not be aligned with pages that
7024 * the page allocator holds, i.e. they can be part of higher-order
7025 * pages. Because of this, we reserve the bigger range and,
7026 * once this is done, free the pages we are not interested in.
7027 *
7028 * We don't have to hold zone->lock here because the pages are
7029 * isolated and thus won't get removed from the buddy system.
7030 */
7031 outer_start = find_large_buddy(start);
7032
7033 /* Make sure the range is really isolated. */
7034 if (test_pages_isolated(outer_start, end, mode)) {
7035 ret = -EBUSY;
7036 goto done;
7037 }
7038
7039 /* Grab isolated pages from freelists. */
7040 outer_end = isolate_freepages_range(&cc, outer_start, end);
7041 if (!outer_end) {
7042 ret = -EBUSY;
7043 goto done;
7044 }
7045
7046 if (!(gfp_mask & __GFP_COMP)) {
7047 split_free_frozen_pages(cc.freepages, gfp_mask);
7048
7049 /* Free head and tail (if any) */
7050 if (start != outer_start)
7051 __free_contig_frozen_range(outer_start, start - outer_start);
7052 if (end != outer_end)
7053 __free_contig_frozen_range(end, outer_end - end);
7054 } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) {
7055 struct page *head = pfn_to_page(start);
7056
7057 check_new_pages(head, order);
7058 prep_new_page(head, order, gfp_mask, 0);
7059 } else {
7060 ret = -EINVAL;
7061 WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",
7062 start, end, outer_start, outer_end);
7063 }
7064 done:
7065 undo_isolate_page_range(start, end);
7066 return ret;
7067 }
7068 EXPORT_SYMBOL(alloc_contig_frozen_range_noprof);
7069
7070 /**
7071 * alloc_contig_range() -- tries to allocate given range of pages
7072 * @start: start PFN to allocate
7073 * @end: one-past-the-last PFN to allocate
7074 * @alloc_flags: allocation information
7075 * @gfp_mask: GFP mask.
7076 *
7077 * This routine is a wrapper around alloc_contig_frozen_range(). It cannot
7078 * be used to allocate compound pages; the refcount of each allocated page
7079 * will be set to one.
7080 *
7081 * All pages whose PFN is in [start, end) are allocated for the caller,
7082 * and should be freed with free_contig_range() or by manually calling
7083 * __free_page() on each allocated page.
7084 *
7085 * Return: zero on success or negative error code.
7086 */
7087 int alloc_contig_range_noprof(unsigned long start, unsigned long end,
7088 acr_flags_t alloc_flags, gfp_t gfp_mask)
7089 {
7090 int ret;
7091
7092 if (WARN_ON(gfp_mask & __GFP_COMP))
7093 return -EINVAL;
7094
7095 ret = alloc_contig_frozen_range_noprof(start, end, alloc_flags, gfp_mask);
7096 if (!ret)
7097 set_pages_refcounted(pfn_to_page(start), end - start);
7098
7099 return ret;
7100 }
7101 EXPORT_SYMBOL(alloc_contig_range_noprof);
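/*
 * Illustrative sketch (not part of this file): how a caller that already
 * knows a candidate PFN range might use alloc_contig_range(), the usual
 * wrapper around the _noprof variant above. The function name and GFP
 * choice are hypothetical; error handling is minimal.
 */
static int example_claim_pfn_range(unsigned long start_pfn, unsigned long nr_pages)
{
	int ret;

	/* Try to empty and grab [start_pfn, start_pfn + nr_pages). */
	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				 ACR_FLAGS_NONE, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... use the pages, e.g. via pfn_to_page(start_pfn) ... */

	/* Give the non-compound pages back to the buddy allocator. */
	free_contig_range(start_pfn, nr_pages);
	return 0;
}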
7102
7103 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
7104 unsigned long nr_pages, bool skip_hugetlb,
7105 bool *skipped_hugetlb)
7106 {
7107 unsigned long end_pfn = start_pfn + nr_pages;
7108 struct page *page;
7109
7110 while (start_pfn < end_pfn) {
7111 unsigned long step = 1;
7112
7113 page = pfn_to_online_page(start_pfn);
7114 if (!page)
7115 return false;
7116
7117 if (page_zone(page) != z)
7118 return false;
7119
7120 if (page_is_unmovable(z, page, PB_ISOLATE_MODE_OTHER, &step))
7121 return false;
7122
7123 /*
7124 * Only consider ranges containing hugepages if those pages are
7125 * smaller than the requested contiguous region. e.g.:
7126 * Move 2MB pages to free up a 1GB range.
7127 * Don't move 1GB pages to free up a 2MB range.
7128 *
7129 * This makes contiguous allocation more reliable if multiple
7130 * hugepage sizes are used without causing needless movement.
7131 */
7132 if (PageHuge(page)) {
7133 unsigned int order;
7134
7135 if (skip_hugetlb) {
7136 *skipped_hugetlb = true;
7137 return false;
7138 }
7139
7140 page = compound_head(page);
7141 order = compound_order(page);
7142 if ((order >= MAX_FOLIO_ORDER) ||
7143 (nr_pages <= (1 << order)))
7144 return false;
7145 }
7146
7147 start_pfn += step;
7148 }
7149 return true;
7150 }
7151
7152 static bool zone_spans_last_pfn(const struct zone *zone,
7153 unsigned long start_pfn, unsigned long nr_pages)
7154 {
7155 unsigned long last_pfn = start_pfn + nr_pages - 1;
7156
7157 return zone_spans_pfn(zone, last_pfn);
7158 }
7159
7160 /**
7161 * alloc_contig_frozen_pages() -- tries to find and allocate contiguous range of frozen pages
7162 * @nr_pages: Number of contiguous pages to allocate
7163 * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some
7164 * action and reclaim modifiers are supported. Reclaim modifiers
7165 * control allocation behavior during compaction/migration/reclaim.
7166 * @nid: Target node
7167 * @nodemask: Mask for other possible nodes
7168 *
7169 * This routine is a wrapper around alloc_contig_frozen_range(). It scans over
7170 * zones on an applicable zonelist to find a contiguous pfn range which can then
7171 * be tried for allocation with alloc_contig_frozen_range(). This routine is
7172 * intended for allocation requests which cannot be fulfilled with the buddy
7173 * allocator.
7174 *
7175 * The allocated memory is always aligned to a page boundary. If nr_pages is a
7176 * power of two, then the allocated range is also guaranteed to be aligned to
7177 * nr_pages (e.g. a 1GB request would be aligned to 1GB).
7178 *
7179 * Allocated frozen pages need to be freed with free_contig_frozen_range(),
7180 * or by manually calling free_frozen_pages() on each allocated frozen
7181 * non-compound page; compound frozen pages can be freed with
7182 * free_frozen_pages() directly.
7183 *
7184 * Return: pointer to contiguous frozen pages on success, or NULL if not successful.
7185 */
7186 struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages,
7187 gfp_t gfp_mask, int nid, nodemask_t *nodemask)
7188 {
7189 unsigned long ret, pfn, flags;
7190 struct zonelist *zonelist;
7191 struct zone *zone;
7192 struct zoneref *z;
7193 bool skip_hugetlb = true;
7194 bool skipped_hugetlb = false;
7195
7196 retry:
7197 zonelist = node_zonelist(nid, gfp_mask);
7198 for_each_zone_zonelist_nodemask(zone, z, zonelist,
7199 gfp_zone(gfp_mask), nodemask) {
7200 spin_lock_irqsave(&zone->lock, flags);
7201
7202 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
7203 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
7204 if (pfn_range_valid_contig(zone, pfn, nr_pages,
7205 skip_hugetlb,
7206 &skipped_hugetlb)) {
7207 /*
7208 * We release the zone lock here because
7209 * alloc_contig_frozen_range() will also lock
7210 * the zone at some point. If there's an
7211 * allocation spinning on this lock, it may
7212 * win the race and cause allocation to fail.
7213 */
7214 spin_unlock_irqrestore(&zone->lock, flags);
7215 ret = alloc_contig_frozen_range_noprof(pfn,
7216 pfn + nr_pages,
7217 ACR_FLAGS_NONE,
7218 gfp_mask);
7219 if (!ret)
7220 return pfn_to_page(pfn);
7221 spin_lock_irqsave(&zone->lock, flags);
7222 }
7223 pfn += nr_pages;
7224 }
7225 spin_unlock_irqrestore(&zone->lock, flags);
7226 }
7227 /*
7228 * If we failed, retry the search, but treat regions with HugeTLB pages
7229 * as valid targets. This retains fast allocation on the first pass
7230 * without trying to migrate HugeTLB pages (which may fail). On the
7231 * second pass, we will try moving HugeTLB pages when those pages are
7232 * smaller than the requested contiguous region size.
7233 */
7234 if (skip_hugetlb && skipped_hugetlb) {
7235 skip_hugetlb = false;
7236 goto retry;
7237 }
7238 return NULL;
7239 }
7240 EXPORT_SYMBOL(alloc_contig_frozen_pages_noprof);
7241
7242 /**
7243 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
7244 * @nr_pages: Number of contiguous pages to allocate
7245 * @gfp_mask: GFP mask.
7246 * @nid: Target node
7247 * @nodemask: Mask for other possible nodes
7248 *
7249 * This routine is a wrapper around alloc_contig_frozen_pages(). It cannot
7250 * be used to allocate compound pages; the refcount of each allocated page
7251 * will be set to one.
7252 *
7253 * Allocated pages can be freed with free_contig_range() or by manually
7254 * calling __free_page() on each allocated page.
7255 *
7256 * Return: pointer to contiguous pages on success, or NULL if not successful.
7257 */
7258 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
7259 int nid, nodemask_t *nodemask)
7260 {
7261 struct page *page;
7262
7263 if (WARN_ON(gfp_mask & __GFP_COMP))
7264 return NULL;
7265
7266 page = alloc_contig_frozen_pages_noprof(nr_pages, gfp_mask, nid,
7267 nodemask);
7268 if (page)
7269 set_pages_refcounted(page, nr_pages);
7270
7271 return page;
7272 }
7273 EXPORT_SYMBOL(alloc_contig_pages_noprof);
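/*
 * Illustrative sketch (not part of this file): letting the allocator search
 * for a suitable range instead of specifying PFNs, via the usual
 * alloc_contig_pages() wrapper. The function name is hypothetical; the
 * nid/nodemask/GFP choices are just examples.
 */
static struct page *example_grab_contig(unsigned long nr_pages)
{
	struct page *page;

	page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_NOWARN,
				  numa_node_id(), NULL);
	if (!page)
		return NULL;

	/* ... use the nr_pages refcounted, non-compound pages ... */

	/* Later: free_contig_range(page_to_pfn(page), nr_pages); */
	return page;
}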
7274
7275 /**
7276 * free_contig_frozen_range() -- free the contiguous range of frozen pages
7277 * @pfn: start PFN to free
7278 * @nr_pages: Number of contiguous frozen pages to free
7279 *
7280 * This can be used to free the allocated compound/non-compound frozen pages.
7281 */
7282 void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages)
7283 {
7284 struct page *first_page = pfn_to_page(pfn);
7285 const unsigned int order = ilog2(nr_pages);
7286
7287 if (WARN_ON_ONCE(first_page != compound_head(first_page)))
7288 return;
7289
7290 if (PageHead(first_page)) {
7291 WARN_ON_ONCE(order != compound_order(first_page));
7292 free_frozen_pages(first_page, order);
7293 return;
7294 }
7295
7296 __free_contig_frozen_range(pfn, nr_pages);
7297 }
7298 EXPORT_SYMBOL(free_contig_frozen_range);
7299
7300 /**
7301 * free_contig_range() -- free the contiguous range of pages
7302 * @pfn: start PFN to free
7303 * @nr_pages: Number of contiguous pages to free
7304 *
7305 * This can only be used to free allocated non-compound pages.
7306 */
7307 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
7308 {
7309 if (WARN_ON_ONCE(PageHead(pfn_to_page(pfn))))
7310 return;
7311
7312 for (; nr_pages--; pfn++)
7313 __free_page(pfn_to_page(pfn));
7314 }
7315 EXPORT_SYMBOL(free_contig_range);
7316 #endif /* CONFIG_CONTIG_ALLOC */
7317
7318 /*
7319 * Effectively disable pcplists for the zone by setting the high limit to 0
7320 * and draining all cpus. A concurrent page freeing on another CPU that's about
7321 * to put the page on the pcplist will either finish before the drain (and the
7322 * page will be drained), or observe the new high limit and skip the pcplist.
7323 *
7324 * Must be paired with a call to zone_pcp_enable().
7325 */
7326 void zone_pcp_disable(struct zone *zone)
7327 {
7328 mutex_lock(&pcp_batch_high_lock);
7329 __zone_set_pageset_high_and_batch(zone, 0, 0, 1);
7330 __drain_all_pages(zone, true);
7331 }
7332
7333 void zone_pcp_enable(struct zone *zone)
7334 {
7335 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min,
7336 zone->pageset_high_max, zone->pageset_batch);
7337 mutex_unlock(&pcp_batch_high_lock);
7338 }
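/*
 * Illustrative sketch (not part of this file): the expected pairing of the
 * two helpers above around an operation that must not race with pcplists
 * (e.g. memory offlining or page isolation). The helper and callback names
 * are hypothetical.
 */
static inline void example_with_pcp_disabled(struct zone *zone,
					     void (*op)(struct zone *zone))
{
	zone_pcp_disable(zone);	/* pcp->high forced to 0, all cpus drained */
	op(zone);		/* pages now only move through the buddy lists */
	zone_pcp_enable(zone);	/* restore high/batch and drop the mutex */
}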
7339
7340 void zone_pcp_reset(struct zone *zone)
7341 {
7342 int cpu;
7343 struct per_cpu_zonestat *pzstats;
7344
7345 if (zone->per_cpu_pageset != &boot_pageset) {
7346 for_each_online_cpu(cpu) {
7347 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
7348 drain_zonestat(zone, pzstats);
7349 }
7350 free_percpu(zone->per_cpu_pageset);
7351 zone->per_cpu_pageset = &boot_pageset;
7352 if (zone->per_cpu_zonestats != &boot_zonestats) {
7353 free_percpu(zone->per_cpu_zonestats);
7354 zone->per_cpu_zonestats = &boot_zonestats;
7355 }
7356 }
7357 }
7358
7359 #ifdef CONFIG_MEMORY_HOTREMOVE
7360 /*
7361 * All pages in the range must be in a single zone, must not contain holes,
7362 * must span full sections, and must be isolated before calling this function.
7363 *
7364 * Returns the number of managed (non-PageOffline()) pages in the range: the
7365 * number of pages for which memory offlining code must adjust managed page
7366 * counters using adjust_managed_page_count().
7367 */
7368 unsigned long __offline_isolated_pages(unsigned long start_pfn,
7369 unsigned long end_pfn)
7370 {
7371 unsigned long already_offline = 0, flags;
7372 unsigned long pfn = start_pfn;
7373 struct page *page;
7374 struct zone *zone;
7375 unsigned int order;
7376
7377 offline_mem_sections(pfn, end_pfn);
7378 zone = page_zone(pfn_to_page(pfn));
7379 spin_lock_irqsave(&zone->lock, flags);
7380 while (pfn < end_pfn) {
7381 page = pfn_to_page(pfn);
7382 /*
7383 * The HWPoisoned page may not be in the buddy system, and its
7384 * page_count() is not 0.
7385 */
7386 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7387 pfn++;
7388 continue;
7389 }
7390 /*
7391 * At this point all remaining PageOffline() pages have a
7392 * reference count of 0 and can simply be skipped.
7393 */
7394 if (PageOffline(page)) {
7395 BUG_ON(page_count(page));
7396 BUG_ON(PageBuddy(page));
7397 already_offline++;
7398 pfn++;
7399 continue;
7400 }
7401
7402 BUG_ON(page_count(page));
7403 BUG_ON(!PageBuddy(page));
7404 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE);
7405 order = buddy_order(page);
7406 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE);
7407 pfn += (1 << order);
7408 }
7409 spin_unlock_irqrestore(&zone->lock, flags);
7410
7411 return end_pfn - start_pfn - already_offline;
7412 }
7413 #endif
7414
7415 /*
7416 * This function returns a stable result only if called under zone lock.
7417 */
7418 bool is_free_buddy_page(const struct page *page)
7419 {
7420 unsigned long pfn = page_to_pfn(page);
7421 unsigned int order;
7422
7423 for (order = 0; order < NR_PAGE_ORDERS; order++) {
7424 const struct page *head = page - (pfn & ((1 << order) - 1));
7425
7426 if (PageBuddy(head) &&
7427 buddy_order_unsafe(head) >= order)
7428 break;
7429 }
7430
7431 return order <= MAX_PAGE_ORDER;
7432 }
7433 EXPORT_SYMBOL(is_free_buddy_page);
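/*
 * Illustrative sketch (not part of this file): taking zone->lock as the
 * comment above requires, so the result cannot change under us. The
 * function name is hypothetical.
 */
static inline bool example_page_is_free_stable(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;
	bool free;

	spin_lock_irqsave(&zone->lock, flags);
	free = is_free_buddy_page(page);
	spin_unlock_irqrestore(&zone->lock, flags);

	return free;
}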
7434
7435 #ifdef CONFIG_MEMORY_FAILURE
7436 static inline void add_to_free_list(struct page *page, struct zone *zone,
7437 unsigned int order, int migratetype,
7438 bool tail)
7439 {
7440 __add_to_free_list(page, zone, order, migratetype, tail);
7441 account_freepages(zone, 1 << order, migratetype);
7442 }
7443
7444 /*
7445 * Break down a higher-order page into sub-pages, and keep our target out of
7446 * the buddy allocator.
7447 */
7448 static void break_down_buddy_pages(struct zone *zone, struct page *page,
7449 struct page *target, int low, int high,
7450 int migratetype)
7451 {
7452 unsigned long size = 1 << high;
7453 struct page *current_buddy;
7454
7455 while (high > low) {
7456 high--;
7457 size >>= 1;
7458
7459 if (target >= &page[size]) {
7460 current_buddy = page;
7461 page = page + size;
7462 } else {
7463 current_buddy = page + size;
7464 }
7465
7466 if (set_page_guard(zone, current_buddy, high))
7467 continue;
7468
7469 add_to_free_list(current_buddy, zone, high, migratetype, false);
7470 set_buddy_order(current_buddy, high);
7471 }
7472 }
7473
7474 /*
7475 * Take a page that will be marked as poisoned off the buddy allocator.
7476 */
7477 bool take_page_off_buddy(struct page *page)
7478 {
7479 struct zone *zone = page_zone(page);
7480 unsigned long pfn = page_to_pfn(page);
7481 unsigned long flags;
7482 unsigned int order;
7483 bool ret = false;
7484
7485 spin_lock_irqsave(&zone->lock, flags);
7486 for (order = 0; order < NR_PAGE_ORDERS; order++) {
7487 struct page *page_head = page - (pfn & ((1 << order) - 1));
7488 int page_order = buddy_order(page_head);
7489
7490 if (PageBuddy(page_head) && page_order >= order) {
7491 unsigned long pfn_head = page_to_pfn(page_head);
7492 int migratetype = get_pfnblock_migratetype(page_head,
7493 pfn_head);
7494
7495 del_page_from_free_list(page_head, zone, page_order,
7496 migratetype);
7497 break_down_buddy_pages(zone, page_head, page, 0,
7498 page_order, migratetype);
7499 SetPageHWPoisonTakenOff(page);
7500 ret = true;
7501 break;
7502 }
7503 if (page_count(page_head) > 0)
7504 break;
7505 }
7506 spin_unlock_irqrestore(&zone->lock, flags);
7507 return ret;
7508 }
7509
7510 /*
7511 * Cancel takeoff done by take_page_off_buddy().
7512 */
7513 bool put_page_back_buddy(struct page *page)
7514 {
7515 struct zone *zone = page_zone(page);
7516 unsigned long flags;
7517 bool ret = false;
7518
7519 spin_lock_irqsave(&zone->lock, flags);
7520 if (put_page_testzero(page)) {
7521 unsigned long pfn = page_to_pfn(page);
7522 int migratetype = get_pfnblock_migratetype(page, pfn);
7523
7524 ClearPageHWPoisonTakenOff(page);
7525 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
7526 if (TestClearPageHWPoison(page)) {
7527 ret = true;
7528 }
7529 }
7530 spin_unlock_irqrestore(&zone->lock, flags);
7531
7532 return ret;
7533 }
7534 #endif
7535
7536 bool has_managed_zone(enum zone_type zone)
7537 {
7538 struct pglist_data *pgdat;
7539
7540 for_each_online_pgdat(pgdat) {
7541 if (managed_zone(&pgdat->node_zones[zone]))
7542 return true;
7543 }
7544 return false;
7545 }
7546
7547 #ifdef CONFIG_UNACCEPTED_MEMORY
7548
7549 static bool lazy_accept = true;
7550
7551 static int __init accept_memory_parse(char *p)
7552 {
7553 if (!strcmp(p, "lazy")) {
7554 lazy_accept = true;
7555 return 0;
7556 } else if (!strcmp(p, "eager")) {
7557 lazy_accept = false;
7558 return 0;
7559 } else {
7560 return -EINVAL;
7561 }
7562 }
7563 early_param("accept_memory", accept_memory_parse);
7564
7565 static bool page_contains_unaccepted(struct page *page, unsigned int order)
7566 {
7567 phys_addr_t start = page_to_phys(page);
7568
7569 return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
7570 }
7571
7572 static void __accept_page(struct zone *zone, unsigned long *flags,
7573 struct page *page)
7574 {
7575 list_del(&page->lru);
7576 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
7577 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
7578 __ClearPageUnaccepted(page);
7579 spin_unlock_irqrestore(&zone->lock, *flags);
7580
7581 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
7582
7583 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
7584 }
7585
7586 void accept_page(struct page *page)
7587 {
7588 struct zone *zone = page_zone(page);
7589 unsigned long flags;
7590
7591 spin_lock_irqsave(&zone->lock, flags);
7592 if (!PageUnaccepted(page)) {
7593 spin_unlock_irqrestore(&zone->lock, flags);
7594 return;
7595 }
7596
7597 /* Unlocks zone->lock */
7598 __accept_page(zone, &flags, page);
7599 }
7600
7601 static bool try_to_accept_memory_one(struct zone *zone)
7602 {
7603 unsigned long flags;
7604 struct page *page;
7605
7606 spin_lock_irqsave(&zone->lock, flags);
7607 page = list_first_entry_or_null(&zone->unaccepted_pages,
7608 struct page, lru);
7609 if (!page) {
7610 spin_unlock_irqrestore(&zone->lock, flags);
7611 return false;
7612 }
7613
7614 /* Unlocks zone->lock */
7615 __accept_page(zone, &flags, page);
7616
7617 return true;
7618 }
7619
7620 static bool cond_accept_memory(struct zone *zone, unsigned int order,
7621 int alloc_flags)
7622 {
7623 long to_accept, wmark;
7624 bool ret = false;
7625
7626 if (list_empty(&zone->unaccepted_pages))
7627 return false;
7628
7629 /* Bailout, since try_to_accept_memory_one() needs to take a lock */
7630 if (alloc_flags & ALLOC_TRYLOCK)
7631 return false;
7632
7633 wmark = promo_wmark_pages(zone);
7634
7635 /*
7636 * Watermarks have not been initialized yet.
7637 *
7638 * Accept one MAX_ORDER page to ensure progress.
7639 */
7640 if (!wmark)
7641 return try_to_accept_memory_one(zone);
7642
7643 /* How much to accept to get to promo watermark? */
7644 to_accept = wmark -
7645 (zone_page_state(zone, NR_FREE_PAGES) -
7646 __zone_watermark_unusable_free(zone, order, 0) -
7647 zone_page_state(zone, NR_UNACCEPTED));
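	/*
	 * Illustrative numbers (hypothetical): with wmark = 10000,
	 * NR_FREE_PAGES = 50000, 1000 unusable free pages and
	 * NR_UNACCEPTED = 45000, to_accept = 10000 - (50000 - 1000 - 45000)
	 * = 6000, so the loop below accepts MAX_ORDER chunks until roughly
	 * 6000 more pages are usable.
	 */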
7648
7649 while (to_accept > 0) {
7650 if (!try_to_accept_memory_one(zone))
7651 break;
7652 ret = true;
7653 to_accept -= MAX_ORDER_NR_PAGES;
7654 }
7655
7656 return ret;
7657 }
7658
7659 static bool __free_unaccepted(struct page *page)
7660 {
7661 struct zone *zone = page_zone(page);
7662 unsigned long flags;
7663
7664 if (!lazy_accept)
7665 return false;
7666
7667 spin_lock_irqsave(&zone->lock, flags);
7668 list_add_tail(&page->lru, &zone->unaccepted_pages);
7669 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
7670 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
7671 __SetPageUnaccepted(page);
7672 spin_unlock_irqrestore(&zone->lock, flags);
7673
7674 return true;
7675 }
7676
7677 #else
7678
7679 static bool page_contains_unaccepted(struct page *page, unsigned int order)
7680 {
7681 return false;
7682 }
7683
7684 static bool cond_accept_memory(struct zone *zone, unsigned int order,
7685 int alloc_flags)
7686 {
7687 return false;
7688 }
7689
7690 static bool __free_unaccepted(struct page *page)
7691 {
7692 BUILD_BUG();
7693 return false;
7694 }
7695
7696 #endif /* CONFIG_UNACCEPTED_MEMORY */
7697
7698 struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
7699 {
7700 /*
7701 * Do not specify __GFP_DIRECT_RECLAIM, since direct reclaim is not allowed.
7702 * Do not specify __GFP_KSWAPD_RECLAIM either, since waking up kswapd
7703 * is not safe in an arbitrary context.
7704 *
7705 * These two are the conditions for gfpflags_allow_spinning() being true.
7706 *
7707 * Specify __GFP_NOWARN since failing alloc_pages_nolock() is not a reason
7708 * to warn. Also, warning would trigger printk(), which is unsafe from
7709 * various contexts. We cannot use printk_deferred_enter() to mitigate,
7710 * since the running context is unknown.
7711 *
7712 * Specify __GFP_ZERO to make sure that call to kmsan_alloc_page() below
7713 * is safe in any context. Also zeroing the page is mandatory for
7714 * BPF use cases.
7715 *
7716 * Though __GFP_NOMEMALLOC is not checked in the code path below,
7717 * specify it here to highlight that alloc_pages_nolock()
7718 * doesn't want to deplete reserves.
7719 */
7720 gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC | __GFP_COMP
7721 | gfp_flags;
7722 unsigned int alloc_flags = ALLOC_TRYLOCK;
7723 struct alloc_context ac = { };
7724 struct page *page;
7725
7726 VM_WARN_ON_ONCE(gfp_flags & ~__GFP_ACCOUNT);
7727 /*
7728 * In PREEMPT_RT spin_trylock() will call raw_spin_lock() which is
7729 * unsafe in NMI. If spin_trylock() is called from hard IRQ the current
7730 * task may be waiting for one rt_spin_lock, but rt_spin_trylock() will
7731 * mark the task as the owner of another rt_spin_lock which will
7732 * confuse PI logic, so return immediately if called from hard IRQ or
7733 * NMI.
7734 *
7735 * Note, irqs_disabled() case is ok. This function can be called
7736 * from raw_spin_lock_irqsave region.
7737 */
7738 if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
7739 return NULL;
7740 if (!pcp_allowed_order(order))
7741 return NULL;
7742
7743 /* Bailout, since _deferred_grow_zone() needs to take a lock */
7744 if (deferred_pages_enabled())
7745 return NULL;
7746
7747 if (nid == NUMA_NO_NODE)
7748 nid = numa_node_id();
7749
7750 prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac,
7751 &alloc_gfp, &alloc_flags);
7752
7753 /*
7754 * Best-effort allocation from the percpu free list.
7755 * If it's empty, attempt to spin_trylock zone->lock.
7756 */
7757 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
7758
7759 /* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */
7760
7761 if (memcg_kmem_online() && page && (gfp_flags & __GFP_ACCOUNT) &&
7762 unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
7763 __free_frozen_pages(page, order, FPI_TRYLOCK);
7764 page = NULL;
7765 }
7766 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
7767 kmsan_alloc_page(page, order, alloc_gfp);
7768 return page;
7769 }
7770 /**
7771 * alloc_pages_nolock - opportunistic reentrant allocation from any context
7772 * @gfp_flags: GFP flags. Only __GFP_ACCOUNT allowed.
7773 * @nid: node to allocate from
7774 * @order: allocation order size
7775 *
7776 * Allocates pages of a given order from the given node. This is safe to
7777 * call from any context (from atomic, NMI, and also reentrant
7778 * allocator -> tracepoint -> alloc_pages_nolock_noprof).
7779 * Allocation is best effort and expected to fail easily, so nobody should
7780 * rely on it succeeding. Failures are not reported via warn_alloc().
7781 * See the always-fail conditions below.
7782 *
7783 * Return: allocated page or NULL on failure. NULL does not mean EBUSY or EAGAIN.
7784 * It means ENOMEM. There is no reason to call it again and expect !NULL.
7785 */
7786 struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
7787 {
7788 struct page *page;
7789
7790 page = alloc_frozen_pages_nolock_noprof(gfp_flags, nid, order);
7791 if (page)
7792 set_page_refcounted(page);
7793 return page;
7794 }
7795 EXPORT_SYMBOL_GPL(alloc_pages_nolock_noprof);
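/*
 * Illustrative sketch (not part of this file): an opportunistic order-0
 * allocation from a context where sleeping and even spinning on zone locks
 * may not be possible, using the usual alloc_pages_nolock() wrapper around
 * the _noprof variant above. The function name is hypothetical; the page is
 * a normal refcounted page that the caller is assumed to release later,
 * e.g. with __free_pages() from a regular context.
 */
static struct page *example_opportunistic_page(void)
{
	struct page *page;

	page = alloc_pages_nolock(0, NUMA_NO_NODE, 0);
	if (!page)
		return NULL;	/* treat as ENOMEM, do not retry in a loop */

	/* ... the page is already zeroed (__GFP_ZERO is implied) ... */
	return page;
}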
7796