1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/mm/page_alloc.c
4 *
5 * Manages the free list; the system allocates free pages here.
6 * Note that kmalloc() lives in slab.c
7 *
8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
9 * Swap reorganised 29.12.95, Stephen Tweedie
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16 */
17
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/highmem.h>
21 #include <linux/interrupt.h>
22 #include <linux/jiffies.h>
23 #include <linux/compiler.h>
24 #include <linux/kernel.h>
25 #include <linux/kasan.h>
26 #include <linux/kmsan.h>
27 #include <linux/module.h>
28 #include <linux/suspend.h>
29 #include <linux/ratelimit.h>
30 #include <linux/oom.h>
31 #include <linux/topology.h>
32 #include <linux/sysctl.h>
33 #include <linux/cpu.h>
34 #include <linux/cpuset.h>
35 #include <linux/pagevec.h>
36 #include <linux/memory_hotplug.h>
37 #include <linux/nodemask.h>
38 #include <linux/vmstat.h>
39 #include <linux/fault-inject.h>
40 #include <linux/compaction.h>
41 #include <trace/events/kmem.h>
42 #include <trace/events/oom.h>
43 #include <linux/prefetch.h>
44 #include <linux/mm_inline.h>
45 #include <linux/mmu_notifier.h>
46 #include <linux/migrate.h>
47 #include <linux/sched/mm.h>
48 #include <linux/page_owner.h>
49 #include <linux/page_table_check.h>
50 #include <linux/memcontrol.h>
51 #include <linux/ftrace.h>
52 #include <linux/lockdep.h>
53 #include <linux/psi.h>
54 #include <linux/khugepaged.h>
55 #include <linux/delayacct.h>
56 #include <linux/cacheinfo.h>
57 #include <linux/pgalloc_tag.h>
58 #include <asm/div64.h>
59 #include "internal.h"
60 #include "shuffle.h"
61 #include "page_reporting.h"
62
63 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
64 typedef int __bitwise fpi_t;
65
66 /* No special request */
67 #define FPI_NONE ((__force fpi_t)0)
68
69 /*
70 * Skip free page reporting notification for the (possibly merged) page.
71 * This does not hinder free page reporting from grabbing the page,
72 * reporting it and marking it "reported" - it only skips notifying
73 * the free page reporting infrastructure about a newly freed page. For
74 * example, used when temporarily pulling a page from a freelist and
75 * putting it back unmodified.
76 */
77 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0))
78
79 /*
80 * Place the (possibly merged) page to the tail of the freelist. Will ignore
81 * page shuffling (relevant code - e.g., memory onlining - is expected to
82 * shuffle the whole zone).
83 *
84 * Note: No code should rely on this flag for correctness - it's purely
85 * to allow for optimizations when handing back either fresh pages
86 * (memory onlining) or untouched pages (page isolation, free page
87 * reporting).
88 */
89 #define FPI_TO_TAIL ((__force fpi_t)BIT(1))
90
91 /* Free the page without taking locks. Rely on trylock only. */
92 #define FPI_TRYLOCK ((__force fpi_t)BIT(2))
93
94 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
95 static DEFINE_MUTEX(pcp_batch_high_lock);
96 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
97
98 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
99 /*
100 * On SMP, spin_trylock is sufficient protection.
101 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
102 * Pass flags to a no-op inline function to typecheck and silence the unused
103 * variable warning.
104 */
105 static inline void __pcp_trylock_noop(unsigned long *flags) { }
106 #define pcp_trylock_prepare(flags) __pcp_trylock_noop(&(flags))
107 #define pcp_trylock_finish(flags) __pcp_trylock_noop(&(flags))
108 #else
109
110 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
111 #define pcp_trylock_prepare(flags) local_irq_save(flags)
112 #define pcp_trylock_finish(flags) local_irq_restore(flags)
113 #endif
114
115 /*
116 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
117 * a migration causing the wrong PCP to be locked and remote memory being
118 * potentially allocated, pin the task to the CPU for the lookup+lock.
119 * preempt_disable is used on !RT because it is faster than migrate_disable.
120 * migrate_disable is used on RT because otherwise RT spinlock usage is
121 * interfered with and a high priority task cannot preempt the allocator.
122 */
123 #ifndef CONFIG_PREEMPT_RT
124 #define pcpu_task_pin() preempt_disable()
125 #define pcpu_task_unpin() preempt_enable()
126 #else
127 #define pcpu_task_pin() migrate_disable()
128 #define pcpu_task_unpin() migrate_enable()
129 #endif
130
131 /*
132 * Generic helper to look up and lock a per-cpu variable with an embedded spinlock.
133 * The return value should be used with the equivalent unlock helper.
134 */
135 #define pcpu_spin_trylock(type, member, ptr) \
136 ({ \
137 type *_ret; \
138 pcpu_task_pin(); \
139 _ret = this_cpu_ptr(ptr); \
140 if (!spin_trylock(&_ret->member)) { \
141 pcpu_task_unpin(); \
142 _ret = NULL; \
143 } \
144 _ret; \
145 })
146
147 #define pcpu_spin_unlock(member, ptr) \
148 ({ \
149 spin_unlock(&ptr->member); \
150 pcpu_task_unpin(); \
151 })
152
153 /* struct per_cpu_pages specific helpers. */
154 #define pcp_spin_trylock(ptr, UP_flags) \
155 ({ \
156 struct per_cpu_pages *__ret; \
157 pcp_trylock_prepare(UP_flags); \
158 __ret = pcpu_spin_trylock(struct per_cpu_pages, lock, ptr); \
159 if (!__ret) \
160 pcp_trylock_finish(UP_flags); \
161 __ret; \
162 })
163
164 #define pcp_spin_unlock(ptr, UP_flags) \
165 ({ \
166 pcpu_spin_unlock(lock, ptr); \
167 pcp_trylock_finish(UP_flags); \
168 })
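/*
 * Illustrative usage sketch (not a verbatim caller from this file; "zone" is
 * a placeholder for the caller's zone pointer): the trylock and unlock
 * helpers above pair up as follows.
 *
 *	unsigned long UP_flags;
 *	struct per_cpu_pages *pcp;
 *
 *	pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
 *	if (!pcp)
 *		return;		<- contended: fall back to the buddy path
 *	... operate on pcp->lists and pcp->count under pcp->lock ...
 *	pcp_spin_unlock(pcp, UP_flags);
 *
 * A failed trylock has already undone pcp_trylock_prepare() and the task
 * pinning, so the caller only unlocks on success.
 */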
169
170 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
171 DEFINE_PER_CPU(int, numa_node);
172 EXPORT_PER_CPU_SYMBOL(numa_node);
173 #endif
174
175 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
176
177 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
178 /*
179 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
180 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
181 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
182 * defined in <linux/topology.h>.
183 */
184 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
185 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
186 #endif
187
188 static DEFINE_MUTEX(pcpu_drain_mutex);
189
190 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
191 volatile unsigned long latent_entropy __latent_entropy;
192 EXPORT_SYMBOL(latent_entropy);
193 #endif
194
195 /*
196 * Array of node states.
197 */
198 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
199 [N_POSSIBLE] = NODE_MASK_ALL,
200 [N_ONLINE] = { { [0] = 1UL } },
201 #ifndef CONFIG_NUMA
202 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
203 #ifdef CONFIG_HIGHMEM
204 [N_HIGH_MEMORY] = { { [0] = 1UL } },
205 #endif
206 [N_MEMORY] = { { [0] = 1UL } },
207 [N_CPU] = { { [0] = 1UL } },
208 #endif /* NUMA */
209 };
210 EXPORT_SYMBOL(node_states);
211
212 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
213
214 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
215 unsigned int pageblock_order __read_mostly;
216 #endif
217
218 static void __free_pages_ok(struct page *page, unsigned int order,
219 fpi_t fpi_flags);
220
221 /*
222 * results with 256, 32 in the lowmem_reserve sysctl:
223 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
224 * 1G machine -> (16M dma, 784M normal, 224M high)
225 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
226 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
227 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
228 *
229 * TBD: should special case ZONE_DMA32 machines here - in those we normally
230 * don't need any ZONE_NORMAL reservation
231 */
232 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
233 #ifdef CONFIG_ZONE_DMA
234 [ZONE_DMA] = 256,
235 #endif
236 #ifdef CONFIG_ZONE_DMA32
237 [ZONE_DMA32] = 256,
238 #endif
239 [ZONE_NORMAL] = 32,
240 #ifdef CONFIG_HIGHMEM
241 [ZONE_HIGHMEM] = 0,
242 #endif
243 [ZONE_MOVABLE] = 0,
244 };
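/*
 * Illustrative reading of the table above (numbers reuse the example in the
 * preceding comment): the protection a lower zone keeps against allocations
 * that could have used a higher zone is roughly the managed pages of the
 * higher zones divided by the lower zone's ratio. With 784M of Normal and
 * 224M of HighMem, the DMA ratio of 256 means a HIGHMEM allocation leaves
 * about (784M + 224M) / 256 ~= 4M of ZONE_DMA untouched, while the Normal
 * ratio of 32 leaves about 224M / 32 = 7M of ZONE_NORMAL untouched.
 */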
245
246 char * const zone_names[MAX_NR_ZONES] = {
247 #ifdef CONFIG_ZONE_DMA
248 "DMA",
249 #endif
250 #ifdef CONFIG_ZONE_DMA32
251 "DMA32",
252 #endif
253 "Normal",
254 #ifdef CONFIG_HIGHMEM
255 "HighMem",
256 #endif
257 "Movable",
258 #ifdef CONFIG_ZONE_DEVICE
259 "Device",
260 #endif
261 };
262
263 const char * const migratetype_names[MIGRATE_TYPES] = {
264 "Unmovable",
265 "Movable",
266 "Reclaimable",
267 "HighAtomic",
268 #ifdef CONFIG_CMA
269 "CMA",
270 #endif
271 #ifdef CONFIG_MEMORY_ISOLATION
272 "Isolate",
273 #endif
274 };
275
276 int min_free_kbytes = 1024;
277 int user_min_free_kbytes = -1;
278 static int watermark_boost_factor __read_mostly = 15000;
279 static int watermark_scale_factor = 10;
280 int defrag_mode;
281
282 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
283 int movable_zone;
284 EXPORT_SYMBOL(movable_zone);
285
286 #if MAX_NUMNODES > 1
287 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
288 unsigned int nr_online_nodes __read_mostly = 1;
289 EXPORT_SYMBOL(nr_node_ids);
290 EXPORT_SYMBOL(nr_online_nodes);
291 #endif
292
293 static bool page_contains_unaccepted(struct page *page, unsigned int order);
294 static bool cond_accept_memory(struct zone *zone, unsigned int order,
295 int alloc_flags);
296 static bool __free_unaccepted(struct page *page);
297
298 int page_group_by_mobility_disabled __read_mostly;
299
300 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
301 /*
302 * During boot we initialize deferred pages on-demand, as needed, but once
303 * page_alloc_init_late() has finished, the deferred pages are all initialized,
304 * and we can permanently disable that path.
305 */
306 DEFINE_STATIC_KEY_TRUE(deferred_pages);
307
308 static inline bool deferred_pages_enabled(void)
309 {
310 return static_branch_unlikely(&deferred_pages);
311 }
312
313 /*
314 * deferred_grow_zone() is __init, but it is called from
315 * get_page_from_freelist() during early boot until deferred_pages permanently
316 * disables this call. This is why we have the __ref wrapper: it avoids the
317 * section mismatch warning while still allowing the __init function body to be unloaded.
318 */
319 static bool __ref
320 _deferred_grow_zone(struct zone *zone, unsigned int order)
321 {
322 return deferred_grow_zone(zone, order);
323 }
324 #else
325 static inline bool deferred_pages_enabled(void)
326 {
327 return false;
328 }
329
330 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
331 {
332 return false;
333 }
334 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
335
336 /* Return a pointer to the bitmap storing bits affecting a block of pages */
337 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
338 unsigned long pfn)
339 {
340 #ifdef CONFIG_SPARSEMEM
341 return section_to_usemap(__pfn_to_section(pfn));
342 #else
343 return page_zone(page)->pageblock_flags;
344 #endif /* CONFIG_SPARSEMEM */
345 }
346
347 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
348 {
349 #ifdef CONFIG_SPARSEMEM
350 pfn &= (PAGES_PER_SECTION-1);
351 #else
352 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
353 #endif /* CONFIG_SPARSEMEM */
354 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
355 }
356
357 static __always_inline bool is_standalone_pb_bit(enum pageblock_bits pb_bit)
358 {
359 return pb_bit >= PB_compact_skip && pb_bit < __NR_PAGEBLOCK_BITS;
360 }
361
362 static __always_inline void
363 get_pfnblock_bitmap_bitidx(const struct page *page, unsigned long pfn,
364 unsigned long **bitmap_word, unsigned long *bitidx)
365 {
366 unsigned long *bitmap;
367 unsigned long word_bitidx;
368
369 #ifdef CONFIG_MEMORY_ISOLATION
370 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8);
371 #else
372 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
373 #endif
374 BUILD_BUG_ON(__MIGRATE_TYPE_END > MIGRATETYPE_MASK);
375 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
376
377 bitmap = get_pageblock_bitmap(page, pfn);
378 *bitidx = pfn_to_bitidx(page, pfn);
379 word_bitidx = *bitidx / BITS_PER_LONG;
380 *bitidx &= (BITS_PER_LONG - 1);
381 *bitmap_word = &bitmap[word_bitidx];
382 }
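/*
 * Worked example (illustrative; assumes pageblock_order == 9, 4 pageblock
 * bits per block, i.e. !CONFIG_MEMORY_ISOLATION, and a pfn inside the first
 * SPARSEMEM section so the masking in pfn_to_bitidx() is a no-op): for
 * pfn 0x2345 the pageblock index is 0x2345 >> 9 = 17, so pfn_to_bitidx()
 * returns 17 * 4 = 68. The helper above then sets *bitmap_word to
 * &bitmap[68 / 64] = &bitmap[1] and *bitidx to 68 % 64 = 4, the bit offset
 * of this block's flags within that word.
 */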
383
384
385 /**
386 * __get_pfnblock_flags_mask - Return the requested group of flags for
387 * a pageblock_nr_pages block of pages
388 * @page: The page within the block of interest
389 * @pfn: The target page frame number
390 * @mask: mask of bits that the caller is interested in
391 *
392 * Return: pageblock_bits flags
393 */
394 static unsigned long __get_pfnblock_flags_mask(const struct page *page,
395 unsigned long pfn,
396 unsigned long mask)
397 {
398 unsigned long *bitmap_word;
399 unsigned long bitidx;
400 unsigned long word;
401
402 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
403 /*
404 * This races, without locks, with set_pfnblock_migratetype(). Ensure
405 * a consistent read of the memory array, so that results, even though
406 * racy, are not corrupted.
407 */
408 word = READ_ONCE(*bitmap_word);
409 return (word >> bitidx) & mask;
410 }
411
412 /**
413 * get_pfnblock_bit - Check if a standalone bit of a pageblock is set
414 * @page: The page within the block of interest
415 * @pfn: The target page frame number
416 * @pb_bit: pageblock bit to check
417 *
418 * Return: true if the bit is set, otherwise false
419 */
420 bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
421 enum pageblock_bits pb_bit)
422 {
423 unsigned long *bitmap_word;
424 unsigned long bitidx;
425
426 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
427 return false;
428
429 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
430
431 return test_bit(bitidx + pb_bit, bitmap_word);
432 }
433
434 /**
435 * get_pfnblock_migratetype - Return the migratetype of a pageblock
436 * @page: The page within the block of interest
437 * @pfn: The target page frame number
438 *
439 * Return: The migratetype of the pageblock
440 *
441 * Use get_pfnblock_migratetype() if caller already has both @page and @pfn
442 * to save a call to page_to_pfn().
443 */
444 __always_inline enum migratetype
445 get_pfnblock_migratetype(const struct page *page, unsigned long pfn)
446 {
447 unsigned long mask = MIGRATETYPE_AND_ISO_MASK;
448 unsigned long flags;
449
450 flags = __get_pfnblock_flags_mask(page, pfn, mask);
451
452 #ifdef CONFIG_MEMORY_ISOLATION
453 if (flags & BIT(PB_migrate_isolate))
454 return MIGRATE_ISOLATE;
455 #endif
456 return flags & MIGRATETYPE_MASK;
457 }
458
459 /**
460 * __set_pfnblock_flags_mask - Set the requested group of flags for
461 * a pageblock_nr_pages block of pages
462 * @page: The page within the block of interest
463 * @pfn: The target page frame number
464 * @flags: The flags to set
465 * @mask: mask of bits that the caller is interested in
466 */
467 static void __set_pfnblock_flags_mask(struct page *page, unsigned long pfn,
468 unsigned long flags, unsigned long mask)
469 {
470 unsigned long *bitmap_word;
471 unsigned long bitidx;
472 unsigned long word;
473
474 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
475
476 mask <<= bitidx;
477 flags <<= bitidx;
478
479 word = READ_ONCE(*bitmap_word);
480 do {
481 } while (!try_cmpxchg(bitmap_word, &word, (word & ~mask) | flags));
482 }
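/*
 * Illustrative note: the try_cmpxchg() loop above is a lockless
 * read-modify-write of the whole bitmap word. For example, setting the
 * migratetype of the block at bit offset 4 to MIGRATE_MOVABLE (1) under
 * MIGRATETYPE_AND_ISO_MASK shifts both the mask and the value left by 4 and
 * replaces only those bits; if another CPU changes a neighbouring block's
 * bits in the same word, the cmpxchg fails and the update is retried, so
 * neither block's flags are lost.
 */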
483
484 /**
485 * set_pfnblock_bit - Set a standalone bit of a pageblock
486 * @page: The page within the block of interest
487 * @pfn: The target page frame number
488 * @pb_bit: pageblock bit to set
489 */
490 void set_pfnblock_bit(const struct page *page, unsigned long pfn,
491 enum pageblock_bits pb_bit)
492 {
493 unsigned long *bitmap_word;
494 unsigned long bitidx;
495
496 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
497 return;
498
499 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
500
501 set_bit(bitidx + pb_bit, bitmap_word);
502 }
503
504 /**
505 * clear_pfnblock_bit - Clear a standalone bit of a pageblock
506 * @page: The page within the block of interest
507 * @pfn: The target page frame number
508 * @pb_bit: pageblock bit to clear
509 */
510 void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
511 enum pageblock_bits pb_bit)
512 {
513 unsigned long *bitmap_word;
514 unsigned long bitidx;
515
516 if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
517 return;
518
519 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
520
521 clear_bit(bitidx + pb_bit, bitmap_word);
522 }
523
524 /**
525 * set_pageblock_migratetype - Set the migratetype of a pageblock
526 * @page: The page within the block of interest
527 * @migratetype: migratetype to set
528 */
529 static void set_pageblock_migratetype(struct page *page,
530 enum migratetype migratetype)
531 {
532 if (unlikely(page_group_by_mobility_disabled &&
533 migratetype < MIGRATE_PCPTYPES))
534 migratetype = MIGRATE_UNMOVABLE;
535
536 #ifdef CONFIG_MEMORY_ISOLATION
537 if (migratetype == MIGRATE_ISOLATE) {
538 VM_WARN_ONCE(1,
539 "Use set_pageblock_isolate() for pageblock isolation");
540 return;
541 }
542 VM_WARN_ONCE(get_pageblock_isolate(page),
543 "Use clear_pageblock_isolate() to unisolate pageblock");
544 /* MIGRATETYPE_AND_ISO_MASK clears PB_migrate_isolate if it is set */
545 #endif
546 __set_pfnblock_flags_mask(page, page_to_pfn(page),
547 (unsigned long)migratetype,
548 MIGRATETYPE_AND_ISO_MASK);
549 }
550
551 void __meminit init_pageblock_migratetype(struct page *page,
552 enum migratetype migratetype,
553 bool isolate)
554 {
555 unsigned long flags;
556
557 if (unlikely(page_group_by_mobility_disabled &&
558 migratetype < MIGRATE_PCPTYPES))
559 migratetype = MIGRATE_UNMOVABLE;
560
561 flags = migratetype;
562
563 #ifdef CONFIG_MEMORY_ISOLATION
564 if (migratetype == MIGRATE_ISOLATE) {
565 VM_WARN_ONCE(
566 1,
567 "Set isolate=true to isolate pageblock with a migratetype");
568 return;
569 }
570 if (isolate)
571 flags |= BIT(PB_migrate_isolate);
572 #endif
573 __set_pfnblock_flags_mask(page, page_to_pfn(page), flags,
574 MIGRATETYPE_AND_ISO_MASK);
575 }
576
577 #ifdef CONFIG_DEBUG_VM
578 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
579 {
580 int ret;
581 unsigned seq;
582 unsigned long pfn = page_to_pfn(page);
583 unsigned long sp, start_pfn;
584
585 do {
586 seq = zone_span_seqbegin(zone);
587 start_pfn = zone->zone_start_pfn;
588 sp = zone->spanned_pages;
589 ret = !zone_spans_pfn(zone, pfn);
590 } while (zone_span_seqretry(zone, seq));
591
592 if (ret)
593 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
594 pfn, zone_to_nid(zone), zone->name,
595 start_pfn, start_pfn + sp);
596
597 return ret;
598 }
599
600 /*
601 * Temporary debugging check for pages not lying within a given zone.
602 */
603 static bool __maybe_unused bad_range(struct zone *zone, struct page *page)
604 {
605 if (page_outside_zone_boundaries(zone, page))
606 return true;
607 if (zone != page_zone(page))
608 return true;
609
610 return false;
611 }
612 #else
613 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page)
614 {
615 return false;
616 }
617 #endif
618
619 static void bad_page(struct page *page, const char *reason)
620 {
621 static unsigned long resume;
622 static unsigned long nr_shown;
623 static unsigned long nr_unshown;
624
625 /*
626 * Allow a burst of 60 reports, then keep quiet for that minute;
627 * or allow a steady drip of one report per second.
628 */
629 if (nr_shown == 60) {
630 if (time_before(jiffies, resume)) {
631 nr_unshown++;
632 goto out;
633 }
634 if (nr_unshown) {
635 pr_alert(
636 "BUG: Bad page state: %lu messages suppressed\n",
637 nr_unshown);
638 nr_unshown = 0;
639 }
640 nr_shown = 0;
641 }
642 if (nr_shown++ == 0)
643 resume = jiffies + 60 * HZ;
644
645 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
646 current->comm, page_to_pfn(page));
647 dump_page(page, reason);
648
649 print_modules();
650 dump_stack();
651 out:
652 /* Leave bad fields for debug, except PageBuddy could make trouble */
653 if (PageBuddy(page))
654 __ClearPageBuddy(page);
655 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
656 }
657
658 static inline unsigned int order_to_pindex(int migratetype, int order)
659 {
660
661 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
662 bool movable;
663 if (order > PAGE_ALLOC_COSTLY_ORDER) {
664 VM_BUG_ON(order != HPAGE_PMD_ORDER);
665
666 movable = migratetype == MIGRATE_MOVABLE;
667
668 return NR_LOWORDER_PCP_LISTS + movable;
669 }
670 #else
671 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
672 #endif
673
674 return (MIGRATE_PCPTYPES * order) + migratetype;
675 }
676
677 static inline int pindex_to_order(unsigned int pindex)
678 {
679 int order = pindex / MIGRATE_PCPTYPES;
680
681 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
682 if (pindex >= NR_LOWORDER_PCP_LISTS)
683 order = HPAGE_PMD_ORDER;
684 #else
685 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
686 #endif
687
688 return order;
689 }
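/*
 * Example mapping (illustrative; assumes MIGRATE_PCPTYPES == 3, i.e.
 * unmovable, movable and reclaimable, with MIGRATE_MOVABLE == 1): an
 * order-2 movable page maps to pindex 3 * 2 + 1 = 7, and pindex_to_order(7)
 * recovers order 7 / 3 = 2. With THP enabled, the lists at and above
 * NR_LOWORDER_PCP_LISTS hold HPAGE_PMD_ORDER pages, one list for
 * non-movable and one for movable allocations.
 */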
690
691 static inline bool pcp_allowed_order(unsigned int order)
692 {
693 if (order <= PAGE_ALLOC_COSTLY_ORDER)
694 return true;
695 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
696 if (order == HPAGE_PMD_ORDER)
697 return true;
698 #endif
699 return false;
700 }
701
702 /*
703 * Higher-order pages are called "compound pages". They are structured thusly:
704 *
705 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
706 *
707 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
708 * in bit 0 of page->compound_head. The rest of the bits are a pointer to the head page.
709 *
710 * The first tail page's ->compound_order holds the order of allocation.
711 * This usage means that zero-order pages may not be compound.
712 */
713
714 void prep_compound_page(struct page *page, unsigned int order)
715 {
716 int i;
717 int nr_pages = 1 << order;
718
719 __SetPageHead(page);
720 for (i = 1; i < nr_pages; i++)
721 prep_compound_tail(page, i);
722
723 prep_compound_head(page, order);
724 }
725
726 static inline void set_buddy_order(struct page *page, unsigned int order)
727 {
728 set_page_private(page, order);
729 __SetPageBuddy(page);
730 }
731
732 #ifdef CONFIG_COMPACTION
733 static inline struct capture_control *task_capc(struct zone *zone)
734 {
735 struct capture_control *capc = current->capture_control;
736
737 return unlikely(capc) &&
738 !(current->flags & PF_KTHREAD) &&
739 !capc->page &&
740 capc->cc->zone == zone ? capc : NULL;
741 }
742
743 static inline bool
744 compaction_capture(struct capture_control *capc, struct page *page,
745 int order, int migratetype)
746 {
747 if (!capc || order != capc->cc->order)
748 return false;
749
750 /* Do not accidentally pollute CMA or isolated regions */
751 if (is_migrate_cma(migratetype) ||
752 is_migrate_isolate(migratetype))
753 return false;
754
755 /*
756 * Do not let lower order allocations pollute a movable pageblock
757 * unless compaction is also requesting movable pages.
758 * This might let an unmovable request use a reclaimable pageblock
759 * and vice-versa but no more than normal fallback logic which can
760 * have trouble finding a high-order free page.
761 */
762 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE &&
763 capc->cc->migratetype != MIGRATE_MOVABLE)
764 return false;
765
766 if (migratetype != capc->cc->migratetype)
767 trace_mm_page_alloc_extfrag(page, capc->cc->order, order,
768 capc->cc->migratetype, migratetype);
769
770 capc->page = page;
771 return true;
772 }
773
774 #else
775 static inline struct capture_control *task_capc(struct zone *zone)
776 {
777 return NULL;
778 }
779
780 static inline bool
781 compaction_capture(struct capture_control *capc, struct page *page,
782 int order, int migratetype)
783 {
784 return false;
785 }
786 #endif /* CONFIG_COMPACTION */
787
788 static inline void account_freepages(struct zone *zone, int nr_pages,
789 int migratetype)
790 {
791 lockdep_assert_held(&zone->lock);
792
793 if (is_migrate_isolate(migratetype))
794 return;
795
796 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
797
798 if (is_migrate_cma(migratetype))
799 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
800 else if (migratetype == MIGRATE_HIGHATOMIC)
801 WRITE_ONCE(zone->nr_free_highatomic,
802 zone->nr_free_highatomic + nr_pages);
803 }
804
805 /* Used for pages not on another list */
806 static inline void __add_to_free_list(struct page *page, struct zone *zone,
807 unsigned int order, int migratetype,
808 bool tail)
809 {
810 struct free_area *area = &zone->free_area[order];
811 int nr_pages = 1 << order;
812
813 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
814 "page type is %d, passed migratetype is %d (nr=%d)\n",
815 get_pageblock_migratetype(page), migratetype, nr_pages);
816
817 if (tail)
818 list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
819 else
820 list_add(&page->buddy_list, &area->free_list[migratetype]);
821 area->nr_free++;
822
823 if (order >= pageblock_order && !is_migrate_isolate(migratetype))
824 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
825 }
826
827 /*
828 * Used for pages which are on another list. Move the pages to the tail
829 * of the list - so the moved pages won't immediately be considered for
830 * allocation again (e.g., optimization for memory onlining).
831 */
832 static inline void move_to_free_list(struct page *page, struct zone *zone,
833 unsigned int order, int old_mt, int new_mt)
834 {
835 struct free_area *area = &zone->free_area[order];
836 int nr_pages = 1 << order;
837
838 /* Free page moving can fail, so it happens before the type update */
839 VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt,
840 "page type is %d, passed migratetype is %d (nr=%d)\n",
841 get_pageblock_migratetype(page), old_mt, nr_pages);
842
843 list_move_tail(&page->buddy_list, &area->free_list[new_mt]);
844
845 account_freepages(zone, -nr_pages, old_mt);
846 account_freepages(zone, nr_pages, new_mt);
847
848 if (order >= pageblock_order &&
849 is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) {
850 if (!is_migrate_isolate(old_mt))
851 nr_pages = -nr_pages;
852 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
853 }
854 }
855
856 static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
857 unsigned int order, int migratetype)
858 {
859 int nr_pages = 1 << order;
860
861 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
862 "page type is %d, passed migratetype is %d (nr=%d)\n",
863 get_pageblock_migratetype(page), migratetype, nr_pages);
864
865 /* clear reported state and update reported page count */
866 if (page_reported(page))
867 __ClearPageReported(page);
868
869 list_del(&page->buddy_list);
870 __ClearPageBuddy(page);
871 set_page_private(page, 0);
872 zone->free_area[order].nr_free--;
873
874 if (order >= pageblock_order && !is_migrate_isolate(migratetype))
875 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages);
876 }
877
878 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
879 unsigned int order, int migratetype)
880 {
881 __del_page_from_free_list(page, zone, order, migratetype);
882 account_freepages(zone, -(1 << order), migratetype);
883 }
884
885 static inline struct page *get_page_from_free_area(struct free_area *area,
886 int migratetype)
887 {
888 return list_first_entry_or_null(&area->free_list[migratetype],
889 struct page, buddy_list);
890 }
891
892 /*
893 * If this is less than the 2nd largest possible page, check if the buddy
894 * of the next-higher order is free. If it is, it's possible
895 * that pages are being freed that will coalesce soon. In case
896 * that is happening, add the free page to the tail of the list
897 * so it's less likely to be used soon and more likely to be merged
898 * as a 2-level higher order page.
899 */
900 static inline bool
901 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
902 struct page *page, unsigned int order)
903 {
904 unsigned long higher_page_pfn;
905 struct page *higher_page;
906
907 if (order >= MAX_PAGE_ORDER - 1)
908 return false;
909
910 higher_page_pfn = buddy_pfn & pfn;
911 higher_page = page + (higher_page_pfn - pfn);
912
913 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
914 NULL) != NULL;
915 }
916
917 /*
918 * Freeing function for a buddy system allocator.
919 *
920 * The concept of a buddy system is to maintain a direct-mapped table
921 * (containing bit values) for memory blocks of various "orders".
922 * The bottom level table contains the map for the smallest allocatable
923 * units of memory (here, pages), and each level above it describes
924 * pairs of units from the levels below, hence, "buddies".
925 * At a high level, all that happens here is marking the table entry
926 * at the bottom level available, and propagating the changes upward
927 * as necessary, plus some accounting needed to play nicely with other
928 * parts of the VM system.
929 * At each level, we keep a list of pages, which are heads of contiguous
930 * free pages of length (1 << order) and marked with PageBuddy.
931 * A page's order is recorded in the page_private(page) field.
932 * So when we are allocating or freeing one, we can derive the state of the
933 * other. That is, if we allocate a small block, and both were
934 * free, the remainder of the region must be split into blocks.
935 * If a block is freed, and its buddy is also free, then this
936 * triggers coalescing into a block of larger size.
937 *
938 * -- nyc
939 */
940
941 static inline void __free_one_page(struct page *page,
942 unsigned long pfn,
943 struct zone *zone, unsigned int order,
944 int migratetype, fpi_t fpi_flags)
945 {
946 struct capture_control *capc = task_capc(zone);
947 unsigned long buddy_pfn = 0;
948 unsigned long combined_pfn;
949 struct page *buddy;
950 bool to_tail;
951
952 VM_BUG_ON(!zone_is_initialized(zone));
953 VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page);
954
955 VM_BUG_ON(migratetype == -1);
956 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
957 VM_BUG_ON_PAGE(bad_range(zone, page), page);
958
959 account_freepages(zone, 1 << order, migratetype);
960
961 while (order < MAX_PAGE_ORDER) {
962 int buddy_mt = migratetype;
963
964 if (compaction_capture(capc, page, order, migratetype)) {
965 account_freepages(zone, -(1 << order), migratetype);
966 return;
967 }
968
969 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
970 if (!buddy)
971 goto done_merging;
972
973 if (unlikely(order >= pageblock_order)) {
974 /*
975 * We want to prevent merging between free pages on a pageblock
976 * without fallbacks and a normal pageblock. Without this,
977 * pageblock isolation could cause incorrect freepage or CMA
978 * accounting or HIGHATOMIC accounting.
979 */
980 buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);
981
982 if (migratetype != buddy_mt &&
983 (!migratetype_is_mergeable(migratetype) ||
984 !migratetype_is_mergeable(buddy_mt)))
985 goto done_merging;
986 }
987
988 /*
989 * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard page;
990 * merge with it and move up one order.
991 */
992 if (page_is_guard(buddy))
993 clear_page_guard(zone, buddy, order);
994 else
995 __del_page_from_free_list(buddy, zone, order, buddy_mt);
996
997 if (unlikely(buddy_mt != migratetype)) {
998 /*
999 * Match buddy type. This ensures that an
1000 * expand() down the line puts the sub-blocks
1001 * on the right freelists.
1002 */
1003 set_pageblock_migratetype(buddy, migratetype);
1004 }
1005
1006 combined_pfn = buddy_pfn & pfn;
1007 page = page + (combined_pfn - pfn);
1008 pfn = combined_pfn;
1009 order++;
1010 }
1011
1012 done_merging:
1013 set_buddy_order(page, order);
1014
1015 if (fpi_flags & FPI_TO_TAIL)
1016 to_tail = true;
1017 else if (is_shuffle_order(order))
1018 to_tail = shuffle_pick_tail();
1019 else
1020 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1021
1022 __add_to_free_list(page, zone, order, migratetype, to_tail);
1023
1024 /* Notify page reporting subsystem of freed page */
1025 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1026 page_reporting_notify_free(order);
1027 }
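/*
 * Worked example of the merge loop above (illustrative; the buddy of a
 * block at @pfn and @order sits at pfn ^ (1 << order)): freeing an order-0
 * page at pfn 6 first looks for its buddy at 6 ^ 1 = 7. If that page is
 * free, it is pulled off the order-0 list and the pair becomes an order-1
 * block at combined_pfn = 7 & 6 = 6. The order-1 buddy is then 6 ^ 2 = 4;
 * if it is free as well the result is an order-2 block at 4 & 6 = 4, and so
 * on until a buddy is not free or MAX_PAGE_ORDER is reached.
 */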
1028
1029 /*
1030 * A bad page could be due to a number of fields. Instead of multiple branches,
1031 * try and check multiple fields with one check. The caller must do a detailed
1032 * check if necessary.
1033 */
1034 static inline bool page_expected_state(struct page *page,
1035 unsigned long check_flags)
1036 {
1037 if (unlikely(atomic_read(&page->_mapcount) != -1))
1038 return false;
1039
1040 if (unlikely((unsigned long)page->mapping |
1041 page_ref_count(page) |
1042 #ifdef CONFIG_MEMCG
1043 page->memcg_data |
1044 #endif
1045 page_pool_page_is_pp(page) |
1046 (page->flags.f & check_flags)))
1047 return false;
1048
1049 return true;
1050 }
1051
1052 static const char *page_bad_reason(struct page *page, unsigned long flags)
1053 {
1054 const char *bad_reason = NULL;
1055
1056 if (unlikely(atomic_read(&page->_mapcount) != -1))
1057 bad_reason = "nonzero mapcount";
1058 if (unlikely(page->mapping != NULL))
1059 bad_reason = "non-NULL mapping";
1060 if (unlikely(page_ref_count(page) != 0))
1061 bad_reason = "nonzero _refcount";
1062 if (unlikely(page->flags.f & flags)) {
1063 if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1064 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1065 else
1066 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1067 }
1068 #ifdef CONFIG_MEMCG
1069 if (unlikely(page->memcg_data))
1070 bad_reason = "page still charged to cgroup";
1071 #endif
1072 if (unlikely(page_pool_page_is_pp(page)))
1073 bad_reason = "page_pool leak";
1074 return bad_reason;
1075 }
1076
1077 static inline bool free_page_is_bad(struct page *page)
1078 {
1079 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1080 return false;
1081
1082 /* Something has gone sideways, find it */
1083 bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1084 return true;
1085 }
1086
1087 static inline bool is_check_pages_enabled(void)
1088 {
1089 return static_branch_unlikely(&check_pages_enabled);
1090 }
1091
1092 static int free_tail_page_prepare(struct page *head_page, struct page *page)
1093 {
1094 struct folio *folio = (struct folio *)head_page;
1095 int ret = 1;
1096
1097 /*
1098 * We rely on page->lru.next never having bit 0 set, unless the page
1099 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1100 */
1101 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1102
1103 if (!is_check_pages_enabled()) {
1104 ret = 0;
1105 goto out;
1106 }
1107 switch (page - head_page) {
1108 case 1:
1109 /* the first tail page: these may be in place of ->mapping */
1110 if (unlikely(folio_large_mapcount(folio))) {
1111 bad_page(page, "nonzero large_mapcount");
1112 goto out;
1113 }
1114 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) &&
1115 unlikely(atomic_read(&folio->_nr_pages_mapped))) {
1116 bad_page(page, "nonzero nr_pages_mapped");
1117 goto out;
1118 }
1119 if (IS_ENABLED(CONFIG_MM_ID)) {
1120 if (unlikely(folio->_mm_id_mapcount[0] != -1)) {
1121 bad_page(page, "nonzero mm mapcount 0");
1122 goto out;
1123 }
1124 if (unlikely(folio->_mm_id_mapcount[1] != -1)) {
1125 bad_page(page, "nonzero mm mapcount 1");
1126 goto out;
1127 }
1128 }
1129 if (IS_ENABLED(CONFIG_64BIT)) {
1130 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
1131 bad_page(page, "nonzero entire_mapcount");
1132 goto out;
1133 }
1134 if (unlikely(atomic_read(&folio->_pincount))) {
1135 bad_page(page, "nonzero pincount");
1136 goto out;
1137 }
1138 }
1139 break;
1140 case 2:
1141 /* the second tail page: deferred_list overlaps ->mapping */
1142 if (unlikely(!list_empty(&folio->_deferred_list))) {
1143 bad_page(page, "on deferred list");
1144 goto out;
1145 }
1146 if (!IS_ENABLED(CONFIG_64BIT)) {
1147 if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
1148 bad_page(page, "nonzero entire_mapcount");
1149 goto out;
1150 }
1151 if (unlikely(atomic_read(&folio->_pincount))) {
1152 bad_page(page, "nonzero pincount");
1153 goto out;
1154 }
1155 }
1156 break;
1157 case 3:
1158 /* the third tail page: hugetlb specifics overlap ->mappings */
1159 if (IS_ENABLED(CONFIG_HUGETLB_PAGE))
1160 break;
1161 fallthrough;
1162 default:
1163 if (page->mapping != TAIL_MAPPING) {
1164 bad_page(page, "corrupted mapping in tail page");
1165 goto out;
1166 }
1167 break;
1168 }
1169 if (unlikely(!PageTail(page))) {
1170 bad_page(page, "PageTail not set");
1171 goto out;
1172 }
1173 if (unlikely(compound_head(page) != head_page)) {
1174 bad_page(page, "compound_head not consistent");
1175 goto out;
1176 }
1177 ret = 0;
1178 out:
1179 page->mapping = NULL;
1180 clear_compound_head(page);
1181 return ret;
1182 }
1183
1184 /*
1185 * Skip KASAN memory poisoning when either:
1186 *
1187 * 1. For generic KASAN: deferred memory initialization has not yet completed.
1188 * Tag-based KASAN modes skip pages freed via deferred memory initialization
1189 * using page tags instead (see below).
1190 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
1191 * that error detection is disabled for accesses via the page address.
1192 *
1193 * Pages will have match-all tags in the following circumstances:
1194 *
1195 * 1. Pages are being initialized for the first time, including during deferred
1196 * memory init; see the call to page_kasan_tag_reset in __init_single_page.
1197 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
1198 * exception of pages unpoisoned by kasan_unpoison_vmalloc.
1199 * 3. The allocation was excluded from being checked due to sampling,
1200 * see the call to kasan_unpoison_pages.
1201 *
1202 * Poisoning pages during deferred memory init will greatly lengthen the
1203 * process and cause problems on large memory systems, as the deferred page
1204 * initialization is done with interrupts disabled.
1205 *
1206 * Assuming that there will be no reference to those newly initialized
1207 * pages before they are ever allocated, this should have no effect on
1208 * KASAN memory tracking as the poison will be properly inserted at page
1209 * allocation time. The only corner case is when pages are allocated by
1210 * on-demand allocation and then freed again before the deferred pages
1211 * initialization is done, but this is not likely to happen.
1212 */
1213 static inline bool should_skip_kasan_poison(struct page *page)
1214 {
1215 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1216 return deferred_pages_enabled();
1217
1218 return page_kasan_tag(page) == KASAN_TAG_KERNEL;
1219 }
1220
1221 static void kernel_init_pages(struct page *page, int numpages)
1222 {
1223 int i;
1224
1225 /* s390's use of memset() could override KASAN redzones. */
1226 kasan_disable_current();
1227 for (i = 0; i < numpages; i++)
1228 clear_highpage_kasan_tagged(page + i);
1229 kasan_enable_current();
1230 }
1231
1232 #ifdef CONFIG_MEM_ALLOC_PROFILING
1233
1234 /* Should be called only if mem_alloc_profiling_enabled() */
1235 void __clear_page_tag_ref(struct page *page)
1236 {
1237 union pgtag_ref_handle handle;
1238 union codetag_ref ref;
1239
1240 if (get_page_tag_ref(page, &ref, &handle)) {
1241 set_codetag_empty(&ref);
1242 update_page_tag_ref(handle, &ref);
1243 put_page_tag_ref(handle);
1244 }
1245 }
1246
1247 /* Should be called only if mem_alloc_profiling_enabled() */
1248 static noinline
1249 void __pgalloc_tag_add(struct page *page, struct task_struct *task,
1250 unsigned int nr)
1251 {
1252 union pgtag_ref_handle handle;
1253 union codetag_ref ref;
1254
1255 if (get_page_tag_ref(page, &ref, &handle)) {
1256 alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
1257 update_page_tag_ref(handle, &ref);
1258 put_page_tag_ref(handle);
1259 }
1260 }
1261
1262 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
1263 unsigned int nr)
1264 {
1265 if (mem_alloc_profiling_enabled())
1266 __pgalloc_tag_add(page, task, nr);
1267 }
1268
1269 /* Should be called only if mem_alloc_profiling_enabled() */
1270 static noinline
1271 void __pgalloc_tag_sub(struct page *page, unsigned int nr)
1272 {
1273 union pgtag_ref_handle handle;
1274 union codetag_ref ref;
1275
1276 if (get_page_tag_ref(page, &ref, &handle)) {
1277 alloc_tag_sub(&ref, PAGE_SIZE * nr);
1278 update_page_tag_ref(handle, &ref);
1279 put_page_tag_ref(handle);
1280 }
1281 }
1282
1283 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
1284 {
1285 if (mem_alloc_profiling_enabled())
1286 __pgalloc_tag_sub(page, nr);
1287 }
1288
1289 /* When tag is not NULL, assuming mem_alloc_profiling_enabled */
1290 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
1291 {
1292 if (tag)
1293 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
1294 }
1295
1296 #else /* CONFIG_MEM_ALLOC_PROFILING */
1297
1298 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
1299 unsigned int nr) {}
1300 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
1301 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
1302
1303 #endif /* CONFIG_MEM_ALLOC_PROFILING */
1304
1305 __always_inline bool free_pages_prepare(struct page *page,
1306 unsigned int order)
1307 {
1308 int bad = 0;
1309 bool skip_kasan_poison = should_skip_kasan_poison(page);
1310 bool init = want_init_on_free();
1311 bool compound = PageCompound(page);
1312 struct folio *folio = page_folio(page);
1313
1314 VM_BUG_ON_PAGE(PageTail(page), page);
1315
1316 trace_mm_page_free(page, order);
1317 kmsan_free_page(page, order);
1318
1319 if (memcg_kmem_online() && PageMemcgKmem(page))
1320 __memcg_kmem_uncharge_page(page, order);
1321
1322 /*
1323 * In rare cases, when truncation or holepunching raced with
1324 * munlock after VM_LOCKED was cleared, Mlocked may still be
1325 * found set here. This does not indicate a problem, unless
1326 * "unevictable_pgs_cleared" appears worryingly large.
1327 */
1328 if (unlikely(folio_test_mlocked(folio))) {
1329 long nr_pages = folio_nr_pages(folio);
1330
1331 __folio_clear_mlocked(folio);
1332 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
1333 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
1334 }
1335
1336 if (unlikely(PageHWPoison(page)) && !order) {
1337 /* Do not let hwpoison pages hit pcplists/buddy */
1338 reset_page_owner(page, order);
1339 page_table_check_free(page, order);
1340 pgalloc_tag_sub(page, 1 << order);
1341
1342 /*
1343 * The page is isolated and accounted for.
1344 * Mark the codetag as empty to avoid accounting error
1345 * when the page is freed by unpoison_memory().
1346 */
1347 clear_page_tag_ref(page);
1348 return false;
1349 }
1350
1351 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1352
1353 /*
1354 * Check tail pages before head page information is cleared to
1355 * avoid checking PageCompound for order-0 pages.
1356 */
1357 if (unlikely(order)) {
1358 int i;
1359
1360 if (compound) {
1361 page[1].flags.f &= ~PAGE_FLAGS_SECOND;
1362 #ifdef NR_PAGES_IN_LARGE_FOLIO
1363 folio->_nr_pages = 0;
1364 #endif
1365 }
1366 for (i = 1; i < (1 << order); i++) {
1367 if (compound)
1368 bad += free_tail_page_prepare(page, page + i);
1369 if (is_check_pages_enabled()) {
1370 if (free_page_is_bad(page + i)) {
1371 bad++;
1372 continue;
1373 }
1374 }
1375 (page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
1376 }
1377 }
1378 if (folio_test_anon(folio)) {
1379 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
1380 folio->mapping = NULL;
1381 }
1382 if (unlikely(page_has_type(page)))
1383 /* Reset the page_type (which overlays _mapcount) */
1384 page->page_type = UINT_MAX;
1385
1386 if (is_check_pages_enabled()) {
1387 if (free_page_is_bad(page))
1388 bad++;
1389 if (bad)
1390 return false;
1391 }
1392
1393 page_cpupid_reset_last(page);
1394 page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
1395 reset_page_owner(page, order);
1396 page_table_check_free(page, order);
1397 pgalloc_tag_sub(page, 1 << order);
1398
1399 if (!PageHighMem(page)) {
1400 debug_check_no_locks_freed(page_address(page),
1401 PAGE_SIZE << order);
1402 debug_check_no_obj_freed(page_address(page),
1403 PAGE_SIZE << order);
1404 }
1405
1406 kernel_poison_pages(page, 1 << order);
1407
1408 /*
1409 * As memory initialization might be integrated into KASAN,
1410 * KASAN poisoning and memory initialization code must be
1411 * kept together to avoid discrepancies in behavior.
1412 *
1413 * With hardware tag-based KASAN, memory tags must be set before the
1414 * page becomes unavailable via debug_pagealloc or arch_free_page.
1415 */
1416 if (!skip_kasan_poison) {
1417 kasan_poison_pages(page, order, init);
1418
1419 /* Memory is already initialized if KASAN did it internally. */
1420 if (kasan_has_integrated_init())
1421 init = false;
1422 }
1423 if (init)
1424 kernel_init_pages(page, 1 << order);
1425
1426 /*
1427 * arch_free_page() can make the page's contents inaccessible. s390
1428 * does this. So nothing which can access the page's contents should
1429 * happen after this.
1430 */
1431 arch_free_page(page, order);
1432
1433 debug_pagealloc_unmap_pages(page, 1 << order);
1434
1435 return true;
1436 }
1437
1438 /*
1439 * Frees a number of pages from the PCP lists
1440 * Assumes all pages on list are in same zone.
1441 * count is the number of pages to free.
1442 */
1443 static void free_pcppages_bulk(struct zone *zone, int count,
1444 struct per_cpu_pages *pcp,
1445 int pindex)
1446 {
1447 unsigned long flags;
1448 unsigned int order;
1449 struct page *page;
1450
1451 /*
1452 * Ensure a proper count is passed, which otherwise would get stuck in the
1453 * while (list_empty(list)) loop below.
1454 */
1455 count = min(pcp->count, count);
1456
1457 /* Ensure requested pindex is drained first. */
1458 pindex = pindex - 1;
1459
1460 spin_lock_irqsave(&zone->lock, flags);
1461
1462 while (count > 0) {
1463 struct list_head *list;
1464 int nr_pages;
1465
1466 /* Remove pages from lists in a round-robin fashion. */
1467 do {
1468 if (++pindex > NR_PCP_LISTS - 1)
1469 pindex = 0;
1470 list = &pcp->lists[pindex];
1471 } while (list_empty(list));
1472
1473 order = pindex_to_order(pindex);
1474 nr_pages = 1 << order;
1475 do {
1476 unsigned long pfn;
1477 int mt;
1478
1479 page = list_last_entry(list, struct page, pcp_list);
1480 pfn = page_to_pfn(page);
1481 mt = get_pfnblock_migratetype(page, pfn);
1482
1483 /* must delete to avoid corrupting pcp list */
1484 list_del(&page->pcp_list);
1485 count -= nr_pages;
1486 pcp->count -= nr_pages;
1487
1488 __free_one_page(page, pfn, zone, order, mt, FPI_NONE);
1489 trace_mm_page_pcpu_drain(page, order, mt);
1490 } while (count > 0 && !list_empty(list));
1491 }
1492
1493 spin_unlock_irqrestore(&zone->lock, flags);
1494 }
1495
1496 /* Split a multi-block free page into its individual pageblocks. */
1497 static void split_large_buddy(struct zone *zone, struct page *page,
1498 unsigned long pfn, int order, fpi_t fpi)
1499 {
1500 unsigned long end = pfn + (1 << order);
1501
1502 VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order));
1503 /* Caller removed page from freelist, buddy info cleared! */
1504 VM_WARN_ON_ONCE(PageBuddy(page));
1505
1506 if (order > pageblock_order)
1507 order = pageblock_order;
1508
1509 do {
1510 int mt = get_pfnblock_migratetype(page, pfn);
1511
1512 __free_one_page(page, pfn, zone, order, mt, fpi);
1513 pfn += 1 << order;
1514 if (pfn == end)
1515 break;
1516 page = pfn_to_page(pfn);
1517 } while (1);
1518 }
1519
1520 static void add_page_to_zone_llist(struct zone *zone, struct page *page,
1521 unsigned int order)
1522 {
1523 /* Remember the order */
1524 page->private = order;
1525 /* Add the page to the free list */
1526 llist_add(&page->pcp_llist, &zone->trylock_free_pages);
1527 }
1528
1529 static void free_one_page(struct zone *zone, struct page *page,
1530 unsigned long pfn, unsigned int order,
1531 fpi_t fpi_flags)
1532 {
1533 struct llist_head *llhead;
1534 unsigned long flags;
1535
1536 if (unlikely(fpi_flags & FPI_TRYLOCK)) {
1537 if (!spin_trylock_irqsave(&zone->lock, flags)) {
1538 add_page_to_zone_llist(zone, page, order);
1539 return;
1540 }
1541 } else {
1542 spin_lock_irqsave(&zone->lock, flags);
1543 }
1544
1545 /* The lock succeeded. Process deferred pages. */
1546 llhead = &zone->trylock_free_pages;
1547 if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK))) {
1548 struct llist_node *llnode;
1549 struct page *p, *tmp;
1550
1551 llnode = llist_del_all(llhead);
1552 llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) {
1553 unsigned int p_order = p->private;
1554
1555 split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags);
1556 __count_vm_events(PGFREE, 1 << p_order);
1557 }
1558 }
1559 split_large_buddy(zone, page, pfn, order, fpi_flags);
1560 spin_unlock_irqrestore(&zone->lock, flags);
1561
1562 __count_vm_events(PGFREE, 1 << order);
1563 }
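/*
 * Illustrative flow of the FPI_TRYLOCK handling above: a free that may not
 * spin on zone->lock parks its page on zone->trylock_free_pages when the
 * trylock fails. The next free that does take the lock (without FPI_TRYLOCK)
 * drains that llist first, splitting each deferred page into pageblock-sized
 * buddies, before freeing the page it was actually called for.
 */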
1564
1565 static void __free_pages_ok(struct page *page, unsigned int order,
1566 fpi_t fpi_flags)
1567 {
1568 unsigned long pfn = page_to_pfn(page);
1569 struct zone *zone = page_zone(page);
1570
1571 if (free_pages_prepare(page, order))
1572 free_one_page(zone, page, pfn, order, fpi_flags);
1573 }
1574
1575 void __meminit __free_pages_core(struct page *page, unsigned int order,
1576 enum meminit_context context)
1577 {
1578 unsigned int nr_pages = 1 << order;
1579 struct page *p = page;
1580 unsigned int loop;
1581
1582 /*
1583 * When initializing the memmap, __init_single_page() sets the refcount
1584 * of all pages to 1 ("allocated"/"not free"). We have to set the
1585 * refcount of all involved pages to 0.
1586 *
1587 * Note that hotplugged memory pages are initialized to PageOffline().
1588 * Pages freed from memblock might be marked as reserved.
1589 */
1590 if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
1591 unlikely(context == MEMINIT_HOTPLUG)) {
1592 for (loop = 0; loop < nr_pages; loop++, p++) {
1593 VM_WARN_ON_ONCE(PageReserved(p));
1594 __ClearPageOffline(p);
1595 set_page_count(p, 0);
1596 }
1597
1598 adjust_managed_page_count(page, nr_pages);
1599 } else {
1600 for (loop = 0; loop < nr_pages; loop++, p++) {
1601 __ClearPageReserved(p);
1602 set_page_count(p, 0);
1603 }
1604
1605 /* memblock adjusts totalram_pages() manually. */
1606 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1607 }
1608
1609 if (page_contains_unaccepted(page, order)) {
1610 if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
1611 return;
1612
1613 accept_memory(page_to_phys(page), PAGE_SIZE << order);
1614 }
1615
1616 /*
1617 * Bypass PCP and place fresh pages right to the tail, primarily
1618 * relevant for memory onlining.
1619 */
1620 __free_pages_ok(page, order, FPI_TO_TAIL);
1621 }
1622
1623 /*
1624 * Check that the whole (or subset of) a pageblock given by the interval of
1625 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1626 * with the migration of free compaction scanner.
1627 *
1628 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1629 *
1630 * It's possible on some configurations to have a setup like node0 node1 node0
1631 * i.e. it's possible that all pages within a zone's range of pages do not
1632 * belong to a single zone. We assume that a border between node0 and node1
1633 * can occur within a single pageblock, but not a node0 node1 node0
1634 * interleaving within a single pageblock. It is therefore sufficient to check
1635 * the first and last page of a pageblock and avoid checking each individual
1636 * page in a pageblock.
1637 *
1638 * Note: the function may return non-NULL struct page even for a page block
1639 * which contains a memory hole (i.e. there is no physical memory for a subset
1640 * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which
1641 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole
1642 * even though the start pfn is online and valid. This should be safe most of
1643 * the time because struct pages are still initialized via init_unavailable_range()
1644 * and pfn walkers shouldn't touch any physical memory range for which they do
1645 * not recognize any specific metadata in struct pages.
1646 */
1647 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1648 unsigned long end_pfn, struct zone *zone)
1649 {
1650 struct page *start_page;
1651 struct page *end_page;
1652
1653 /* end_pfn is one past the range we are checking */
1654 end_pfn--;
1655
1656 if (!pfn_valid(end_pfn))
1657 return NULL;
1658
1659 start_page = pfn_to_online_page(start_pfn);
1660 if (!start_page)
1661 return NULL;
1662
1663 if (page_zone(start_page) != zone)
1664 return NULL;
1665
1666 end_page = pfn_to_page(end_pfn);
1667
1668 /* This gives shorter code than deriving page_zone(end_page) */
1669 if (page_zone_id(start_page) != page_zone_id(end_page))
1670 return NULL;
1671
1672 return start_page;
1673 }
1674
1675 /*
1676 * The order of subdivision here is critical for the IO subsystem.
1677 * Please do not alter this order without good reasons and regression
1678 * testing. Specifically, as large blocks of memory are subdivided,
1679 * the order in which smaller blocks are delivered depends on the order
1680 * they're subdivided in this function. This is the primary factor
1681 * influencing the order in which pages are delivered to the IO
1682 * subsystem according to empirical testing, and this is also justified
1683 * by considering the behavior of a buddy system containing a single
1684 * large block of memory acted on by a series of small allocations.
1685 * This behavior is a critical factor in sglist merging's success.
1686 *
1687 * -- nyc
1688 */
1689 static inline unsigned int expand(struct zone *zone, struct page *page, int low,
1690 int high, int migratetype)
1691 {
1692 unsigned int size = 1 << high;
1693 unsigned int nr_added = 0;
1694
1695 while (high > low) {
1696 high--;
1697 size >>= 1;
1698 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1699
1700 /*
1701 * Mark as guard pages (or page), so they can be merged
1702 * back into the allocator when the buddy is freed.
1703 * Corresponding page table entries will not be touched;
1704 * the pages stay not present in the virtual address space.
1705 */
1706 if (set_page_guard(zone, &page[size], high))
1707 continue;
1708
1709 __add_to_free_list(&page[size], zone, high, migratetype, false);
1710 set_buddy_order(&page[size], high);
1711 nr_added += size;
1712 }
1713
1714 return nr_added;
1715 }
1716
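/*
 * Worked example (annotation, not part of the original source): expanding
 * an order-3 block down to an order-0 allocation, i.e.
 * expand(zone, page, 0, 3, mt), splits off the upper half at each step:
 *
 *	page[4..7] -> freelist order 2  (nr_added += 4)
 *	page[2..3] -> freelist order 1  (nr_added += 2)
 *	page[1]    -> freelist order 0  (nr_added += 1)
 *
 * page[0] is left for the caller and the function returns 7. A chunk that
 * becomes a debug guard page is neither added to a freelist nor counted.
 */
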
1717 static __always_inline void page_del_and_expand(struct zone *zone,
1718 struct page *page, int low,
1719 int high, int migratetype)
1720 {
1721 int nr_pages = 1 << high;
1722
1723 __del_page_from_free_list(page, zone, high, migratetype);
1724 nr_pages -= expand(zone, page, low, high, migratetype);
1725 account_freepages(zone, -nr_pages, migratetype);
1726 }
1727
1728 static void check_new_page_bad(struct page *page)
1729 {
1730 if (unlikely(PageHWPoison(page))) {
1731 /* Don't complain about hwpoisoned pages */
1732 if (PageBuddy(page))
1733 __ClearPageBuddy(page);
1734 return;
1735 }
1736
1737 bad_page(page,
1738 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
1739 }
1740
1741 /*
1742 * This page is about to be returned from the page allocator
1743 */
1744 static bool check_new_page(struct page *page)
1745 {
1746 if (likely(page_expected_state(page,
1747 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1748 return false;
1749
1750 check_new_page_bad(page);
1751 return true;
1752 }
1753
1754 static inline bool check_new_pages(struct page *page, unsigned int order)
1755 {
1756 if (is_check_pages_enabled()) {
1757 for (int i = 0; i < (1 << order); i++) {
1758 struct page *p = page + i;
1759
1760 if (check_new_page(p))
1761 return true;
1762 }
1763 }
1764
1765 return false;
1766 }
1767
1768 static inline bool should_skip_kasan_unpoison(gfp_t flags)
1769 {
1770 /* Don't skip if a software KASAN mode is enabled. */
1771 if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
1772 IS_ENABLED(CONFIG_KASAN_SW_TAGS))
1773 return false;
1774
1775 /* Skip, if hardware tag-based KASAN is not enabled. */
1776 if (!kasan_hw_tags_enabled())
1777 return true;
1778
1779 /*
1780 * With hardware tag-based KASAN enabled, skip if this has been
1781 * requested via __GFP_SKIP_KASAN.
1782 */
1783 return flags & __GFP_SKIP_KASAN;
1784 }
1785
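/*
 * Summary of the decision above (annotation, not part of the original
 * source):
 *
 *	generic or SW-tag KASAN enabled     -> never skip unpoisoning
 *	no HW-tag KASAN enabled at runtime  -> always skip
 *	HW-tag KASAN enabled                -> skip only for __GFP_SKIP_KASAN
 */
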
1786 static inline bool should_skip_init(gfp_t flags)
1787 {
1788 /* Don't skip, if hardware tag-based KASAN is not enabled. */
1789 if (!kasan_hw_tags_enabled())
1790 return false;
1791
1792 /* For hardware tag-based KASAN, skip if requested. */
1793 return (flags & __GFP_SKIP_ZERO);
1794 }
1795
1796 inline void post_alloc_hook(struct page *page, unsigned int order,
1797 gfp_t gfp_flags)
1798 {
1799 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
1800 !should_skip_init(gfp_flags);
1801 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
1802 int i;
1803
1804 set_page_private(page, 0);
1805
1806 arch_alloc_page(page, order);
1807 debug_pagealloc_map_pages(page, 1 << order);
1808
1809 /*
1810 * Page unpoisoning must happen before memory initialization.
1811 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
1812 * allocations and the page unpoisoning code will complain.
1813 */
1814 kernel_unpoison_pages(page, 1 << order);
1815
1816 /*
1817 * As memory initialization might be integrated into KASAN,
1818 * KASAN unpoisoning and memory initialization code must be
1819 * kept together to avoid discrepancies in behavior.
1820 */
1821
1822 /*
1823 * Zero the memory tags if requested (this happens only when the
1824 * memory should be initialized as well).
1825 */
1826 if (zero_tags)
1827 init = !tag_clear_highpages(page, 1 << order);
1828
1829 if (!should_skip_kasan_unpoison(gfp_flags) &&
1830 kasan_unpoison_pages(page, order, init)) {
1831 /* Take note that memory was initialized by KASAN. */
1832 if (kasan_has_integrated_init())
1833 init = false;
1834 } else {
1835 /*
1836 * If memory tags have not been set by KASAN, reset the page
1837 * tags to ensure page_address() dereferencing does not fault.
1838 */
1839 for (i = 0; i != 1 << order; ++i)
1840 page_kasan_tag_reset(page + i);
1841 }
1842 /* If memory is still not initialized, initialize it now. */
1843 if (init)
1844 kernel_init_pages(page, 1 << order);
1845
1846 set_page_owner(page, order, gfp_flags);
1847 page_table_check_alloc(page, order);
1848 pgalloc_tag_add(page, current, 1 << order);
1849 }
1850
1851 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1852 unsigned int alloc_flags)
1853 {
1854 post_alloc_hook(page, order, gfp_flags);
1855
1856 if (order && (gfp_flags & __GFP_COMP))
1857 prep_compound_page(page, order);
1858
1859 /*
1860 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1861 * allocate the page. The expectation is that the caller is taking
1862 * steps that will free more memory. The caller should avoid the page
1863 * being used for !PFMEMALLOC purposes.
1864 */
1865 if (alloc_flags & ALLOC_NO_WATERMARKS)
1866 set_page_pfmemalloc(page);
1867 else
1868 clear_page_pfmemalloc(page);
1869 }
1870
1871 /*
1872 * Go through the free lists for the given migratetype and remove
1873 * the smallest available page from the freelists
1874 */
1875 static __always_inline
1876 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1877 int migratetype)
1878 {
1879 unsigned int current_order;
1880 struct free_area *area;
1881 struct page *page;
1882
1883 /* Find a page of the appropriate size in the preferred list */
1884 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
1885 area = &(zone->free_area[current_order]);
1886 page = get_page_from_free_area(area, migratetype);
1887 if (!page)
1888 continue;
1889
1890 page_del_and_expand(zone, page, order, current_order,
1891 migratetype);
1892 trace_mm_page_alloc_zone_locked(page, order, migratetype,
1893 pcp_allowed_order(order) &&
1894 migratetype < MIGRATE_PCPTYPES);
1895 return page;
1896 }
1897
1898 return NULL;
1899 }
1900
1902 /*
1903 * This array describes the order in which free lists are fallen back to
1904 * when the free lists for the desired migratetype are depleted.
1905 *
1906 * The other migratetypes do not have fallbacks.
1907 */
1908 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = {
1909 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
1910 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
1911 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
1912 };
1913
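/*
 * Example (annotation, not part of the original source): a
 * MIGRATE_UNMOVABLE allocation that finds its own freelists empty first
 * tries fallbacks[MIGRATE_UNMOVABLE][0] == MIGRATE_RECLAIMABLE, then
 * fallbacks[MIGRATE_UNMOVABLE][1] == MIGRATE_MOVABLE.
 */
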
1914 #ifdef CONFIG_CMA
1915 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1916 unsigned int order)
1917 {
1918 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1919 }
1920 #else
1921 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1922 unsigned int order) { return NULL; }
1923 #endif
1924
1925 /*
1926 * Move all free pages of a block to new type's freelist. Caller needs to
1927 * change the block type.
1928 */
1929 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
1930 int old_mt, int new_mt)
1931 {
1932 struct page *page;
1933 unsigned long pfn, end_pfn;
1934 unsigned int order;
1935 int pages_moved = 0;
1936
1937 VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
1938 end_pfn = pageblock_end_pfn(start_pfn);
1939
1940 for (pfn = start_pfn; pfn < end_pfn;) {
1941 page = pfn_to_page(pfn);
1942 if (!PageBuddy(page)) {
1943 pfn++;
1944 continue;
1945 }
1946
1947 /* Make sure we are not inadvertently changing nodes */
1948 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1949 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
1950
1951 order = buddy_order(page);
1952
1953 move_to_free_list(page, zone, order, old_mt, new_mt);
1954
1955 pfn += 1 << order;
1956 pages_moved += 1 << order;
1957 }
1958
1959 return pages_moved;
1960 }
1961
1962 static bool prep_move_freepages_block(struct zone *zone, struct page *page,
1963 unsigned long *start_pfn,
1964 int *num_free, int *num_movable)
1965 {
1966 unsigned long pfn, start, end;
1967
1968 pfn = page_to_pfn(page);
1969 start = pageblock_start_pfn(pfn);
1970 end = pageblock_end_pfn(pfn);
1971
1972 /*
1973 * The caller only has the lock for @zone, don't touch ranges
1974 * that straddle into other zones. While we could move part of
1975 * the range that's inside the zone, this call is usually
1976 * accompanied by other operations such as migratetype updates
1977 * which also should be locked.
1978 */
1979 if (!zone_spans_pfn(zone, start))
1980 return false;
1981 if (!zone_spans_pfn(zone, end - 1))
1982 return false;
1983
1984 *start_pfn = start;
1985
1986 if (num_free) {
1987 *num_free = 0;
1988 *num_movable = 0;
1989 for (pfn = start; pfn < end;) {
1990 page = pfn_to_page(pfn);
1991 if (PageBuddy(page)) {
1992 int nr = 1 << buddy_order(page);
1993
1994 *num_free += nr;
1995 pfn += nr;
1996 continue;
1997 }
1998 /*
1999 * We assume that pages that could be isolated for
2000 * migration are movable. But we don't actually try
2001 * isolating, as that would be expensive.
2002 */
2003 if (PageLRU(page) || page_has_movable_ops(page))
2004 (*num_movable)++;
2005 pfn++;
2006 }
2007 }
2008
2009 return true;
2010 }
2011
2012 static int move_freepages_block(struct zone *zone, struct page *page,
2013 int old_mt, int new_mt)
2014 {
2015 unsigned long start_pfn;
2016 int res;
2017
2018 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
2019 return -1;
2020
2021 res = __move_freepages_block(zone, start_pfn, old_mt, new_mt);
2022 set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
2023
2024 return res;
2026 }
2027
2028 #ifdef CONFIG_MEMORY_ISOLATION
2029 /* Look for a buddy that straddles start_pfn */
2030 static unsigned long find_large_buddy(unsigned long start_pfn)
2031 {
2032 /*
2033 * If start_pfn is not an order-0 PageBuddy, the next PageBuddy containing
2034 * start_pfn has a minimal order of __ffs(start_pfn) + 1, so start checking
2035 * at order __ffs(start_pfn). If start_pfn is an order-0 PageBuddy,
2036 * the starting order does not matter.
2037 */
2038 int order = start_pfn ? __ffs(start_pfn) : MAX_PAGE_ORDER;
2039 struct page *page;
2040 unsigned long pfn = start_pfn;
2041
2042 while (!PageBuddy(page = pfn_to_page(pfn))) {
2043 /* Nothing found */
2044 if (++order > MAX_PAGE_ORDER)
2045 return start_pfn;
2046 pfn &= ~0UL << order;
2047 }
2048
2049 /*
2050 * Found a preceding buddy, but does it straddle?
2051 */
2052 if (pfn + (1 << buddy_order(page)) > start_pfn)
2053 return pfn;
2054
2055 /* Nothing found */
2056 return start_pfn;
2057 }
2058
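/*
 * Worked example (annotation, not part of the original source): for
 * start_pfn = 48 (0b110000), the search starts at order __ffs(48) = 4
 * with pfn 48. If pfn 48 is not PageBuddy, the next candidates are
 * order 5 at pfn 32 and order 6 at pfn 0. An order-6 buddy at pfn 0
 * (pfns 0..63) gives 0 + 64 > 48, i.e. it straddles start_pfn, so 0 is
 * returned. An order-4 buddy at pfn 32 (pfns 32..47) ends exactly at
 * start_pfn, does not straddle, and start_pfn itself is returned.
 */
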
2059 static inline void toggle_pageblock_isolate(struct page *page, bool isolate)
2060 {
2061 if (isolate)
2062 set_pageblock_isolate(page);
2063 else
2064 clear_pageblock_isolate(page);
2065 }
2066
2067 /**
2068 * __move_freepages_block_isolate - move free pages in block for page isolation
2069 * @zone: the zone
2070 * @page: the pageblock page
2071 * @isolate: to isolate the given pageblock or unisolate it
2072 *
2073 * This is similar to move_freepages_block(), but handles the special
2074 * case encountered in page isolation, where the block of interest
2075 * might be part of a larger buddy spanning multiple pageblocks.
2076 *
2077 * Unlike the regular page allocator path, which moves pages while
2078 * stealing buddies off the freelist, page isolation is interested in
2079 * arbitrary pfn ranges that may have overlapping buddies on both ends.
2080 *
2081 * This function handles that. Straddling buddies are split into
2082 * individual pageblocks. Only the block of interest is moved.
2083 *
2084 * Returns %true if pages could be moved, %false otherwise.
2085 */
2086 static bool __move_freepages_block_isolate(struct zone *zone,
2087 struct page *page, bool isolate)
2088 {
2089 unsigned long start_pfn, buddy_pfn;
2090 int from_mt;
2091 int to_mt;
2092 struct page *buddy;
2093
2094 if (isolate == get_pageblock_isolate(page)) {
2095 VM_WARN_ONCE(1, "%s a pageblock that is already in that state",
2096 isolate ? "Isolate" : "Unisolate");
2097 return false;
2098 }
2099
2100 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
2101 return false;
2102
2103 /* No splits needed if buddies can't span multiple blocks */
2104 if (pageblock_order == MAX_PAGE_ORDER)
2105 goto move;
2106
2107 buddy_pfn = find_large_buddy(start_pfn);
2108 buddy = pfn_to_page(buddy_pfn);
2109 /* We're a part of a larger buddy */
2110 if (PageBuddy(buddy) && buddy_order(buddy) > pageblock_order) {
2111 int order = buddy_order(buddy);
2112
2113 del_page_from_free_list(buddy, zone, order,
2114 get_pfnblock_migratetype(buddy, buddy_pfn));
2115 toggle_pageblock_isolate(page, isolate);
2116 split_large_buddy(zone, buddy, buddy_pfn, order, FPI_NONE);
2117 return true;
2118 }
2119
2120 move:
2121 /* Use MIGRATETYPE_MASK to get non-isolate migratetype */
2122 if (isolate) {
2123 from_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page),
2124 MIGRATETYPE_MASK);
2125 to_mt = MIGRATE_ISOLATE;
2126 } else {
2127 from_mt = MIGRATE_ISOLATE;
2128 to_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page),
2129 MIGRATETYPE_MASK);
2130 }
2131
2132 __move_freepages_block(zone, start_pfn, from_mt, to_mt);
2133 toggle_pageblock_isolate(pfn_to_page(start_pfn), isolate);
2134
2135 return true;
2136 }
2137
2138 bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page)
2139 {
2140 return __move_freepages_block_isolate(zone, page, true);
2141 }
2142
2143 bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page)
2144 {
2145 return __move_freepages_block_isolate(zone, page, false);
2146 }
2147
2148 #endif /* CONFIG_MEMORY_ISOLATION */
2149
2150 static void change_pageblock_range(struct page *pageblock_page,
2151 int start_order, int migratetype)
2152 {
2153 int nr_pageblocks = 1 << (start_order - pageblock_order);
2154
2155 while (nr_pageblocks--) {
2156 set_pageblock_migratetype(pageblock_page, migratetype);
2157 pageblock_page += pageblock_nr_pages;
2158 }
2159 }
2160
2161 static inline bool boost_watermark(struct zone *zone)
2162 {
2163 unsigned long max_boost;
2164
2165 if (!watermark_boost_factor)
2166 return false;
2167 /*
2168 * Don't bother in zones that are unlikely to produce results.
2169 * On small machines, including kdump capture kernels running
2170 * in a small area, boosting the watermark can cause an out of
2171 * memory situation immediately.
2172 */
2173 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2174 return false;
2175
2176 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2177 watermark_boost_factor, 10000);
2178
2179 /*
2180 * The high watermark may be uninitialised if fragmentation occurs
2181 * very early in boot, so do not boost. We do not fall
2182 * through and boost by pageblock_nr_pages, as failing
2183 * allocations that early means that reclaim is not going
2184 * to help, and it may even be impossible to reclaim the
2185 * boosted watermark, resulting in a hang.
2186 */
2187 if (!max_boost)
2188 return false;
2189
2190 max_boost = max(pageblock_nr_pages, max_boost);
2191
2192 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2193 max_boost);
2194
2195 return true;
2196 }
2197
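/*
 * Worked example (annotation, not part of the original source): with the
 * default watermark_boost_factor of 15000 and a high watermark of 4096
 * pages, max_boost = 4096 * 15000 / 10000 = 6144 pages. Assuming order-9
 * pageblocks (512 pages), each fallback event then raises
 * zone->watermark_boost by 512 pages until the 6144-page cap is reached
 * after twelve events.
 */
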
2198 /*
2199 * When we are falling back to another migratetype during allocation, should we
2200 * try to claim an entire block to satisfy further allocations, instead of
2201 * polluting multiple pageblocks?
2202 */
2203 static bool should_try_claim_block(unsigned int order, int start_mt)
2204 {
2205 /*
2206 * This order check is intentionally kept despite the more relaxed
2207 * check that follows. The reason is that when this condition is met
2208 * we can always claim the whole pageblock, whereas the check below
2209 * does not guarantee it and is merely a heuristic that could be
2210 * changed at any time.
2211 */
2212 if (order >= pageblock_order)
2213 return true;
2214
2215 /*
2216 * Above a certain threshold, always try to claim, as it's likely there
2217 * will be more free pages in the pageblock.
2218 */
2219 if (order >= pageblock_order / 2)
2220 return true;
2221
2222 /*
2223 * Unmovable/reclaimable allocations would cause permanent
2224 * fragmentations if they fell back to allocating from a movable block
2225 * (polluting it), so we try to claim the whole block regardless of the
2226 * allocation size. Later movable allocations can always steal from this
2227 * block, which is less problematic.
2228 */
2229 if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE)
2230 return true;
2231
2232 if (page_group_by_mobility_disabled)
2233 return true;
2234
2235 /*
2236 * Movable pages won't cause permanent fragmentation, so when you alloc
2237 * small pages, we just need to temporarily steal unmovable or
2238 * reclaimable pages that are closest to the request size. After a
2239 * while, memory compaction may occur to form large contiguous pages,
2240 * and the next movable allocation may not need to steal.
2241 */
2242 return false;
2243 }
2244
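/*
 * Example (annotation, not part of the original source): assuming
 * pageblock_order == 9, any allocation of order >= 4 (pageblock_order / 2)
 * tries to claim a whole block, as do MIGRATE_UNMOVABLE and
 * MIGRATE_RECLAIMABLE allocations of any order. Only small
 * MIGRATE_MOVABLE allocations decline (unless grouping by mobility is
 * disabled).
 */
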
2245 /*
2246 * Check whether there is a suitable fallback freepage with the requested
2247 * order. If claimable is true, this function returns fallback_mt only if
2248 * we would do this whole-block claiming. This helps to reduce
2249 * fragmentation due to mixed migratetype pages in one pageblock.
2250 */
2251 int find_suitable_fallback(struct free_area *area, unsigned int order,
2252 int migratetype, bool claimable)
2253 {
2254 int i;
2255
2256 if (claimable && !should_try_claim_block(order, migratetype))
2257 return -2;
2258
2259 if (area->nr_free == 0)
2260 return -1;
2261
2262 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
2263 int fallback_mt = fallbacks[migratetype][i];
2264
2265 if (!free_area_empty(area, fallback_mt))
2266 return fallback_mt;
2267 }
2268
2269 return -1;
2270 }
2271
2272 /*
2273 * This function implements actual block claiming behaviour. If order is large
2274 * enough, we can claim the whole pageblock for the requested migratetype. If
2275 * not, we check the pageblock for constituent pages; if at least half of the
2276 * pages are free or compatible, we can still claim the whole block, so pages
2277 * freed in the future will be put on the correct free list.
2278 */
2279 static struct page *
2280 try_to_claim_block(struct zone *zone, struct page *page,
2281 int current_order, int order, int start_type,
2282 int block_type, unsigned int alloc_flags)
2283 {
2284 int free_pages, movable_pages, alike_pages;
2285 unsigned long start_pfn;
2286
2287 /* Take ownership for orders >= pageblock_order */
2288 if (current_order >= pageblock_order) {
2289 unsigned int nr_added;
2290
2291 del_page_from_free_list(page, zone, current_order, block_type);
2292 change_pageblock_range(page, current_order, start_type);
2293 nr_added = expand(zone, page, order, current_order, start_type);
2294 account_freepages(zone, nr_added, start_type);
2295 return page;
2296 }
2297
2298 /*
2299 * Boost watermarks to increase reclaim pressure to reduce the
2300 * likelihood of future fallbacks. Wake kswapd now as the node
2301 * may be balanced overall and kswapd will not wake naturally.
2302 */
2303 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2304 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2305
2306 /* moving whole block can fail due to zone boundary conditions */
2307 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
2308 &movable_pages))
2309 return NULL;
2310
2311 /*
2312 * Determine how many pages are compatible with our allocation.
2313 * For movable allocation, it's the number of movable pages which
2314 * we just obtained. For other types it's a bit more tricky.
2315 */
2316 if (start_type == MIGRATE_MOVABLE) {
2317 alike_pages = movable_pages;
2318 } else {
2319 /*
2320 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2321 * to MOVABLE pageblock, consider all non-movable pages as
2322 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2323 * vice versa, be conservative since we can't distinguish the
2324 * exact migratetype of non-movable pages.
2325 */
2326 if (block_type == MIGRATE_MOVABLE)
2327 alike_pages = pageblock_nr_pages
2328 - (free_pages + movable_pages);
2329 else
2330 alike_pages = 0;
2331 }
2332 /*
2333 * If a sufficient number of pages in the block are either free or of
2334 * compatible migratability with our allocation, claim the whole block.
2335 */
2336 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2337 page_group_by_mobility_disabled) {
2338 __move_freepages_block(zone, start_pfn, block_type, start_type);
2339 set_pageblock_migratetype(pfn_to_page(start_pfn), start_type);
2340 return __rmqueue_smallest(zone, order, start_type);
2341 }
2342
2343 return NULL;
2344 }
2345
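/*
 * Worked example (annotation, not part of the original source): assuming
 * pageblock_order == 9, a block holds 512 pages and the claim threshold
 * is 1 << 8 = 256 pages. An unmovable allocation falling back to a
 * movable block with 180 free and 60 movable pages counts
 * 512 - (180 + 60) = 272 alike pages; 180 + 272 = 452 >= 256, so the
 * whole block is claimed.
 */
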
2346 /*
2347 * Try to allocate from some fallback migratetype by claiming the entire block,
2348 * i.e. converting it to the allocation's start migratetype.
2349 *
2350 * The use of signed ints for order and current_order is a deliberate
2351 * deviation from the rest of this file, to make the for loop
2352 * condition simpler.
2353 */
2354 static __always_inline struct page *
2355 __rmqueue_claim(struct zone *zone, int order, int start_migratetype,
2356 unsigned int alloc_flags)
2357 {
2358 struct free_area *area;
2359 int current_order;
2360 int min_order = order;
2361 struct page *page;
2362 int fallback_mt;
2363
2364 /*
2365 * Do not steal pages from freelists belonging to other pageblocks
2366 * i.e. orders < pageblock_order. If there are no local zones free,
2367 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2368 */
2369 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
2370 min_order = pageblock_order;
2371
2372 /*
2373 * Find the largest available free page in the other list. This roughly
2374 * approximates finding the pageblock with the most free pages, which
2375 * would be too costly to do exactly.
2376 */
2377 for (current_order = MAX_PAGE_ORDER; current_order >= min_order;
2378 --current_order) {
2379 area = &(zone->free_area[current_order]);
2380 fallback_mt = find_suitable_fallback(area, current_order,
2381 start_migratetype, true);
2382
2383 /* No block in that order */
2384 if (fallback_mt == -1)
2385 continue;
2386
2387 /* Advanced into orders too low to claim, abort */
2388 if (fallback_mt == -2)
2389 break;
2390
2391 page = get_page_from_free_area(area, fallback_mt);
2392 page = try_to_claim_block(zone, page, current_order, order,
2393 start_migratetype, fallback_mt,
2394 alloc_flags);
2395 if (page) {
2396 trace_mm_page_alloc_extfrag(page, order, current_order,
2397 start_migratetype, fallback_mt);
2398 return page;
2399 }
2400 }
2401
2402 return NULL;
2403 }
2404
2405 /*
2406 * Try to steal a single page from some fallback migratetype. Leave the rest of
2407 * the block as its current migratetype, potentially causing fragmentation.
2408 */
2409 static __always_inline struct page *
2410 __rmqueue_steal(struct zone *zone, int order, int start_migratetype)
2411 {
2412 struct free_area *area;
2413 int current_order;
2414 struct page *page;
2415 int fallback_mt;
2416
2417 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
2418 area = &(zone->free_area[current_order]);
2419 fallback_mt = find_suitable_fallback(area, current_order,
2420 start_migratetype, false);
2421 if (fallback_mt == -1)
2422 continue;
2423
2424 page = get_page_from_free_area(area, fallback_mt);
2425 page_del_and_expand(zone, page, order, current_order, fallback_mt);
2426 trace_mm_page_alloc_extfrag(page, order, current_order,
2427 start_migratetype, fallback_mt);
2428 return page;
2429 }
2430
2431 return NULL;
2432 }
2433
2434 enum rmqueue_mode {
2435 RMQUEUE_NORMAL,
2436 RMQUEUE_CMA,
2437 RMQUEUE_CLAIM,
2438 RMQUEUE_STEAL,
2439 };
2440
2441 /*
2442 * Do the hard work of removing an element from the buddy allocator.
2443 * Call me with the zone->lock already held.
2444 */
2445 static __always_inline struct page *
2446 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2447 unsigned int alloc_flags, enum rmqueue_mode *mode)
2448 {
2449 struct page *page;
2450
2451 if (IS_ENABLED(CONFIG_CMA)) {
2452 /*
2453 * Balance movable allocations between regular and CMA areas by
2454 * allocating from CMA when over half of the zone's free memory
2455 * is in the CMA area.
2456 */
2457 if (alloc_flags & ALLOC_CMA &&
2458 zone_page_state(zone, NR_FREE_CMA_PAGES) >
2459 zone_page_state(zone, NR_FREE_PAGES) / 2) {
2460 page = __rmqueue_cma_fallback(zone, order);
2461 if (page)
2462 return page;
2463 }
2464 }
2465
2466 /*
2467 * First try the freelists of the requested migratetype, then try
2468 * fallbacks modes with increasing levels of fragmentation risk.
2469 *
2470 * The fallback logic is expensive and rmqueue_bulk() calls in
2471 * a loop with the zone->lock held, meaning the freelists are
2472 * not subject to any outside changes. Remember in *mode where
2473 * we found pay dirt, to save us the search on the next call.
2474 */
2475 switch (*mode) {
2476 case RMQUEUE_NORMAL:
2477 page = __rmqueue_smallest(zone, order, migratetype);
2478 if (page)
2479 return page;
2480 fallthrough;
2481 case RMQUEUE_CMA:
2482 if (alloc_flags & ALLOC_CMA) {
2483 page = __rmqueue_cma_fallback(zone, order);
2484 if (page) {
2485 *mode = RMQUEUE_CMA;
2486 return page;
2487 }
2488 }
2489 fallthrough;
2490 case RMQUEUE_CLAIM:
2491 page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
2492 if (page) {
2493 /* Replenished preferred freelist, back to normal mode. */
2494 *mode = RMQUEUE_NORMAL;
2495 return page;
2496 }
2497 fallthrough;
2498 case RMQUEUE_STEAL:
2499 if (!(alloc_flags & ALLOC_NOFRAGMENT)) {
2500 page = __rmqueue_steal(zone, order, migratetype);
2501 if (page) {
2502 *mode = RMQUEUE_STEAL;
2503 return page;
2504 }
2505 }
2506 }
2507 return NULL;
2508 }
2509
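/*
 * Example (annotation, not part of the original source): if one iteration
 * of an rmqueue_bulk() batch only succeeds by claiming a fallback block,
 * *mode is reset to RMQUEUE_NORMAL, since the claimed block replenished
 * the preferred freelists. If it had to steal a single page instead,
 * *mode stays RMQUEUE_STEAL and later iterations jump straight to
 * __rmqueue_steal().
 */
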
2510 /*
2511 * Obtain a specified number of elements from the buddy allocator, all under
2512 * a single hold of the lock, for efficiency. Add them to the supplied list.
2513 * Returns the number of new pages which were placed at *list.
2514 */
2515 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2516 unsigned long count, struct list_head *list,
2517 int migratetype, unsigned int alloc_flags)
2518 {
2519 enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
2520 unsigned long flags;
2521 int i;
2522
2523 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
2524 if (!spin_trylock_irqsave(&zone->lock, flags))
2525 return 0;
2526 } else {
2527 spin_lock_irqsave(&zone->lock, flags);
2528 }
2529 for (i = 0; i < count; ++i) {
2530 struct page *page = __rmqueue(zone, order, migratetype,
2531 alloc_flags, &rmqm);
2532 if (unlikely(page == NULL))
2533 break;
2534
2535 /*
2536 * Split buddy pages returned by expand() are received here in
2537 * physical page order. The page is added to the tail of the
2538 * caller's list. From the caller's perspective, the linked list
2539 * is ordered by page number under some conditions. This is
2540 * useful for IO devices that can only move forward from the
2541 * head of the list, and thus also in physical page order; such
2542 * devices can merge IO requests if the physical pages are
2543 * ordered properly.
2544 */
2545 list_add_tail(&page->pcp_list, list);
2546 }
2547 spin_unlock_irqrestore(&zone->lock, flags);
2548
2549 return i;
2550 }
2551
2552 /*
2553 * Called from the vmstat counter updater to decay the PCP high.
2554 * Return whether there is additional work to do.
2555 */
2556 bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
2557 {
2558 int high_min, to_drain, to_drain_batched, batch;
2559 bool todo = false;
2560
2561 high_min = READ_ONCE(pcp->high_min);
2562 batch = READ_ONCE(pcp->batch);
2563 /*
2564 * Decrease pcp->high periodically to try to free possibly
2565 * idle PCP pages, but avoid freeing too many pages at once
2566 * to control latency. This also caps the pcp->high decrement.
2567 */
2568 if (pcp->high > high_min) {
2569 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2570 pcp->high - (pcp->high >> 3), high_min);
2571 if (pcp->high > high_min)
2572 todo = true;
2573 }
2574
2575 to_drain = pcp->count - pcp->high;
2576 while (to_drain > 0) {
2577 to_drain_batched = min(to_drain, batch);
2578 spin_lock(&pcp->lock);
2579 free_pcppages_bulk(zone, to_drain_batched, pcp, 0);
2580 spin_unlock(&pcp->lock);
2581 todo = true;
2582
2583 to_drain -= to_drain_batched;
2584 }
2585
2586 return todo;
2587 }
2588
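/*
 * Worked example (annotation, not part of the original source): assume
 * pcp->high = 1024, high_min = 64, batch = 63, pcp->count = 900 and
 * CONFIG_PCP_BATCH_SCALE_MAX = 5. Then
 * max3(900 - 2016, 1024 - 128, 64) lowers pcp->high to 896, and the
 * 900 - 896 = 4 excess pages are freed in a single sub-batch.
 */
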
2589 #ifdef CONFIG_NUMA
2590 /*
2591 * Called from the vmstat counter updater to drain pagesets of this
2592 * currently executing processor on remote nodes after they have
2593 * expired.
2594 */
2595 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2596 {
2597 int to_drain, batch;
2598
2599 batch = READ_ONCE(pcp->batch);
2600 to_drain = min(pcp->count, batch);
2601 if (to_drain > 0) {
2602 spin_lock(&pcp->lock);
2603 free_pcppages_bulk(zone, to_drain, pcp, 0);
2604 spin_unlock(&pcp->lock);
2605 }
2606 }
2607 #endif
2608
2609 /*
2610 * Drain pcplists of the indicated processor and zone.
2611 */
2612 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2613 {
2614 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2615 int count;
2616
2617 do {
2618 spin_lock(&pcp->lock);
2619 count = pcp->count;
2620 if (count) {
2621 int to_drain = min(count,
2622 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
2623
2624 free_pcppages_bulk(zone, to_drain, pcp, 0);
2625 count -= to_drain;
2626 }
2627 spin_unlock(&pcp->lock);
2628 } while (count);
2629 }
2630
2631 /*
2632 * Drain pcplists of all zones on the indicated processor.
2633 */
2634 static void drain_pages(unsigned int cpu)
2635 {
2636 struct zone *zone;
2637
2638 for_each_populated_zone(zone) {
2639 drain_pages_zone(cpu, zone);
2640 }
2641 }
2642
2643 /*
2644 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2645 */
2646 void drain_local_pages(struct zone *zone)
2647 {
2648 int cpu = smp_processor_id();
2649
2650 if (zone)
2651 drain_pages_zone(cpu, zone);
2652 else
2653 drain_pages(cpu);
2654 }
2655
2656 /*
2657 * The implementation of drain_all_pages(), exposing an extra parameter to
2658 * drain on all cpus.
2659 *
2660 * drain_all_pages() is optimized to only execute on cpus where pcplists are
2661 * not empty. The check for non-emptiness can however race with a free to
2662 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
2663 * that need the guarantee that every CPU has drained can disable the
2664 * optimizing racy check.
2665 */
2666 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
2667 {
2668 int cpu;
2669
2670 /*
2671 * Allocate in the BSS so we won't require allocation in
2672 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2673 */
2674 static cpumask_t cpus_with_pcps;
2675
2676 /*
2677 * Do not drain if one is already in progress unless it's specific to
2678 * a zone. Such callers are primarily CMA and memory hotplug and need
2679 * the drain to be complete when the call returns.
2680 */
2681 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2682 if (!zone)
2683 return;
2684 mutex_lock(&pcpu_drain_mutex);
2685 }
2686
2687 /*
2688 * We don't care about racing with CPU hotplug events,
2689 * as offline notification will cause the notified
2690 * cpu to drain that CPU's pcps, and on_each_cpu_mask
2691 * disables preemption as part of its processing.
2692 */
2693 for_each_online_cpu(cpu) {
2694 struct per_cpu_pages *pcp;
2695 struct zone *z;
2696 bool has_pcps = false;
2697
2698 if (force_all_cpus) {
2699 /*
2700 * The pcp.count check is racy, some callers need a
2701 * guarantee that no cpu is missed.
2702 */
2703 has_pcps = true;
2704 } else if (zone) {
2705 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2706 if (pcp->count)
2707 has_pcps = true;
2708 } else {
2709 for_each_populated_zone(z) {
2710 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
2711 if (pcp->count) {
2712 has_pcps = true;
2713 break;
2714 }
2715 }
2716 }
2717
2718 if (has_pcps)
2719 cpumask_set_cpu(cpu, &cpus_with_pcps);
2720 else
2721 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2722 }
2723
2724 for_each_cpu(cpu, &cpus_with_pcps) {
2725 if (zone)
2726 drain_pages_zone(cpu, zone);
2727 else
2728 drain_pages(cpu);
2729 }
2730
2731 mutex_unlock(&pcpu_drain_mutex);
2732 }
2733
2734 /*
2735 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2736 *
2737 * When zone parameter is non-NULL, spill just the single zone's pages.
2738 */
2739 void drain_all_pages(struct zone *zone)
2740 {
2741 __drain_all_pages(zone, false);
2742 }
2743
2744 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high)
2745 {
2746 int min_nr_free, max_nr_free;
2747
2748 /* Free as much as possible if batch freeing high-order pages. */
2749 if (unlikely(free_high))
2750 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX);
2751
2752 /* Check for PCP disabled or boot pageset */
2753 if (unlikely(high < batch))
2754 return 1;
2755
2756 /* Leave at least pcp->batch pages on the list */
2757 min_nr_free = batch;
2758 max_nr_free = high - batch;
2759
2760 /*
2761 * Scale the batch number up to the number of consecutively
2762 * freed pages to reduce zone lock contention.
2763 */
2764 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free);
2765
2766 return batch;
2767 }
2768
2769 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
2770 int batch, bool free_high)
2771 {
2772 int high, high_min, high_max;
2773
2774 high_min = READ_ONCE(pcp->high_min);
2775 high_max = READ_ONCE(pcp->high_max);
2776 high = pcp->high = clamp(pcp->high, high_min, high_max);
2777
2778 if (unlikely(!high))
2779 return 0;
2780
2781 if (unlikely(free_high)) {
2782 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2783 high_min);
2784 return 0;
2785 }
2786
2787 /*
2788 * If reclaim is active, limit the number of pages that can be
2789 * stored on pcp lists
2790 */
2791 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) {
2792 int free_count = max_t(int, pcp->free_count, batch);
2793
2794 pcp->high = max(high - free_count, high_min);
2795 return min(batch << 2, pcp->high);
2796 }
2797
2798 if (high_min == high_max)
2799 return high;
2800
2801 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) {
2802 int free_count = max_t(int, pcp->free_count, batch);
2803
2804 pcp->high = max(high - free_count, high_min);
2805 high = max(pcp->count, high_min);
2806 } else if (pcp->count >= high) {
2807 int need_high = pcp->free_count + batch;
2808
2809 /* pcp->high should be large enough to hold batch freed pages */
2810 if (pcp->high < need_high)
2811 pcp->high = clamp(need_high, high_min, high_max);
2812 }
2813
2814 return high;
2815 }
2816
2817 /*
2818 * Tune pcp alloc factor and adjust count & free_count. Free pages to bring the
2819 * pcp's watermarks below high.
2820 *
2821 * May return with the pcp unlocked if the pcp spinlock cannot be reacquired
2822 * during page freeing. Returns true if the pcp is still locked, false otherwise.
2823 */
2824 static bool free_frozen_page_commit(struct zone *zone,
2825 struct per_cpu_pages *pcp, struct page *page, int migratetype,
2826 unsigned int order, fpi_t fpi_flags, unsigned long *UP_flags)
2827 {
2828 int high, batch;
2829 int to_free, to_free_batched;
2830 int pindex;
2831 int cpu = smp_processor_id();
2832 int ret = true;
2833 bool free_high = false;
2834
2835 /*
2836 * On freeing, reduce the number of pages that are batch allocated.
2837 * See nr_pcp_alloc() where alloc_factor is increased for subsequent
2838 * allocations.
2839 */
2840 pcp->alloc_factor >>= 1;
2841 __count_vm_events(PGFREE, 1 << order);
2842 pindex = order_to_pindex(migratetype, order);
2843 list_add(&page->pcp_list, &pcp->lists[pindex]);
2844 pcp->count += 1 << order;
2845
2846 batch = READ_ONCE(pcp->batch);
2847 /*
2848 * As high-order pages other than THPs stored on PCP can contribute
2849 * to fragmentation, limit the number stored when PCP is heavily
2850 * freeing without allocation. The remainder after bulk freeing
2851 * stops will be drained from vmstat refresh context.
2852 */
2853 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) {
2854 free_high = (pcp->free_count >= (batch + pcp->high_min / 2) &&
2855 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) &&
2856 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) ||
2857 pcp->count >= batch));
2858 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER;
2859 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) {
2860 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER;
2861 }
2862 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX))
2863 pcp->free_count += (1 << order);
2864
2865 if (unlikely(fpi_flags & FPI_TRYLOCK)) {
2866 /*
2867 * Do not attempt to take a zone lock. Let pcp->count get
2868 * over high mark temporarily.
2869 */
2870 return true;
2871 }
2872
2873 high = nr_pcp_high(pcp, zone, batch, free_high);
2874 if (pcp->count < high)
2875 return true;
2876
2877 to_free = nr_pcp_free(pcp, batch, high, free_high);
2878 while (to_free > 0 && pcp->count > 0) {
2879 to_free_batched = min(to_free, batch);
2880 free_pcppages_bulk(zone, to_free_batched, pcp, pindex);
2881 to_free -= to_free_batched;
2882
2883 if (to_free == 0 || pcp->count == 0)
2884 break;
2885
2886 pcp_spin_unlock(pcp, *UP_flags);
2887
2888 pcp = pcp_spin_trylock(zone->per_cpu_pageset, *UP_flags);
2889 if (!pcp) {
2890 ret = false;
2891 break;
2892 }
2893
2894 /*
2895 * Check if this thread has been migrated to a different CPU.
2896 * If that is the case, give up and indicate that the pcp is
2897 * returned in an unlocked state.
2898 */
2899 if (smp_processor_id() != cpu) {
2900 pcp_spin_unlock(pcp, *UP_flags);
2901 ret = false;
2902 break;
2903 }
2904 }
2905
2906 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) &&
2907 zone_watermark_ok(zone, 0, high_wmark_pages(zone),
2908 ZONE_MOVABLE, 0)) {
2909 struct pglist_data *pgdat = zone->zone_pgdat;
2910 clear_bit(ZONE_BELOW_HIGH, &zone->flags);
2911
2912 /*
2913 * Assume that memory pressure on this node is gone and that the
2914 * node may be in a reclaimable state again. If a memory fallback
2915 * node exists, direct reclaim may not have been triggered, causing a
2916 * 'hopeless node' to stay in that state for a while. Let
2917 * kswapd work again by resetting kswapd_failures.
2918 */
2919 if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES &&
2920 next_memory_node(pgdat->node_id) < MAX_NUMNODES)
2921 atomic_set(&pgdat->kswapd_failures, 0);
2922 }
2923 return ret;
2924 }
2925
2926 /*
2927 * Free a pcp page
2928 */
2929 static void __free_frozen_pages(struct page *page, unsigned int order,
2930 fpi_t fpi_flags)
2931 {
2932 unsigned long UP_flags;
2933 struct per_cpu_pages *pcp;
2934 struct zone *zone;
2935 unsigned long pfn = page_to_pfn(page);
2936 int migratetype;
2937
2938 if (!pcp_allowed_order(order)) {
2939 __free_pages_ok(page, order, fpi_flags);
2940 return;
2941 }
2942
2943 if (!free_pages_prepare(page, order))
2944 return;
2945
2946 /*
2947 * We only track unmovable, reclaimable and movable on pcp lists.
2948 * Place ISOLATE pages on the isolated list because they are being
2949 * offlined but treat HIGHATOMIC and CMA as movable pages so we can
2950 * get those areas back if necessary. Otherwise, we may have to free
2951 * excessively into the page allocator
2952 */
2953 zone = page_zone(page);
2954 migratetype = get_pfnblock_migratetype(page, pfn);
2955 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
2956 if (unlikely(is_migrate_isolate(migratetype))) {
2957 free_one_page(zone, page, pfn, order, fpi_flags);
2958 return;
2959 }
2960 migratetype = MIGRATE_MOVABLE;
2961 }
2962
2963 if (unlikely((fpi_flags & FPI_TRYLOCK) && IS_ENABLED(CONFIG_PREEMPT_RT)
2964 && (in_nmi() || in_hardirq()))) {
2965 add_page_to_zone_llist(zone, page, order);
2966 return;
2967 }
2968 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
2969 if (pcp) {
2970 if (!free_frozen_page_commit(zone, pcp, page, migratetype,
2971 order, fpi_flags, &UP_flags))
2972 return;
2973 pcp_spin_unlock(pcp, UP_flags);
2974 } else {
2975 free_one_page(zone, page, pfn, order, fpi_flags);
2976 }
2977 }
2978
2979 void free_frozen_pages(struct page *page, unsigned int order)
2980 {
2981 __free_frozen_pages(page, order, FPI_NONE);
2982 }
2983
2984 /*
2985 * Free a batch of folios
2986 */
2987 void free_unref_folios(struct folio_batch *folios)
2988 {
2989 unsigned long UP_flags;
2990 struct per_cpu_pages *pcp = NULL;
2991 struct zone *locked_zone = NULL;
2992 int i, j;
2993
2994 /* Prepare folios for freeing */
2995 for (i = 0, j = 0; i < folios->nr; i++) {
2996 struct folio *folio = folios->folios[i];
2997 unsigned long pfn = folio_pfn(folio);
2998 unsigned int order = folio_order(folio);
2999
3000 if (!free_pages_prepare(&folio->page, order))
3001 continue;
3002 /*
3003 * Free orders not handled on the PCP directly to the
3004 * allocator.
3005 */
3006 if (!pcp_allowed_order(order)) {
3007 free_one_page(folio_zone(folio), &folio->page,
3008 pfn, order, FPI_NONE);
3009 continue;
3010 }
3011 folio->private = (void *)(unsigned long)order;
3012 if (j != i)
3013 folios->folios[j] = folio;
3014 j++;
3015 }
3016 folios->nr = j;
3017
3018 for (i = 0; i < folios->nr; i++) {
3019 struct folio *folio = folios->folios[i];
3020 struct zone *zone = folio_zone(folio);
3021 unsigned long pfn = folio_pfn(folio);
3022 unsigned int order = (unsigned long)folio->private;
3023 int migratetype;
3024
3025 folio->private = NULL;
3026 migratetype = get_pfnblock_migratetype(&folio->page, pfn);
3027
3028 /* Different zone requires a different pcp lock */
3029 if (zone != locked_zone ||
3030 is_migrate_isolate(migratetype)) {
3031 if (pcp) {
3032 pcp_spin_unlock(pcp, UP_flags);
3033 locked_zone = NULL;
3034 pcp = NULL;
3035 }
3036
3037 /*
3038 * Free isolated pages directly to the
3039 * allocator, see comment in free_frozen_pages.
3040 */
3041 if (is_migrate_isolate(migratetype)) {
3042 free_one_page(zone, &folio->page, pfn,
3043 order, FPI_NONE);
3044 continue;
3045 }
3046
3047 /*
3048 * trylock is necessary as folios may be getting freed
3049 * from IRQ or SoftIRQ context after an IO completion.
3050 */
3051 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
3052 if (unlikely(!pcp)) {
3053 free_one_page(zone, &folio->page, pfn,
3054 order, FPI_NONE);
3055 continue;
3056 }
3057 locked_zone = zone;
3058 }
3059
3060 /*
3061 * Non-isolated types over MIGRATE_PCPTYPES get added
3062 * to the MIGRATE_MOVABLE pcp list.
3063 */
3064 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
3065 migratetype = MIGRATE_MOVABLE;
3066
3067 trace_mm_page_free_batched(&folio->page);
3068 if (!free_frozen_page_commit(zone, pcp, &folio->page,
3069 migratetype, order, FPI_NONE, &UP_flags)) {
3070 pcp = NULL;
3071 locked_zone = NULL;
3072 }
3073 }
3074
3075 if (pcp)
3076 pcp_spin_unlock(pcp, UP_flags);
3077 folio_batch_reinit(folios);
3078 }
3079
3080 /*
3081 * split_page takes a non-compound higher-order page, and splits it into
3082 * n (1 << order) sub-pages: page[0..n-1].
3083 * Each sub-page must be freed individually.
3084 *
3085 * Note: this is probably too low level an operation for use in drivers.
3086 * Please consult with lkml before using this in your driver.
3087 */
3088 void split_page(struct page *page, unsigned int order)
3089 {
3090 int i;
3091
3092 VM_BUG_ON_PAGE(PageCompound(page), page);
3093 VM_BUG_ON_PAGE(!page_count(page), page);
3094
3095 for (i = 1; i < (1 << order); i++)
3096 set_page_refcounted(page + i);
3097 split_page_owner(page, order, 0);
3098 pgalloc_tag_split(page_folio(page), order, 0);
3099 split_page_memcg(page, order);
3100 }
3101 EXPORT_SYMBOL_GPL(split_page);
3102
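/*
 * Usage sketch (annotation, not part of the original source): hand out an
 * order-2 allocation as four independently refcounted order-0 pages. The
 * allocation must not use __GFP_COMP, as split_page() rejects compound
 * pages:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		split_page(page, 2);
 *		__free_page(page + 3);
 *	}
 *
 * The remaining sub-pages page, page + 1 and page + 2 must each be freed
 * with __free_page() as well when they are no longer needed.
 */
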
3103 int __isolate_free_page(struct page *page, unsigned int order)
3104 {
3105 struct zone *zone = page_zone(page);
3106 int mt = get_pageblock_migratetype(page);
3107
3108 if (!is_migrate_isolate(mt)) {
3109 unsigned long watermark;
3110 /*
3111 * Obey watermarks as if the page was being allocated. We can
3112 * emulate a high-order watermark check with a raised order-0
3113 * watermark, because we already know our high-order page
3114 * exists.
3115 */
3116 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3117 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3118 return 0;
3119 }
3120
3121 del_page_from_free_list(page, zone, order, mt);
3122
3123 /*
3124 * Set the pageblock's migratetype if the isolated page is at least
3125 * half of a pageblock
3126 */
3127 if (order >= pageblock_order - 1) {
3128 struct page *endpage = page + (1 << order) - 1;
3129 for (; page < endpage; page += pageblock_nr_pages) {
3130 int mt = get_pageblock_migratetype(page);
3131 /*
3132 * Only change normal pageblocks (i.e., they can merge
3133 * with others)
3134 */
3135 if (migratetype_is_mergeable(mt))
3136 move_freepages_block(zone, page, mt,
3137 MIGRATE_MOVABLE);
3138 }
3139 }
3140
3141 return 1UL << order;
3142 }
3143
3144 /**
3145 * __putback_isolated_page - Return a now-isolated page back where we got it
3146 * @page: Page that was isolated
3147 * @order: Order of the isolated page
3148 * @mt: The page's pageblock's migratetype
3149 *
3150 * This function is meant to return a page pulled from the free lists via
3151 * __isolate_free_page back to the free lists they were pulled from.
3152 */
3153 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3154 {
3155 struct zone *zone = page_zone(page);
3156
3157 /* zone lock should be held when this function is called */
3158 lockdep_assert_held(&zone->lock);
3159
3160 /* Return isolated page to tail of freelist. */
3161 __free_one_page(page, page_to_pfn(page), zone, order, mt,
3162 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3163 }
3164
3165 /*
3166 * Update NUMA hit/miss statistics
3167 */
3168 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3169 long nr_account)
3170 {
3171 #ifdef CONFIG_NUMA
3172 enum numa_stat_item local_stat = NUMA_LOCAL;
3173
3174 /* skip numa counters update if numa stats is disabled */
3175 if (!static_branch_likely(&vm_numa_stat_key))
3176 return;
3177
3178 if (zone_to_nid(z) != numa_node_id())
3179 local_stat = NUMA_OTHER;
3180
3181 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3182 __count_numa_events(z, NUMA_HIT, nr_account);
3183 else {
3184 __count_numa_events(z, NUMA_MISS, nr_account);
3185 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3186 }
3187 __count_numa_events(z, local_stat, nr_account);
3188 #endif
3189 }
3190
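/*
 * Example (annotation, not part of the original source): a task running
 * on node 0 prefers a node-0 zone but is satisfied from node 1. The
 * node-1 zone then counts NUMA_MISS and NUMA_OTHER, while the preferred
 * node-0 zone counts NUMA_FOREIGN. Had node 0 satisfied the request, its
 * zone would count NUMA_HIT and NUMA_LOCAL instead.
 */
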
3191 static __always_inline
3192 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
3193 unsigned int order, unsigned int alloc_flags,
3194 int migratetype)
3195 {
3196 struct page *page;
3197 unsigned long flags;
3198
3199 do {
3200 page = NULL;
3201 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
3202 if (!spin_trylock_irqsave(&zone->lock, flags))
3203 return NULL;
3204 } else {
3205 spin_lock_irqsave(&zone->lock, flags);
3206 }
3207 if (alloc_flags & ALLOC_HIGHATOMIC)
3208 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3209 if (!page) {
3210 enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
3211
3212 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
3213
3214 /*
3215 * If the allocation fails, allow OOM handling and
3216 * order-0 (atomic) allocs access to HIGHATOMIC
3217 * reserves as failing now is worse than failing a
3218 * high-order atomic allocation in the future.
3219 */
3220 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK)))
3221 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3222
3223 if (!page) {
3224 spin_unlock_irqrestore(&zone->lock, flags);
3225 return NULL;
3226 }
3227 }
3228 spin_unlock_irqrestore(&zone->lock, flags);
3229 } while (check_new_pages(page, order));
3230
3231 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3232 zone_statistics(preferred_zone, zone, 1);
3233
3234 return page;
3235 }
3236
3237 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order)
3238 {
3239 int high, base_batch, batch, max_nr_alloc;
3240 int high_max, high_min;
3241
3242 base_batch = READ_ONCE(pcp->batch);
3243 high_min = READ_ONCE(pcp->high_min);
3244 high_max = READ_ONCE(pcp->high_max);
3245 high = pcp->high = clamp(pcp->high, high_min, high_max);
3246
3247 /* Check for PCP disabled or boot pageset */
3248 if (unlikely(high < base_batch))
3249 return 1;
3250
3251 if (order)
3252 batch = base_batch;
3253 else
3254 batch = (base_batch << pcp->alloc_factor);
3255
3256 /*
3257 * If we had a larger pcp->high, we could avoid allocating from
3258 * the zone.
3259 */
3260 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags))
3261 high = pcp->high = min(high + batch, high_max);
3262
3263 if (!order) {
3264 max_nr_alloc = max(high - pcp->count - base_batch, base_batch);
3265 /*
3266 * Double the number of pages allocated each time there is
3267 * subsequent allocation of order-0 pages without any freeing.
3268 */
3269 if (batch <= max_nr_alloc &&
3270 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX)
3271 pcp->alloc_factor++;
3272 batch = min(batch, max_nr_alloc);
3273 }
3274
3275 /*
3276 * Scale batch relative to order if batch implies free pages
3277 * can be stored on the PCP. Batch can be 1 for small zones or
3278 * for boot pagesets which should never store free pages as
3279 * the pages may belong to arbitrary zones.
3280 */
3281 if (batch > 1)
3282 batch = max(batch >> order, 2);
3283
3284 return batch;
3285 }
3286
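/*
 * Example (annotation, not part of the original source): with base_batch
 * = 63, a burst of order-0 allocations without intervening frees refills
 * 63, then 126, then 252, ... pages at a time as alloc_factor grows, up
 * to CONFIG_PCP_BATCH_SCALE_MAX doublings and never more than
 * max_nr_alloc. An order-3 request instead refills in units of order-3
 * pages, scaled down to max(63 >> 3, 2) = 7 units.
 */
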
3287 /* Remove page from the per-cpu list, caller must protect the list */
3288 static inline
3289 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3290 int migratetype,
3291 unsigned int alloc_flags,
3292 struct per_cpu_pages *pcp,
3293 struct list_head *list)
3294 {
3295 struct page *page;
3296
3297 do {
3298 if (list_empty(list)) {
3299 int batch = nr_pcp_alloc(pcp, zone, order);
3300 int alloced;
3301
3302 alloced = rmqueue_bulk(zone, order,
3303 batch, list,
3304 migratetype, alloc_flags);
3305
3306 pcp->count += alloced << order;
3307 if (unlikely(list_empty(list)))
3308 return NULL;
3309 }
3310
3311 page = list_first_entry(list, struct page, pcp_list);
3312 list_del(&page->pcp_list);
3313 pcp->count -= 1 << order;
3314 } while (check_new_pages(page, order));
3315
3316 return page;
3317 }
3318
3319 /* Lock and remove page from the per-cpu list */
3320 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3321 struct zone *zone, unsigned int order,
3322 int migratetype, unsigned int alloc_flags)
3323 {
3324 struct per_cpu_pages *pcp;
3325 struct list_head *list;
3326 struct page *page;
3327 unsigned long UP_flags;
3328
3329 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
3330 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
3331 if (!pcp)
3332 return NULL;
3333
3334 /*
3335 * On allocation, reduce the number of pages that are batch freed.
3336 * See free_frozen_page_commit() where free_count is increased for
3337 * subsequent frees.
3338 */
3339 pcp->free_count >>= 1;
3340 list = &pcp->lists[order_to_pindex(migratetype, order)];
3341 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3342 pcp_spin_unlock(pcp, UP_flags);
3343 if (page) {
3344 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3345 zone_statistics(preferred_zone, zone, 1);
3346 }
3347 return page;
3348 }
3349
3350 /*
3351 * Allocate a page from the given zone.
3352 * Use pcplists for THP or "cheap" high-order allocations.
3353 */
3354
3355 /*
3356 * Do not instrument rmqueue() with KMSAN. This function may call
3357 * __msan_poison_alloca() through a call to set_pfnblock_migratetype().
3358 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
3359 * may call rmqueue() again, which will result in a deadlock.
3360 */
3361 __no_sanitize_memory
3362 static inline
3363 struct page *rmqueue(struct zone *preferred_zone,
3364 struct zone *zone, unsigned int order,
3365 gfp_t gfp_flags, unsigned int alloc_flags,
3366 int migratetype)
3367 {
3368 struct page *page;
3369
3370 if (likely(pcp_allowed_order(order))) {
3371 page = rmqueue_pcplist(preferred_zone, zone, order,
3372 migratetype, alloc_flags);
3373 if (likely(page))
3374 goto out;
3375 }
3376
3377 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
3378 migratetype);
3379
3380 out:
3381 /* Separate test+clear to avoid unnecessary atomics */
3382 if ((alloc_flags & ALLOC_KSWAPD) &&
3383 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
3384 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3385 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3386 }
3387
3388 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3389 return page;
3390 }
3391
3392 /*
3393 * Reserve the pageblock(s) surrounding an allocation request for
3394 * exclusive use of high-order atomic allocations if there are no
3395 * empty page blocks that contain a page with a suitable order
3396 */
3397 static void reserve_highatomic_pageblock(struct page *page, int order,
3398 struct zone *zone)
3399 {
3400 int mt;
3401 unsigned long max_managed, flags;
3402
3403 /*
3404 * The number reserved: the minimum is 1 pageblock, the maximum is
3405 * roughly 1% of a zone. But if 1% of a zone falls below a
3406 * pageblock size, then don't reserve any pageblocks.
3407 * The check is race-prone but harmless.
3408 */
3409 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
3410 return;
3411 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
3412 if (zone->nr_reserved_highatomic >= max_managed)
3413 return;
3414
3415 spin_lock_irqsave(&zone->lock, flags);
3416
3417 /* Recheck the nr_reserved_highatomic limit under the lock */
3418 if (zone->nr_reserved_highatomic >= max_managed)
3419 goto out_unlock;
3420
3421 /* Yoink! */
3422 mt = get_pageblock_migratetype(page);
3423 /* Only reserve normal pageblocks (i.e., they can merge with others) */
3424 if (!migratetype_is_mergeable(mt))
3425 goto out_unlock;
3426
3427 if (order < pageblock_order) {
3428 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
3429 goto out_unlock;
3430 zone->nr_reserved_highatomic += pageblock_nr_pages;
3431 } else {
3432 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
3433 zone->nr_reserved_highatomic += 1 << order;
3434 }
3435
3436 out_unlock:
3437 spin_unlock_irqrestore(&zone->lock, flags);
3438 }
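/*
 * Editorial worked example (hypothetical x86-64-style numbers): for a
 * zone with 1,048,576 managed pages (4GB of 4K pages) and
 * pageblock_nr_pages == 512, 1% is 10,485 pages, which ALIGN() rounds
 * up to max_managed == 10,752 pages, i.e. at most 21 reserved
 * pageblocks before further reservation requests are ignored.
 */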
3439
3440 /*
3441 * Used when an allocation is about to fail under memory pressure. This
3442 * potentially hurts the reliability of high-order allocations when under
3443 * intense memory pressure but failed atomic allocations should be easier
3444 * to recover from than an OOM.
3445 *
3446 * If @force is true, try to unreserve pageblocks even when the
3447 * highatomic reserve is down to its last pageblock.
3448 */
3449 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
3450 bool force)
3451 {
3452 struct zonelist *zonelist = ac->zonelist;
3453 unsigned long flags;
3454 struct zoneref *z;
3455 struct zone *zone;
3456 struct page *page;
3457 int order;
3458 int ret;
3459
3460 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
3461 ac->nodemask) {
3462 /*
3463 * Preserve at least one pageblock unless memory pressure
3464 * is really high.
3465 */
3466 if (!force && zone->nr_reserved_highatomic <=
3467 pageblock_nr_pages)
3468 continue;
3469
3470 spin_lock_irqsave(&zone->lock, flags);
3471 for (order = 0; order < NR_PAGE_ORDERS; order++) {
3472 struct free_area *area = &(zone->free_area[order]);
3473 unsigned long size;
3474
3475 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
3476 if (!page)
3477 continue;
3478
3479 size = max(pageblock_nr_pages, 1UL << order);
3480 /*
3481 * It should never happen but changes to
3482 * locking could inadvertently allow a per-cpu
3483 * drain to add pages to MIGRATE_HIGHATOMIC
3484 * while unreserving so be safe and watch for
3485 * underflows.
3486 */
3487 if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic))
3488 size = zone->nr_reserved_highatomic;
3489 zone->nr_reserved_highatomic -= size;
3490
3491 /*
3492 * Convert to ac->migratetype and avoid the normal
3493 * pageblock stealing heuristics. Minimally, the caller
3494 * is doing the work and needs the pages. More
3495 * importantly, if the block was always converted to
3496 * MIGRATE_UNMOVABLE or another type then the number
3497 * of pageblocks that cannot be completely freed
3498 * may increase.
3499 */
3500 if (order < pageblock_order)
3501 ret = move_freepages_block(zone, page,
3502 MIGRATE_HIGHATOMIC,
3503 ac->migratetype);
3504 else {
3505 move_to_free_list(page, zone, order,
3506 MIGRATE_HIGHATOMIC,
3507 ac->migratetype);
3508 change_pageblock_range(page, order,
3509 ac->migratetype);
3510 ret = 1;
3511 }
3512 /*
3513 * Reserving the block(s) already succeeded,
3514 * so this should not fail on zone boundaries.
3515 */
3516 WARN_ON_ONCE(ret == -1);
3517 if (ret > 0) {
3518 spin_unlock_irqrestore(&zone->lock, flags);
3519 return ret;
3520 }
3521 }
3522 spin_unlock_irqrestore(&zone->lock, flags);
3523 }
3524
3525 return false;
3526 }
3527
3528 static inline long __zone_watermark_unusable_free(struct zone *z,
3529 unsigned int order, unsigned int alloc_flags)
3530 {
3531 long unusable_free = (1 << order) - 1;
3532
3533 /*
3534 * If the caller does not have rights to reserves below the min
3535 * watermark then subtract the free pages reserved for highatomic.
3536 */
3537 if (likely(!(alloc_flags & ALLOC_RESERVES)))
3538 unusable_free += READ_ONCE(z->nr_free_highatomic);
3539
3540 #ifdef CONFIG_CMA
3541 /* If allocation can't use CMA areas don't use free CMA pages */
3542 if (!(alloc_flags & ALLOC_CMA))
3543 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3544 #endif
3545
3546 return unusable_free;
3547 }
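/*
 * Editorial worked example: for an order-2 request without
 * ALLOC_RESERVES or ALLOC_CMA, the result is (1 << 2) - 1 == 3 pages of
 * rounding slack, plus nr_free_highatomic, plus NR_FREE_CMA_PAGES: all
 * free pages this request cannot actually use, which must be discounted
 * from NR_FREE_PAGES before comparing against a watermark.
 */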
3548
3549 /*
3550 * Return true if free base pages are above 'mark'. For high-order checks it
3551 * will return true if the order-0 watermark is reached and there is at least
3552 * one free page of a suitable size. Checking now avoids taking the zone lock
3553 * to check in the allocation paths if no pages are free.
3554 */
3555 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3556 int highest_zoneidx, unsigned int alloc_flags,
3557 long free_pages)
3558 {
3559 long min = mark;
3560 int o;
3561
3562 /* free_pages may go negative - that's OK */
3563 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3564
3565 if (unlikely(alloc_flags & ALLOC_RESERVES)) {
3566 /*
3567 * __GFP_HIGH allows access to 50% of the min reserve as well
3568 * as OOM.
3569 */
3570 if (alloc_flags & ALLOC_MIN_RESERVE) {
3571 min -= min / 2;
3572
3573 /*
3574 * Non-blocking allocations (e.g. GFP_ATOMIC) can
3575 * access more reserves than just __GFP_HIGH. Other
3576 * non-blocking allocations requests such as GFP_NOWAIT
3577 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get
3578 * access to the min reserve.
3579 */
3580 if (alloc_flags & ALLOC_NON_BLOCK)
3581 min -= min / 4;
3582 }
3583
3584 /*
3585 * OOM victims can try even harder than the normal reserve
3586 * users on the grounds that it's definitely going to be in
3587 * the exit path shortly and free memory. Any allocation it
3588 * makes during the free path will be small and short-lived.
3589 */
3590 if (alloc_flags & ALLOC_OOM)
3591 min -= min / 2;
3592 }
3593
3594 /*
3595 * Check watermarks for an order-0 allocation request. If these
3596 * are not met, then a high-order request also cannot go ahead
3597 * even if a suitable page happened to be free.
3598 */
3599 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3600 return false;
3601
3602 /* If this is an order-0 request then the watermark is fine */
3603 if (!order)
3604 return true;
3605
3606 /* For a high-order request, check at least one suitable page is free */
3607 for (o = order; o < NR_PAGE_ORDERS; o++) {
3608 struct free_area *area = &z->free_area[o];
3609 int mt;
3610
3611 if (!area->nr_free)
3612 continue;
3613
3614 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3615 if (!free_area_empty(area, mt))
3616 return true;
3617 }
3618
3619 #ifdef CONFIG_CMA
3620 if ((alloc_flags & ALLOC_CMA) &&
3621 !free_area_empty(area, MIGRATE_CMA)) {
3622 return true;
3623 }
3624 #endif
3625 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
3626 !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
3627 return true;
3628 }
3629 }
3630 return false;
3631 }
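/*
 * Editorial worked example (hypothetical mark of 1024 pages): with
 * ALLOC_MIN_RESERVE the effective min drops to 512; adding
 * ALLOC_NON_BLOCK drops it further to 384 (3/8 of the mark); an OOM
 * victim with ALLOC_OOM halves whatever remains. The lowmem_reserve
 * for the requested zone index is then added back on top before the
 * comparison against free_pages.
 */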
3632
3633 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3634 int highest_zoneidx, unsigned int alloc_flags)
3635 {
3636 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3637 zone_page_state(z, NR_FREE_PAGES));
3638 }
3639
3640 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3641 unsigned long mark, int highest_zoneidx,
3642 unsigned int alloc_flags, gfp_t gfp_mask)
3643 {
3644 long free_pages;
3645
3646 free_pages = zone_page_state(z, NR_FREE_PAGES);
3647
3648 /*
3649 * Fast check for order-0 only. If this fails then the reserves
3650 * need to be calculated.
3651 */
3652 if (!order) {
3653 long usable_free;
3654 long reserved;
3655
3656 usable_free = free_pages;
3657 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
3658
3659 /* reserved may overestimate the high-atomic reserves. */
3660 usable_free -= min(usable_free, reserved);
3661 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
3662 return true;
3663 }
3664
3665 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3666 free_pages))
3667 return true;
3668
3669 /*
3670 * Ignore watermark boosting for __GFP_HIGH order-0 allocations
3671 * when checking the min watermark. The min watermark is the
3672 * point where boosting is ignored so that kswapd is woken up
3673 * when below the low watermark.
3674 */
3675 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost
3676 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3677 mark = z->_watermark[WMARK_MIN];
3678 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3679 alloc_flags, free_pages);
3680 }
3681
3682 return false;
3683 }
3684
3685 #ifdef CONFIG_NUMA
3686 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
3687
3688 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3689 {
3690 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3691 node_reclaim_distance;
3692 }
3693 #else /* CONFIG_NUMA */
3694 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3695 {
3696 return true;
3697 }
3698 #endif /* CONFIG_NUMA */
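/*
 * Editorial worked example (hypothetical SLIT distances): with the
 * default RECLAIM_DISTANCE of 30, a remote node at distance 21 (e.g. a
 * second socket) still allows node reclaim, while a node at distance 40
 * (e.g. across an extra interconnect hop) is skipped in favour of
 * simply falling back to other nodes.
 */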
3699
3700 /*
3701 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3702 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3703 * premature use of a lower zone may cause lowmem pressure problems that
3704 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3705 * probably too small. It only makes sense to spread allocations to avoid
3706 * fragmentation between the Normal and DMA32 zones.
3707 */
3708 static inline unsigned int
3709 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3710 {
3711 unsigned int alloc_flags;
3712
3713 /*
3714 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3715 * to save a branch.
3716 */
3717 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3718
3719 if (defrag_mode) {
3720 alloc_flags |= ALLOC_NOFRAGMENT;
3721 return alloc_flags;
3722 }
3723
3724 #ifdef CONFIG_ZONE_DMA32
3725 if (!zone)
3726 return alloc_flags;
3727
3728 if (zone_idx(zone) != ZONE_NORMAL)
3729 return alloc_flags;
3730
3731 /*
3732 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3733 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3734 * on UMA that if Normal is populated then so is DMA32.
3735 */
3736 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3737 if (nr_online_nodes > 1 && !populated_zone(--zone))
3738 return alloc_flags;
3739
3740 alloc_flags |= ALLOC_NOFRAGMENT;
3741 #endif /* CONFIG_ZONE_DMA32 */
3742 return alloc_flags;
3743 }
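/*
 * Editorial note on the "--zone" trick above: it relies on the
 * BUILD_BUG_ON() guarantee that ZONE_DMA32 is numerically one below
 * ZONE_NORMAL, so decrementing a pointer into
 * zone->zone_pgdat->node_zones[] steps from the Normal zone to the
 * DMA32 zone of the same node.
 */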
3744
3745 /* Must be called after current_gfp_context() which can change gfp_mask */
3746 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3747 unsigned int alloc_flags)
3748 {
3749 #ifdef CONFIG_CMA
3750 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3751 alloc_flags |= ALLOC_CMA;
3752 #endif
3753 return alloc_flags;
3754 }
3755
3756 /*
3757 * get_page_from_freelist goes through the zonelist trying to allocate
3758 * a page.
3759 */
3760 static struct page *
3761 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3762 const struct alloc_context *ac)
3763 {
3764 struct zoneref *z;
3765 struct zone *zone;
3766 struct pglist_data *last_pgdat = NULL;
3767 bool last_pgdat_dirty_ok = false;
3768 bool no_fallback;
3769 bool skip_kswapd_nodes = nr_online_nodes > 1;
3770 bool skipped_kswapd_nodes = false;
3771
3772 retry:
3773 /*
3774 * Scan zonelist, looking for a zone with enough free.
3775 * See also cpuset_current_node_allowed() comment in kernel/cgroup/cpuset.c.
3776 */
3777 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3778 z = ac->preferred_zoneref;
3779 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3780 ac->nodemask) {
3781 struct page *page;
3782 unsigned long mark;
3783
3784 if (cpusets_enabled() &&
3785 (alloc_flags & ALLOC_CPUSET) &&
3786 !__cpuset_zone_allowed(zone, gfp_mask))
3787 continue;
3788 /*
3789 * When allocating a page cache page for writing, we
3790 * want to get it from a node that is within its dirty
3791 * limit, such that no single node holds more than its
3792 * proportional share of globally allowed dirty pages.
3793 * The dirty limits take into account the node's
3794 * lowmem reserves and high watermark so that kswapd
3795 * should be able to balance it without having to
3796 * write pages from its LRU list.
3797 *
3798 * XXX: For now, allow allocations to potentially
3799 * exceed the per-node dirty limit in the slowpath
3800 * (spread_dirty_pages unset) before going into reclaim,
3801 * which is important when on a NUMA setup the allowed
3802 * nodes are together not big enough to reach the
3803 * global limit. The proper fix for these situations
3804 * will require awareness of nodes in the
3805 * dirty-throttling and the flusher threads.
3806 */
3807 if (ac->spread_dirty_pages) {
3808 if (last_pgdat != zone->zone_pgdat) {
3809 last_pgdat = zone->zone_pgdat;
3810 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
3811 }
3812
3813 if (!last_pgdat_dirty_ok)
3814 continue;
3815 }
3816
3817 if (no_fallback && !defrag_mode && nr_online_nodes > 1 &&
3818 zone != zonelist_zone(ac->preferred_zoneref)) {
3819 int local_nid;
3820
3821 /*
3822 * If moving to a remote node, retry but allow
3823 * fragmenting fallbacks. Locality is more important
3824 * than fragmentation avoidance.
3825 */
3826 local_nid = zonelist_node_idx(ac->preferred_zoneref);
3827 if (zone_to_nid(zone) != local_nid) {
3828 alloc_flags &= ~ALLOC_NOFRAGMENT;
3829 goto retry;
3830 }
3831 }
3832
3833 /*
3834 * If kswapd is already active on a node, keep looking
3835 * for other nodes that might be idle. This can happen
3836 * if another process has NUMA bindings and is causing
3837 * kswapd wakeups on only some nodes. Avoid accidental
3838 * "node_reclaim_mode"-like behavior in this case.
3839 */
3840 if (skip_kswapd_nodes &&
3841 !waitqueue_active(&zone->zone_pgdat->kswapd_wait)) {
3842 skipped_kswapd_nodes = true;
3843 continue;
3844 }
3845
3846 cond_accept_memory(zone, order, alloc_flags);
3847
3848 /*
3849 * Detect whether the number of free pages is below the high
3850 * watermark. If so, we will decrease pcp->high and free
3851 * PCP pages in the free path to reduce the possibility of
3852 * premature page reclaim. Detection is done here to
3853 * avoid doing it in the hotter free path.
3854 */
3855 if (test_bit(ZONE_BELOW_HIGH, &zone->flags))
3856 goto check_alloc_wmark;
3857
3858 mark = high_wmark_pages(zone);
3859 if (zone_watermark_fast(zone, order, mark,
3860 ac->highest_zoneidx, alloc_flags,
3861 gfp_mask))
3862 goto try_this_zone;
3863 else
3864 set_bit(ZONE_BELOW_HIGH, &zone->flags);
3865
3866 check_alloc_wmark:
3867 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3868 if (!zone_watermark_fast(zone, order, mark,
3869 ac->highest_zoneidx, alloc_flags,
3870 gfp_mask)) {
3871 int ret;
3872
3873 if (cond_accept_memory(zone, order, alloc_flags))
3874 goto try_this_zone;
3875
3876 /*
3877 * Watermark failed for this zone, but see if we can
3878 * grow this zone if it contains deferred pages.
3879 */
3880 if (deferred_pages_enabled()) {
3881 if (_deferred_grow_zone(zone, order))
3882 goto try_this_zone;
3883 }
3884 /* Checked here to keep the fast path fast */
3885 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3886 if (alloc_flags & ALLOC_NO_WATERMARKS)
3887 goto try_this_zone;
3888
3889 if (!node_reclaim_enabled() ||
3890 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
3891 continue;
3892
3893 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3894 switch (ret) {
3895 case NODE_RECLAIM_NOSCAN:
3896 /* did not scan */
3897 continue;
3898 case NODE_RECLAIM_FULL:
3899 /* scanned but unreclaimable */
3900 continue;
3901 default:
3902 /* did we reclaim enough */
3903 if (zone_watermark_ok(zone, order, mark,
3904 ac->highest_zoneidx, alloc_flags))
3905 goto try_this_zone;
3906
3907 continue;
3908 }
3909 }
3910
3911 try_this_zone:
3912 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
3913 gfp_mask, alloc_flags, ac->migratetype);
3914 if (page) {
3915 prep_new_page(page, order, gfp_mask, alloc_flags);
3916
3917 /*
3918 * If this is a high-order atomic allocation then check
3919 * if the pageblock should be reserved for the future
3920 */
3921 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
3922 reserve_highatomic_pageblock(page, order, zone);
3923
3924 return page;
3925 } else {
3926 if (cond_accept_memory(zone, order, alloc_flags))
3927 goto try_this_zone;
3928
3929 /* Try again if zone has deferred pages */
3930 if (deferred_pages_enabled()) {
3931 if (_deferred_grow_zone(zone, order))
3932 goto try_this_zone;
3933 }
3934 }
3935 }
3936
3937 /*
3938 * If we skipped over nodes with active kswapds and found no
3939 * idle nodes, retry and place anywhere the watermarks permit.
3940 */
3941 if (skip_kswapd_nodes && skipped_kswapd_nodes) {
3942 skip_kswapd_nodes = false;
3943 goto retry;
3944 }
3945
3946 /*
3947 * It's possible on a UMA machine to get through all zones that are
3948 * fragmented. If avoiding fragmentation, reset and try again.
3949 */
3950 if (no_fallback && !defrag_mode) {
3951 alloc_flags &= ~ALLOC_NOFRAGMENT;
3952 goto retry;
3953 }
3954
3955 return NULL;
3956 }
3957
3958 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3959 {
3960 unsigned int filter = SHOW_MEM_FILTER_NODES;
3961
3962 /*
3963 * This documents exceptions given to allocations in certain
3964 * contexts that are allowed to allocate outside current's set
3965 * of allowed nodes.
3966 */
3967 if (!(gfp_mask & __GFP_NOMEMALLOC))
3968 if (tsk_is_oom_victim(current) ||
3969 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3970 filter &= ~SHOW_MEM_FILTER_NODES;
3971 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3972 filter &= ~SHOW_MEM_FILTER_NODES;
3973
3974 __show_mem(filter, nodemask, gfp_zone(gfp_mask));
3975 mem_cgroup_show_protected_memory(NULL);
3976 }
3977
3978 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3979 {
3980 struct va_format vaf;
3981 va_list args;
3982 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
3983
3984 if ((gfp_mask & __GFP_NOWARN) ||
3985 !__ratelimit(&nopage_rs) ||
3986 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
3987 return;
3988
3989 va_start(args, fmt);
3990 vaf.fmt = fmt;
3991 vaf.va = &args;
3992 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
3993 current->comm, &vaf, gfp_mask, &gfp_mask,
3994 nodemask_pr_args(nodemask));
3995 va_end(args);
3996
3997 cpuset_print_current_mems_allowed();
3998 pr_cont("\n");
3999 dump_stack();
4000 warn_alloc_show_mem(gfp_mask, nodemask);
4001 }
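/*
 * Editorial note: DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1) above
 * allows at most one message per 10 seconds, so a storm of failing
 * allocations produces a single warning plus dump_stack()/show_mem()
 * rather than flooding the log. A typical (illustrative) call is the
 * one in __alloc_pages_slowpath() below:
 *
 *	warn_alloc(gfp_mask, ac->nodemask,
 *		   "page allocation failure: order:%u", order);
 */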
4002
4003 static inline struct page *
4004 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4005 unsigned int alloc_flags,
4006 const struct alloc_context *ac)
4007 {
4008 struct page *page;
4009
4010 page = get_page_from_freelist(gfp_mask, order,
4011 alloc_flags|ALLOC_CPUSET, ac);
4012 /*
4013 * Fall back to ignoring the cpuset restriction if our nodes
4014 * are depleted.
4015 */
4016 if (!page)
4017 page = get_page_from_freelist(gfp_mask, order,
4018 alloc_flags, ac);
4019 return page;
4020 }
4021
4022 static inline struct page *
4023 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4024 const struct alloc_context *ac, unsigned long *did_some_progress)
4025 {
4026 struct oom_control oc = {
4027 .zonelist = ac->zonelist,
4028 .nodemask = ac->nodemask,
4029 .memcg = NULL,
4030 .gfp_mask = gfp_mask,
4031 .order = order,
4032 };
4033 struct page *page;
4034
4035 *did_some_progress = 0;
4036
4037 /*
4038 * Acquire the oom lock. If that fails, somebody else is
4039 * making progress for us.
4040 */
4041 if (!mutex_trylock(&oom_lock)) {
4042 *did_some_progress = 1;
4043 schedule_timeout_uninterruptible(1);
4044 return NULL;
4045 }
4046
4047 /*
4048 * Go through the zonelist yet one more time, keep very high watermark
4049 * here, this is only to catch a parallel oom killing, we must fail if
4050 * we're still under heavy pressure. But make sure that this reclaim
4051 * attempt must not depend on a __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
4052 * allocation, which will never fail while oom_lock is already held.
4053 */
4054 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4055 ~__GFP_DIRECT_RECLAIM, order,
4056 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4057 if (page)
4058 goto out;
4059
4060 /* Coredumps can quickly deplete all memory reserves */
4061 if (current->flags & PF_DUMPCORE)
4062 goto out;
4063 /* The OOM killer will not help higher order allocs */
4064 if (order > PAGE_ALLOC_COSTLY_ORDER)
4065 goto out;
4066 /*
4067 * We have already exhausted all our reclaim opportunities without any
4068 * success so it is time to admit defeat. We will skip the OOM killer
4069 * because it is very likely that the caller has a more reasonable
4070 * fallback than shooting a random task.
4071 *
4072 * The OOM killer may not free memory on a specific node.
4073 */
4074 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4075 goto out;
4076 /* The OOM killer does not needlessly kill tasks for lowmem */
4077 if (ac->highest_zoneidx < ZONE_NORMAL)
4078 goto out;
4079 if (pm_suspended_storage())
4080 goto out;
4081 /*
4082 * XXX: GFP_NOFS allocations should rather fail than rely on
4083 * other requests to make forward progress.
4084 * We are in an unfortunate situation where out_of_memory cannot
4085 * do much for this context but let's try it to at least get
4086 * access to memory reserves if the current task is killed (see
4087 * out_of_memory). Once filesystems are ready to handle allocation
4088 * failures more gracefully we should just bail out here.
4089 */
4090
4091 /* Exhausted what can be done so it's blame time */
4092 if (out_of_memory(&oc) ||
4093 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
4094 *did_some_progress = 1;
4095
4096 /*
4097 * Help non-failing allocations by giving them access to memory
4098 * reserves
4099 */
4100 if (gfp_mask & __GFP_NOFAIL)
4101 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4102 ALLOC_NO_WATERMARKS, ac);
4103 }
4104 out:
4105 mutex_unlock(&oom_lock);
4106 return page;
4107 }
4108
4109 /*
4110 * Maximum number of compaction retries with some progress before the OOM
4111 * killer is considered the only way to move forward.
4112 */
4113 #define MAX_COMPACT_RETRIES 16
4114
4115 #ifdef CONFIG_COMPACTION
4116 /* Try memory compaction for high-order allocations before reclaim */
4117 static struct page *
4118 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4119 unsigned int alloc_flags, const struct alloc_context *ac,
4120 enum compact_priority prio, enum compact_result *compact_result)
4121 {
4122 struct page *page = NULL;
4123 unsigned long pflags;
4124 unsigned int noreclaim_flag;
4125
4126 if (!order)
4127 return NULL;
4128
4129 psi_memstall_enter(&pflags);
4130 delayacct_compact_start();
4131 noreclaim_flag = memalloc_noreclaim_save();
4132
4133 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4134 prio, &page);
4135
4136 memalloc_noreclaim_restore(noreclaim_flag);
4137 psi_memstall_leave(&pflags);
4138 delayacct_compact_end();
4139
4140 if (*compact_result == COMPACT_SKIPPED)
4141 return NULL;
4142 /*
4143 * At least in one zone compaction wasn't deferred or skipped, so let's
4144 * count a compaction stall
4145 */
4146 count_vm_event(COMPACTSTALL);
4147
4148 /* Prep a captured page if available */
4149 if (page)
4150 prep_new_page(page, order, gfp_mask, alloc_flags);
4151
4152 /* Try to get a page from the freelist if available */
4153 if (!page)
4154 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4155
4156 if (page) {
4157 struct zone *zone = page_zone(page);
4158
4159 zone->compact_blockskip_flush = false;
4160 compaction_defer_reset(zone, order, true);
4161 count_vm_event(COMPACTSUCCESS);
4162 return page;
4163 }
4164
4165 /*
4166 * It's bad if compaction run occurs and fails. The most likely reason
4167 * is that pages exist, but not enough to satisfy watermarks.
4168 */
4169 count_vm_event(COMPACTFAIL);
4170
4171 cond_resched();
4172
4173 return NULL;
4174 }
4175
4176 static inline bool
4177 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4178 enum compact_result compact_result,
4179 enum compact_priority *compact_priority,
4180 int *compaction_retries)
4181 {
4182 int max_retries = MAX_COMPACT_RETRIES;
4183 int min_priority;
4184 bool ret = false;
4185 int retries = *compaction_retries;
4186 enum compact_priority priority = *compact_priority;
4187
4188 if (!order)
4189 return false;
4190
4191 if (fatal_signal_pending(current))
4192 return false;
4193
4194 /*
4195 * Compaction was skipped due to a lack of free order-0
4196 * migration targets. Continue if reclaim can help.
4197 */
4198 if (compact_result == COMPACT_SKIPPED) {
4199 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4200 goto out;
4201 }
4202
4203 /*
4204 * Compaction managed to coalesce some page blocks, but the
4205 * allocation failed, presumably due to a race. Retry a few times.
4206 */
4207 if (compact_result == COMPACT_SUCCESS) {
4208 /*
4209 * !costly requests are much more important than
4210 * __GFP_RETRY_MAYFAIL costly ones because they are de
4211 * facto nofail and invoke OOM killer to move on while
4212 * costly can fail and users are ready to cope with
4213 * that. 1/4 retries is rather arbitrary but we would
4214 * need much more detailed feedback from compaction to
4215 * make a better decision.
4216 */
4217 if (order > PAGE_ALLOC_COSTLY_ORDER)
4218 max_retries /= 4;
4219
4220 if (++(*compaction_retries) <= max_retries) {
4221 ret = true;
4222 goto out;
4223 }
4224 }
4225
4226 /*
4227 * Compaction failed. Retry with increasing priority.
4228 */
4229 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4230 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4231
4232 if (*compact_priority > min_priority) {
4233 (*compact_priority)--;
4234 *compaction_retries = 0;
4235 ret = true;
4236 }
4237 out:
4238 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4239 return ret;
4240 }
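/*
 * Editorial worked example: a costly request (order >
 * PAGE_ALLOC_COSTLY_ORDER) gets max_retries == 16 / 4 == 4 tries per
 * priority after a COMPACT_SUCCESS race; once those are spent, the
 * priority is raised, the retry counter is reset, and the cycle repeats
 * until MIN_COMPACT_COSTLY_PRIORITY is reached.
 */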
4241 #else
4242 static inline struct page *
4243 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4244 unsigned int alloc_flags, const struct alloc_context *ac,
4245 enum compact_priority prio, enum compact_result *compact_result)
4246 {
4247 *compact_result = COMPACT_SKIPPED;
4248 return NULL;
4249 }
4250
4251 static inline bool
4252 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4253 enum compact_result compact_result,
4254 enum compact_priority *compact_priority,
4255 int *compaction_retries)
4256 {
4257 struct zone *zone;
4258 struct zoneref *z;
4259
4260 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4261 return false;
4262
4263 /*
4264 * There are setups with compaction disabled which would prefer to loop
4265 * inside the allocator rather than hit the oom killer prematurely.
4266 * Let's give them a good hope and keep retrying while the order-0
4267 * watermarks are OK.
4268 */
4269 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4270 ac->highest_zoneidx, ac->nodemask) {
4271 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4272 ac->highest_zoneidx, alloc_flags))
4273 return true;
4274 }
4275 return false;
4276 }
4277 #endif /* CONFIG_COMPACTION */
4278
4279 #ifdef CONFIG_LOCKDEP
4280 static struct lockdep_map __fs_reclaim_map =
4281 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4282
4283 static bool __need_reclaim(gfp_t gfp_mask)
4284 {
4285 /* no reclaim without waiting on it */
4286 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4287 return false;
4288
4289 /* this guy won't enter reclaim */
4290 if (current->flags & PF_MEMALLOC)
4291 return false;
4292
4293 if (gfp_mask & __GFP_NOLOCKDEP)
4294 return false;
4295
4296 return true;
4297 }
4298
4299 void __fs_reclaim_acquire(unsigned long ip)
4300 {
4301 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
4302 }
4303
4304 void __fs_reclaim_release(unsigned long ip)
4305 {
4306 lock_release(&__fs_reclaim_map, ip);
4307 }
4308
4309 void fs_reclaim_acquire(gfp_t gfp_mask)
4310 {
4311 gfp_mask = current_gfp_context(gfp_mask);
4312
4313 if (__need_reclaim(gfp_mask)) {
4314 if (gfp_mask & __GFP_FS)
4315 __fs_reclaim_acquire(_RET_IP_);
4316
4317 #ifdef CONFIG_MMU_NOTIFIER
4318 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4319 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4320 #endif
4321
4322 }
4323 }
4324 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4325
4326 void fs_reclaim_release(gfp_t gfp_mask)
4327 {
4328 gfp_mask = current_gfp_context(gfp_mask);
4329
4330 if (__need_reclaim(gfp_mask)) {
4331 if (gfp_mask & __GFP_FS)
4332 __fs_reclaim_release(_RET_IP_);
4333 }
4334 }
4335 EXPORT_SYMBOL_GPL(fs_reclaim_release);
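/*
 * Editorial usage sketch: the acquire/release pair can be used to
 * assert "a blocking allocation may happen here" without actually
 * allocating, which is roughly what might_alloc() does:
 *
 *	fs_reclaim_acquire(gfp_mask);
 *	fs_reclaim_release(gfp_mask);
 *
 * Lockdep will then complain about any lock held across this point
 * that is also taken inside the reclaim path.
 */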
4336 #endif
4337
4338 /*
4339 * Zonelists may change due to hotplug during allocation. Detect when zonelists
4340 * have been rebuilt so allocations can be retried. Reader side does not lock and
4341 * retries the allocation if zonelist changes. Writer side is protected by the
4342 * embedded spin_lock.
4343 */
4344 static DEFINE_SEQLOCK(zonelist_update_seq);
4345
4346 static unsigned int zonelist_iter_begin(void)
4347 {
4348 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4349 return read_seqbegin(&zonelist_update_seq);
4350
4351 return 0;
4352 }
4353
4354 static unsigned int check_retry_zonelist(unsigned int seq)
4355 {
4356 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4357 return read_seqretry(&zonelist_update_seq, seq);
4358
4359 return seq;
4360 }
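/*
 * Editorial usage sketch, mirroring __alloc_pages_slowpath() below:
 *
 *	zonelist_iter_cookie = zonelist_iter_begin();
 *	...walk the zonelist, possibly failing...
 *	if (check_retry_zonelist(zonelist_iter_cookie))
 *		goto restart;
 *
 * On !CONFIG_MEMORY_HOTREMOVE kernels both helpers return the constant
 * 0, so the retry check compiles away.
 */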
4361
4362 /* Perform direct synchronous page reclaim */
4363 static unsigned long
4364 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4365 const struct alloc_context *ac)
4366 {
4367 unsigned int noreclaim_flag;
4368 unsigned long progress;
4369
4370 cond_resched();
4371
4372 /* We now go into synchronous reclaim */
4373 cpuset_memory_pressure_bump();
4374 fs_reclaim_acquire(gfp_mask);
4375 noreclaim_flag = memalloc_noreclaim_save();
4376
4377 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4378 ac->nodemask);
4379
4380 memalloc_noreclaim_restore(noreclaim_flag);
4381 fs_reclaim_release(gfp_mask);
4382
4383 cond_resched();
4384
4385 return progress;
4386 }
4387
4388 /* The really slow allocator path where we enter direct reclaim */
4389 static inline struct page *
4390 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4391 unsigned int alloc_flags, const struct alloc_context *ac,
4392 unsigned long *did_some_progress)
4393 {
4394 struct page *page = NULL;
4395 unsigned long pflags;
4396 bool drained = false;
4397
4398 psi_memstall_enter(&pflags);
4399 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4400 if (unlikely(!(*did_some_progress)))
4401 goto out;
4402
4403 retry:
4404 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4405
4406 /*
4407 * If an allocation failed after direct reclaim, it could be because
4408 * pages are pinned on the per-cpu lists or in high alloc reserves.
4409 * Shrink them and try again
4410 */
4411 if (!page && !drained) {
4412 unreserve_highatomic_pageblock(ac, false);
4413 drain_all_pages(NULL);
4414 drained = true;
4415 goto retry;
4416 }
4417 out:
4418 psi_memstall_leave(&pflags);
4419
4420 return page;
4421 }
4422
4423 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4424 const struct alloc_context *ac)
4425 {
4426 struct zoneref *z;
4427 struct zone *zone;
4428 pg_data_t *last_pgdat = NULL;
4429 enum zone_type highest_zoneidx = ac->highest_zoneidx;
4430 unsigned int reclaim_order;
4431
4432 if (defrag_mode)
4433 reclaim_order = max(order, pageblock_order);
4434 else
4435 reclaim_order = order;
4436
4437 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4438 ac->nodemask) {
4439 if (!managed_zone(zone))
4440 continue;
4441 if (last_pgdat == zone->zone_pgdat)
4442 continue;
4443 wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx);
4444 last_pgdat = zone->zone_pgdat;
4445 }
4446 }
4447
4448 static inline unsigned int
4449 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
4450 {
4451 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4452
4453 /*
4454 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE
4455 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4456 * to save two branches.
4457 */
4458 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
4459 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4460
4461 /*
4462 * The caller may dip into page reserves a bit more if the caller
4463 * cannot run direct reclaim, or if the caller has realtime scheduling
4464 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
4465 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
4466 */
4467 alloc_flags |= (__force int)
4468 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4469
4470 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
4471 /*
4472 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4473 * if it can't schedule.
4474 */
4475 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
4476 alloc_flags |= ALLOC_NON_BLOCK;
4477
4478 if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE))
4479 alloc_flags |= ALLOC_HIGHATOMIC;
4480 }
4481
4482 /*
4483 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
4484 * GFP_ATOMIC) rather than fail, see the comment for
4485 * cpuset_current_node_allowed().
4486 */
4487 if (alloc_flags & ALLOC_MIN_RESERVE)
4488 alloc_flags &= ~ALLOC_CPUSET;
4489 } else if (unlikely(rt_or_dl_task(current)) && in_task())
4490 alloc_flags |= ALLOC_MIN_RESERVE;
4491
4492 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4493
4494 if (defrag_mode)
4495 alloc_flags |= ALLOC_NOFRAGMENT;
4496
4497 return alloc_flags;
4498 }
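/*
 * Editorial worked example: GFP_ATOMIC is __GFP_HIGH |
 * __GFP_KSWAPD_RECLAIM, so an order-1 GFP_ATOMIC request leaves here
 * with ALLOC_WMARK_MIN | ALLOC_MIN_RESERVE | ALLOC_KSWAPD |
 * ALLOC_NON_BLOCK | ALLOC_HIGHATOMIC, and with ALLOC_CPUSET cleared,
 * letting it dip well below the min watermark rather than fail.
 */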
4499
4500 static bool oom_reserves_allowed(struct task_struct *tsk)
4501 {
4502 if (!tsk_is_oom_victim(tsk))
4503 return false;
4504
4505 /*
4506 * !MMU doesn't have oom reaper so give access to memory reserves
4507 * only to the thread with TIF_MEMDIE set
4508 */
4509 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4510 return false;
4511
4512 return true;
4513 }
4514
4515 /*
4516 * Distinguish requests which really need access to full memory
4517 * reserves from oom victims which can live with a portion of it
4518 */
4519 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4520 {
4521 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4522 return 0;
4523 if (gfp_mask & __GFP_MEMALLOC)
4524 return ALLOC_NO_WATERMARKS;
4525 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4526 return ALLOC_NO_WATERMARKS;
4527 if (!in_interrupt()) {
4528 if (current->flags & PF_MEMALLOC)
4529 return ALLOC_NO_WATERMARKS;
4530 else if (oom_reserves_allowed(current))
4531 return ALLOC_OOM;
4532 }
4533
4534 return 0;
4535 }
4536
4537 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4538 {
4539 return !!__gfp_pfmemalloc_flags(gfp_mask);
4540 }
4541
4542 /*
4543 * Checks whether it makes sense to retry the reclaim to make a forward progress
4544 * for the given allocation request.
4545 *
4546 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4547 * without success, or when we couldn't even meet the watermark if we
4548 * reclaimed all remaining pages on the LRU lists.
4549 *
4550 * Returns true if a retry is viable or false to enter the oom path.
4551 */
4552 static inline bool
4553 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4554 struct alloc_context *ac, int alloc_flags,
4555 bool did_some_progress, int *no_progress_loops)
4556 {
4557 struct zone *zone;
4558 struct zoneref *z;
4559 bool ret = false;
4560
4561 /*
4562 * Costly allocations might have made some progress, but this doesn't mean
4563 * their order will become available due to high fragmentation, so
4564 * always increment the no-progress counter for them.
4565 */
4566 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4567 *no_progress_loops = 0;
4568 else
4569 (*no_progress_loops)++;
4570
4571 if (*no_progress_loops > MAX_RECLAIM_RETRIES)
4572 goto out;
4573
4575 /*
4576 * Keep reclaiming pages while there is a chance this will lead
4577 * somewhere. If none of the target zones can satisfy our allocation
4578 * request even if all reclaimable pages are considered then we are
4579 * screwed and have to go OOM.
4580 */
4581 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4582 ac->highest_zoneidx, ac->nodemask) {
4583 unsigned long available;
4584 unsigned long reclaimable;
4585 unsigned long min_wmark = min_wmark_pages(zone);
4586 bool wmark;
4587
4588 if (cpusets_enabled() &&
4589 (alloc_flags & ALLOC_CPUSET) &&
4590 !__cpuset_zone_allowed(zone, gfp_mask))
4591 continue;
4592
4593 available = reclaimable = zone_reclaimable_pages(zone);
4594 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4595
4596 /*
4597 * Would the allocation succeed if we reclaimed all
4598 * reclaimable pages?
4599 */
4600 wmark = __zone_watermark_ok(zone, order, min_wmark,
4601 ac->highest_zoneidx, alloc_flags, available);
4602 trace_reclaim_retry_zone(z, order, reclaimable,
4603 available, min_wmark, *no_progress_loops, wmark);
4604 if (wmark) {
4605 ret = true;
4606 break;
4607 }
4608 }
4609
4610 /*
4611 * Memory allocation/reclaim might be called from a WQ context and the
4612 * current implementation of the WQ concurrency control doesn't
4613 * recognize that a particular WQ is congested if the worker thread is
4614 * looping without ever sleeping. Therefore we have to do a short sleep
4615 * here rather than calling cond_resched().
4616 */
4617 if (current->flags & PF_WQ_WORKER)
4618 schedule_timeout_uninterruptible(1);
4619 else
4620 cond_resched();
4621 out:
4622 /* Before OOM, exhaust highatomic_reserve */
4623 if (!ret)
4624 return unreserve_highatomic_pageblock(ac, true);
4625
4626 return ret;
4627 }
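/*
 * Editorial note: MAX_RECLAIM_RETRIES is 16 (see mm/internal.h), so a
 * looping allocation gets at most 16 no-progress reclaim rounds, after
 * which the highatomic reserve is raided as the final step before the
 * OOM path.
 */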
4628
4629 static inline bool
4630 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4631 {
4632 /*
4633 * It's possible that cpuset's mems_allowed and the nodemask from
4634 * mempolicy don't intersect. This should be normally dealt with by
4635 * policy_nodemask(), but it's possible to race with cpuset update in
4636 * such a way the check therein was true, and then it became false
4637 * before we got our cpuset_mems_cookie here.
4638 * This assumes that for all allocations, ac->nodemask can come only
4639 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4640 * when it does not intersect with the cpuset restrictions) or the
4641 * caller can deal with a violated nodemask.
4642 */
4643 if (cpusets_enabled() && ac->nodemask &&
4644 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4645 ac->nodemask = NULL;
4646 return true;
4647 }
4648
4649 /*
4650 * When updating a task's mems_allowed or mempolicy nodemask, it is
4651 * possible to race with parallel threads in such a way that our
4652 * allocation can fail while the mask is being updated. If we are about
4653 * to fail, check if the cpuset changed during allocation and if so,
4654 * retry.
4655 */
4656 if (read_mems_allowed_retry(cpuset_mems_cookie))
4657 return true;
4658
4659 return false;
4660 }
4661
4662 static inline struct page *
4663 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4664 struct alloc_context *ac)
4665 {
4666 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4667 bool can_compact = gfp_compaction_allowed(gfp_mask);
4668 bool nofail = gfp_mask & __GFP_NOFAIL;
4669 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4670 struct page *page = NULL;
4671 unsigned int alloc_flags;
4672 unsigned long did_some_progress;
4673 enum compact_priority compact_priority;
4674 enum compact_result compact_result;
4675 int compaction_retries;
4676 int no_progress_loops;
4677 unsigned int cpuset_mems_cookie;
4678 unsigned int zonelist_iter_cookie;
4679 int reserve_flags;
4680
4681 if (unlikely(nofail)) {
4682 /*
4683 * We don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM;
4684 * otherwise we may end up in a lockup.
4685 */
4686 WARN_ON_ONCE(!can_direct_reclaim);
4687 /*
4688 * A PF_MEMALLOC request from this context is rather bizarre
4689 * because we cannot reclaim anything and can only loop waiting
4690 * for somebody to do the work for us.
4691 */
4692 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4693 }
4694
4695 restart:
4696 compaction_retries = 0;
4697 no_progress_loops = 0;
4698 compact_result = COMPACT_SKIPPED;
4699 compact_priority = DEF_COMPACT_PRIORITY;
4700 cpuset_mems_cookie = read_mems_allowed_begin();
4701 zonelist_iter_cookie = zonelist_iter_begin();
4702
4703 /*
4704 * The fast path uses conservative alloc_flags to succeed only until
4705 * kswapd needs to be woken up, and to avoid the cost of setting up
4706 * alloc_flags precisely. So we do that now.
4707 */
4708 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
4709
4710 /*
4711 * We need to recalculate the starting point for the zonelist iterator
4712 * because we might have used different nodemask in the fast path, or
4713 * there was a cpuset modification and we are retrying - otherwise we
4714 * could end up iterating over non-eligible zones endlessly.
4715 */
4716 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4717 ac->highest_zoneidx, ac->nodemask);
4718 if (!zonelist_zone(ac->preferred_zoneref))
4719 goto nopage;
4720
4721 /*
4722 * Check for insane configurations where the cpuset doesn't contain
4723 * any suitable zone to satisfy the request - e.g. non-movable
4724 * GFP_HIGHUSER allocations from MOVABLE nodes only.
4725 */
4726 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
4727 struct zoneref *z = first_zones_zonelist(ac->zonelist,
4728 ac->highest_zoneidx,
4729 &cpuset_current_mems_allowed);
4730 if (!zonelist_zone(z))
4731 goto nopage;
4732 }
4733
4734 if (alloc_flags & ALLOC_KSWAPD)
4735 wake_all_kswapds(order, gfp_mask, ac);
4736
4737 /*
4738 * The adjusted alloc_flags might result in immediate success, so try
4739 * that first
4740 */
4741 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4742 if (page)
4743 goto got_pg;
4744
4745 /*
4746 * For costly allocations, try direct compaction first, as it's likely
4747 * that we have enough base pages and don't need to reclaim. For non-
4748 * movable high-order allocations, do that as well, as compaction will
4749 * try to prevent permanent fragmentation by migrating from blocks of the
4750 * same migratetype.
4751 * Don't try this for allocations that are allowed to ignore
4752 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4753 */
4754 if (can_direct_reclaim && can_compact &&
4755 (costly_order ||
4756 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4757 && !gfp_pfmemalloc_allowed(gfp_mask)) {
4758 page = __alloc_pages_direct_compact(gfp_mask, order,
4759 alloc_flags, ac,
4760 INIT_COMPACT_PRIORITY,
4761 &compact_result);
4762 if (page)
4763 goto got_pg;
4764
4765 /*
4766 * Checks for costly allocations with __GFP_NORETRY, which
4767 * includes some THP page fault allocations
4768 */
4769 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4770 /*
4771 * If allocating entire pageblock(s) and compaction
4772 * failed because all zones are below low watermarks
4773 * or is prohibited because it recently failed at this
4774 * order, fail immediately unless the allocator has
4775 * requested compaction and reclaim retry.
4776 *
4777 * Reclaim is
4778 * - potentially very expensive because zones are far
4779 * below their low watermarks or this is part of very
4780 * bursty high order allocations,
4781 * - not guaranteed to help because isolate_freepages()
4782 * may not iterate over freed pages as part of its
4783 * linear scan, and
4784 * - unlikely to make entire pageblocks free on its
4785 * own.
4786 */
4787 if (compact_result == COMPACT_SKIPPED ||
4788 compact_result == COMPACT_DEFERRED)
4789 goto nopage;
4790
4791 /*
4792 * Looks like reclaim/compaction is worth trying, but
4793 * sync compaction could be very expensive, so keep
4794 * using async compaction.
4795 */
4796 compact_priority = INIT_COMPACT_PRIORITY;
4797 }
4798 }
4799
4800 retry:
4801 /*
4802 * Deal with possible cpuset update races or zonelist updates to avoid
4803 * infinite retries.
4804 */
4805 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4806 check_retry_zonelist(zonelist_iter_cookie))
4807 goto restart;
4808
4809 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4810 if (alloc_flags & ALLOC_KSWAPD)
4811 wake_all_kswapds(order, gfp_mask, ac);
4812
4813 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4814 if (reserve_flags)
4815 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4816 (alloc_flags & ALLOC_KSWAPD);
4817
4818 /*
4819 * Reset the nodemask and zonelist iterators if memory policies can be
4820 * ignored. These allocations are high priority and system rather than
4821 * user oriented.
4822 */
4823 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4824 ac->nodemask = NULL;
4825 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4826 ac->highest_zoneidx, ac->nodemask);
4827 }
4828
4829 /* Attempt with potentially adjusted zonelist and alloc_flags */
4830 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4831 if (page)
4832 goto got_pg;
4833
4834 /* Caller is not willing to reclaim, we can't balance anything */
4835 if (!can_direct_reclaim)
4836 goto nopage;
4837
4838 /* Avoid recursion of direct reclaim */
4839 if (current->flags & PF_MEMALLOC)
4840 goto nopage;
4841
4842 /* Try direct reclaim and then allocating */
4843 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4844 &did_some_progress);
4845 if (page)
4846 goto got_pg;
4847
4848 /* Try direct compaction and then allocating */
4849 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4850 compact_priority, &compact_result);
4851 if (page)
4852 goto got_pg;
4853
4854 /* Do not loop if specifically requested */
4855 if (gfp_mask & __GFP_NORETRY)
4856 goto nopage;
4857
4858 /*
4859 * Do not retry costly high order allocations unless they are
4860 * __GFP_RETRY_MAYFAIL and we can compact
4861 */
4862 if (costly_order && (!can_compact ||
4863 !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4864 goto nopage;
4865
4866 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4867 did_some_progress > 0, &no_progress_loops))
4868 goto retry;
4869
4870 /*
4871 * It doesn't make any sense to retry compaction if the order-0
4872 * reclaim is not able to make any progress because the current
4873 * implementation of compaction depends on a sufficient amount
4874 * of free memory (see __compaction_suitable)
4875 */
4876 if (did_some_progress > 0 && can_compact &&
4877 should_compact_retry(ac, order, alloc_flags,
4878 compact_result, &compact_priority,
4879 &compaction_retries))
4880 goto retry;
4881
4882 /* Reclaim/compaction failed to prevent the fallback */
4883 if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) {
4884 alloc_flags &= ~ALLOC_NOFRAGMENT;
4885 goto retry;
4886 }
4887
4888 /*
4889 * Deal with possible cpuset update races or zonelist updates to avoid
4890 * an unnecessary OOM kill.
4891 */
4892 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4893 check_retry_zonelist(zonelist_iter_cookie))
4894 goto restart;
4895
4896 /* Reclaim has failed us, start killing things */
4897 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4898 if (page)
4899 goto got_pg;
4900
4901 /* Avoid allocations with no watermarks from looping endlessly */
4902 if (tsk_is_oom_victim(current) &&
4903 (alloc_flags & ALLOC_OOM ||
4904 (gfp_mask & __GFP_NOMEMALLOC)))
4905 goto nopage;
4906
4907 /* Retry as long as the OOM killer is making progress */
4908 if (did_some_progress) {
4909 no_progress_loops = 0;
4910 goto retry;
4911 }
4912
4913 nopage:
4914 /*
4915 * Deal with possible cpuset update races or zonelist updates to avoid
4916 * an unnecessary OOM kill.
4917 */
4918 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4919 check_retry_zonelist(zonelist_iter_cookie))
4920 goto restart;
4921
4922 /*
4923 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4924 * we always retry
4925 */
4926 if (unlikely(nofail)) {
4927 /*
4928 * Lacking direct_reclaim we can't do anything to reclaim memory;
4929 * we disregard these unreasonable nofail requests and still
4930 * return NULL.
4931 */
4932 if (!can_direct_reclaim)
4933 goto fail;
4934
4935 /*
4936 * Help non-failing allocations by giving some access to memory
4937 * reserves normally used for high priority non-blocking
4938 * allocations but do not use ALLOC_NO_WATERMARKS because this
4939 * could deplete whole memory reserves which would just make
4940 * the situation worse.
4941 */
4942 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4943 if (page)
4944 goto got_pg;
4945
4946 cond_resched();
4947 goto retry;
4948 }
4949 fail:
4950 warn_alloc(gfp_mask, ac->nodemask,
4951 "page allocation failure: order:%u", order);
4952 got_pg:
4953 return page;
4954 }
4955
4956 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4957 int preferred_nid, nodemask_t *nodemask,
4958 struct alloc_context *ac, gfp_t *alloc_gfp,
4959 unsigned int *alloc_flags)
4960 {
4961 ac->highest_zoneidx = gfp_zone(gfp_mask);
4962 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4963 ac->nodemask = nodemask;
4964 ac->migratetype = gfp_migratetype(gfp_mask);
4965
4966 if (cpusets_enabled()) {
4967 *alloc_gfp |= __GFP_HARDWALL;
4968 /*
4969 * In interrupt context the cpuset of the current task is
4970 * irrelevant, so any node is OK.
4971 */
4972 if (in_task() && !ac->nodemask)
4973 ac->nodemask = &cpuset_current_mems_allowed;
4974 else
4975 *alloc_flags |= ALLOC_CPUSET;
4976 }
4977
4978 might_alloc(gfp_mask);
4979
4980 /*
4981 * Don't invoke should_fail logic, since it may call
4982 * get_random_u32() and printk() which need to spin_lock.
4983 */
4984 if (!(*alloc_flags & ALLOC_TRYLOCK) &&
4985 should_fail_alloc_page(gfp_mask, order))
4986 return false;
4987
4988 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4989
4990 /* Dirty zone balancing only done in the fast path */
4991 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4992
4993 /*
4994 * The preferred zone is used for statistics but crucially it is
4995 * also used as the starting point for the zonelist iterator. It
4996 * may get reset for allocations that ignore memory policies.
4997 */
4998 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4999 ac->highest_zoneidx, ac->nodemask);
5000
5001 return true;
5002 }
5003
5004 /*
5005 * __alloc_pages_bulk - Allocate a number of order-0 pages to an array
5006 * @gfp: GFP flags for the allocation
5007 * @preferred_nid: The preferred NUMA node ID to allocate from
5008 * @nodemask: Set of nodes to allocate from, may be NULL
5009 * @nr_pages: The number of pages desired in the array
5010 * @page_array: Array to store the pages
5011 *
5012 * This is a batched version of the page allocator that attempts to allocate
5013 * @nr_pages quickly. Pages are added to @page_array.
5014 *
5015 * Note that only the elements in @page_array that were cleared to %NULL on
5016 * entry are populated with newly allocated pages. @nr_pages is the maximum
5017 * number of pages that will be stored in the array.
5018 *
5019 * Returns the number of pages in @page_array, including ones already
5020 * allocated on entry. This can be less than the number requested in @nr_pages,
5021 * but all empty slots are filled from the beginning. I.e., if all slots in
5022 * @page_array were set to %NULL on entry, the slots from 0 to the return value
5023 * - 1 will be filled.
5024 */
5025 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
5026 nodemask_t *nodemask, int nr_pages,
5027 struct page **page_array)
5028 {
5029 struct page *page;
5030 unsigned long UP_flags;
5031 struct zone *zone;
5032 struct zoneref *z;
5033 struct per_cpu_pages *pcp;
5034 struct list_head *pcp_list;
5035 struct alloc_context ac;
5036 gfp_t alloc_gfp;
5037 unsigned int alloc_flags = ALLOC_WMARK_LOW;
5038 int nr_populated = 0, nr_account = 0;
5039
5040 /*
5041 * Skip populated array elements to determine if any pages need
5042 * to be allocated before disabling IRQs.
5043 */
5044 while (nr_populated < nr_pages && page_array[nr_populated])
5045 nr_populated++;
5046
5047 /* No pages requested? */
5048 if (unlikely(nr_pages <= 0))
5049 goto out;
5050
5051 /* Already populated array? */
5052 if (unlikely(nr_pages - nr_populated == 0))
5053 goto out;
5054
5055 /* Bulk allocator does not support memcg accounting. */
5056 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
5057 goto failed;
5058
5059 /* Use the single page allocator for one page. */
5060 if (nr_pages - nr_populated == 1)
5061 goto failed;
5062
5063 #ifdef CONFIG_PAGE_OWNER
5064 /*
5065 * PAGE_OWNER may recurse into the allocator to allocate space to
5066 * save the stack with pagesets.lock held. Releasing/reacquiring
5067 * removes much of the performance benefit of bulk allocation so
5068 * force the caller to allocate one page at a time; that performs
5069 * about as well as adding the complexity to the bulk allocator would.
5070 */
5071 if (static_branch_unlikely(&page_owner_inited))
5072 goto failed;
5073 #endif
5074
5075 /* May set ALLOC_NOFRAGMENT; fragmentation then falls back to 1 page. */
5076 gfp &= gfp_allowed_mask;
5077 alloc_gfp = gfp;
5078 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5079 goto out;
5080 gfp = alloc_gfp;
5081
5082 /* Find an allowed local zone that meets the low watermark. */
5083 z = ac.preferred_zoneref;
5084 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
5085 unsigned long mark;
5086
5087 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5088 !__cpuset_zone_allowed(zone, gfp)) {
5089 continue;
5090 }
5091
5092 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
5093 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
5094 goto failed;
5095 }
5096
5097 cond_accept_memory(zone, 0, alloc_flags);
5098 retry_this_zone:
5099 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
5100 if (zone_watermark_fast(zone, 0, mark,
5101 zonelist_zone_idx(ac.preferred_zoneref),
5102 alloc_flags, gfp)) {
5103 break;
5104 }
5105
5106 if (cond_accept_memory(zone, 0, alloc_flags))
5107 goto retry_this_zone;
5108
5109 /* Try again if zone has deferred pages */
5110 if (deferred_pages_enabled()) {
5111 if (_deferred_grow_zone(zone, 0))
5112 goto retry_this_zone;
5113 }
5114 }
5115
5116 /*
5117 * If there are no allowed local zones that meet the watermarks then
5118 * try to allocate a single page and reclaim if necessary.
5119 */
5120 if (unlikely(!zone))
5121 goto failed;
5122
5123 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
5124 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
5125 if (!pcp)
5126 goto failed;
5127
5128 /* Attempt the batch allocation */
5129 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
5130 while (nr_populated < nr_pages) {
5131
5132 /* Skip existing pages */
5133 if (page_array[nr_populated]) {
5134 nr_populated++;
5135 continue;
5136 }
5137
5138 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
5139 pcp, pcp_list);
5140 if (unlikely(!page)) {
5141 /* Try and allocate at least one page */
5142 if (!nr_account) {
5143 pcp_spin_unlock(pcp, UP_flags);
5144 goto failed;
5145 }
5146 break;
5147 }
5148 nr_account++;
5149
5150 prep_new_page(page, 0, gfp, 0);
5151 set_page_refcounted(page);
5152 page_array[nr_populated++] = page;
5153 }
5154
5155 pcp_spin_unlock(pcp, UP_flags);
5156
5157 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5158 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
5159
5160 out:
5161 return nr_populated;
5162
5163 failed:
5164 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
5165 if (page)
5166 page_array[nr_populated++] = page;
5167 goto out;
5168 }
5169 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
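/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * fill a zeroed array via the bulk interface above. Zeroing matters
 * because only %NULL slots are treated as empty; on return, slots
 * [0, filled) hold pages that must eventually go back to the buddy.
 */
#if 0
static int example_bulk_fill(void)
{
	struct page *pages[16] = { NULL };	/* all slots empty */
	unsigned long filled, i;

	filled = alloc_pages_bulk_noprof(GFP_KERNEL, numa_mem_id(), NULL,
					 ARRAY_SIZE(pages), pages);

	/* Partial success is normal; use what we got, then free it. */
	for (i = 0; i < filled; i++)
		__free_pages(pages[i], 0);

	return filled ? 0 : -ENOMEM;
}
#endif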
5170
5171 /*
5172 * This is the 'heart' of the zoned buddy allocator.
5173 */
5174 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
5175 int preferred_nid, nodemask_t *nodemask)
5176 {
5177 struct page *page;
5178 unsigned int alloc_flags = ALLOC_WMARK_LOW;
5179 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
5180 struct alloc_context ac = { };
5181
5182 /*
5183 * There are several places where we assume that the order value is sane
5184 * so bail out early if the request is out of bounds.
5185 */
5186 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
5187 return NULL;
5188
5189 gfp &= gfp_allowed_mask;
5190 /*
5191 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5192 * and GFP_NOIO, which have to be inherited for all allocation requests
5193 * from a particular context which has been marked by
5194 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
5195 * movable zones are not used during allocation.
5196 */
5197 gfp = current_gfp_context(gfp);
5198 alloc_gfp = gfp;
5199 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5200 &alloc_gfp, &alloc_flags))
5201 return NULL;
5202
5203 /*
5204 * Forbid the first pass from falling back to types that fragment
5205 * memory until all local zones are considered.
5206 */
5207 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
5208
5209 /* First allocation attempt */
5210 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5211 if (likely(page))
5212 goto out;
5213
5214 alloc_gfp = gfp;
5215 ac.spread_dirty_pages = false;
5216
5217 /*
5218 * Restore the original nodemask if it was potentially replaced with
5219 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5220 */
5221 ac.nodemask = nodemask;
5222
5223 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5224
5225 out:
5226 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
5227 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5228 free_frozen_pages(page, order);
5229 page = NULL;
5230 }
5231
5232 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5233 kmsan_alloc_page(page, order, alloc_gfp);
5234
5235 return page;
5236 }
5237 EXPORT_SYMBOL(__alloc_frozen_pages_noprof);
5238
5239 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
5240 int preferred_nid, nodemask_t *nodemask)
5241 {
5242 struct page *page;
5243
5244 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask);
5245 if (page)
5246 set_page_refcounted(page);
5247 return page;
5248 }
5249 EXPORT_SYMBOL(__alloc_pages_noprof);
5250
5251 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
5252 nodemask_t *nodemask)
5253 {
5254 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order,
5255 preferred_nid, nodemask);
5256 return page_rmappable_folio(page);
5257 }
5258 EXPORT_SYMBOL(__folio_alloc_noprof);
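/*
 * Hypothetical usage sketch: folios allocated via this path already
 * carry __GFP_COMP, so a plain folio_put() releases the whole
 * allocation.
 */
#if 0
static void example_folio_roundtrip(void)
{
	struct folio *folio = folio_alloc(GFP_KERNEL, 2);	/* 4 pages */

	if (folio)
		folio_put(folio);
}
#endif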
5259
5260 /*
5261 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5262 * address cannot represent highmem pages. Use alloc_pages and then kmap if
5263 * you need to access high mem.
5264 */
5265 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order)
5266 {
5267 struct page *page;
5268
5269 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order);
5270 if (!page)
5271 return 0;
5272 return (unsigned long) page_address(page);
5273 }
5274 EXPORT_SYMBOL(get_free_pages_noprof);
5275
5276 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask)
5277 {
5278 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0);
5279 }
5280 EXPORT_SYMBOL(get_zeroed_page_noprof);
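/*
 * Usage sketch (illustrative): the return value is a kernel virtual
 * address rather than a struct page, so it pairs with free_page() /
 * free_pages(), not __free_pages().
 */
#if 0
static int example_zeroed_page(void)
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;
	/* ... use (void *)addr as a zero-filled page ... */
	free_page(addr);
	return 0;
}
#endif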
5281
5282 static void ___free_pages(struct page *page, unsigned int order,
5283 fpi_t fpi_flags)
5284 {
5285 /* get PageHead before we drop reference */
5286 int head = PageHead(page);
5287 /* get alloc tag in case the page is released by others */
5288 struct alloc_tag *tag = pgalloc_tag_get(page);
5289
5290 if (put_page_testzero(page))
5291 __free_frozen_pages(page, order, fpi_flags);
5292 else if (!head) {
5293 pgalloc_tag_sub_pages(tag, (1 << order) - 1);
5294 while (order-- > 0) {
5295 /*
5296 * The "tail" pages of this non-compound high-order
5297 * page will have no code tags, so to avoid warnings
5298 * mark them as empty.
5299 */
5300 clear_page_tag_ref(page + (1 << order));
5301 __free_frozen_pages(page + (1 << order), order,
5302 fpi_flags);
5303 }
5304 }
5305 }
5306
5307 /**
5308 * __free_pages - Free pages allocated with alloc_pages().
5309 * @page: The page pointer returned from alloc_pages().
5310 * @order: The order of the allocation.
5311 *
5312 * This function can free multi-page allocations that are not compound
5313 * pages. It does not check that the @order passed in matches that of
5314 * the allocation, so it is easy to leak memory. Freeing more memory
5315 * than was allocated will probably emit a warning.
5316 *
5317 * If the last reference to this page is speculative, it will be released
5318 * by put_page() which only frees the first page of a non-compound
5319 * allocation. To prevent the remaining pages from being leaked, we free
5320 * the subsequent pages here. If you want to use the page's reference
5321 * count to decide when to free the allocation, you should allocate a
5322 * compound page, and use put_page() instead of __free_pages().
5323 *
5324 * Context: May be called in interrupt context or while holding a normal
5325 * spinlock, but not in NMI context or while holding a raw spinlock.
5326 */
5327 void __free_pages(struct page *page, unsigned int order)
5328 {
5329 ___free_pages(page, order, FPI_NONE);
5330 }
5331 EXPORT_SYMBOL(__free_pages);
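/*
 * Sketch of the rule from the comment above (hypothetical caller): when
 * an allocation's lifetime is governed by its refcount, allocate it as
 * a compound page so that put_page() frees every constituent page.
 */
#if 0
static struct page *example_refcounted_buffer(void)
{
	/* Compound: put_page() on the head releases all 8 pages at once. */
	return alloc_pages(GFP_KERNEL | __GFP_COMP, 3);
}
#endif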
5332
5333 /*
5334 * Can be called while holding raw_spin_lock or from IRQ and NMI for any
5335 * page type (not only those that came from alloc_pages_nolock)
5336 */
5337 void free_pages_nolock(struct page *page, unsigned int order)
5338 {
5339 ___free_pages(page, order, FPI_TRYLOCK);
5340 }
5341
5342 /**
5343 * free_pages - Free pages allocated with __get_free_pages().
5344 * @addr: The virtual address tied to a page returned from __get_free_pages().
5345 * @order: The order of the allocation.
5346 *
5347 * This function behaves the same as __free_pages(). Use this function
5348 * to free pages when you only have a valid virtual address. If you have
5349 * the page, call __free_pages() instead.
5350 */
5351 void free_pages(unsigned long addr, unsigned int order)
5352 {
5353 if (addr != 0) {
5354 VM_BUG_ON(!virt_addr_valid((void *)addr));
5355 __free_pages(virt_to_page((void *)addr), order);
5356 }
5357 }
5358
5359 EXPORT_SYMBOL(free_pages);
5360
5361 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5362 size_t size)
5363 {
5364 if (addr) {
5365 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
5366 struct page *page = virt_to_page((void *)addr);
5367 struct page *last = page + nr;
5368
5369 split_page_owner(page, order, 0);
5370 pgalloc_tag_split(page_folio(page), order, 0);
5371 split_page_memcg(page, order);
5372 while (page < --last)
5373 set_page_refcounted(last);
5374
5375 last = page + (1UL << order);
5376 for (page += nr; page < last; page++)
5377 __free_pages_ok(page, 0, FPI_TO_TAIL);
5378 }
5379 return (void *)addr;
5380 }
5381
5382 /**
5383 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
5384 * @size: the number of bytes to allocate
5385 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5386 *
5387 * This function is similar to alloc_pages(), except that it allocates the
5388 * minimum number of pages to satisfy the request. alloc_pages() can only
5389 * allocate memory in power-of-two pages.
5390 *
5391 * This function is also limited by MAX_PAGE_ORDER.
5392 *
5393 * Memory allocated by this function must be released by free_pages_exact().
5394 *
5395 * Return: pointer to the allocated area or %NULL in case of error.
5396 */
5397 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask)
5398 {
5399 unsigned int order = get_order(size);
5400 unsigned long addr;
5401
5402 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5403 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5404
5405 addr = get_free_pages_noprof(gfp_mask, order);
5406 return make_alloc_exact(addr, order, size);
5407 }
5408 EXPORT_SYMBOL(alloc_pages_exact_noprof);
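/*
 * Usage sketch (illustrative): the allocation is trimmed to @size, so
 * the caller must remember the same size for free_pages_exact().
 */
#if 0
static void *example_exact(size_t bytes)
{
	void *buf = alloc_pages_exact(bytes, GFP_KERNEL | __GFP_ZERO);

	/* ... later: free_pages_exact(buf, bytes); ... */
	return buf;
}
#endif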
5409
5410 /**
5411 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5412 * pages on a node.
5413 * @nid: the preferred node ID where memory should be allocated
5414 * @size: the number of bytes to allocate
5415 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5416 *
5417 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5418 * back.
5419 *
5420 * Return: pointer to the allocated area or %NULL in case of error.
5421 */
5422 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask)
5423 {
5424 unsigned int order = get_order(size);
5425 struct page *p;
5426
5427 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5428 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5429
5430 p = alloc_pages_node_noprof(nid, gfp_mask, order);
5431 if (!p)
5432 return NULL;
5433 return make_alloc_exact((unsigned long)page_address(p), order, size);
5434 }
5435
5436 /**
5437 * free_pages_exact - release memory allocated via alloc_pages_exact()
5438 * @virt: the value returned by alloc_pages_exact.
5439 * @size: size of allocation, same value as passed to alloc_pages_exact().
5440 *
5441 * Release the memory allocated by a previous call to alloc_pages_exact.
5442 */
5443 void free_pages_exact(void *virt, size_t size)
5444 {
5445 unsigned long addr = (unsigned long)virt;
5446 unsigned long end = addr + PAGE_ALIGN(size);
5447
5448 while (addr < end) {
5449 free_page(addr);
5450 addr += PAGE_SIZE;
5451 }
5452 }
5453 EXPORT_SYMBOL(free_pages_exact);
5454
5455 /**
5456 * nr_free_zone_pages - count number of pages beyond high watermark
5457 * @offset: The zone index of the highest zone
5458 *
5459 * nr_free_zone_pages() counts the number of pages which are beyond the
5460 * high watermark within all zones at or below a given zone index. For each
5461 * zone, the number of pages is calculated as:
5462 *
5463 * nr_free_zone_pages = managed_pages - high_pages
5464 *
5465 * Return: number of pages beyond high watermark.
5466 */
5467 static unsigned long nr_free_zone_pages(int offset)
5468 {
5469 struct zoneref *z;
5470 struct zone *zone;
5471
5472 /* Just pick one node, since fallback list is circular */
5473 unsigned long sum = 0;
5474
5475 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5476
5477 for_each_zone_zonelist(zone, z, zonelist, offset) {
5478 unsigned long size = zone_managed_pages(zone);
5479 unsigned long high = high_wmark_pages(zone);
5480 if (size > high)
5481 sum += size - high;
5482 }
5483
5484 return sum;
5485 }
5486
5487 /**
5488 * nr_free_buffer_pages - count number of pages beyond high watermark
5489 *
5490 * nr_free_buffer_pages() counts the number of pages which are beyond the high
5491 * watermark within ZONE_DMA and ZONE_NORMAL.
5492 *
5493 * Return: number of pages beyond high watermark within ZONE_DMA and
5494 * ZONE_NORMAL.
5495 */
5496 unsigned long nr_free_buffer_pages(void)
5497 {
5498 return nr_free_zone_pages(gfp_zone(GFP_USER));
5499 }
5500 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5501
5502 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5503 {
5504 zoneref->zone = zone;
5505 zoneref->zone_idx = zone_idx(zone);
5506 }
5507
5508 /*
5509 * Builds allocation fallback zone lists.
5510 *
5511 * Add all populated zones of a node to the zonelist.
5512 */
5513 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
5514 {
5515 struct zone *zone;
5516 enum zone_type zone_type = MAX_NR_ZONES;
5517 int nr_zones = 0;
5518
5519 do {
5520 zone_type--;
5521 zone = pgdat->node_zones + zone_type;
5522 if (populated_zone(zone)) {
5523 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
5524 check_highest_zone(zone_type);
5525 }
5526 } while (zone_type);
5527
5528 return nr_zones;
5529 }
5530
5531 #ifdef CONFIG_NUMA
5532
5533 static int __parse_numa_zonelist_order(char *s)
5534 {
5535 /*
5536 * We used to support different zonelist modes but they turned
5537 * out to be just not useful. Let's keep the warning in place
5538 * if somebody still uses the command line parameter so that we
5539 * do not fail silently.
5540 */
5541 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5542 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
5543 return -EINVAL;
5544 }
5545 return 0;
5546 }
5547
5548 static char numa_zonelist_order[] = "Node";
5549 #define NUMA_ZONELIST_ORDER_LEN 16
5550 /*
5551 * sysctl handler for numa_zonelist_order
5552 */
5553 static int numa_zonelist_order_handler(const struct ctl_table *table, int write,
5554 void *buffer, size_t *length, loff_t *ppos)
5555 {
5556 if (write)
5557 return __parse_numa_zonelist_order(buffer);
5558 return proc_dostring(table, write, buffer, length, ppos);
5559 }
5560
5561 static int node_load[MAX_NUMNODES];
5562
5563 /**
5564 * find_next_best_node - find the next node that should appear in a given node's fallback list
5565 * @node: node whose fallback list we're appending
5566 * @used_node_mask: nodemask_t of already used nodes
5567 *
5568 * We use a number of factors to determine which is the next node that should
5569 * appear on a given node's fallback list. The node should not have appeared
5570 * already in @node's fallback list, and it should be the next closest node
5571 * according to the distance array (which contains arbitrary distance values
5572 * from each node to each node in the system), and should also prefer nodes
5573 * with no CPUs, since presumably they'll have very little allocation pressure
5574 * on them otherwise.
5575 *
5576 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5577 */
5578 int find_next_best_node(int node, nodemask_t *used_node_mask)
5579 {
5580 int n, val;
5581 int min_val = INT_MAX;
5582 int best_node = NUMA_NO_NODE;
5583
5584 /*
5585 * Use the local node if we haven't already, but for a memoryless
5586 * local node, skip it and fall back to other nodes.
5587 */
5588 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) {
5589 node_set(node, *used_node_mask);
5590 return node;
5591 }
5592
5593 for_each_node_state(n, N_MEMORY) {
5594
5595 /* Don't want a node to appear more than once */
5596 if (node_isset(n, *used_node_mask))
5597 continue;
5598
5599 /* Use the distance array to find the distance */
5600 val = node_distance(node, n);
5601
5602 /* Penalize nodes under us ("prefer the next node") */
5603 val += (n < node);
5604
5605 /* Give preference to headless and unused nodes */
5606 if (!cpumask_empty(cpumask_of_node(n)))
5607 val += PENALTY_FOR_NODE_WITH_CPUS;
5608
5609 /* Slight preference for less loaded node */
5610 val *= MAX_NUMNODES;
5611 val += node_load[n];
5612
5613 if (val < min_val) {
5614 min_val = val;
5615 best_node = n;
5616 }
5617 }
5618
5619 if (best_node >= 0)
5620 node_set(best_node, *used_node_mask);
5621
5622 return best_node;
5623 }
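/*
 * Worked example with made-up numbers: from node 0, a candidate node 1
 * at distance 20 that has CPUs scores
 * (20 + 0 + PENALTY_FOR_NODE_WITH_CPUS) * MAX_NUMNODES + node_load[1],
 * while a CPU-less node at the same distance scores 20 * MAX_NUMNODES,
 * so the headless node is preferred. Because val is multiplied by
 * MAX_NUMNODES before node_load is added, the load term tie-breaks
 * equal candidates but, for typical small loads, never overrides a
 * distance difference.
 */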
5624
5625
5626 /*
5627 * Build zonelists ordered by node and zones within node.
5628 * This results in maximum locality--normal zone overflows into local
5629 * DMA zone, if any--but risks exhausting DMA zone.
5630 */
5631 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5632 unsigned nr_nodes)
5633 {
5634 struct zoneref *zonerefs;
5635 int i;
5636
5637 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5638
5639 for (i = 0; i < nr_nodes; i++) {
5640 int nr_zones;
5641
5642 pg_data_t *node = NODE_DATA(node_order[i]);
5643
5644 nr_zones = build_zonerefs_node(node, zonerefs);
5645 zonerefs += nr_zones;
5646 }
5647 zonerefs->zone = NULL;
5648 zonerefs->zone_idx = 0;
5649 }
5650
5651 /*
5652 * Build __GFP_THISNODE zonelists
5653 */
5654 static void build_thisnode_zonelists(pg_data_t *pgdat)
5655 {
5656 struct zoneref *zonerefs;
5657 int nr_zones;
5658
5659 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5660 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5661 zonerefs += nr_zones;
5662 zonerefs->zone = NULL;
5663 zonerefs->zone_idx = 0;
5664 }
5665
5666 static void build_zonelists(pg_data_t *pgdat)
5667 {
5668 static int node_order[MAX_NUMNODES];
5669 int node, nr_nodes = 0;
5670 nodemask_t used_mask = NODE_MASK_NONE;
5671 int local_node, prev_node;
5672
5673 /* NUMA-aware ordering of nodes */
5674 local_node = pgdat->node_id;
5675 prev_node = local_node;
5676
5677 memset(node_order, 0, sizeof(node_order));
5678 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5679 /*
5680 * We don't want to pressure a particular node.
5681 * So add a penalty to the first node in the same
5682 * distance group to make it round-robin.
5683 */
5684 if (node_distance(local_node, node) !=
5685 node_distance(local_node, prev_node))
5686 node_load[node] += 1;
5687
5688 node_order[nr_nodes++] = node;
5689 prev_node = node;
5690 }
5691
5692 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5693 build_thisnode_zonelists(pgdat);
5694 pr_info("Fallback order for Node %d: ", local_node);
5695 for (node = 0; node < nr_nodes; node++)
5696 pr_cont("%d ", node_order[node]);
5697 pr_cont("\n");
5698 }
5699
5700 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5701 /*
5702 * Return node id of node used for "local" allocations.
5703 * I.e., first node id of first zone in arg node's generic zonelist.
5704 * Used for initializing percpu 'numa_mem', which is used primarily
5705 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5706 */
5707 int local_memory_node(int node)
5708 {
5709 struct zoneref *z;
5710
5711 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5712 gfp_zone(GFP_KERNEL),
5713 NULL);
5714 return zonelist_node_idx(z);
5715 }
5716 #endif
5717
5718 static void setup_min_unmapped_ratio(void);
5719 static void setup_min_slab_ratio(void);
5720 #else /* CONFIG_NUMA */
5721
5722 static void build_zonelists(pg_data_t *pgdat)
5723 {
5724 struct zoneref *zonerefs;
5725 int nr_zones;
5726
5727 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5728 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5729 zonerefs += nr_zones;
5730
5731 zonerefs->zone = NULL;
5732 zonerefs->zone_idx = 0;
5733 }
5734
5735 #endif /* CONFIG_NUMA */
5736
5737 /*
5738 * Boot pageset table. One per cpu which is going to be used for all
5739 * zones and all nodes. The parameters will be set in such a way
5740 * that an item put on a list will immediately be handed over to
5741 * the buddy list. This is safe since pageset manipulation is done
5742 * with interrupts disabled.
5743 *
5744 * The boot_pagesets must be kept even after bootup is complete for
5745 * unused processors and/or zones. They do play a role for bootstrapping
5746 * hotplugged processors.
5747 *
5748 * zoneinfo_show() and maybe other functions do
5749 * not check if the processor is online before following the pageset pointer.
5750 * Other parts of the kernel may not check if the zone is available.
5751 */
5752 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
5753 /* These effectively disable the pcplists in the boot pageset completely */
5754 #define BOOT_PAGESET_HIGH 0
5755 #define BOOT_PAGESET_BATCH 1
5756 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
5757 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
5758
5759 static void __build_all_zonelists(void *data)
5760 {
5761 int nid;
5762 int __maybe_unused cpu;
5763 pg_data_t *self = data;
5764 unsigned long flags;
5765
5766 /*
5767 * The zonelist_update_seq must be acquired with irqsave because the
5768 * reader can be invoked from IRQ with GFP_ATOMIC.
5769 */
5770 write_seqlock_irqsave(&zonelist_update_seq, flags);
5771 /*
5772 * Also disable synchronous printk() to prevent any printk() from
5773 * trying to hold port->lock, because
5774 * tty_insert_flip_string_and_push_buffer() on another CPU might be
5775 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
5776 */
5777 printk_deferred_enter();
5778
5779 #ifdef CONFIG_NUMA
5780 memset(node_load, 0, sizeof(node_load));
5781 #endif
5782
5783 /*
5784 * This node is hotadded and no memory is yet present. So just
5785 * building zonelists is fine - no need to touch other nodes.
5786 */
5787 if (self && !node_online(self->node_id)) {
5788 build_zonelists(self);
5789 } else {
5790 /*
5791 * All possible nodes have pgdat preallocated
5792 * in free_area_init
5793 */
5794 for_each_node(nid) {
5795 pg_data_t *pgdat = NODE_DATA(nid);
5796
5797 build_zonelists(pgdat);
5798 }
5799
5800 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5801 /*
5802 * We now know the "local memory node" for each node--
5803 * i.e., the node of the first zone in the generic zonelist.
5804 * Set up numa_mem percpu variable for on-line cpus. During
5805 * boot, only the boot cpu should be on-line; we'll init the
5806 * secondary cpus' numa_mem as they come on-line. During
5807 * node/memory hotplug, we'll fixup all on-line cpus.
5808 */
5809 for_each_online_cpu(cpu)
5810 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5811 #endif
5812 }
5813
5814 printk_deferred_exit();
5815 write_sequnlock_irqrestore(&zonelist_update_seq, flags);
5816 }
5817
5818 static noinline void __init
5819 build_all_zonelists_init(void)
5820 {
5821 int cpu;
5822
5823 __build_all_zonelists(NULL);
5824
5825 /*
5826 * Initialize the boot_pagesets that are going to be used
5827 * for bootstrapping processors. The real pagesets for
5828 * each zone will be allocated later when the per cpu
5829 * allocator is available.
5830 *
5831 * boot_pagesets are used also for bootstrapping offline
5832 * cpus if the system is already booted because the pagesets
5833 * are needed to initialize allocators on a specific cpu too.
5834 * F.e. the percpu allocator needs the page allocator which
5835 * needs the percpu allocator in order to allocate its pagesets
5836 * (a chicken-egg dilemma).
5837 */
5838 for_each_possible_cpu(cpu)
5839 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
5840
5841 mminit_verify_zonelist();
5842 cpuset_init_current_mems_allowed();
5843 }
5844
5845 /*
5846 * Rebuild the zonelists; the __init path is only taken while system_state == SYSTEM_BOOTING.
5847 *
5848 * __ref due to call of __init annotated helper build_all_zonelists_init
5849 * [protected by SYSTEM_BOOTING].
5850 */
5851 void __ref build_all_zonelists(pg_data_t *pgdat)
5852 {
5853 unsigned long vm_total_pages;
5854
5855 if (system_state == SYSTEM_BOOTING) {
5856 build_all_zonelists_init();
5857 } else {
5858 __build_all_zonelists(pgdat);
5859 /* cpuset refresh routine should be here */
5860 }
5861 /* Get the number of free pages beyond high watermark in all zones. */
5862 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
5863 /*
5864 * Disable grouping by mobility if the number of pages in the
5865 * system is too low to allow the mechanism to work. It would be
5866 * more accurate, but expensive to check per-zone. This check is
5867 * made on memory-hotadd so a system can start with mobility
5868 * disabled and enable it later
5869 */
5870 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5871 page_group_by_mobility_disabled = 1;
5872 else
5873 page_group_by_mobility_disabled = 0;
5874
5875 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
5876 nr_online_nodes,
5877 str_off_on(page_group_by_mobility_disabled),
5878 vm_total_pages);
5879 #ifdef CONFIG_NUMA
5880 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5881 #endif
5882 }
5883
5884 static int zone_batchsize(struct zone *zone)
5885 {
5886 #ifdef CONFIG_MMU
5887 int batch;
5888
5889 /*
5890 * The number of pages to batch allocate is either ~0.025%
5891 * of the zone or 256KB, whichever is smaller. The batch
5892 * size strikes a balance between allocation latency
5893 * and zone lock contention.
5894 */
5895 batch = min(zone_managed_pages(zone) >> 12, SZ_256K / PAGE_SIZE);
5896 if (batch <= 1)
5897 return 1;
5898
5899 /*
5900 * Clamp the batch to a 2^n - 1 value. Having a power
5901 * of 2 value was found to be more likely to have
5902 * suboptimal cache aliasing properties in some cases.
5903 *
5904 * For example if 2 tasks are alternately allocating
5905 * batches of pages, one task can end up with a lot
5906 * of pages of one half of the possible page colors
5907 * and the other with pages of the other colors.
5908 */
5909 batch = rounddown_pow_of_two(batch + batch/2) - 1;
5910
5911 return batch;
5912
5913 #else
5914 /* The deferral and batching of frees should be suppressed under NOMMU
5915 * conditions.
5916 *
5917 * The problem is that NOMMU needs to be able to allocate large chunks
5918 * of contiguous memory as there's no hardware page translation to
5919 * assemble apparent contiguous memory from discontiguous pages.
5920 *
5921 * Queueing large contiguous runs of pages for batching, however,
5922 * causes the pages to actually be freed in smaller chunks. As there
5923 * can be a significant delay between the individual batches being
5924 * recycled, this leads to the once large chunks of space being
5925 * fragmented and becoming unavailable for high-order allocations.
5926 */
5927 return 0;
5928 #endif
5929 }
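/*
 * Worked example (4KiB pages, illustrative): a 4GiB zone has 1048576
 * managed pages, so managed >> 12 = 256, clamped by SZ_256K / PAGE_SIZE
 * = 64. rounddown_pow_of_two(64 + 64/2) - 1 = 63, i.e. the batch lands
 * just below a power of two, as the comment above intends.
 */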
5930
5931 static int percpu_pagelist_high_fraction;
5932 static int zone_highsize(struct zone *zone, int batch, int cpu_online,
5933 int high_fraction)
5934 {
5935 #ifdef CONFIG_MMU
5936 int high;
5937 int nr_split_cpus;
5938 unsigned long total_pages;
5939
5940 if (!high_fraction) {
5941 /*
5942 * By default, the high value of the pcp is based on the zone
5943 * low watermark so that even when the pcp lists are full,
5944 * background reclaim will not be started prematurely.
5945 */
5946 total_pages = low_wmark_pages(zone);
5947 } else {
5948 /*
5949 * If percpu_pagelist_high_fraction is configured, the high
5950 * value is based on a fraction of the managed pages in the
5951 * zone.
5952 */
5953 total_pages = zone_managed_pages(zone) / high_fraction;
5954 }
5955
5956 /*
5957 * Split the high value across all online CPUs local to the zone. Note
5958 * that early in boot CPUs may not be online yet, and that during
5959 * CPU hotplug the cpumask is not yet updated when a CPU is being
5960 * onlined. For memory nodes that have no CPUs, split the high value
5961 * across all online CPUs to mitigate the risk that reclaim is triggered
5962 * prematurely due to pages stored on pcp lists.
5963 */
5964 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
5965 if (!nr_split_cpus)
5966 nr_split_cpus = num_online_cpus();
5967 high = total_pages / nr_split_cpus;
5968
5969 /*
5970 * Ensure high is at least batch*4. The multiple is based on the
5971 * historical relationship between high and batch.
5972 */
5973 high = max(high, batch << 2);
5974
5975 return high;
5976 #else
5977 return 0;
5978 #endif
5979 }
5980
5981 /*
5982 * pcp->high and pcp->batch values are related and generally batch is lower
5983 * than high. They are also related to pcp->count such that count is lower
5984 * than high, and as soon as it reaches high, the pcplist is flushed.
5985 *
5986 * However, guaranteeing these relations at all times would require e.g. write
5987 * barriers here but also careful usage of read barriers at the read side, and
5988 * thus be prone to error and bad for performance. Thus the update only prevents
5989 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max
5990 * should ensure they can cope with those fields changing asynchronously, and
5991 * fully trust only the pcp->count field on the local CPU with interrupts
5992 * disabled.
5993 *
5994 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5995 * outside of boot time (or some other assurance that no concurrent updaters
5996 * exist).
5997 */
5998 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min,
5999 unsigned long high_max, unsigned long batch)
6000 {
6001 WRITE_ONCE(pcp->batch, batch);
6002 WRITE_ONCE(pcp->high_min, high_min);
6003 WRITE_ONCE(pcp->high_max, high_max);
6004 }
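/*
 * A reader-side sketch matching the WRITE_ONCE() contract above
 * (hypothetical helper; any user of pcp->batch outside the local CPU
 * must tolerate the value changing at any time):
 */
#if 0
static int example_read_batch(struct per_cpu_pages *pcp)
{
	/* One untorn but possibly stale snapshot. */
	return READ_ONCE(pcp->batch);
}
#endif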
6005
6006 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
6007 {
6008 int pindex;
6009
6010 memset(pcp, 0, sizeof(*pcp));
6011 memset(pzstats, 0, sizeof(*pzstats));
6012
6013 spin_lock_init(&pcp->lock);
6014 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
6015 INIT_LIST_HEAD(&pcp->lists[pindex]);
6016
6017 /*
6018 * Set batch and high values safe for a boot pageset. A true percpu
6019 * pageset's initialization will update them subsequently. Here we don't
6020 * need to be as careful as pageset_update() as nobody can access the
6021 * pageset yet.
6022 */
6023 pcp->high_min = BOOT_PAGESET_HIGH;
6024 pcp->high_max = BOOT_PAGESET_HIGH;
6025 pcp->batch = BOOT_PAGESET_BATCH;
6026 }
6027
6028 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min,
6029 unsigned long high_max, unsigned long batch)
6030 {
6031 struct per_cpu_pages *pcp;
6032 int cpu;
6033
6034 for_each_possible_cpu(cpu) {
6035 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6036 pageset_update(pcp, high_min, high_max, batch);
6037 }
6038 }
6039
6040 /*
6041 * Calculate and set new high and batch values for all per-cpu pagesets of a
6042 * zone based on the zone's size.
6043 */
6044 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
6045 {
6046 int new_high_min, new_high_max, new_batch;
6047
6048 new_batch = zone_batchsize(zone);
6049 if (percpu_pagelist_high_fraction) {
6050 new_high_min = zone_highsize(zone, new_batch, cpu_online,
6051 percpu_pagelist_high_fraction);
6052 /*
6053 * PCP high is tuned manually; disable auto-tuning by
6054 * setting high_min and high_max to the manual value.
6055 */
6056 new_high_max = new_high_min;
6057 } else {
6058 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0);
6059 new_high_max = zone_highsize(zone, new_batch, cpu_online,
6060 MIN_PERCPU_PAGELIST_HIGH_FRACTION);
6061 }
6062
6063 if (zone->pageset_high_min == new_high_min &&
6064 zone->pageset_high_max == new_high_max &&
6065 zone->pageset_batch == new_batch)
6066 return;
6067
6068 zone->pageset_high_min = new_high_min;
6069 zone->pageset_high_max = new_high_max;
6070 zone->pageset_batch = new_batch;
6071
6072 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max,
6073 new_batch);
6074 }
6075
6076 void __meminit setup_zone_pageset(struct zone *zone)
6077 {
6078 int cpu;
6079
6080 /* Size may be 0 on !SMP && !NUMA */
6081 if (sizeof(struct per_cpu_zonestat) > 0)
6082 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
6083
6084 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
6085 for_each_possible_cpu(cpu) {
6086 struct per_cpu_pages *pcp;
6087 struct per_cpu_zonestat *pzstats;
6088
6089 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6090 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6091 per_cpu_pages_init(pcp, pzstats);
6092 }
6093
6094 zone_set_pageset_high_and_batch(zone, 0);
6095 }
6096
6097 /*
6098 * The zone indicated has a new number of managed_pages; batch sizes and percpu
6099 * page high values need to be recalculated.
6100 */
6101 static void zone_pcp_update(struct zone *zone, int cpu_online)
6102 {
6103 mutex_lock(&pcp_batch_high_lock);
6104 zone_set_pageset_high_and_batch(zone, cpu_online);
6105 mutex_unlock(&pcp_batch_high_lock);
6106 }
6107
6108 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
6109 {
6110 struct per_cpu_pages *pcp;
6111 struct cpu_cacheinfo *cci;
6112
6113 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6114 cci = get_cpu_cacheinfo(cpu);
6115 /*
6116 * If the CPU's data cache slice is large enough, "pcp->batch"
6117 * pages can be preserved in the PCP before it is drained during
6118 * consecutive freeing of high-order pages without allocation.
6119 * This can reduce zone lock contention without hurting the
6120 * sharing of cache-hot pages.
6121 */
6122 spin_lock(&pcp->lock);
6123 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
6124 pcp->flags |= PCPF_FREE_HIGH_BATCH;
6125 else
6126 pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
6127 spin_unlock(&pcp->lock);
6128 }
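/*
 * Worked example (illustrative): with pcp->batch == 63 and 4KiB pages
 * the threshold is 3 * 63 = 189 pages (~756KiB). A CPU whose data
 * cache slice is 1MiB (256 pages) gets PCPF_FREE_HIGH_BATCH; a 512KiB
 * slice (128 pages) does not.
 */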
6129
6130 void setup_pcp_cacheinfo(unsigned int cpu)
6131 {
6132 struct zone *zone;
6133
6134 for_each_populated_zone(zone)
6135 zone_pcp_update_cacheinfo(zone, cpu);
6136 }
6137
6138 /*
6139 * Allocate per cpu pagesets and initialize them.
6140 * Before this call only boot pagesets were available.
6141 */
6142 void __init setup_per_cpu_pageset(void)
6143 {
6144 struct pglist_data *pgdat;
6145 struct zone *zone;
6146 int __maybe_unused cpu;
6147
6148 for_each_populated_zone(zone)
6149 setup_zone_pageset(zone);
6150
6151 #ifdef CONFIG_NUMA
6152 /*
6153 * Unpopulated zones continue using the boot pagesets.
6154 * The numa stats for these pagesets need to be reset.
6155 * Otherwise, they will end up skewing the stats of
6156 * the nodes these zones are associated with.
6157 */
6158 for_each_possible_cpu(cpu) {
6159 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
6160 memset(pzstats->vm_numa_event, 0,
6161 sizeof(pzstats->vm_numa_event));
6162 }
6163 #endif
6164
6165 for_each_online_pgdat(pgdat)
6166 pgdat->per_cpu_nodestats =
6167 alloc_percpu(struct per_cpu_nodestat);
6168 }
6169
6170 __meminit void zone_pcp_init(struct zone *zone)
6171 {
6172 /*
6173 * per cpu subsystem is not up at this point. The following code
6174 * relies on the ability of the linker to provide the
6175 * offset of a (static) per cpu variable into the per cpu area.
6176 */
6177 zone->per_cpu_pageset = &boot_pageset;
6178 zone->per_cpu_zonestats = &boot_zonestats;
6179 zone->pageset_high_min = BOOT_PAGESET_HIGH;
6180 zone->pageset_high_max = BOOT_PAGESET_HIGH;
6181 zone->pageset_batch = BOOT_PAGESET_BATCH;
6182
6183 if (populated_zone(zone))
6184 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
6185 zone->present_pages, zone_batchsize(zone));
6186 }
6187
6188 static void setup_per_zone_lowmem_reserve(void);
6189
6190 void adjust_managed_page_count(struct page *page, long count)
6191 {
6192 atomic_long_add(count, &page_zone(page)->managed_pages);
6193 totalram_pages_add(count);
6194 setup_per_zone_lowmem_reserve();
6195 }
6196 EXPORT_SYMBOL(adjust_managed_page_count);
6197
6198 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
6199 {
6200 void *pos;
6201 unsigned long pages = 0;
6202
6203 start = (void *)PAGE_ALIGN((unsigned long)start);
6204 end = (void *)((unsigned long)end & PAGE_MASK);
6205 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
6206 struct page *page = virt_to_page(pos);
6207 void *direct_map_addr;
6208
6209 /*
6210 * 'direct_map_addr' might be different from 'pos'
6211 * because some architectures' virt_to_page()
6212 * work with aliases. Getting the direct map
6213 * address ensures that we get a _writeable_
6214 * alias for the memset().
6215 */
6216 direct_map_addr = page_address(page);
6217 /*
6218 * Perform a kasan-unchecked memset() since this memory
6219 * has not been initialized.
6220 */
6221 direct_map_addr = kasan_reset_tag(direct_map_addr);
6222 if ((unsigned int)poison <= 0xFF)
6223 memset(direct_map_addr, poison, PAGE_SIZE);
6224
6225 free_reserved_page(page);
6226 }
6227
6228 if (pages && s)
6229 pr_info("Freeing %s memory: %ldK\n", s, K(pages));
6230
6231 return pages;
6232 }
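/*
 * Usage sketch modelled on free_initmem_default() (symbols assumed to
 * come from the generic kernel image layout; illustrative only):
 */
#if 0
static void example_free_initmem(void)
{
	extern char __init_begin[], __init_end[];

	/* Poison, un-reserve and hand the init section back to the buddy. */
	free_reserved_area(&__init_begin, &__init_end,
			   POISON_FREE_INITMEM, "unused kernel");
}
#endif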
6233
6234 void free_reserved_page(struct page *page)
6235 {
6236 clear_page_tag_ref(page);
6237 ClearPageReserved(page);
6238 init_page_count(page);
6239 __free_page(page);
6240 adjust_managed_page_count(page, 1);
6241 }
6242 EXPORT_SYMBOL(free_reserved_page);
6243
6244 static int page_alloc_cpu_dead(unsigned int cpu)
6245 {
6246 struct zone *zone;
6247
6248 lru_add_drain_cpu(cpu);
6249 mlock_drain_remote(cpu);
6250 drain_pages(cpu);
6251
6252 /*
6253 * Spill the event counters of the dead processor
6254 * into the current processors event counters.
6255 * This artificially elevates the count of the current
6256 * processor.
6257 */
6258 vm_events_fold_cpu(cpu);
6259
6260 /*
6261 * Zero the differential counters of the dead processor
6262 * so that the vm statistics are consistent.
6263 *
6264 * This is only okay since the processor is dead and cannot
6265 * race with what we are doing.
6266 */
6267 cpu_vm_stats_fold(cpu);
6268
6269 for_each_populated_zone(zone)
6270 zone_pcp_update(zone, 0);
6271
6272 return 0;
6273 }
6274
6275 static int page_alloc_cpu_online(unsigned int cpu)
6276 {
6277 struct zone *zone;
6278
6279 for_each_populated_zone(zone)
6280 zone_pcp_update(zone, 1);
6281 return 0;
6282 }
6283
6284 void __init page_alloc_init_cpuhp(void)
6285 {
6286 int ret;
6287
6288 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
6289 "mm/page_alloc:pcp",
6290 page_alloc_cpu_online,
6291 page_alloc_cpu_dead);
6292 WARN_ON(ret < 0);
6293 }
6294
6295 /*
6296 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
6297 * or min_free_kbytes changes.
6298 */
6299 static void calculate_totalreserve_pages(void)
6300 {
6301 struct pglist_data *pgdat;
6302 unsigned long reserve_pages = 0;
6303 enum zone_type i, j;
6304
6305 for_each_online_pgdat(pgdat) {
6306
6307 pgdat->totalreserve_pages = 0;
6308
6309 for (i = 0; i < MAX_NR_ZONES; i++) {
6310 struct zone *zone = pgdat->node_zones + i;
6311 long max = 0;
6312 unsigned long managed_pages = zone_managed_pages(zone);
6313
6314 /*
6315 * lowmem_reserve[j] is monotonically non-decreasing
6316 * in j for a given zone (see
6317 * setup_per_zone_lowmem_reserve()). The maximum
6318 * valid reserve lives at the highest index with a
6319 * non-zero value, so scan backwards and stop at the
6320 * first hit.
6321 */
6322 for (j = MAX_NR_ZONES - 1; j > i; j--) {
6323 if (!zone->lowmem_reserve[j])
6324 continue;
6325
6326 max = zone->lowmem_reserve[j];
6327 break;
6328 }
6329 /* we treat the high watermark as reserved pages. */
6330 max += high_wmark_pages(zone);
6331
6332 max = min_t(unsigned long, max, managed_pages);
6333
6334 pgdat->totalreserve_pages += max;
6335
6336 reserve_pages += max;
6337 }
6338 }
6339 totalreserve_pages = reserve_pages;
6340 trace_mm_calculate_totalreserve_pages(totalreserve_pages);
6341 }
6342
6343 /*
6344 * setup_per_zone_lowmem_reserve - called whenever
6345 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
6346 * has a correct lowmem reserve value, so an adequate number of
6347 * pages are left in the zone after a successful __alloc_pages().
6348 */
6349 static void setup_per_zone_lowmem_reserve(void)
6350 {
6351 struct pglist_data *pgdat;
6352 enum zone_type i, j;
6353 /*
6354 * For a given zone node_zones[i], lowmem_reserve[j] (j > i)
6355 * represents how many pages in zone i must effectively be kept
6356 * in reserve when deciding whether an allocation class that is
6357 * allowed to allocate from zones up to j may fall back into
6358 * zone i.
6359 *
6360 * As j increases, the allocation class can use a strictly larger
6361 * set of fallback zones and therefore must not be allowed to
6362 * deplete low zones more aggressively than a less flexible one.
6363 * As a result, lowmem_reserve[j] is required to be monotonically
6364 * non-decreasing in j for each zone i. Callers such as
6365 * calculate_totalreserve_pages() rely on this monotonicity when
6366 * selecting the maximum reserve entry.
6367 */
6368 for_each_online_pgdat(pgdat) {
6369 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
6370 struct zone *zone = &pgdat->node_zones[i];
6371 int ratio = sysctl_lowmem_reserve_ratio[i];
6372 bool clear = !ratio || !zone_managed_pages(zone);
6373 unsigned long managed_pages = 0;
6374
6375 for (j = i + 1; j < MAX_NR_ZONES; j++) {
6376 struct zone *upper_zone = &pgdat->node_zones[j];
6377
6378 managed_pages += zone_managed_pages(upper_zone);
6379
6380 if (clear)
6381 zone->lowmem_reserve[j] = 0;
6382 else
6383 zone->lowmem_reserve[j] = managed_pages / ratio;
6384 trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone,
6385 zone->lowmem_reserve[j]);
6386 }
6387 }
6388 }
6389
6390 /* update totalreserve_pages */
6391 calculate_totalreserve_pages();
6392 }
6393
6394 static void __setup_per_zone_wmarks(void)
6395 {
6396 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6397 unsigned long lowmem_pages = 0;
6398 struct zone *zone;
6399 unsigned long flags;
6400
6401 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */
6402 for_each_zone(zone) {
6403 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
6404 lowmem_pages += zone_managed_pages(zone);
6405 }
6406
6407 for_each_zone(zone) {
6408 u64 tmp;
6409
6410 spin_lock_irqsave(&zone->lock, flags);
6411 tmp = (u64)pages_min * zone_managed_pages(zone);
6412 tmp = div64_ul(tmp, lowmem_pages);
6413 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
6414 /*
6415 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6416 * need highmem and movable zones pages, so cap pages_min
6417 * to a small value here.
6418 *
6419 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
6420 * deltas control async page reclaim, and so should
6421 * not be capped for highmem and movable zones.
6422 */
6423 unsigned long min_pages;
6424
6425 min_pages = zone_managed_pages(zone) / 1024;
6426 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
6427 zone->_watermark[WMARK_MIN] = min_pages;
6428 } else {
6429 /*
6430 * If it's a lowmem zone, reserve a number of pages
6431 * proportionate to the zone's size.
6432 */
6433 zone->_watermark[WMARK_MIN] = tmp;
6434 }
6435
6436 /*
6437 * Set the kswapd watermarks distance according to the
6438 * scale factor in proportion to available memory, but
6439 * ensure a minimum size on small systems.
6440 */
6441 tmp = max_t(u64, tmp >> 2,
6442 mult_frac(zone_managed_pages(zone),
6443 watermark_scale_factor, 10000));
6444
6445 zone->watermark_boost = 0;
6446 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
6447 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
6448 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
6449 trace_mm_setup_per_zone_wmarks(zone);
6450
6451 spin_unlock_irqrestore(&zone->lock, flags);
6452 }
6453
6454 /* update totalreserve_pages */
6455 calculate_totalreserve_pages();
6456 }
6457
6458 /**
6459 * setup_per_zone_wmarks - called when min_free_kbytes changes
6460 * or when memory is hot-{added|removed}
6461 *
6462 * Ensures that the watermark[min,low,high] values for each zone are set
6463 * correctly with respect to min_free_kbytes.
6464 */
6465 void setup_per_zone_wmarks(void)
6466 {
6467 struct zone *zone;
6468 static DEFINE_SPINLOCK(lock);
6469
6470 spin_lock(&lock);
6471 __setup_per_zone_wmarks();
6472 spin_unlock(&lock);
6473
6474 /*
6475 * The watermarks have changed, so update the pcpu batch
6476 * and high limits or the limits may be inappropriate.
6477 */
6478 for_each_zone(zone)
6479 zone_pcp_update(zone, 0);
6480 }
6481
6482 /*
6483 * Initialise min_free_kbytes.
6484 *
6485 * For small machines we want it small (128k min). For large machines
6486 * we want it large (256MB max). But it is not linear, because network
6487 * bandwidth does not increase linearly with machine size. We use
6488 *
6489 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
6490 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
6491 *
6492 * which yields
6493 *
6494 * 16MB: 512k
6495 * 32MB: 724k
6496 * 64MB: 1024k
6497 * 128MB: 1448k
6498 * 256MB: 2048k
6499 * 512MB: 2896k
6500 * 1024MB: 4096k
6501 * 2048MB: 5792k
6502 * 4096MB: 8192k
6503 * 8192MB: 11584k
6504 * 16384MB: 16384k
6505 */
6506 void calculate_min_free_kbytes(void)
6507 {
6508 unsigned long lowmem_kbytes;
6509 int new_min_free_kbytes;
6510
6511 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
6512 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
6513
6514 if (new_min_free_kbytes > user_min_free_kbytes)
6515 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
6516 else
6517 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6518 new_min_free_kbytes, user_min_free_kbytes);
6519
6520 }
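/*
 * Arithmetic check against the table above: with 4GiB of lowmem,
 * lowmem_kbytes = 4194304, so int_sqrt(4194304 * 16) = int_sqrt(2^26)
 * = 8192, matching the documented 8192k for 4096MB, before the final
 * clamp to [128, 262144].
 */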
6521
6522 int __meminit init_per_zone_wmark_min(void)
6523 {
6524 calculate_min_free_kbytes();
6525 setup_per_zone_wmarks();
6526 refresh_zone_stat_thresholds();
6527 setup_per_zone_lowmem_reserve();
6528
6529 #ifdef CONFIG_NUMA
6530 setup_min_unmapped_ratio();
6531 setup_min_slab_ratio();
6532 #endif
6533
6534 khugepaged_min_free_kbytes_update();
6535
6536 return 0;
6537 }
6538 postcore_initcall(init_per_zone_wmark_min)
6539
6540 /*
6541 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
6542 * that we can call two helper functions whenever min_free_kbytes
6543 * changes.
6544 */
6545 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write,
6546 void *buffer, size_t *length, loff_t *ppos)
6547 {
6548 int rc;
6549
6550 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6551 if (rc)
6552 return rc;
6553
6554 if (write) {
6555 user_min_free_kbytes = min_free_kbytes;
6556 setup_per_zone_wmarks();
6557 }
6558 return 0;
6559 }
6560
6561 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write,
6562 void *buffer, size_t *length, loff_t *ppos)
6563 {
6564 int rc;
6565
6566 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6567 if (rc)
6568 return rc;
6569
6570 if (write)
6571 setup_per_zone_wmarks();
6572
6573 return 0;
6574 }
6575
6576 #ifdef CONFIG_NUMA
6577 static void setup_min_unmapped_ratio(void)
6578 {
6579 pg_data_t *pgdat;
6580 struct zone *zone;
6581
6582 for_each_online_pgdat(pgdat)
6583 pgdat->min_unmapped_pages = 0;
6584
6585 for_each_zone(zone)
6586 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
6587 sysctl_min_unmapped_ratio) / 100;
6588 }
6589
6590
6591 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write,
6592 void *buffer, size_t *length, loff_t *ppos)
6593 {
6594 int rc;
6595
6596 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6597 if (rc)
6598 return rc;
6599
6600 setup_min_unmapped_ratio();
6601
6602 return 0;
6603 }
6604
6605 static void setup_min_slab_ratio(void)
6606 {
6607 pg_data_t *pgdat;
6608 struct zone *zone;
6609
6610 for_each_online_pgdat(pgdat)
6611 pgdat->min_slab_pages = 0;
6612
6613 for_each_zone(zone)
6614 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
6615 sysctl_min_slab_ratio) / 100;
6616 }
6617
6618 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write,
6619 void *buffer, size_t *length, loff_t *ppos)
6620 {
6621 int rc;
6622
6623 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6624 if (rc)
6625 return rc;
6626
6627 setup_min_slab_ratio();
6628
6629 return 0;
6630 }
6631 #endif
6632
6633 /*
6634 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6635 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6636 * whenever sysctl_lowmem_reserve_ratio changes.
6637 *
6638 * The reserve ratio obviously has absolutely no relation with the
6639 * minimum watermarks. The lowmem reserve ratio can only be
6640 * interpreted as a function of the boot-time zone sizes.
6641 */
6642 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table,
6643 int write, void *buffer, size_t *length, loff_t *ppos)
6644 {
6645 int i;
6646
6647 proc_dointvec_minmax(table, write, buffer, length, ppos);
6648
6649 for (i = 0; i < MAX_NR_ZONES; i++) {
6650 if (sysctl_lowmem_reserve_ratio[i] < 1)
6651 sysctl_lowmem_reserve_ratio[i] = 0;
6652 }
6653
6654 setup_per_zone_lowmem_reserve();
6655 return 0;
6656 }
6657
6658 /*
6659 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
6660 * cpu. It is the fraction of total pages in each zone that a hot per cpu
6661 * pagelist can have before it gets flushed back to the buddy allocator.
6662 */
6663 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table,
6664 int write, void *buffer, size_t *length, loff_t *ppos)
6665 {
6666 struct zone *zone;
6667 int old_percpu_pagelist_high_fraction;
6668 int ret;
6669
6670 mutex_lock(&pcp_batch_high_lock);
6671 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
6672
6673 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
6674 if (!write || ret < 0)
6675 goto out;
6676
6677 /* Sanity checking to avoid pcp imbalance */
6678 if (percpu_pagelist_high_fraction &&
6679 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
6680 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
6681 ret = -EINVAL;
6682 goto out;
6683 }
6684
6685 /* No change? */
6686 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
6687 goto out;
6688
6689 for_each_populated_zone(zone)
6690 zone_set_pageset_high_and_batch(zone, 0);
6691 out:
6692 mutex_unlock(&pcp_batch_high_lock);
6693 return ret;
6694 }
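
/*
 * Back-of-the-envelope sizing example (made-up numbers): for a zone with
 * 1,048,576 managed pages, percpu_pagelist_high_fraction == 8 and 16 CPUs
 * local to the zone's node, zone_set_pageset_high_and_batch() ends up with
 * roughly
 *
 *	pcp->high = 1048576 / 8 / 16 = 8192 pages per CPU
 *
 * i.e. each per-cpu list may cache about 1/128 of the zone before pages are
 * flushed back to the buddy allocator.
 */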
6695
6696 static const struct ctl_table page_alloc_sysctl_table[] = {
6697 {
6698 .procname = "min_free_kbytes",
6699 .data = &min_free_kbytes,
6700 .maxlen = sizeof(min_free_kbytes),
6701 .mode = 0644,
6702 .proc_handler = min_free_kbytes_sysctl_handler,
6703 .extra1 = SYSCTL_ZERO,
6704 },
6705 {
6706 .procname = "watermark_boost_factor",
6707 .data = &watermark_boost_factor,
6708 .maxlen = sizeof(watermark_boost_factor),
6709 .mode = 0644,
6710 .proc_handler = proc_dointvec_minmax,
6711 .extra1 = SYSCTL_ZERO,
6712 },
6713 {
6714 .procname = "watermark_scale_factor",
6715 .data = &watermark_scale_factor,
6716 .maxlen = sizeof(watermark_scale_factor),
6717 .mode = 0644,
6718 .proc_handler = watermark_scale_factor_sysctl_handler,
6719 .extra1 = SYSCTL_ONE,
6720 .extra2 = SYSCTL_THREE_THOUSAND,
6721 },
6722 {
6723 .procname = "defrag_mode",
6724 .data = &defrag_mode,
6725 .maxlen = sizeof(defrag_mode),
6726 .mode = 0644,
6727 .proc_handler = proc_dointvec_minmax,
6728 .extra1 = SYSCTL_ZERO,
6729 .extra2 = SYSCTL_ONE,
6730 },
6731 {
6732 .procname = "percpu_pagelist_high_fraction",
6733 .data = &percpu_pagelist_high_fraction,
6734 .maxlen = sizeof(percpu_pagelist_high_fraction),
6735 .mode = 0644,
6736 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler,
6737 .extra1 = SYSCTL_ZERO,
6738 },
6739 {
6740 .procname = "lowmem_reserve_ratio",
6741 .data = &sysctl_lowmem_reserve_ratio,
6742 .maxlen = sizeof(sysctl_lowmem_reserve_ratio),
6743 .mode = 0644,
6744 .proc_handler = lowmem_reserve_ratio_sysctl_handler,
6745 },
6746 #ifdef CONFIG_NUMA
6747 {
6748 .procname = "numa_zonelist_order",
6749 .data = &numa_zonelist_order,
6750 .maxlen = NUMA_ZONELIST_ORDER_LEN,
6751 .mode = 0644,
6752 .proc_handler = numa_zonelist_order_handler,
6753 },
6754 {
6755 .procname = "min_unmapped_ratio",
6756 .data = &sysctl_min_unmapped_ratio,
6757 .maxlen = sizeof(sysctl_min_unmapped_ratio),
6758 .mode = 0644,
6759 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler,
6760 .extra1 = SYSCTL_ZERO,
6761 .extra2 = SYSCTL_ONE_HUNDRED,
6762 },
6763 {
6764 .procname = "min_slab_ratio",
6765 .data = &sysctl_min_slab_ratio,
6766 .maxlen = sizeof(sysctl_min_slab_ratio),
6767 .mode = 0644,
6768 .proc_handler = sysctl_min_slab_ratio_sysctl_handler,
6769 .extra1 = SYSCTL_ZERO,
6770 .extra2 = SYSCTL_ONE_HUNDRED,
6771 },
6772 #endif
6773 };
6774
6775 void __init page_alloc_sysctl_init(void)
6776 {
6777 register_sysctl_init("vm", page_alloc_sysctl_table);
6778 }
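
/*
 * The table above is exposed under /proc/sys/vm once registered; for example
 * the pcp high fraction limit enforced further up can be exercised from
 * userspace with something like (illustrative only):
 *
 *	echo 8 > /proc/sys/vm/percpu_pagelist_high_fraction
 *
 * Non-zero values below MIN_PERCPU_PAGELIST_HIGH_FRACTION are rejected with
 * -EINVAL by percpu_pagelist_high_fraction_sysctl_handler().
 */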
6779
6780 #ifdef CONFIG_CONTIG_ALLOC
6781 /* Usage: See admin-guide/dynamic-debug-howto.rst */
6782 static void alloc_contig_dump_pages(struct list_head *page_list)
6783 {
6784 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
6785
6786 if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
6787 struct page *page;
6788
6789 dump_stack();
6790 list_for_each_entry(page, page_list, lru)
6791 dump_page(page, "migration failure");
6792 }
6793 }
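
/*
 * The dump above is gated by dynamic debug; a plausible way to turn it on at
 * runtime (assuming dynamic debug is available, see the howto referenced
 * above) is something along the lines of:
 *
 *	echo 'func alloc_contig_dump_pages +p' > \
 *		/sys/kernel/debug/dynamic_debug/control
 */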
6794
6795 /* [start, end) must belong to a single zone. */
6796 static int __alloc_contig_migrate_range(struct compact_control *cc,
6797 unsigned long start, unsigned long end)
6798 {
6799 /* This function is based on compact_zone() from compaction.c. */
6800 unsigned int nr_reclaimed;
6801 unsigned long pfn = start;
6802 unsigned int tries = 0;
6803 int ret = 0;
6804 struct migration_target_control mtc = {
6805 .nid = zone_to_nid(cc->zone),
6806 .gfp_mask = cc->gfp_mask,
6807 .reason = MR_CONTIG_RANGE,
6808 };
6809
6810 lru_cache_disable();
6811
6812 while (pfn < end || !list_empty(&cc->migratepages)) {
6813 if (fatal_signal_pending(current)) {
6814 ret = -EINTR;
6815 break;
6816 }
6817
6818 if (list_empty(&cc->migratepages)) {
6819 cc->nr_migratepages = 0;
6820 ret = isolate_migratepages_range(cc, pfn, end);
6821 if (ret && ret != -EAGAIN)
6822 break;
6823 pfn = cc->migrate_pfn;
6824 tries = 0;
6825 } else if (++tries == 5) {
6826 ret = -EBUSY;
6827 break;
6828 }
6829
6830 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6831 &cc->migratepages);
6832 cc->nr_migratepages -= nr_reclaimed;
6833
6834 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
6835 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
6836
6837 /*
6838 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
6839 * to retry over this error, so do the same here.
6840 */
6841 if (ret == -ENOMEM)
6842 break;
6843 }
6844
6845 lru_cache_enable();
6846 if (ret < 0) {
6847 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
6848 alloc_contig_dump_pages(&cc->migratepages);
6849 putback_movable_pages(&cc->migratepages);
6850 }
6851
6852 return (ret < 0) ? ret : 0;
6853 }
6854
6855 static void split_free_pages(struct list_head *list, gfp_t gfp_mask)
6856 {
6857 int order;
6858
6859 for (order = 0; order < NR_PAGE_ORDERS; order++) {
6860 struct page *page, *next;
6861 int nr_pages = 1 << order;
6862
6863 list_for_each_entry_safe(page, next, &list[order], lru) {
6864 int i;
6865
6866 post_alloc_hook(page, order, gfp_mask);
6867 set_page_refcounted(page);
6868 if (!order)
6869 continue;
6870
6871 split_page(page, order);
6872
6873 /* Add all subpages to the order-0 head, in sequence. */
6874 list_del(&page->lru);
6875 for (i = 0; i < nr_pages; i++)
6876 list_add_tail(&page[i].lru, &list[0]);
6877 }
6878 }
6879 }
6880
6881 static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
6882 {
6883 const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
6884 const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
6885 __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO;
6886 const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
6887
6888 /*
6889 * We are given the range to allocate; node, mobility and placement
6890 * hints are irrelevant at this point. We'll simply ignore them.
6891 */
6892 gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE |
6893 __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE);
6894
6895 /*
6896 * We only support most reclaim flags (but not NOFAIL/NORETRY), and
6897 * selected action flags.
6898 */
6899 if (gfp_mask & ~(reclaim_mask | action_mask))
6900 return -EINVAL;
6901
6902 /*
6903 * Flags to control page compaction/migration/reclaim, to free up our
6904 * page range. Migratable pages are movable, __GFP_MOVABLE is implied
6905 * for them.
6906 *
6907 * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that
6908 * to not degrade callers.
6909 */
6910 *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) |
6911 __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
6912 return 0;
6913 }
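
/*
 * A few illustrative inputs for the check above (assuming the usual
 * GFP_KERNEL == __GFP_RECLAIM | __GFP_IO | __GFP_FS definition):
 *
 *	GFP_KERNEL			accepted; the cc mask gains __GFP_MOVABLE
 *					and __GFP_RETRY_MAYFAIL
 *	GFP_KERNEL | __GFP_HIGHMEM	accepted; the placement hint is simply
 *					masked off
 *	GFP_KERNEL | __GFP_NOFAIL	rejected with -EINVAL, NOFAIL is not
 *					supported here
 */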
6914
6915 /**
6916 * alloc_contig_range() -- tries to allocate given range of pages
6917 * @start: start PFN to allocate
6918 * @end: one-past-the-last PFN to allocate
6919 * @alloc_flags: allocation information
6920 * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some
6921 * action and reclaim modifiers are supported. Reclaim modifiers
6922 * control allocation behavior during compaction/migration/reclaim.
6923 *
6924 * The PFN range does not have to be pageblock aligned. The PFN range must
6925 * belong to a single zone.
6926 *
6927 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
6928 * pageblocks in the range. Once isolated, the pageblocks should not
6929 * be modified by others.
6930 *
6931 * Return: zero on success or negative error code. On success all
6932 * pages whose PFN is in [start, end) are allocated for the caller and
6933 * need to be freed with free_contig_range().
6934 */
6935 int alloc_contig_range_noprof(unsigned long start, unsigned long end,
6936 acr_flags_t alloc_flags, gfp_t gfp_mask)
6937 {
6938 const unsigned int order = ilog2(end - start);
6939 unsigned long outer_start, outer_end;
6940 int ret = 0;
6941
6942 struct compact_control cc = {
6943 .nr_migratepages = 0,
6944 .order = -1,
6945 .zone = page_zone(pfn_to_page(start)),
6946 .mode = MIGRATE_SYNC,
6947 .ignore_skip_hint = true,
6948 .no_set_skip_hint = true,
6949 .alloc_contig = true,
6950 };
6951 INIT_LIST_HEAD(&cc.migratepages);
6952 enum pb_isolate_mode mode = (alloc_flags & ACR_FLAGS_CMA) ?
6953 PB_ISOLATE_MODE_CMA_ALLOC :
6954 PB_ISOLATE_MODE_OTHER;
6955
6956 /*
6957 * In contrast to the buddy, we allow for orders here that exceed
6958 * MAX_PAGE_ORDER, so we must manually make sure that we are not
6959 * exceeding the maximum folio order.
6960 */
6961 if (WARN_ON_ONCE((gfp_mask & __GFP_COMP) && order > MAX_FOLIO_ORDER))
6962 return -EINVAL;
6963
6964 gfp_mask = current_gfp_context(gfp_mask);
6965 if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
6966 return -EINVAL;
6967
6968 /*
6969 * What we do here is we mark all pageblocks in range as
6970 * MIGRATE_ISOLATE. Because pageblock and max order pages may
6971 * have different sizes, and due to the way the page allocator
6972 * works, start_isolate_page_range() has special handling for this.
6973 *
6974 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6975 * migrate the pages from an unaligned range (ie. pages that
6976 * we are interested in). This will put all the pages in
6977 * range back to page allocator as MIGRATE_ISOLATE.
6978 *
6979 * When this is done, we take the pages in range from page
6980 * allocator removing them from the buddy system. This way
6981 * page allocator will never consider using them.
6982 *
6983 * This lets us mark the pageblocks back as
6984 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6985 * aligned range but not in the unaligned, original range are
6986 * put back to page allocator so that buddy can use them.
6987 */
6988
6989 ret = start_isolate_page_range(start, end, mode);
6990 if (ret)
6991 goto done;
6992
6993 drain_all_pages(cc.zone);
6994
6995 /*
6996 * In case of -EBUSY, we'd like to know which page causes the problem.
6997 * So, just fall through. test_pages_isolated() has a tracepoint
6998 * which will report the busy page.
6999 *
7000 * It is possible that busy pages could become available before
7001 * the call to test_pages_isolated, and the range will actually be
7002 * allocated. So, if we fall through, be sure to clear ret so that
7003 * -EBUSY is not accidentally used or returned to caller.
7004 */
7005 ret = __alloc_contig_migrate_range(&cc, start, end);
7006 if (ret && ret != -EBUSY)
7007 goto done;
7008
7009 /*
7010 * When in-use hugetlb pages are migrated, they may simply be released
7011 * back into the free hugepage pool instead of being returned to the
7012 * buddy system. After the migration of in-use huge pages is completed,
7013 * we will invoke replace_free_hugepage_folios() to ensure that these
7014 * hugepages are properly released to the buddy system.
7015 */
7016 ret = replace_free_hugepage_folios(start, end);
7017 if (ret)
7018 goto done;
7019
7020 /*
7021 * Pages from [start, end) are within a pageblock_nr_pages
7022 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
7023 * more, all pages in [start, end) are free in page allocator.
7024 * What we are going to do is to allocate all pages from
7025 * [start, end) (that is remove them from page allocator).
7026 *
7027 * The only problem is that pages at the beginning and at the
7028 * end of the interesting range may not be aligned with pages that
7029 * the page allocator holds, i.e. they can be part of higher order
7030 * pages. Because of this, we reserve the bigger range and
7031 * once this is done free the pages we are not interested in.
7032 *
7033 * We don't have to hold zone->lock here because the pages are
7034 * isolated thus they won't get removed from buddy.
7035 */
7036 outer_start = find_large_buddy(start);
7037
7038 /* Make sure the range is really isolated. */
7039 if (test_pages_isolated(outer_start, end, mode)) {
7040 ret = -EBUSY;
7041 goto done;
7042 }
7043
7044 /* Grab isolated pages from freelists. */
7045 outer_end = isolate_freepages_range(&cc, outer_start, end);
7046 if (!outer_end) {
7047 ret = -EBUSY;
7048 goto done;
7049 }
7050
7051 if (!(gfp_mask & __GFP_COMP)) {
7052 split_free_pages(cc.freepages, gfp_mask);
7053
7054 /* Free head and tail (if any) */
7055 if (start != outer_start)
7056 free_contig_range(outer_start, start - outer_start);
7057 if (end != outer_end)
7058 free_contig_range(end, outer_end - end);
7059 } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) {
7060 struct page *head = pfn_to_page(start);
7061
7062 check_new_pages(head, order);
7063 prep_new_page(head, order, gfp_mask, 0);
7064 set_page_refcounted(head);
7065 } else {
7066 ret = -EINVAL;
7067 WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",
7068 start, end, outer_start, outer_end);
7069 }
7070 done:
7071 undo_isolate_page_range(start, end);
7072 return ret;
7073 }
7074 EXPORT_SYMBOL(alloc_contig_range_noprof);
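
/*
 * Minimal caller sketch (hypothetical, not part of this file): a driver that
 * needs nr_pages physically contiguous pages starting at start_pfn could do
 *
 *	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
 *				 ACR_FLAGS_NONE, GFP_KERNEL);
 *	if (!ret) {
 *		use(pfn_to_page(start_pfn));
 *		free_contig_range(start_pfn, nr_pages);
 *	}
 *
 * where use() stands in for whatever the caller does with the range.
 */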
7075
7076 static int __alloc_contig_pages(unsigned long start_pfn,
7077 unsigned long nr_pages, gfp_t gfp_mask)
7078 {
7079 unsigned long end_pfn = start_pfn + nr_pages;
7080
7081 return alloc_contig_range_noprof(start_pfn, end_pfn, ACR_FLAGS_NONE,
7082 gfp_mask);
7083 }
7084
7085 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
7086 unsigned long nr_pages)
7087 {
7088 unsigned long i, end_pfn = start_pfn + nr_pages;
7089 struct page *page;
7090
7091 for (i = start_pfn; i < end_pfn; i++) {
7092 page = pfn_to_online_page(i);
7093 if (!page)
7094 return false;
7095
7096 if (page_zone(page) != z)
7097 return false;
7098
7099 if (PageReserved(page))
7100 return false;
7101
7102 if (PageHuge(page))
7103 return false;
7104 }
7105 return true;
7106 }
7107
7108 static bool zone_spans_last_pfn(const struct zone *zone,
7109 unsigned long start_pfn, unsigned long nr_pages)
7110 {
7111 unsigned long last_pfn = start_pfn + nr_pages - 1;
7112
7113 return zone_spans_pfn(zone, last_pfn);
7114 }
7115
7116 /**
7117 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
7118 * @nr_pages: Number of contiguous pages to allocate
7119 * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some
7120 * action and reclaim modifiers are supported. Reclaim modifiers
7121 * control allocation behavior during compaction/migration/reclaim.
7122 * @nid: Target node
7123 * @nodemask: Mask for other possible nodes
7124 *
7125 * This routine is a wrapper around alloc_contig_range(). It scans over zones
7126 * on an applicable zonelist to find a contiguous pfn range which can then be
7127 * tried for allocation with alloc_contig_range(). This routine is intended
7128 * for allocation requests which can not be fulfilled with the buddy allocator.
7129 *
7130 * The allocated memory is always aligned to a page boundary. If nr_pages is a
7131 * power of two, then the allocated range is also guaranteed to be aligned to
7132 * the same nr_pages (e.g. a 1GB request would be aligned to 1GB).
7133 *
7134 * Allocated pages can be freed with free_contig_range() or by manually calling
7135 * __free_page() on each allocated page.
7136 *
7137 * Return: pointer to contiguous pages on success, or NULL if not successful.
7138 */
7139 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
7140 int nid, nodemask_t *nodemask)
7141 {
7142 unsigned long ret, pfn, flags;
7143 struct zonelist *zonelist;
7144 struct zone *zone;
7145 struct zoneref *z;
7146
7147 zonelist = node_zonelist(nid, gfp_mask);
7148 for_each_zone_zonelist_nodemask(zone, z, zonelist,
7149 gfp_zone(gfp_mask), nodemask) {
7150 spin_lock_irqsave(&zone->lock, flags);
7151
7152 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
7153 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
7154 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
7155 /*
7156 * We release the zone lock here because
7157 * alloc_contig_range() will also lock the zone
7158 * at some point. If there's an allocation
7159 * spinning on this lock, it may win the race
7160 * and cause alloc_contig_range() to fail...
7161 */
7162 spin_unlock_irqrestore(&zone->lock, flags);
7163 ret = __alloc_contig_pages(pfn, nr_pages,
7164 gfp_mask);
7165 if (!ret)
7166 return pfn_to_page(pfn);
7167 spin_lock_irqsave(&zone->lock, flags);
7168 }
7169 pfn += nr_pages;
7170 }
7171 spin_unlock_irqrestore(&zone->lock, flags);
7172 }
7173 return NULL;
7174 }
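
/*
 * Hypothetical usage sketch for the PFN-agnostic variant above: ask for 512
 * contiguous pages anywhere on the local node and release them again.
 *
 *	struct page *page = alloc_contig_pages(512, GFP_KERNEL,
 *					       numa_node_id(), NULL);
 *	if (page)
 *		free_contig_range(page_to_pfn(page), 512);
 */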
7175 #endif /* CONFIG_CONTIG_ALLOC */
7176
7177 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
7178 {
7179 unsigned long count = 0;
7180 struct folio *folio = pfn_folio(pfn);
7181
7182 if (folio_test_large(folio)) {
7183 int expected = folio_nr_pages(folio);
7184
7185 if (nr_pages == expected)
7186 folio_put(folio);
7187 else
7188 WARN(true, "PFN %lu: nr_pages %lu != expected %d\n",
7189 pfn, nr_pages, expected);
7190 return;
7191 }
7192
7193 for (; nr_pages--; pfn++) {
7194 struct page *page = pfn_to_page(pfn);
7195
7196 count += page_count(page) != 1;
7197 __free_page(page);
7198 }
7199 WARN(count != 0, "%lu pages are still in use!\n", count);
7200 }
7201 EXPORT_SYMBOL(free_contig_range);
7202
7203 /*
7204 * Effectively disable pcplists for the zone by setting the high limit to 0
7205 * and draining all cpus. A concurrent page freeing on another CPU that's about
7206 * to put the page on pcplist will either finish before the drain and the page
7207 * will be drained, or observe the new high limit and skip the pcplist.
7208 *
7209 * Must be paired with a call to zone_pcp_enable().
7210 */
7211 void zone_pcp_disable(struct zone *zone)
7212 {
7213 mutex_lock(&pcp_batch_high_lock);
7214 __zone_set_pageset_high_and_batch(zone, 0, 0, 1);
7215 __drain_all_pages(zone, true);
7216 }
7217
7218 void zone_pcp_enable(struct zone *zone)
7219 {
7220 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min,
7221 zone->pageset_high_max, zone->pageset_batch);
7222 mutex_unlock(&pcp_batch_high_lock);
7223 }
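
/*
 * Typical pairing of the two helpers above (hypothetical caller sketch):
 *
 *	zone_pcp_disable(zone);
 *	... operate on the zone with pcplists drained and effectively off ...
 *	zone_pcp_enable(zone);
 */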
7224
7225 void zone_pcp_reset(struct zone *zone)
7226 {
7227 int cpu;
7228 struct per_cpu_zonestat *pzstats;
7229
7230 if (zone->per_cpu_pageset != &boot_pageset) {
7231 for_each_online_cpu(cpu) {
7232 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
7233 drain_zonestat(zone, pzstats);
7234 }
7235 free_percpu(zone->per_cpu_pageset);
7236 zone->per_cpu_pageset = &boot_pageset;
7237 if (zone->per_cpu_zonestats != &boot_zonestats) {
7238 free_percpu(zone->per_cpu_zonestats);
7239 zone->per_cpu_zonestats = &boot_zonestats;
7240 }
7241 }
7242 }
7243
7244 #ifdef CONFIG_MEMORY_HOTREMOVE
7245 /*
7246 * All pages in the range must be in a single zone, must not contain holes,
7247 * must span full sections, and must be isolated before calling this function.
7248 *
7249 * Returns the number of managed (non-PageOffline()) pages in the range: the
7250 * number of pages for which memory offlining code must adjust managed page
7251 * counters using adjust_managed_page_count().
7252 */
7253 unsigned long __offline_isolated_pages(unsigned long start_pfn,
7254 unsigned long end_pfn)
7255 {
7256 unsigned long already_offline = 0, flags;
7257 unsigned long pfn = start_pfn;
7258 struct page *page;
7259 struct zone *zone;
7260 unsigned int order;
7261
7262 offline_mem_sections(pfn, end_pfn);
7263 zone = page_zone(pfn_to_page(pfn));
7264 spin_lock_irqsave(&zone->lock, flags);
7265 while (pfn < end_pfn) {
7266 page = pfn_to_page(pfn);
7267 /*
7268 * The HWPoisoned page may not be in the buddy system, and
7269 * page_count() is not 0.
7270 */
7271 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7272 pfn++;
7273 continue;
7274 }
7275 /*
7276 * At this point all remaining PageOffline() pages have a
7277 * reference count of 0 and can simply be skipped.
7278 */
7279 if (PageOffline(page)) {
7280 BUG_ON(page_count(page));
7281 BUG_ON(PageBuddy(page));
7282 already_offline++;
7283 pfn++;
7284 continue;
7285 }
7286
7287 BUG_ON(page_count(page));
7288 BUG_ON(!PageBuddy(page));
7289 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE);
7290 order = buddy_order(page);
7291 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE);
7292 pfn += (1 << order);
7293 }
7294 spin_unlock_irqrestore(&zone->lock, flags);
7295
7296 return end_pfn - start_pfn - already_offline;
7297 }
7298 #endif
7299
7300 /*
7301 * This function returns a stable result only if called under zone lock.
7302 */
7303 bool is_free_buddy_page(const struct page *page)
7304 {
7305 unsigned long pfn = page_to_pfn(page);
7306 unsigned int order;
7307
7308 for (order = 0; order < NR_PAGE_ORDERS; order++) {
7309 const struct page *head = page - (pfn & ((1 << order) - 1));
7310
7311 if (PageBuddy(head) &&
7312 buddy_order_unsafe(head) >= order)
7313 break;
7314 }
7315
7316 return order <= MAX_PAGE_ORDER;
7317 }
7318 EXPORT_SYMBOL(is_free_buddy_page);
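
/*
 * As noted above, the result is only stable under zone->lock, so a careful
 * caller (hypothetical sketch) looks like:
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	free = is_free_buddy_page(page);
 *	spin_unlock_irqrestore(&zone->lock, flags);
 */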
7319
7320 #ifdef CONFIG_MEMORY_FAILURE
7321 static inline void add_to_free_list(struct page *page, struct zone *zone,
7322 unsigned int order, int migratetype,
7323 bool tail)
7324 {
7325 __add_to_free_list(page, zone, order, migratetype, tail);
7326 account_freepages(zone, 1 << order, migratetype);
7327 }
7328
7329 /*
7330 * Break down a higher-order page into sub-pages, and keep our target out of
7331 * the buddy allocator.
7332 */
7333 static void break_down_buddy_pages(struct zone *zone, struct page *page,
7334 struct page *target, int low, int high,
7335 int migratetype)
7336 {
7337 unsigned long size = 1 << high;
7338 struct page *current_buddy;
7339
7340 while (high > low) {
7341 high--;
7342 size >>= 1;
7343
7344 if (target >= &page[size]) {
7345 current_buddy = page;
7346 page = page + size;
7347 } else {
7348 current_buddy = page + size;
7349 }
7350
7351 if (set_page_guard(zone, current_buddy, high))
7352 continue;
7353
7354 add_to_free_list(current_buddy, zone, high, migratetype, false);
7355 set_buddy_order(current_buddy, high);
7356 }
7357 }
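
/*
 * Worked example of the loop above (hypothetical): if the target is the last
 * page of a free order-2 block, the block is split so that pages 0-1 go back
 * as one order-1 buddy and page 2 goes back as an order-0 buddy, leaving only
 * the target page (page 3) off the free lists.
 */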
7358
7359 /*
7360 * Take a page that will be marked as poisoned off the buddy allocator.
7361 */
7362 bool take_page_off_buddy(struct page *page)
7363 {
7364 struct zone *zone = page_zone(page);
7365 unsigned long pfn = page_to_pfn(page);
7366 unsigned long flags;
7367 unsigned int order;
7368 bool ret = false;
7369
7370 spin_lock_irqsave(&zone->lock, flags);
7371 for (order = 0; order < NR_PAGE_ORDERS; order++) {
7372 struct page *page_head = page - (pfn & ((1 << order) - 1));
7373 int page_order = buddy_order(page_head);
7374
7375 if (PageBuddy(page_head) && page_order >= order) {
7376 unsigned long pfn_head = page_to_pfn(page_head);
7377 int migratetype = get_pfnblock_migratetype(page_head,
7378 pfn_head);
7379
7380 del_page_from_free_list(page_head, zone, page_order,
7381 migratetype);
7382 break_down_buddy_pages(zone, page_head, page, 0,
7383 page_order, migratetype);
7384 SetPageHWPoisonTakenOff(page);
7385 ret = true;
7386 break;
7387 }
7388 if (page_count(page_head) > 0)
7389 break;
7390 }
7391 spin_unlock_irqrestore(&zone->lock, flags);
7392 return ret;
7393 }
7394
7395 /*
7396 * Cancel takeoff done by take_page_off_buddy().
7397 */
7398 bool put_page_back_buddy(struct page *page)
7399 {
7400 struct zone *zone = page_zone(page);
7401 unsigned long flags;
7402 bool ret = false;
7403
7404 spin_lock_irqsave(&zone->lock, flags);
7405 if (put_page_testzero(page)) {
7406 unsigned long pfn = page_to_pfn(page);
7407 int migratetype = get_pfnblock_migratetype(page, pfn);
7408
7409 ClearPageHWPoisonTakenOff(page);
7410 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
7411 if (TestClearPageHWPoison(page)) {
7412 ret = true;
7413 }
7414 }
7415 spin_unlock_irqrestore(&zone->lock, flags);
7416
7417 return ret;
7418 }
7419 #endif
7420
7421 #ifdef CONFIG_ZONE_DMA
7422 bool has_managed_dma(void)
7423 {
7424 struct pglist_data *pgdat;
7425
7426 for_each_online_pgdat(pgdat) {
7427 struct zone *zone = &pgdat->node_zones[ZONE_DMA];
7428
7429 if (managed_zone(zone))
7430 return true;
7431 }
7432 return false;
7433 }
7434 #endif /* CONFIG_ZONE_DMA */
7435
7436 #ifdef CONFIG_UNACCEPTED_MEMORY
7437
7438 static bool lazy_accept = true;
7439
7440 static int __init accept_memory_parse(char *p)
7441 {
7442 if (!strcmp(p, "lazy")) {
7443 lazy_accept = true;
7444 return 0;
7445 } else if (!strcmp(p, "eager")) {
7446 lazy_accept = false;
7447 return 0;
7448 } else {
7449 return -EINVAL;
7450 }
7451 }
7452 early_param("accept_memory", accept_memory_parse);
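
/*
 * The corresponding kernel command line usage is simply, e.g.
 *
 *	accept_memory=eager
 *
 * which makes unaccepted memory be accepted up front instead of lazily on
 * first use by the allocator.
 */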
7453
7454 static bool page_contains_unaccepted(struct page *page, unsigned int order)
7455 {
7456 phys_addr_t start = page_to_phys(page);
7457
7458 return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
7459 }
7460
7461 static void __accept_page(struct zone *zone, unsigned long *flags,
7462 struct page *page)
7463 {
7464 list_del(&page->lru);
7465 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
7466 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
7467 __ClearPageUnaccepted(page);
7468 spin_unlock_irqrestore(&zone->lock, *flags);
7469
7470 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
7471
7472 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
7473 }
7474
7475 void accept_page(struct page *page)
7476 {
7477 struct zone *zone = page_zone(page);
7478 unsigned long flags;
7479
7480 spin_lock_irqsave(&zone->lock, flags);
7481 if (!PageUnaccepted(page)) {
7482 spin_unlock_irqrestore(&zone->lock, flags);
7483 return;
7484 }
7485
7486 /* Unlocks zone->lock */
7487 __accept_page(zone, &flags, page);
7488 }
7489
7490 static bool try_to_accept_memory_one(struct zone *zone)
7491 {
7492 unsigned long flags;
7493 struct page *page;
7494
7495 spin_lock_irqsave(&zone->lock, flags);
7496 page = list_first_entry_or_null(&zone->unaccepted_pages,
7497 struct page, lru);
7498 if (!page) {
7499 spin_unlock_irqrestore(&zone->lock, flags);
7500 return false;
7501 }
7502
7503 /* Unlocks zone->lock */
7504 __accept_page(zone, &flags, page);
7505
7506 return true;
7507 }
7508
7509 static bool cond_accept_memory(struct zone *zone, unsigned int order,
7510 int alloc_flags)
7511 {
7512 long to_accept, wmark;
7513 bool ret = false;
7514
7515 if (list_empty(&zone->unaccepted_pages))
7516 return false;
7517
7518 /* Bailout, since try_to_accept_memory_one() needs to take a lock */
7519 if (alloc_flags & ALLOC_TRYLOCK)
7520 return false;
7521
7522 wmark = promo_wmark_pages(zone);
7523
7524 /*
7525 * Watermarks have not been initialized yet.
7526 *
7527 * Accepting one MAX_ORDER page to ensure progress.
7528 */
7529 if (!wmark)
7530 return try_to_accept_memory_one(zone);
7531
7532 /* How much to accept to get to promo watermark? */
7533 to_accept = wmark -
7534 (zone_page_state(zone, NR_FREE_PAGES) -
7535 __zone_watermark_unusable_free(zone, order, 0) -
7536 zone_page_state(zone, NR_UNACCEPTED));
7537
7538 while (to_accept > 0) {
7539 if (!try_to_accept_memory_one(zone))
7540 break;
7541 ret = true;
7542 to_accept -= MAX_ORDER_NR_PAGES;
7543 }
7544
7545 return ret;
7546 }
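
/*
 * Illustrative arithmetic for the loop above (made-up numbers): with a promo
 * watermark of 40,000 pages, 30,000 free pages of which 2,000 are unusable
 * for this allocation and 5,000 pages still unaccepted,
 *
 *	to_accept = 40000 - (30000 - 2000 - 5000) = 17000 pages
 *
 * so MAX_ORDER chunks keep being accepted until roughly that many pages have
 * been handed to the buddy allocator (or the unaccepted list runs dry).
 */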
7547
7548 static bool __free_unaccepted(struct page *page)
7549 {
7550 struct zone *zone = page_zone(page);
7551 unsigned long flags;
7552
7553 if (!lazy_accept)
7554 return false;
7555
7556 spin_lock_irqsave(&zone->lock, flags);
7557 list_add_tail(&page->lru, &zone->unaccepted_pages);
7558 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
7559 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
7560 __SetPageUnaccepted(page);
7561 spin_unlock_irqrestore(&zone->lock, flags);
7562
7563 return true;
7564 }
7565
7566 #else
7567
7568 static bool page_contains_unaccepted(struct page *page, unsigned int order)
7569 {
7570 return false;
7571 }
7572
7573 static bool cond_accept_memory(struct zone *zone, unsigned int order,
7574 int alloc_flags)
7575 {
7576 return false;
7577 }
7578
7579 static bool __free_unaccepted(struct page *page)
7580 {
7581 BUILD_BUG();
7582 return false;
7583 }
7584
7585 #endif /* CONFIG_UNACCEPTED_MEMORY */
7586
7587 struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
7588 {
7589 /*
7590 * Do not specify __GFP_DIRECT_RECLAIM, since direct reclaim is not allowed.
7591 * Do not specify __GFP_KSWAPD_RECLAIM either, since wake up of kswapd
7592 * is not safe in arbitrary context.
7593 *
7594 * These two are the conditions for gfpflags_allow_spinning() being true.
7595 *
7596 * Specify __GFP_NOWARN since failing alloc_pages_nolock() is not a reason
7597 * to warn. Also, a warning would trigger printk(), which is unsafe in
7598 * various contexts. We cannot use printk_deferred_enter() to mitigate,
7599 * since the running context is unknown.
7600 *
7601 * Specify __GFP_ZERO to make sure that call to kmsan_alloc_page() below
7602 * is safe in any context. Also zeroing the page is mandatory for
7603 * BPF use cases.
7604 *
7605 * Though __GFP_NOMEMALLOC is not checked in the code path below,
7606 * specify it here to highlight that alloc_pages_nolock()
7607 * doesn't want to deplete reserves.
7608 */
7609 gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC | __GFP_COMP
7610 | gfp_flags;
7611 unsigned int alloc_flags = ALLOC_TRYLOCK;
7612 struct alloc_context ac = { };
7613 struct page *page;
7614
7615 VM_WARN_ON_ONCE(gfp_flags & ~__GFP_ACCOUNT);
7616 /*
7617 * In PREEMPT_RT spin_trylock() will call raw_spin_lock() which is
7618 * unsafe in NMI. If spin_trylock() is called from hard IRQ the current
7619 * task may be waiting for one rt_spin_lock, but rt_spin_trylock() will
7620 * mark the task as the owner of another rt_spin_lock which will
7621 * confuse PI logic, so return immediately if called from hard IRQ or
7622 * NMI.
7623 *
7624 * Note, irqs_disabled() case is ok. This function can be called
7625 * from raw_spin_lock_irqsave region.
7626 */
7627 if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
7628 return NULL;
7629 if (!pcp_allowed_order(order))
7630 return NULL;
7631
7632 /* Bailout, since _deferred_grow_zone() needs to take a lock */
7633 if (deferred_pages_enabled())
7634 return NULL;
7635
7636 if (nid == NUMA_NO_NODE)
7637 nid = numa_node_id();
7638
7639 prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac,
7640 &alloc_gfp, &alloc_flags);
7641
7642 /*
7643 * Best effort allocation from percpu free list.
7644 * If it's empty attempt to spin_trylock zone->lock.
7645 */
7646 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
7647
7648 /* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */
7649
7650 if (memcg_kmem_online() && page && (gfp_flags & __GFP_ACCOUNT) &&
7651 unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
7652 __free_frozen_pages(page, order, FPI_TRYLOCK);
7653 page = NULL;
7654 }
7655 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
7656 kmsan_alloc_page(page, order, alloc_gfp);
7657 return page;
7658 }
7659 /**
7660 * alloc_pages_nolock - opportunistic reentrant allocation from any context
7661 * @gfp_flags: GFP flags. Only __GFP_ACCOUNT allowed.
7662 * @nid: node to allocate from
7663 * @order: allocation order size
7664 *
7665 * Allocates pages of a given order from the given node. This is safe to
7666 * call from any context (from atomic, NMI, and also reentrant
7667 * allocator -> tracepoint -> alloc_pages_nolock_noprof).
7668 * Allocation is best effort and expected to fail easily, so nobody should
7669 * rely on its success. Failures are not reported via warn_alloc().
7670 * See the always-fail conditions in alloc_frozen_pages_nolock_noprof() above.
7671 *
7672 * Return: allocated page or NULL on failure. NULL does not mean EBUSY or EAGAIN.
7673 * It means ENOMEM. There is no reason to call it again and expect !NULL.
7674 */
7675 struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
7676 {
7677 struct page *page;
7678
7679 page = alloc_frozen_pages_nolock_noprof(gfp_flags, nid, order);
7680 if (page)
7681 set_page_refcounted(page);
7682 return page;
7683 }
7684 EXPORT_SYMBOL_GPL(alloc_pages_nolock_noprof);
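
/*
 * Minimal caller sketch (hypothetical): an NMI-safe consumer, e.g. a BPF
 * allocator, grabs a single zeroed page and, assuming the matching
 * free_pages_nolock() helper is available, returns it the same lock-free way:
 *
 *	struct page *page = alloc_pages_nolock(0, NUMA_NO_NODE, 0);
 *	if (page) {
 *		...
 *		free_pages_nolock(page, 0);
 *	}
 *
 * NULL means the allocation cannot succeed from this context; retrying in a
 * loop is pointless.
 */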
7685