// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *	(lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/pagevec.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmstat.h>
#include <linux/fault-inject.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/psi.h>
#include <linux/khugepaged.h>
#include <linux/delayacct.h>
#include <linux/cacheinfo.h>
#include <linux/pgalloc_tag.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *       to allow for optimizations when handing back either fresh pages
 *       (memory onlining) or untouched pages (page isolation, free page
 *       reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/* Free the page without taking locks. Rely on trylock only. */
#define FPI_TRYLOCK		((__force fpi_t)BIT(2))

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 * Pass flags to a no-op inline function to typecheck and silence the unused
 * variable warning.
 */
static inline void __pcp_trylock_noop(unsigned long *flags) { }
#define pcp_trylock_prepare(flags)	__pcp_trylock_noop(&(flags))
#define pcp_trylock_finish(flags)	__pcp_trylock_noop(&(flags))
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif

/*
 * Generic helper to look up a per-cpu variable with an embedded spinlock.
 * Return value should be used with equivalent unlock helper.
 */
#define pcpu_spin_trylock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	if (!spin_trylock(&_ret->member)) {				\
		pcpu_task_unpin();					\
		_ret = NULL;						\
	}								\
	_ret;								\
})

#define pcpu_spin_unlock(member, ptr)					\
({									\
	spin_unlock(&ptr->member);					\
	pcpu_task_unpin();						\
})

/* struct per_cpu_pages specific helpers. */
#define pcp_spin_trylock(ptr, UP_flags)					\
({									\
	struct per_cpu_pages *__ret;					\
	pcp_trylock_prepare(UP_flags);					\
	__ret = pcpu_spin_trylock(struct per_cpu_pages, lock, ptr);	\
	if (!__ret)							\
		pcp_trylock_finish(UP_flags);				\
	__ret;								\
})

#define pcp_spin_unlock(ptr, UP_flags)					\
({									\
	pcpu_spin_unlock(lock, ptr);					\
	pcp_trylock_finish(UP_flags);					\
})
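
/*
 * Illustrative locking pattern (editor's sketch, not used verbatim in this
 * file): the trylock helper pins the task, looks up the local pcp and takes
 * its lock in one step; on failure the task is unpinned and the UP flags
 * are restored before NULL is returned.
 *
 *	unsigned long UP_flags;
 *	struct per_cpu_pages *pcp;
 *
 *	pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
 *	if (!pcp)
 *		return;		// contended: fall back to the buddy path
 *	... operate on pcp->lists ...
 *	pcp_spin_unlock(pcp, UP_flags);
 */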

/*
 * With the UP spinlock implementation, when we spin_lock(&pcp->lock) (e.g.
 * for a potentially remote cpu drain) and get interrupted by an operation
 * that attempts pcp_spin_trylock(), we can't rely on the trylock failing,
 * because UP spinlock assumptions make the trylock a no-op. So we have to
 * turn that spin_lock() into a spin_lock_irqsave(). This works because on
 * UP there are no remote CPUs, so we can only be locking the only existing
 * local one.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline void __flags_noop(unsigned long *flags) { }
#define pcp_spin_lock_maybe_irqsave(ptr, flags)			\
({								\
	__flags_noop(&(flags));					\
	spin_lock(&(ptr)->lock);				\
})
#define pcp_spin_unlock_maybe_irqrestore(ptr, flags)		\
({								\
	spin_unlock(&(ptr)->lock);				\
	__flags_noop(&(flags));					\
})
#else
#define pcp_spin_lock_maybe_irqsave(ptr, flags)			\
	spin_lock_irqsave(&(ptr)->lock, flags)
#define pcp_spin_unlock_maybe_irqrestore(ptr, flags)		\
	spin_unlock_irqrestore(&(ptr)->lock, flags)
#endif

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

static DEFINE_MUTEX(pcpu_drain_mutex);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
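
/*
 * Editor's sketch of the arithmetic implied above (assuming the logic of
 * setup_per_zone_lowmem_reserve()): the reserve in zone i against an
 * allocation that could also have used zone j > i is the sum of managed
 * pages of zones (i, j] divided by ratio[i]. E.g. for the 1G split above,
 * a highmem-capable allocation falling back to ZONE_DMA must leave
 * (784M + 224M) / 256 ~= 4M of ZONE_DMA free.
 */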
static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem",
#endif
	"Movable",
#ifdef CONFIG_ZONE_DEVICE
	"Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
static int watermark_boost_factor __read_mostly = 15000;
static int watermark_scale_factor = 10;
int defrag_mode;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

static bool page_contains_unaccepted(struct page *page, unsigned int order);
static bool cond_accept_memory(struct zone *zone, unsigned int order,
			       int alloc_flags);
static bool __free_unaccepted(struct page *page);

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}

/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have the __ref wrapper: it avoids the
 * section-mismatch warning while still allowing the __init function body to
 * be discarded after boot.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}

static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return false;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
						  unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}

static __always_inline bool is_standalone_pb_bit(enum pageblock_bits pb_bit)
{
	return pb_bit >= PB_compact_skip && pb_bit < __NR_PAGEBLOCK_BITS;
}

static __always_inline void
get_pfnblock_bitmap_bitidx(const struct page *page, unsigned long pfn,
			   unsigned long **bitmap_word, unsigned long *bitidx)
{
	unsigned long *bitmap;
	unsigned long word_bitidx;

#ifdef CONFIG_MEMORY_ISOLATION
	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8);
#else
	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
#endif
	BUILD_BUG_ON(__MIGRATE_TYPE_END > MIGRATETYPE_MASK);
	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	bitmap = get_pageblock_bitmap(page, pfn);
	*bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = *bitidx / BITS_PER_LONG;
	*bitidx &= (BITS_PER_LONG - 1);
	*bitmap_word = &bitmap[word_bitidx];
}
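
/*
 * Worked example (editor's sketch): assuming pageblock_order == 9 and
 * !CONFIG_MEMORY_ISOLATION (so NR_PAGEBLOCK_BITS == 4, see the
 * BUILD_BUG_ON above), a pfn at offset 0x1200 into its section lies in
 * pageblock 0x1200 >> 9 = 9, so pfn_to_bitidx() yields 9 * 4 = 36; on
 * 64-bit, word_bitidx = 36 / 64 = 0 and the returned in-word index is 36.
 */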

/**
 * __get_pfnblock_flags_mask - Return the requested group of flags for
 * a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static unsigned long __get_pfnblock_flags_mask(const struct page *page,
					       unsigned long pfn,
					       unsigned long mask)
{
	unsigned long *bitmap_word;
	unsigned long bitidx;
	unsigned long word;

	get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
	/*
	 * This races, without locks, with set_pfnblock_migratetype(). Ensure
	 * a consistent read of the memory array, so that results, even
	 * though racy, are not corrupted.
	 */
	word = READ_ONCE(*bitmap_word);
	return (word >> bitidx) & mask;
}

/**
 * get_pfnblock_bit - Check if a standalone bit of a pageblock is set
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @pb_bit: pageblock bit to check
 *
 * Return: true if the bit is set, otherwise false
 */
bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
		      enum pageblock_bits pb_bit)
{
	unsigned long *bitmap_word;
	unsigned long bitidx;

	if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
		return false;

	get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);

	return test_bit(bitidx + pb_bit, bitmap_word);
}

/**
 * get_pfnblock_migratetype - Return the migratetype of a pageblock
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 *
 * Return: The migratetype of the pageblock
 *
 * Use get_pfnblock_migratetype() if caller already has both @page and @pfn
 * to save a call to page_to_pfn().
 */
__always_inline enum migratetype
get_pfnblock_migratetype(const struct page *page, unsigned long pfn)
{
	unsigned long mask = MIGRATETYPE_AND_ISO_MASK;
	unsigned long flags;

	flags = __get_pfnblock_flags_mask(page, pfn, mask);

#ifdef CONFIG_MEMORY_ISOLATION
	if (flags & BIT(PB_migrate_isolate))
		return MIGRATE_ISOLATE;
#endif
	return flags & MIGRATETYPE_MASK;
}

/**
 * __set_pfnblock_flags_mask - Set the requested group of flags for
 * a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @flags: The flags to set
 * @mask: mask of bits that the caller is interested in
 */
static void __set_pfnblock_flags_mask(struct page *page, unsigned long pfn,
				      unsigned long flags, unsigned long mask)
{
	unsigned long *bitmap_word;
	unsigned long bitidx;
	unsigned long word;

	get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);

	mask <<= bitidx;
	flags <<= bitidx;

	word = READ_ONCE(*bitmap_word);
	do {
	} while (!try_cmpxchg(bitmap_word, &word, (word & ~mask) | flags));
}

/**
 * set_pfnblock_bit - Set a standalone bit of a pageblock
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @pb_bit: pageblock bit to set
 */
void set_pfnblock_bit(const struct page *page, unsigned long pfn,
		      enum pageblock_bits pb_bit)
{
	unsigned long *bitmap_word;
	unsigned long bitidx;

	if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
		return;

	get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);

	set_bit(bitidx + pb_bit, bitmap_word);
}

/**
 * clear_pfnblock_bit - Clear a standalone bit of a pageblock
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @pb_bit: pageblock bit to clear
 */
void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
			enum pageblock_bits pb_bit)
{
	unsigned long *bitmap_word;
	unsigned long bitidx;

	if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
		return;

	get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);

	clear_bit(bitidx + pb_bit, bitmap_word);
}

/**
 * set_pageblock_migratetype - Set the migratetype of a pageblock
 * @page: The page within the block of interest
 * @migratetype: migratetype to set
 */
static void set_pageblock_migratetype(struct page *page,
				      enum migratetype migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

#ifdef CONFIG_MEMORY_ISOLATION
	if (migratetype == MIGRATE_ISOLATE) {
		VM_WARN_ONCE(1,
			     "Use set_pageblock_isolate() for pageblock isolation");
		return;
	}
	VM_WARN_ONCE(get_pageblock_isolate(page),
		     "Use clear_pageblock_isolate() to unisolate pageblock");
	/* MIGRATETYPE_AND_ISO_MASK clears PB_migrate_isolate if it is set */
#endif
	__set_pfnblock_flags_mask(page, page_to_pfn(page),
				  (unsigned long)migratetype,
				  MIGRATETYPE_AND_ISO_MASK);
}

void __meminit init_pageblock_migratetype(struct page *page,
					  enum migratetype migratetype,
					  bool isolate)
{
	unsigned long flags;

	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	flags = migratetype;

#ifdef CONFIG_MEMORY_ISOLATION
	if (migratetype == MIGRATE_ISOLATE) {
		VM_WARN_ONCE(1,
			     "Set isolate=true to isolate pageblock with a migratetype");
		return;
	}
	if (isolate)
		flags |= BIT(PB_migrate_isolate);
#endif
	__set_pfnblock_flags_mask(page, page_to_pfn(page), flags,
				  MIGRATETYPE_AND_ISO_MASK);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		ret = !zone_spans_pfn(zone, pfn);
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
		       pfn, zone_to_nid(zone), zone->name,
		       start_pfn, start_pfn + sp);

	return ret;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static bool __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return true;
	if (zone != page_zone(page))
		return true;

	return false;
}
#else
static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return false;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page state: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		 current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	if (PageBuddy(page))
		__ClearPageBuddy(page);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bool movable;

	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != HPAGE_PMD_ORDER);

		movable = migratetype == MIGRATE_MOVABLE;

		return NR_LOWORDER_PCP_LISTS + movable;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * order) + migratetype;
}
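
/*
 * Example (editor's sketch): with MIGRATE_PCPTYPES == 3, an order-2
 * request with migratetype MIGRATE_MOVABLE (== 1) maps to
 * pindex 3 * 2 + 1 = 7, and pindex_to_order() below recovers 7 / 3 = 2.
 */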

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex >= NR_LOWORDER_PCP_LISTS)
		order = HPAGE_PMD_ORDER;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == HPAGE_PMD_ORDER)
		return true;
#endif
	return false;
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
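
/*
 * Layout example (editor's sketch) for an order-2 compound page: page[0]
 * has PG_head set; page[1..3] each store (unsigned long)&page[0] | 1 in
 * compound_head, so PageTail() is true for them and compound_head()
 * recovers page[0]; page[1] additionally records the order (2) for
 * compound_order().
 */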

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock
	 * unless compaction is also requesting movable pages.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE &&
	    capc->cc->migratetype != MIGRATE_MOVABLE)
		return false;

	if (migratetype != capc->cc->migratetype)
		trace_mm_page_alloc_extfrag(page, capc->cc->order, order,
					    capc->cc->migratetype, migratetype);

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

static inline void account_freepages(struct zone *zone, int nr_pages,
				     int migratetype)
{
	lockdep_assert_held(&zone->lock);

	if (is_migrate_isolate(migratetype))
		return;

	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);

	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
	else if (migratetype == MIGRATE_HIGHATOMIC)
		WRITE_ONCE(zone->nr_free_highatomic,
			   zone->nr_free_highatomic + nr_pages);
}

/* Used for pages not on another list */
static inline void __add_to_free_list(struct page *page, struct zone *zone,
				      unsigned int order, int migratetype,
				      bool tail)
{
	struct free_area *area = &zone->free_area[order];
	int nr_pages = 1 << order;

	VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
		     "page type is %d, passed migratetype is %d (nr=%d)\n",
		     get_pageblock_migratetype(page), migratetype, nr_pages);

	if (tail)
		list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
	else
		list_add(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;

	if (order >= pageblock_order && !is_migrate_isolate(migratetype))
		__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int old_mt, int new_mt)
{
	struct free_area *area = &zone->free_area[order];
	int nr_pages = 1 << order;

	/* Free page moving can fail, so it happens before the type update */
	VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt,
		     "page type is %d, passed migratetype is %d (nr=%d)\n",
		     get_pageblock_migratetype(page), old_mt, nr_pages);

	list_move_tail(&page->buddy_list, &area->free_list[new_mt]);

	account_freepages(zone, -nr_pages, old_mt);
	account_freepages(zone, nr_pages, new_mt);

	if (order >= pageblock_order &&
	    is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) {
		if (!is_migrate_isolate(old_mt))
			nr_pages = -nr_pages;
		__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
	}
}

static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
					     unsigned int order, int migratetype)
{
	int nr_pages = 1 << order;

	VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
		     "page type is %d, passed migratetype is %d (nr=%d)\n",
		     get_pageblock_migratetype(page), migratetype, nr_pages);

	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->buddy_list);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;

	if (order >= pageblock_order && !is_migrate_isolate(migratetype))
		__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order, int migratetype)
{
	__del_page_from_free_list(page, zone, order, migratetype);
	account_freepages(zone, -(1 << order), migratetype);
}

static inline struct page *get_page_from_free_area(struct free_area *area,
						   int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, buddy_list);
}

/*
 * If this is less than the 2nd largest possible page, check if the buddy
 * of the next-higher order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. If that is
 * happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a 2-level higher order page.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_PAGE_ORDER - 1)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
				   NULL) != NULL;
}
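
/*
 * Worked example (editor's sketch): freeing an order-1 page at pfn 6 whose
 * order-1 buddy at pfn 4 did not merge. The would-be merged order-2 page
 * starts at 6 & 4 = 4, and its own order-2 buddy sits at pfn 4 ^ 4 = 0;
 * if that buddy is free, a follow-up merge is likely, so the freed page is
 * queued at the tail of the freelist.
 */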

static void change_pageblock_range(struct page *pageblock_page,
				   int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
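
/*
 * Worked example (editor's sketch): freeing the order-0 page at pfn 12.
 * Its buddy is pfn 12 ^ 1 = 13; if that page is free, both merge into an
 * order-1 block at pfn 13 & 12 = 12. That block's buddy is pfn 12 ^ 2 = 14;
 * if the order-1 block there is free as well, they merge into an order-2
 * block at pfn 12, and so on up to MAX_PAGE_ORDER.
 */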

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn = 0;
	unsigned long combined_pfn;
	struct page *buddy;
	bool to_tail;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	account_freepages(zone, 1 << order, migratetype);

	while (order < MAX_PAGE_ORDER) {
		int buddy_mt = migratetype;

		if (compaction_capture(capc, page, order, migratetype)) {
			account_freepages(zone, -(1 << order), migratetype);
			return;
		}

		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
		if (!buddy)
			goto done_merging;

		if (unlikely(order >= pageblock_order)) {
			/*
			 * We want to prevent merging between freepages of a
			 * pageblock without fallbacks (e.g. isolated or CMA
			 * blocks) and a normal pageblock. Without this,
			 * pageblock isolation could cause incorrect freepage
			 * or CMA accounting, or HIGHATOMIC accounting.
			 */
			buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);

			if (migratetype != buddy_mt &&
			    (!migratetype_is_mergeable(migratetype) ||
			     !migratetype_is_mergeable(buddy_mt)))
				goto done_merging;
		}

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order);
		else
			__del_page_from_free_list(buddy, zone, order, buddy_mt);

		if (unlikely(buddy_mt != migratetype)) {
			/*
			 * Match buddy type. This ensures that an
			 * expand() down the line puts the sub-blocks
			 * on the right freelists.
			 */
			change_pageblock_range(buddy, order, migratetype);
		}

		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	__add_to_free_list(page, zone, order, migratetype, to_tail);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try to check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
				       unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			page->memcg_data |
#endif
			page_pool_page_is_pp(page) |
			(page->flags.f & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags.f & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->memcg_data))
		bad_reason = "page still charged to cgroup";
#endif
	if (unlikely(page_pool_page_is_pp(page)))
		bad_reason = "page_pool leak";
	return bad_reason;
}

static inline bool free_page_is_bad(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return false;

	/* Something has gone sideways, find it */
	bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
	return true;
}

static inline bool is_check_pages_enabled(void)
{
	return static_branch_unlikely(&check_pages_enabled);
}

static int free_tail_page_prepare(struct page *head_page, struct page *page)
{
	struct folio *folio = (struct folio *)head_page;
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!is_check_pages_enabled()) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: these may be in place of ->mapping */
		if (unlikely(folio_large_mapcount(folio))) {
			bad_page(page, "nonzero large_mapcount");
			goto out;
		}
		if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) &&
		    unlikely(atomic_read(&folio->_nr_pages_mapped))) {
			bad_page(page, "nonzero nr_pages_mapped");
			goto out;
		}
		if (IS_ENABLED(CONFIG_MM_ID)) {
			if (unlikely(folio->_mm_id_mapcount[0] != -1)) {
				bad_page(page, "nonzero mm mapcount 0");
				goto out;
			}
			if (unlikely(folio->_mm_id_mapcount[1] != -1)) {
				bad_page(page, "nonzero mm mapcount 1");
				goto out;
			}
		}
		if (IS_ENABLED(CONFIG_64BIT)) {
			if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
				bad_page(page, "nonzero entire_mapcount");
				goto out;
			}
			if (unlikely(atomic_read(&folio->_pincount))) {
				bad_page(page, "nonzero pincount");
				goto out;
			}
		}
		break;
	case 2:
		/* the second tail page: deferred_list overlaps ->mapping */
		if (unlikely(!list_empty(&folio->_deferred_list))) {
			bad_page(page, "on deferred list");
			goto out;
		}
		if (!IS_ENABLED(CONFIG_64BIT)) {
			if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
				bad_page(page, "nonzero entire_mapcount");
				goto out;
			}
			if (unlikely(atomic_read(&folio->_pincount))) {
				bad_page(page, "nonzero pincount");
				goto out;
			}
		}
		break;
	case 3:
		/* the third tail page: hugetlb specifics overlap ->mappings */
		if (IS_ENABLED(CONFIG_HUGETLB_PAGE))
			break;
		fallthrough;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. For generic KASAN: deferred memory initialization has not yet completed.
 *    Tag-based KASAN modes skip pages freed via deferred memory initialization
 *    using page tags instead (see below).
 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
 *    that error detection is disabled for accesses via the page address.
 *
 * Pages will have match-all tags in the following circumstances:
 *
 * 1. Pages are being initialized for the first time, including during deferred
 *    memory init; see the call to page_kasan_tag_reset in __init_single_page.
 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
 *    exception of pages unpoisoned by kasan_unpoison_vmalloc.
 * 3. The allocation was excluded from being checked due to sampling,
 *    see the call to kasan_unpoison_pages.
 *
 * Poisoning pages during deferred memory init will greatly lengthen the
 * process and cause problems in large memory systems as the deferred pages
 * initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline bool should_skip_kasan_poison(struct page *page)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return deferred_pages_enabled();

	return page_kasan_tag(page) == KASAN_TAG_KERNEL;
}

static void kernel_init_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++)
		clear_highpage_kasan_tagged(page + i);
	kasan_enable_current();
}

#ifdef CONFIG_MEM_ALLOC_PROFILING

/* Should be called only if mem_alloc_profiling_enabled() */
void __clear_page_tag_ref(struct page *page)
{
	union pgtag_ref_handle handle;
	union codetag_ref ref;

	if (get_page_tag_ref(page, &ref, &handle)) {
		set_codetag_empty(&ref);
		update_page_tag_ref(handle, &ref);
		put_page_tag_ref(handle);
	}
}

/* Should be called only if mem_alloc_profiling_enabled() */
static noinline
void __pgalloc_tag_add(struct page *page, struct task_struct *task,
		       unsigned int nr)
{
	union pgtag_ref_handle handle;
	union codetag_ref ref;

	if (get_page_tag_ref(page, &ref, &handle)) {
		alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
		update_page_tag_ref(handle, &ref);
		put_page_tag_ref(handle);
	}
}

static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
				   unsigned int nr)
{
	if (mem_alloc_profiling_enabled())
		__pgalloc_tag_add(page, task, nr);
}

/* Should be called only if mem_alloc_profiling_enabled() */
static noinline
void __pgalloc_tag_sub(struct page *page, unsigned int nr)
{
	union pgtag_ref_handle handle;
	union codetag_ref ref;

	if (get_page_tag_ref(page, &ref, &handle)) {
		alloc_tag_sub(&ref, PAGE_SIZE * nr);
		update_page_tag_ref(handle, &ref);
		put_page_tag_ref(handle);
	}
}

static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
{
	if (mem_alloc_profiling_enabled())
		__pgalloc_tag_sub(page, nr);
}

/* When tag is not NULL, assuming mem_alloc_profiling_enabled */
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
{
	if (tag)
		this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
}

#else /* CONFIG_MEM_ALLOC_PROFILING */

static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
				   unsigned int nr) {}
static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}

#endif /* CONFIG_MEM_ALLOC_PROFILING */

__always_inline bool free_pages_prepare(struct page *page,
					unsigned int order)
{
	int bad = 0;
	bool skip_kasan_poison = should_skip_kasan_poison(page);
	bool init = want_init_on_free();
	bool compound = PageCompound(page);
	struct folio *folio = page_folio(page);

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmsan_free_page(page, order);

	if (memcg_kmem_online() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);

	/*
	 * In rare cases, when truncation or holepunching raced with
	 * munlock after VM_LOCKED was cleared, Mlocked may still be
	 * found set here. This does not indicate a problem, unless
	 * "unevictable_pgs_cleared" appears worryingly large.
	 */
	if (unlikely(folio_test_mlocked(folio))) {
		long nr_pages = folio_nr_pages(folio);

		__folio_clear_mlocked(folio);
		zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
		count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
	}

	if (unlikely(PageHWPoison(page)) && !order) {
		/* Do not let hwpoison pages hit pcplists/buddy */
		reset_page_owner(page, order);
		page_table_check_free(page, order);
		pgalloc_tag_sub(page, 1 << order);

		/*
		 * The page is isolated and accounted for.
		 * Mark the codetag as empty to avoid accounting error
		 * when the page is freed by unpoison_memory().
		 */
		clear_page_tag_ref(page);
		return false;
	}

	VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		int i;

		if (compound) {
			page[1].flags.f &= ~PAGE_FLAGS_SECOND;
#ifdef NR_PAGES_IN_LARGE_FOLIO
			folio->_nr_pages = 0;
#endif
		}
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_page_prepare(page, page + i);
			if (is_check_pages_enabled()) {
				if (free_page_is_bad(page + i)) {
					bad++;
					continue;
				}
			}
			(page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (folio_test_anon(folio)) {
		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
		folio->mapping = NULL;
	}
	if (unlikely(page_has_type(page)))
		/* Reset the page_type (which overlays _mapcount) */
		page->page_type = UINT_MAX;

	if (is_check_pages_enabled()) {
		if (free_page_is_bad(page))
			bad++;
		if (bad)
			return false;
	}

	page_cpupid_reset_last(page);
	page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);
	page_table_check_free(page, order);
	pgalloc_tag_sub(page, 1 << order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					 PAGE_SIZE << order);
	}

	kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	if (!skip_kasan_poison) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
			       struct per_cpu_pages *pcp,
			       int pindex)
{
	unsigned long flags;
	unsigned int order;
	struct page *page;

	/*
	 * Ensure a proper count is passed, which otherwise would get stuck
	 * in the while (list_empty(list)) loop below.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	spin_lock_irqsave(&zone->lock, flags);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion. */
		do {
			if (++pindex > NR_PCP_LISTS - 1)
				pindex = 0;
			list = &pcp->lists[pindex];
		} while (list_empty(list));

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		do {
			unsigned long pfn;
			int mt;

			page = list_last_entry(list, struct page, pcp_list);
			pfn = page_to_pfn(page);
			mt = get_pfnblock_migratetype(page, pfn);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->pcp_list);
			count -= nr_pages;
			pcp->count -= nr_pages;

			__free_one_page(page, pfn, zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock_irqrestore(&zone->lock, flags);
}

/* Split a multi-block free page into its individual pageblocks. */
static void split_large_buddy(struct zone *zone, struct page *page,
			      unsigned long pfn, int order, fpi_t fpi)
{
	unsigned long end = pfn + (1 << order);

	VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order));
	/* Caller removed page from freelist, buddy info cleared! */
	VM_WARN_ON_ONCE(PageBuddy(page));

	if (order > pageblock_order)
		order = pageblock_order;

	do {
		int mt = get_pfnblock_migratetype(page, pfn);

		__free_one_page(page, pfn, zone, order, mt, fpi);
		pfn += 1 << order;
		if (pfn == end)
			break;
		page = pfn_to_page(pfn);
	} while (1);
}

static void add_page_to_zone_llist(struct zone *zone, struct page *page,
				   unsigned int order)
{
	/* Remember the order */
	page->private = order;
	/* Add the page to the free list */
	llist_add(&page->pcp_llist, &zone->trylock_free_pages);
}

static void free_one_page(struct zone *zone, struct page *page,
			  unsigned long pfn, unsigned int order,
			  fpi_t fpi_flags)
{
	struct llist_head *llhead;
	unsigned long flags;

	if (unlikely(fpi_flags & FPI_TRYLOCK)) {
		if (!spin_trylock_irqsave(&zone->lock, flags)) {
			add_page_to_zone_llist(zone, page, order);
			return;
		}
	} else {
		spin_lock_irqsave(&zone->lock, flags);
	}

	/* The lock succeeded. Process deferred pages. */
	llhead = &zone->trylock_free_pages;
	if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK))) {
		struct llist_node *llnode;
		struct page *p, *tmp;

		llnode = llist_del_all(llhead);
		llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) {
			unsigned int p_order = p->private;

			split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags);
			__count_vm_events(PGFREE, 1 << p_order);
		}
	}
	split_large_buddy(zone, page, pfn, order, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	__count_vm_events(PGFREE, 1 << order);
}

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags)
{
	unsigned long pfn = page_to_pfn(page);
	struct zone *zone = page_zone(page);

	if (free_pages_prepare(page, order))
		free_one_page(zone, page, pfn, order, fpi_flags);
}

void __meminit __free_pages_core(struct page *page, unsigned int order,
				 enum meminit_context context)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 *
	 * Note that hotplugged memory pages are initialized to PageOffline().
	 * Pages freed from memblock might be marked as reserved.
	 */
	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
	    unlikely(context == MEMINIT_HOTPLUG)) {
		for (loop = 0; loop < nr_pages; loop++, p++) {
			VM_WARN_ON_ONCE(PageReserved(p));
			__ClearPageOffline(p);
			set_page_count(p, 0);
		}

		adjust_managed_page_count(page, nr_pages);
	} else {
		for (loop = 0; loop < nr_pages; loop++, p++) {
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		/* memblock adjusts totalram_pages() manually. */
		atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
	}

	if (page_contains_unaccepted(page, order)) {
		if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
			return;

		accept_memory(page_to_phys(page), PAGE_SIZE << order);
	}

	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL);
}
1660
1661 /*
1662 * Check that the whole (or subset of) a pageblock given by the interval of
1663 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1664 * with the migration of free compaction scanner.
1665 *
1666 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1667 *
1668 * It's possible on some configurations to have a setup like node0 node1 node0
1669 * i.e. it's possible that all pages within a zones range of pages do not
1670 * belong to a single zone. We assume that a border between node0 and node1
1671 * can occur within a single pageblock, but not a node0 node1 node0
1672 * interleaving within a single pageblock. It is therefore sufficient to check
1673 * the first and last page of a pageblock and avoid checking each individual
1674 * page in a pageblock.
1675 *
1676 * Note: the function may return non-NULL struct page even for a page block
1677 * which contains a memory hole (i.e. there is no physical memory for a subset
1678  * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, the
1679  * block will fall into 2 sub-sections, and the end pfn of the pageblock may be
1680  * in a hole even though the start pfn is online and valid. This should be safe most of
1681 * the time because struct pages are still initialized via init_unavailable_range()
1682 * and pfn walkers shouldn't touch any physical memory range for which they do
1683 * not recognize any specific metadata in struct pages.
1684 */
1685 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1686 unsigned long end_pfn, struct zone *zone)
1687 {
1688 struct page *start_page;
1689 struct page *end_page;
1690
1691 /* end_pfn is one past the range we are checking */
1692 end_pfn--;
1693
1694 if (!pfn_valid(end_pfn))
1695 return NULL;
1696
1697 start_page = pfn_to_online_page(start_pfn);
1698 if (!start_page)
1699 return NULL;
1700
1701 if (page_zone(start_page) != zone)
1702 return NULL;
1703
1704 end_page = pfn_to_page(end_pfn);
1705
1706 /* This gives a shorter code than deriving page_zone(end_page) */
1707 if (page_zone_id(start_page) != page_zone_id(end_page))
1708 return NULL;
1709
1710 return start_page;
1711 }
1712
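/*
 * Illustrative sketch, not part of the allocator: models why checking
 * only the first and last pfn suffices above, under the stated
 * assumption that a node border crosses a pageblock at most once. The
 * zone_of() callback is hypothetical.
 */
static inline bool example_pageblock_in_one_zone(int (*zone_of)(unsigned long),
						 unsigned long start_pfn,
						 unsigned long end_pfn)
{
	/* end_pfn is exclusive; compare only the first and last pfn. */
	return zone_of(start_pfn) == zone_of(end_pfn - 1);
}
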
1713 /*
1714 * The order of subdivision here is critical for the IO subsystem.
1715 * Please do not alter this order without good reasons and regression
1716 * testing. Specifically, as large blocks of memory are subdivided,
1717 * the order in which smaller blocks are delivered depends on the order
1718 * they're subdivided in this function. This is the primary factor
1719 * influencing the order in which pages are delivered to the IO
1720 * subsystem according to empirical testing, and this is also justified
1721 * by considering the behavior of a buddy system containing a single
1722 * large block of memory acted on by a series of small allocations.
1723 * This behavior is a critical factor in sglist merging's success.
1724 *
1725 * -- nyc
1726 */
1727 static inline unsigned int expand(struct zone *zone, struct page *page, int low,
1728 int high, int migratetype)
1729 {
1730 unsigned int size = 1 << high;
1731 unsigned int nr_added = 0;
1732
1733 while (high > low) {
1734 high--;
1735 size >>= 1;
1736 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1737
1738 /*
1739 		 * Mark as guard pages (or page), which allows merging back
1740 		 * into the allocator when the buddy is freed. The
1741 		 * corresponding page table entries will not be touched;
1742 		 * the pages will stay not present in the virtual address space.
1743 */
1744 if (set_page_guard(zone, &page[size], high))
1745 continue;
1746
1747 __add_to_free_list(&page[size], zone, high, migratetype, false);
1748 set_buddy_order(&page[size], high);
1749 nr_added += size;
1750 }
1751
1752 return nr_added;
1753 }
1754
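/*
 * Illustrative sketch, not part of the allocator: the number of pages
 * expand() above hands back to the freelists when trimming an order-high
 * block down to order-low, assuming no guard pages are created.
 */
static inline unsigned int example_expand_nr_added(int low, int high)
{
	unsigned int size = 1U << high;
	unsigned int nr_added = 0;

	while (high > low) {
		high--;
		size >>= 1;
		nr_added += size;	/* upper half returns to the freelist */
	}
	/* e.g. low=0, high=3: 4 + 2 + 1 = 7 pages go back on the lists. */
	return nr_added;
}
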
1755 static __always_inline void page_del_and_expand(struct zone *zone,
1756 struct page *page, int low,
1757 int high, int migratetype)
1758 {
1759 int nr_pages = 1 << high;
1760
1761 __del_page_from_free_list(page, zone, high, migratetype);
1762 nr_pages -= expand(zone, page, low, high, migratetype);
1763 account_freepages(zone, -nr_pages, migratetype);
1764 }
1765
1766 static void check_new_page_bad(struct page *page)
1767 {
1768 if (unlikely(PageHWPoison(page))) {
1769 /* Don't complain about hwpoisoned pages */
1770 if (PageBuddy(page))
1771 __ClearPageBuddy(page);
1772 return;
1773 }
1774
1775 bad_page(page,
1776 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
1777 }
1778
1779 /*
1780 * This page is about to be returned from the page allocator
1781 */
1782 static bool check_new_page(struct page *page)
1783 {
1784 if (likely(page_expected_state(page,
1785 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1786 return false;
1787
1788 check_new_page_bad(page);
1789 return true;
1790 }
1791
1792 static inline bool check_new_pages(struct page *page, unsigned int order)
1793 {
1794 if (is_check_pages_enabled()) {
1795 for (int i = 0; i < (1 << order); i++) {
1796 struct page *p = page + i;
1797
1798 if (check_new_page(p))
1799 return true;
1800 }
1801 }
1802
1803 return false;
1804 }
1805
1806 static inline bool should_skip_kasan_unpoison(gfp_t flags)
1807 {
1808 /* Don't skip if a software KASAN mode is enabled. */
1809 if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
1810 IS_ENABLED(CONFIG_KASAN_SW_TAGS))
1811 return false;
1812
1813 /* Skip, if hardware tag-based KASAN is not enabled. */
1814 if (!kasan_hw_tags_enabled())
1815 return true;
1816
1817 /*
1818 * With hardware tag-based KASAN enabled, skip if this has been
1819 * requested via __GFP_SKIP_KASAN.
1820 */
1821 return flags & __GFP_SKIP_KASAN;
1822 }
1823
1824 static inline bool should_skip_init(gfp_t flags)
1825 {
1826 /* Don't skip, if hardware tag-based KASAN is not enabled. */
1827 if (!kasan_hw_tags_enabled())
1828 return false;
1829
1830 /* For hardware tag-based KASAN, skip if requested. */
1831 return (flags & __GFP_SKIP_ZERO);
1832 }
1833
1834 inline void post_alloc_hook(struct page *page, unsigned int order,
1835 gfp_t gfp_flags)
1836 {
1837 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
1838 !should_skip_init(gfp_flags);
1839 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
1840 int i;
1841
1842 set_page_private(page, 0);
1843
1844 arch_alloc_page(page, order);
1845 debug_pagealloc_map_pages(page, 1 << order);
1846
1847 /*
1848 * Page unpoisoning must happen before memory initialization.
1849 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
1850 * allocations and the page unpoisoning code will complain.
1851 */
1852 kernel_unpoison_pages(page, 1 << order);
1853
1854 /*
1855 * As memory initialization might be integrated into KASAN,
1856 	 * KASAN unpoisoning and memory initialization code must be
1857 * kept together to avoid discrepancies in behavior.
1858 */
1859
1860 /*
1861 	 * Zero the memory tags if requested, which happens only when
1862 	 * memory should be initialized as well.
1863 */
1864 if (zero_tags)
1865 init = !tag_clear_highpages(page, 1 << order);
1866
1867 if (!should_skip_kasan_unpoison(gfp_flags) &&
1868 kasan_unpoison_pages(page, order, init)) {
1869 /* Take note that memory was initialized by KASAN. */
1870 if (kasan_has_integrated_init())
1871 init = false;
1872 } else {
1873 /*
1874 * If memory tags have not been set by KASAN, reset the page
1875 * tags to ensure page_address() dereferencing does not fault.
1876 */
1877 for (i = 0; i != 1 << order; ++i)
1878 page_kasan_tag_reset(page + i);
1879 }
1880 /* If memory is still not initialized, initialize it now. */
1881 if (init)
1882 kernel_init_pages(page, 1 << order);
1883
1884 set_page_owner(page, order, gfp_flags);
1885 page_table_check_alloc(page, order);
1886 pgalloc_tag_add(page, current, 1 << order);
1887 }
1888
1889 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1890 unsigned int alloc_flags)
1891 {
1892 post_alloc_hook(page, order, gfp_flags);
1893
1894 if (order && (gfp_flags & __GFP_COMP))
1895 prep_compound_page(page, order);
1896
1897 /*
1898 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1899 * allocate the page. The expectation is that the caller is taking
1900 * steps that will free more memory. The caller should avoid the page
1901 * being used for !PFMEMALLOC purposes.
1902 */
1903 if (alloc_flags & ALLOC_NO_WATERMARKS)
1904 set_page_pfmemalloc(page);
1905 else
1906 clear_page_pfmemalloc(page);
1907 }
1908
1909 /*
1910 * Go through the free lists for the given migratetype and remove
1911 * the smallest available page from the freelists
1912 */
1913 static __always_inline
1914 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1915 int migratetype)
1916 {
1917 unsigned int current_order;
1918 struct free_area *area;
1919 struct page *page;
1920
1921 /* Find a page of the appropriate size in the preferred list */
1922 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
1923 area = &(zone->free_area[current_order]);
1924 page = get_page_from_free_area(area, migratetype);
1925 if (!page)
1926 continue;
1927
1928 page_del_and_expand(zone, page, order, current_order,
1929 migratetype);
1930 trace_mm_page_alloc_zone_locked(page, order, migratetype,
1931 pcp_allowed_order(order) &&
1932 migratetype < MIGRATE_PCPTYPES);
1933 return page;
1934 }
1935
1936 return NULL;
1937 }
1938
1939
1940 /*
1941  * This array describes the order in which free lists are fallen back to
1942  * when the free lists for the desired migratetype are depleted.
1943 *
1944 * The other migratetypes do not have fallbacks.
1945 */
1946 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = {
1947 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
1948 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
1949 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
1950 };
1951
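/*
 * Illustrative sketch, not part of the allocator: the first-match scan
 * that find_suitable_fallback() below performs over one row of the
 * fallbacks[] table. For a MIGRATE_UNMOVABLE request this tries
 * MIGRATE_RECLAIMABLE first, then MIGRATE_MOVABLE. The callback is
 * hypothetical.
 */
static inline int example_first_fallback(const int *row, int nr,
					 bool (*area_empty)(int mt))
{
	int i;

	for (i = 0; i < nr; i++) {
		if (!area_empty(row[i]))
			return row[i];	/* first populated fallback wins */
	}
	return -1;			/* no fallback available */
}
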
1952 #ifdef CONFIG_CMA
1953 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1954 unsigned int order)
1955 {
1956 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1957 }
1958 #else
1959 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1960 unsigned int order) { return NULL; }
1961 #endif
1962
1963 /*
1964 * Move all free pages of a block to new type's freelist. Caller needs to
1965 * change the block type.
1966 */
1967 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
1968 int old_mt, int new_mt)
1969 {
1970 struct page *page;
1971 unsigned long pfn, end_pfn;
1972 unsigned int order;
1973 int pages_moved = 0;
1974
1975 VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
1976 end_pfn = pageblock_end_pfn(start_pfn);
1977
1978 for (pfn = start_pfn; pfn < end_pfn;) {
1979 page = pfn_to_page(pfn);
1980 if (!PageBuddy(page)) {
1981 pfn++;
1982 continue;
1983 }
1984
1985 /* Make sure we are not inadvertently changing nodes */
1986 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1987 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
1988
1989 order = buddy_order(page);
1990
1991 move_to_free_list(page, zone, order, old_mt, new_mt);
1992
1993 pfn += 1 << order;
1994 pages_moved += 1 << order;
1995 }
1996
1997 return pages_moved;
1998 }
1999
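/*
 * Illustrative sketch, not part of the allocator: the pfn walk in
 * __move_freepages_block() above. Free buddies are skipped in a single
 * jump of 1 << order pages; anything else advances one pfn. The
 * buddy_order_of() callback is hypothetical (negative = not a buddy).
 */
static inline int example_count_block_moves(int (*buddy_order_of)(unsigned long),
					    unsigned long start_pfn,
					    unsigned long end_pfn)
{
	unsigned long pfn = start_pfn;
	int moved = 0;

	while (pfn < end_pfn) {
		int order = buddy_order_of(pfn);

		if (order < 0) {
			pfn++;			/* in-use page, skip it */
			continue;
		}
		pfn += 1UL << order;		/* whole buddy moves at once */
		moved += 1 << order;
	}
	return moved;
}
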
2000 static bool prep_move_freepages_block(struct zone *zone, struct page *page,
2001 unsigned long *start_pfn,
2002 int *num_free, int *num_movable)
2003 {
2004 unsigned long pfn, start, end;
2005
2006 pfn = page_to_pfn(page);
2007 start = pageblock_start_pfn(pfn);
2008 end = pageblock_end_pfn(pfn);
2009
2010 /*
2011 * The caller only has the lock for @zone, don't touch ranges
2012 * that straddle into other zones. While we could move part of
2013 * the range that's inside the zone, this call is usually
2014 * accompanied by other operations such as migratetype updates
2015 * which also should be locked.
2016 */
2017 if (!zone_spans_pfn(zone, start))
2018 return false;
2019 if (!zone_spans_pfn(zone, end - 1))
2020 return false;
2021
2022 *start_pfn = start;
2023
2024 if (num_free) {
2025 *num_free = 0;
2026 *num_movable = 0;
2027 for (pfn = start; pfn < end;) {
2028 page = pfn_to_page(pfn);
2029 if (PageBuddy(page)) {
2030 int nr = 1 << buddy_order(page);
2031
2032 *num_free += nr;
2033 pfn += nr;
2034 continue;
2035 }
2036 /*
2037 * We assume that pages that could be isolated for
2038 * migration are movable. But we don't actually try
2039 * isolating, as that would be expensive.
2040 */
2041 if (PageLRU(page) || page_has_movable_ops(page))
2042 (*num_movable)++;
2043 pfn++;
2044 }
2045 }
2046
2047 return true;
2048 }
2049
2050 static int move_freepages_block(struct zone *zone, struct page *page,
2051 int old_mt, int new_mt)
2052 {
2053 unsigned long start_pfn;
2054 int res;
2055
2056 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
2057 return -1;
2058
2059 res = __move_freepages_block(zone, start_pfn, old_mt, new_mt);
2060 set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
2061
2062 return res;
2063
2064 }
2065
2066 #ifdef CONFIG_MEMORY_ISOLATION
2067 /* Look for a buddy that straddles start_pfn */
2068 static unsigned long find_large_buddy(unsigned long start_pfn)
2069 {
2070 /*
2071 	 * If start_pfn is not an order-0 PageBuddy, the next PageBuddy containing
2072 	 * start_pfn has a minimum order of __ffs(start_pfn) + 1, so start checking
2073 	 * the order with __ffs(start_pfn). If start_pfn is an order-0 PageBuddy,
2074 	 * the starting order does not matter.
2075 */
2076 int order = start_pfn ? __ffs(start_pfn) : MAX_PAGE_ORDER;
2077 struct page *page;
2078 unsigned long pfn = start_pfn;
2079
2080 while (!PageBuddy(page = pfn_to_page(pfn))) {
2081 /* Nothing found */
2082 if (++order > MAX_PAGE_ORDER)
2083 return start_pfn;
2084 pfn &= ~0UL << order;
2085 }
2086
2087 /*
2088 * Found a preceding buddy, but does it straddle?
2089 */
2090 if (pfn + (1 << buddy_order(page)) > start_pfn)
2091 return pfn;
2092
2093 /* Nothing found */
2094 return start_pfn;
2095 }
2096
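/*
 * Illustrative sketch, not part of the allocator: the pfn/order math in
 * find_large_buddy() above. A buddy block of a given order is always
 * aligned to that order, so masking the low bits yields the candidate
 * head pfn of a block that could contain the target.
 */
static inline unsigned long example_buddy_head(unsigned long pfn, int order)
{
	return pfn & (~0UL << order);
}
/*
 * e.g. example_buddy_head(0x1234, 4) == 0x1230: an order-4 block that
 * could contain pfn 0x1234 must start at a 16-page boundary.
 */
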
2097 static inline void toggle_pageblock_isolate(struct page *page, bool isolate)
2098 {
2099 if (isolate)
2100 set_pageblock_isolate(page);
2101 else
2102 clear_pageblock_isolate(page);
2103 }
2104
2105 /**
2106 * __move_freepages_block_isolate - move free pages in block for page isolation
2107 * @zone: the zone
2108 * @page: the pageblock page
2109 * @isolate: to isolate the given pageblock or unisolate it
2110 *
2111 * This is similar to move_freepages_block(), but handles the special
2112 * case encountered in page isolation, where the block of interest
2113 * might be part of a larger buddy spanning multiple pageblocks.
2114 *
2115 * Unlike the regular page allocator path, which moves pages while
2116 * stealing buddies off the freelist, page isolation is interested in
2117 * arbitrary pfn ranges that may have overlapping buddies on both ends.
2118 *
2119 * This function handles that. Straddling buddies are split into
2120 * individual pageblocks. Only the block of interest is moved.
2121 *
2122 * Returns %true if pages could be moved, %false otherwise.
2123 */
2124 static bool __move_freepages_block_isolate(struct zone *zone,
2125 struct page *page, bool isolate)
2126 {
2127 unsigned long start_pfn, buddy_pfn;
2128 int from_mt;
2129 int to_mt;
2130 struct page *buddy;
2131
2132 if (isolate == get_pageblock_isolate(page)) {
2133 VM_WARN_ONCE(1, "%s a pageblock that is already in that state",
2134 isolate ? "Isolate" : "Unisolate");
2135 return false;
2136 }
2137
2138 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
2139 return false;
2140
2141 /* No splits needed if buddies can't span multiple blocks */
2142 if (pageblock_order == MAX_PAGE_ORDER)
2143 goto move;
2144
2145 buddy_pfn = find_large_buddy(start_pfn);
2146 buddy = pfn_to_page(buddy_pfn);
2147 /* We're a part of a larger buddy */
2148 if (PageBuddy(buddy) && buddy_order(buddy) > pageblock_order) {
2149 int order = buddy_order(buddy);
2150
2151 del_page_from_free_list(buddy, zone, order,
2152 get_pfnblock_migratetype(buddy, buddy_pfn));
2153 toggle_pageblock_isolate(page, isolate);
2154 split_large_buddy(zone, buddy, buddy_pfn, order, FPI_NONE);
2155 return true;
2156 }
2157
2158 move:
2159 /* Use MIGRATETYPE_MASK to get non-isolate migratetype */
2160 if (isolate) {
2161 from_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page),
2162 MIGRATETYPE_MASK);
2163 to_mt = MIGRATE_ISOLATE;
2164 } else {
2165 from_mt = MIGRATE_ISOLATE;
2166 to_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page),
2167 MIGRATETYPE_MASK);
2168 }
2169
2170 __move_freepages_block(zone, start_pfn, from_mt, to_mt);
2171 toggle_pageblock_isolate(pfn_to_page(start_pfn), isolate);
2172
2173 return true;
2174 }
2175
2176 bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page)
2177 {
2178 return __move_freepages_block_isolate(zone, page, true);
2179 }
2180
2181 bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page)
2182 {
2183 return __move_freepages_block_isolate(zone, page, false);
2184 }
2185
2186 #endif /* CONFIG_MEMORY_ISOLATION */
2187
2188 static inline bool boost_watermark(struct zone *zone)
2189 {
2190 unsigned long max_boost;
2191
2192 if (!watermark_boost_factor)
2193 return false;
2194 /*
2195 * Don't bother in zones that are unlikely to produce results.
2196 * On small machines, including kdump capture kernels running
2197 * in a small area, boosting the watermark can cause an out of
2198 * memory situation immediately.
2199 */
2200 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2201 return false;
2202
2203 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2204 watermark_boost_factor, 10000);
2205
2206 /*
2207 	 * The high watermark may be uninitialised if fragmentation occurs
2208 	 * very early in boot, so do not boost. We do not fall
2209 	 * through and boost by pageblock_nr_pages because failing
2210 	 * allocations that early means that reclaim is not going
2211 	 * to help, and it may even be impossible to reclaim the
2212 	 * boosted watermark, resulting in a hang.
2213 */
2214 if (!max_boost)
2215 return false;
2216
2217 max_boost = max(pageblock_nr_pages, max_boost);
2218
2219 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2220 max_boost);
2221
2222 return true;
2223 }
2224
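/*
 * Illustrative sketch, not part of the allocator: the cap computed by
 * mult_frac() above, which evaluates x * n / d without intermediate
 * overflow. With the default watermark_boost_factor (15000 upstream),
 * the boost is capped at 150% of the high watermark.
 */
static inline unsigned long example_max_boost(unsigned long wmark_high,
					      unsigned long factor)
{
	/* Same arithmetic as mult_frac(wmark_high, factor, 10000). */
	return wmark_high / 10000 * factor +
	       wmark_high % 10000 * factor / 10000;
}
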
2225 /*
2226 * When we are falling back to another migratetype during allocation, should we
2227 * try to claim an entire block to satisfy further allocations, instead of
2228 * polluting multiple pageblocks?
2229 */
2230 static bool should_try_claim_block(unsigned int order, int start_mt)
2231 {
2232 /*
2233 	 * Leaving this order check here is intended, even though a more
2234 	 * relaxed order check follows below. If this condition is met, we
2235 	 * can always claim the whole pageblock, whereas the check below
2236 	 * does not guarantee it and is merely a heuristic, so it could be
2237 	 * changed at any time.
2238 */
2239 if (order >= pageblock_order)
2240 return true;
2241
2242 /*
2243 * Above a certain threshold, always try to claim, as it's likely there
2244 * will be more free pages in the pageblock.
2245 */
2246 if (order >= pageblock_order / 2)
2247 return true;
2248
2249 /*
2250 * Unmovable/reclaimable allocations would cause permanent
2251 * fragmentations if they fell back to allocating from a movable block
2252 * (polluting it), so we try to claim the whole block regardless of the
2253 * allocation size. Later movable allocations can always steal from this
2254 * block, which is less problematic.
2255 */
2256 if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE)
2257 return true;
2258
2259 if (page_group_by_mobility_disabled)
2260 return true;
2261
2262 /*
2263 	 * Movable pages won't cause permanent fragmentation, so for small
2264 	 * allocations we only need to temporarily steal unmovable or
2265 	 * reclaimable pages that are closest to the request size. After a
2266 * while, memory compaction may occur to form large contiguous pages,
2267 * and the next movable allocation may not need to steal.
2268 */
2269 return false;
2270 }
2271
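/*
 * Illustrative sketch, not part of the allocator: should_try_claim_block()
 * above collapsed into one predicate. The separate order >= pageblock_order
 * test is subsumed here; the original keeps it only for clarity.
 */
static inline bool example_should_claim(unsigned int order,
					unsigned int pb_order,
					bool movable_req,
					bool mobility_grouping_off)
{
	if (order >= pb_order / 2)
		return true;		/* large request: claim the block */
	if (!movable_req)
		return true;		/* avoid polluting movable blocks */
	return mobility_grouping_off;
}
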
2272 /*
2273 * Check whether there is a suitable fallback freepage with requested order.
2274 * If claimable is true, this function returns fallback_mt only if
2275 * we would do this whole-block claiming. This would help to reduce
2276 * fragmentation due to mixed migratetype pages in one pageblock.
2277 */
2278 int find_suitable_fallback(struct free_area *area, unsigned int order,
2279 int migratetype, bool claimable)
2280 {
2281 int i;
2282
2283 if (claimable && !should_try_claim_block(order, migratetype))
2284 return -2;
2285
2286 if (area->nr_free == 0)
2287 return -1;
2288
2289 	for (i = 0; i < MIGRATE_PCPTYPES - 1; i++) {
2290 int fallback_mt = fallbacks[migratetype][i];
2291
2292 if (!free_area_empty(area, fallback_mt))
2293 return fallback_mt;
2294 }
2295
2296 return -1;
2297 }
2298
2299 /*
2300 * This function implements actual block claiming behaviour. If order is large
2301 * enough, we can claim the whole pageblock for the requested migratetype. If
2302 * not, we check the pageblock for constituent pages; if at least half of the
2303 * pages are free or compatible, we can still claim the whole block, so pages
2304 * freed in the future will be put on the correct free list.
2305 */
2306 static struct page *
2307 try_to_claim_block(struct zone *zone, struct page *page,
2308 int current_order, int order, int start_type,
2309 int block_type, unsigned int alloc_flags)
2310 {
2311 int free_pages, movable_pages, alike_pages;
2312 unsigned long start_pfn;
2313
2314 /* Take ownership for orders >= pageblock_order */
2315 if (current_order >= pageblock_order) {
2316 unsigned int nr_added;
2317
2318 del_page_from_free_list(page, zone, current_order, block_type);
2319 change_pageblock_range(page, current_order, start_type);
2320 nr_added = expand(zone, page, order, current_order, start_type);
2321 account_freepages(zone, nr_added, start_type);
2322 return page;
2323 }
2324
2325 /*
2326 * Boost watermarks to increase reclaim pressure to reduce the
2327 * likelihood of future fallbacks. Wake kswapd now as the node
2328 * may be balanced overall and kswapd will not wake naturally.
2329 */
2330 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2331 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2332
2333 /* moving whole block can fail due to zone boundary conditions */
2334 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
2335 &movable_pages))
2336 return NULL;
2337
2338 /*
2339 * Determine how many pages are compatible with our allocation.
2340 * For movable allocation, it's the number of movable pages which
2341 * we just obtained. For other types it's a bit more tricky.
2342 */
2343 if (start_type == MIGRATE_MOVABLE) {
2344 alike_pages = movable_pages;
2345 } else {
2346 /*
2347 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2348 * to MOVABLE pageblock, consider all non-movable pages as
2349 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2350 * vice versa, be conservative since we can't distinguish the
2351 * exact migratetype of non-movable pages.
2352 */
2353 if (block_type == MIGRATE_MOVABLE)
2354 alike_pages = pageblock_nr_pages
2355 - (free_pages + movable_pages);
2356 else
2357 alike_pages = 0;
2358 }
2359 /*
2360 * If a sufficient number of pages in the block are either free or of
2361 	 * compatible migratability with our allocation, claim the whole block.
2362 */
2363 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2364 page_group_by_mobility_disabled) {
2365 __move_freepages_block(zone, start_pfn, block_type, start_type);
2366 set_pageblock_migratetype(pfn_to_page(start_pfn), start_type);
2367 return __rmqueue_smallest(zone, order, start_type);
2368 }
2369
2370 return NULL;
2371 }
2372
2373 /*
2374 * Try to allocate from some fallback migratetype by claiming the entire block,
2375 * i.e. converting it to the allocation's start migratetype.
2376 *
2377 * The use of signed ints for order and current_order is a deliberate
2378 * deviation from the rest of this file, to make the for loop
2379 * condition simpler.
2380 */
2381 static __always_inline struct page *
2382 __rmqueue_claim(struct zone *zone, int order, int start_migratetype,
2383 unsigned int alloc_flags)
2384 {
2385 struct free_area *area;
2386 int current_order;
2387 int min_order = order;
2388 struct page *page;
2389 int fallback_mt;
2390
2391 /*
2392 * Do not steal pages from freelists belonging to other pageblocks
2393 * i.e. orders < pageblock_order. If there are no local zones free,
2394 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2395 */
2396 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
2397 min_order = pageblock_order;
2398
2399 /*
2400 * Find the largest available free page in the other list. This roughly
2401 * approximates finding the pageblock with the most free pages, which
2402 * would be too costly to do exactly.
2403 */
2404 for (current_order = MAX_PAGE_ORDER; current_order >= min_order;
2405 --current_order) {
2406 area = &(zone->free_area[current_order]);
2407 fallback_mt = find_suitable_fallback(area, current_order,
2408 start_migratetype, true);
2409
2410 /* No block in that order */
2411 if (fallback_mt == -1)
2412 continue;
2413
2414 /* Advanced into orders too low to claim, abort */
2415 if (fallback_mt == -2)
2416 break;
2417
2418 page = get_page_from_free_area(area, fallback_mt);
2419 page = try_to_claim_block(zone, page, current_order, order,
2420 start_migratetype, fallback_mt,
2421 alloc_flags);
2422 if (page) {
2423 trace_mm_page_alloc_extfrag(page, order, current_order,
2424 start_migratetype, fallback_mt);
2425 return page;
2426 }
2427 }
2428
2429 return NULL;
2430 }
2431
2432 /*
2433 * Try to steal a single page from some fallback migratetype. Leave the rest of
2434 * the block as its current migratetype, potentially causing fragmentation.
2435 */
2436 static __always_inline struct page *
2437 __rmqueue_steal(struct zone *zone, int order, int start_migratetype)
2438 {
2439 struct free_area *area;
2440 int current_order;
2441 struct page *page;
2442 int fallback_mt;
2443
2444 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
2445 area = &(zone->free_area[current_order]);
2446 fallback_mt = find_suitable_fallback(area, current_order,
2447 start_migratetype, false);
2448 if (fallback_mt == -1)
2449 continue;
2450
2451 page = get_page_from_free_area(area, fallback_mt);
2452 page_del_and_expand(zone, page, order, current_order, fallback_mt);
2453 trace_mm_page_alloc_extfrag(page, order, current_order,
2454 start_migratetype, fallback_mt);
2455 return page;
2456 }
2457
2458 return NULL;
2459 }
2460
2461 enum rmqueue_mode {
2462 RMQUEUE_NORMAL,
2463 RMQUEUE_CMA,
2464 RMQUEUE_CLAIM,
2465 RMQUEUE_STEAL,
2466 };
2467
2468 /*
2469 * Do the hard work of removing an element from the buddy allocator.
2470 * Call me with the zone->lock already held.
2471 */
2472 static __always_inline struct page *
2473 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2474 unsigned int alloc_flags, enum rmqueue_mode *mode)
2475 {
2476 struct page *page;
2477
2478 if (IS_ENABLED(CONFIG_CMA)) {
2479 /*
2480 * Balance movable allocations between regular and CMA areas by
2481 * allocating from CMA when over half of the zone's free memory
2482 * is in the CMA area.
2483 */
2484 if (alloc_flags & ALLOC_CMA &&
2485 zone_page_state(zone, NR_FREE_CMA_PAGES) >
2486 zone_page_state(zone, NR_FREE_PAGES) / 2) {
2487 page = __rmqueue_cma_fallback(zone, order);
2488 if (page)
2489 return page;
2490 }
2491 }
2492
2493 /*
2494 * First try the freelists of the requested migratetype, then try
2495 * fallbacks modes with increasing levels of fragmentation risk.
2496 *
2497 	 * The fallback logic is expensive and rmqueue_bulk() calls this
2498 	 * in a loop with the zone->lock held, meaning the freelists are
2499 	 * not subject to any outside changes. Remember in *mode where
2500 * we found pay dirt, to save us the search on the next call.
2501 */
2502 switch (*mode) {
2503 case RMQUEUE_NORMAL:
2504 page = __rmqueue_smallest(zone, order, migratetype);
2505 if (page)
2506 return page;
2507 fallthrough;
2508 case RMQUEUE_CMA:
2509 if (alloc_flags & ALLOC_CMA) {
2510 page = __rmqueue_cma_fallback(zone, order);
2511 if (page) {
2512 *mode = RMQUEUE_CMA;
2513 return page;
2514 }
2515 }
2516 fallthrough;
2517 case RMQUEUE_CLAIM:
2518 page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
2519 if (page) {
2520 /* Replenished preferred freelist, back to normal mode. */
2521 *mode = RMQUEUE_NORMAL;
2522 return page;
2523 }
2524 fallthrough;
2525 case RMQUEUE_STEAL:
2526 if (!(alloc_flags & ALLOC_NOFRAGMENT)) {
2527 page = __rmqueue_steal(zone, order, migratetype);
2528 if (page) {
2529 *mode = RMQUEUE_STEAL;
2530 return page;
2531 }
2532 }
2533 }
2534 return NULL;
2535 }
2536
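/*
 * Illustrative sketch, not part of the allocator: how the cached
 * rmqueue_mode behaves across successive __rmqueue() calls. Only a
 * successful whole-block claim resets the search to RMQUEUE_NORMAL;
 * CMA or steal hits keep later iterations starting at that mode.
 */
static inline enum rmqueue_mode example_next_mode(enum rmqueue_mode found)
{
	return found == RMQUEUE_CLAIM ? RMQUEUE_NORMAL : found;
}
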
2537 /*
2538 * Obtain a specified number of elements from the buddy allocator, all under
2539 * a single hold of the lock, for efficiency. Add them to the supplied list.
2540 * Returns the number of new pages which were placed at *list.
2541 */
2542 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2543 unsigned long count, struct list_head *list,
2544 int migratetype, unsigned int alloc_flags)
2545 {
2546 enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
2547 unsigned long flags;
2548 int i;
2549
2550 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
2551 if (!spin_trylock_irqsave(&zone->lock, flags))
2552 return 0;
2553 } else {
2554 spin_lock_irqsave(&zone->lock, flags);
2555 }
2556 for (i = 0; i < count; ++i) {
2557 struct page *page = __rmqueue(zone, order, migratetype,
2558 alloc_flags, &rmqm);
2559 if (unlikely(page == NULL))
2560 break;
2561
2562 /*
2563 		 * Split buddy pages returned by expand() are received here in
2564 		 * physical page order. The page is added to the tail of the
2565 		 * caller's list. From the caller's perspective, the linked list
2566 		 * is ordered by page number under some conditions. This is
2567 		 * useful for IO devices that can forward from the head of the
2568 		 * list, and thus in physical page order, and for IO devices
2569 		 * that can merge IO requests when the physical pages are
2570 		 * ordered properly.
2571 */
2572 list_add_tail(&page->pcp_list, list);
2573 }
2574 spin_unlock_irqrestore(&zone->lock, flags);
2575
2576 return i;
2577 }
2578
2579 /*
2580 * Called from the vmstat counter updater to decay the PCP high.
2581  * Return whether there is additional work to do.
2582 */
2583 bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
2584 {
2585 int high_min, to_drain, to_drain_batched, batch;
2586 unsigned long UP_flags;
2587 bool todo = false;
2588
2589 high_min = READ_ONCE(pcp->high_min);
2590 batch = READ_ONCE(pcp->batch);
2591 /*
2592 	 * Periodically decrease pcp->high to try to free possibly idle
2593 	 * PCP pages, but avoid freeing too many pages at once in order to
2594 	 * control latency. This also caps the pcp->high decrement.
2595 */
2596 if (pcp->high > high_min) {
2597 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2598 pcp->high - (pcp->high >> 3), high_min);
2599 if (pcp->high > high_min)
2600 todo = true;
2601 }
2602
2603 to_drain = pcp->count - pcp->high;
2604 while (to_drain > 0) {
2605 to_drain_batched = min(to_drain, batch);
2606 pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
2607 free_pcppages_bulk(zone, to_drain_batched, pcp, 0);
2608 pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
2609 todo = true;
2610
2611 to_drain -= to_drain_batched;
2612 }
2613
2614 return todo;
2615 }
2616
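/*
 * Illustrative sketch, not part of the allocator: the geometric decay
 * step applied to pcp->high above, simplified to ignore the
 * pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX) term. Each pass
 * trims roughly 12.5% until high_min is reached.
 */
static inline int example_decay_high(int high, int high_min)
{
	int new_high = high - (high >> 3);	/* drop one eighth */

	return new_high > high_min ? new_high : high_min;
}
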
2617 #ifdef CONFIG_NUMA
2618 /*
2619 * Called from the vmstat counter updater to drain pagesets of this
2620 * currently executing processor on remote nodes after they have
2621 * expired.
2622 */
2623 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2624 {
2625 unsigned long UP_flags;
2626 int to_drain, batch;
2627
2628 batch = READ_ONCE(pcp->batch);
2629 to_drain = min(pcp->count, batch);
2630 if (to_drain > 0) {
2631 pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
2632 free_pcppages_bulk(zone, to_drain, pcp, 0);
2633 pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
2634 }
2635 }
2636 #endif
2637
2638 /*
2639 * Drain pcplists of the indicated processor and zone.
2640 */
2641 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2642 {
2643 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2644 unsigned long UP_flags;
2645 int count;
2646
2647 do {
2648 pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
2649 count = pcp->count;
2650 if (count) {
2651 int to_drain = min(count,
2652 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
2653
2654 free_pcppages_bulk(zone, to_drain, pcp, 0);
2655 count -= to_drain;
2656 }
2657 pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
2658 } while (count);
2659 }
2660
2661 /*
2662 * Drain pcplists of all zones on the indicated processor.
2663 */
2664 static void drain_pages(unsigned int cpu)
2665 {
2666 struct zone *zone;
2667
2668 for_each_populated_zone(zone) {
2669 drain_pages_zone(cpu, zone);
2670 }
2671 }
2672
2673 /*
2674 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2675 */
2676 void drain_local_pages(struct zone *zone)
2677 {
2678 int cpu = smp_processor_id();
2679
2680 if (zone)
2681 drain_pages_zone(cpu, zone);
2682 else
2683 drain_pages(cpu);
2684 }
2685
2686 /*
2687 * The implementation of drain_all_pages(), exposing an extra parameter to
2688 * drain on all cpus.
2689 *
2690 * drain_all_pages() is optimized to only execute on cpus where pcplists are
2691 * not empty. The check for non-emptiness can however race with a free to
2692 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
2693 * that need the guarantee that every CPU has drained can disable the
2694 * optimizing racy check.
2695 */
2696 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
2697 {
2698 int cpu;
2699
2700 /*
2701 * Allocate in the BSS so we won't require allocation in
2702 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2703 */
2704 static cpumask_t cpus_with_pcps;
2705
2706 /*
2707 * Do not drain if one is already in progress unless it's specific to
2708 * a zone. Such callers are primarily CMA and memory hotplug and need
2709 * the drain to be complete when the call returns.
2710 */
2711 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2712 if (!zone)
2713 return;
2714 mutex_lock(&pcpu_drain_mutex);
2715 }
2716
2717 /*
2718 	 * We don't care about racing with CPU hotplug events,
2719 	 * as the offline notification will cause the notified
2720 	 * cpu to drain that CPU's pcps, and on_each_cpu_mask
2721 	 * disables preemption as part of its processing.
2722 */
2723 for_each_online_cpu(cpu) {
2724 struct per_cpu_pages *pcp;
2725 struct zone *z;
2726 bool has_pcps = false;
2727
2728 if (force_all_cpus) {
2729 /*
2730 * The pcp.count check is racy, some callers need a
2731 * guarantee that no cpu is missed.
2732 */
2733 has_pcps = true;
2734 } else if (zone) {
2735 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2736 if (pcp->count)
2737 has_pcps = true;
2738 } else {
2739 for_each_populated_zone(z) {
2740 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
2741 if (pcp->count) {
2742 has_pcps = true;
2743 break;
2744 }
2745 }
2746 }
2747
2748 if (has_pcps)
2749 cpumask_set_cpu(cpu, &cpus_with_pcps);
2750 else
2751 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2752 }
2753
2754 for_each_cpu(cpu, &cpus_with_pcps) {
2755 if (zone)
2756 drain_pages_zone(cpu, zone);
2757 else
2758 drain_pages(cpu);
2759 }
2760
2761 mutex_unlock(&pcpu_drain_mutex);
2762 }
2763
2764 /*
2765 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2766 *
2767 * When zone parameter is non-NULL, spill just the single zone's pages.
2768 */
2769 void drain_all_pages(struct zone *zone)
2770 {
2771 __drain_all_pages(zone, false);
2772 }
2773
2774 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high)
2775 {
2776 int min_nr_free, max_nr_free;
2777
2778 /* Free as much as possible if batch freeing high-order pages. */
2779 if (unlikely(free_high))
2780 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX);
2781
2782 /* Check for PCP disabled or boot pageset */
2783 if (unlikely(high < batch))
2784 return 1;
2785
2786 /* Leave at least pcp->batch pages on the list */
2787 min_nr_free = batch;
2788 max_nr_free = high - batch;
2789
2790 /*
2791 * Increase the batch number to the number of the consecutive
2792 * freed pages to reduce zone lock contention.
2793 */
2794 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free);
2795
2796 return batch;
2797 }
2798
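/*
 * Illustrative sketch, not part of the allocator: the clamp performed
 * by nr_pcp_free() above. The number freed tracks the recent
 * free_count, but always leaves at least @batch pages on the pcp and
 * never frees more than @high - @batch in one step.
 */
static inline int example_nr_to_free(int free_count, int batch, int high)
{
	int min_nr_free = batch;	/* leave at least one batch behind */
	int max_nr_free = high - batch;	/* cap a single free burst */

	if (free_count < min_nr_free)
		return min_nr_free;
	if (free_count > max_nr_free)
		return max_nr_free;
	return free_count;
}
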
2799 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
2800 int batch, bool free_high)
2801 {
2802 int high, high_min, high_max;
2803
2804 high_min = READ_ONCE(pcp->high_min);
2805 high_max = READ_ONCE(pcp->high_max);
2806 high = pcp->high = clamp(pcp->high, high_min, high_max);
2807
2808 if (unlikely(!high))
2809 return 0;
2810
2811 if (unlikely(free_high)) {
2812 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2813 high_min);
2814 return 0;
2815 }
2816
2817 /*
2818 * If reclaim is active, limit the number of pages that can be
2819 * stored on pcp lists
2820 */
2821 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) {
2822 int free_count = max_t(int, pcp->free_count, batch);
2823
2824 pcp->high = max(high - free_count, high_min);
2825 return min(batch << 2, pcp->high);
2826 }
2827
2828 if (high_min == high_max)
2829 return high;
2830
2831 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) {
2832 int free_count = max_t(int, pcp->free_count, batch);
2833
2834 pcp->high = max(high - free_count, high_min);
2835 high = max(pcp->count, high_min);
2836 } else if (pcp->count >= high) {
2837 int need_high = pcp->free_count + batch;
2838
2839 /* pcp->high should be large enough to hold batch freed pages */
2840 if (pcp->high < need_high)
2841 pcp->high = clamp(need_high, high_min, high_max);
2842 }
2843
2844 return high;
2845 }
2846
2847 /*
2848  * Tune the pcp alloc factor and adjust count & free_count. Free pages to
2849  * bring the pcp's count below high.
2850  *
2851  * May return with the pcp unlocked, if the pcp spinlock cannot be
2852  * reacquired during page freeing. Return true if the pcp is locked, false otherwise.
2853 */
2854 static bool free_frozen_page_commit(struct zone *zone,
2855 struct per_cpu_pages *pcp, struct page *page, int migratetype,
2856 unsigned int order, fpi_t fpi_flags, unsigned long *UP_flags)
2857 {
2858 int high, batch;
2859 int to_free, to_free_batched;
2860 int pindex;
2861 int cpu = smp_processor_id();
2862 	bool ret = true;
2863 bool free_high = false;
2864
2865 /*
2866 * On freeing, reduce the number of pages that are batch allocated.
2867 * See nr_pcp_alloc() where alloc_factor is increased for subsequent
2868 * allocations.
2869 */
2870 pcp->alloc_factor >>= 1;
2871 __count_vm_events(PGFREE, 1 << order);
2872 pindex = order_to_pindex(migratetype, order);
2873 list_add(&page->pcp_list, &pcp->lists[pindex]);
2874 pcp->count += 1 << order;
2875
2876 batch = READ_ONCE(pcp->batch);
2877 /*
2878 	 * As high-order pages other than THPs stored on the PCP can contribute
2879 	 * to fragmentation, limit the number stored when the PCP is heavily
2880 	 * freeing without allocation. The remainder after bulk freeing
2881 	 * stops will be drained from the vmstat refresh context.
2882 */
2883 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) {
2884 free_high = (pcp->free_count >= (batch + pcp->high_min / 2) &&
2885 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) &&
2886 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) ||
2887 pcp->count >= batch));
2888 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER;
2889 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) {
2890 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER;
2891 }
2892 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX))
2893 pcp->free_count += (1 << order);
2894
2895 if (unlikely(fpi_flags & FPI_TRYLOCK)) {
2896 /*
2897 * Do not attempt to take a zone lock. Let pcp->count get
2898 * over high mark temporarily.
2899 */
2900 return true;
2901 }
2902
2903 high = nr_pcp_high(pcp, zone, batch, free_high);
2904 if (pcp->count < high)
2905 return true;
2906
2907 to_free = nr_pcp_free(pcp, batch, high, free_high);
2908 while (to_free > 0 && pcp->count > 0) {
2909 to_free_batched = min(to_free, batch);
2910 free_pcppages_bulk(zone, to_free_batched, pcp, pindex);
2911 to_free -= to_free_batched;
2912
2913 if (to_free == 0 || pcp->count == 0)
2914 break;
2915
2916 pcp_spin_unlock(pcp, *UP_flags);
2917
2918 pcp = pcp_spin_trylock(zone->per_cpu_pageset, *UP_flags);
2919 if (!pcp) {
2920 ret = false;
2921 break;
2922 }
2923
2924 /*
2925 * Check if this thread has been migrated to a different CPU.
2926 * If that is the case, give up and indicate that the pcp is
2927 * returned in an unlocked state.
2928 */
2929 if (smp_processor_id() != cpu) {
2930 pcp_spin_unlock(pcp, *UP_flags);
2931 ret = false;
2932 break;
2933 }
2934 }
2935
2936 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) &&
2937 zone_watermark_ok(zone, 0, high_wmark_pages(zone),
2938 ZONE_MOVABLE, 0)) {
2939 struct pglist_data *pgdat = zone->zone_pgdat;
2940 clear_bit(ZONE_BELOW_HIGH, &zone->flags);
2941
2942 /*
2943 * Assume that memory pressure on this node is gone and may be
2944 * in a reclaimable state. If a memory fallback node exists,
2945 * direct reclaim may not have been triggered, causing a
2946 * 'hopeless node' to stay in that state for a while. Let
2947 * kswapd work again by resetting kswapd_failures.
2948 */
2949 if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES &&
2950 next_memory_node(pgdat->node_id) < MAX_NUMNODES)
2951 atomic_set(&pgdat->kswapd_failures, 0);
2952 }
2953 return ret;
2954 }
2955
2956 /*
2957 * Free a pcp page
2958 */
2959 static void __free_frozen_pages(struct page *page, unsigned int order,
2960 fpi_t fpi_flags)
2961 {
2962 unsigned long UP_flags;
2963 struct per_cpu_pages *pcp;
2964 struct zone *zone;
2965 unsigned long pfn = page_to_pfn(page);
2966 int migratetype;
2967
2968 if (!pcp_allowed_order(order)) {
2969 __free_pages_ok(page, order, fpi_flags);
2970 return;
2971 }
2972
2973 if (!free_pages_prepare(page, order))
2974 return;
2975
2976 /*
2977 * We only track unmovable, reclaimable and movable on pcp lists.
2978 * Place ISOLATE pages on the isolated list because they are being
2979 	 * offlined, but treat HIGHATOMIC and CMA as movable pages so we can
2980 	 * get those areas back if necessary. Otherwise, we may have to free
2981 	 * excessively into the page allocator.
2982 */
2983 zone = page_zone(page);
2984 migratetype = get_pfnblock_migratetype(page, pfn);
2985 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
2986 if (unlikely(is_migrate_isolate(migratetype))) {
2987 free_one_page(zone, page, pfn, order, fpi_flags);
2988 return;
2989 }
2990 migratetype = MIGRATE_MOVABLE;
2991 }
2992
2993 if (unlikely((fpi_flags & FPI_TRYLOCK) && IS_ENABLED(CONFIG_PREEMPT_RT)
2994 && (in_nmi() || in_hardirq()))) {
2995 add_page_to_zone_llist(zone, page, order);
2996 return;
2997 }
2998 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
2999 if (pcp) {
3000 if (!free_frozen_page_commit(zone, pcp, page, migratetype,
3001 order, fpi_flags, &UP_flags))
3002 return;
3003 pcp_spin_unlock(pcp, UP_flags);
3004 } else {
3005 free_one_page(zone, page, pfn, order, fpi_flags);
3006 }
3007 }
3008
3009 void free_frozen_pages(struct page *page, unsigned int order)
3010 {
3011 __free_frozen_pages(page, order, FPI_NONE);
3012 }
3013
3014 /*
3015 * Free a batch of folios
3016 */
3017 void free_unref_folios(struct folio_batch *folios)
3018 {
3019 unsigned long UP_flags;
3020 struct per_cpu_pages *pcp = NULL;
3021 struct zone *locked_zone = NULL;
3022 int i, j;
3023
3024 /* Prepare folios for freeing */
3025 for (i = 0, j = 0; i < folios->nr; i++) {
3026 struct folio *folio = folios->folios[i];
3027 unsigned long pfn = folio_pfn(folio);
3028 unsigned int order = folio_order(folio);
3029
3030 if (!free_pages_prepare(&folio->page, order))
3031 continue;
3032 /*
3033 * Free orders not handled on the PCP directly to the
3034 * allocator.
3035 */
3036 if (!pcp_allowed_order(order)) {
3037 free_one_page(folio_zone(folio), &folio->page,
3038 pfn, order, FPI_NONE);
3039 continue;
3040 }
3041 folio->private = (void *)(unsigned long)order;
3042 if (j != i)
3043 folios->folios[j] = folio;
3044 j++;
3045 }
3046 folios->nr = j;
3047
3048 for (i = 0; i < folios->nr; i++) {
3049 struct folio *folio = folios->folios[i];
3050 struct zone *zone = folio_zone(folio);
3051 unsigned long pfn = folio_pfn(folio);
3052 unsigned int order = (unsigned long)folio->private;
3053 int migratetype;
3054
3055 folio->private = NULL;
3056 migratetype = get_pfnblock_migratetype(&folio->page, pfn);
3057
3058 /* Different zone requires a different pcp lock */
3059 if (zone != locked_zone ||
3060 is_migrate_isolate(migratetype)) {
3061 if (pcp) {
3062 pcp_spin_unlock(pcp, UP_flags);
3063 locked_zone = NULL;
3064 pcp = NULL;
3065 }
3066
3067 /*
3068 * Free isolated pages directly to the
3069 * allocator, see comment in free_frozen_pages.
3070 */
3071 if (is_migrate_isolate(migratetype)) {
3072 free_one_page(zone, &folio->page, pfn,
3073 order, FPI_NONE);
3074 continue;
3075 }
3076
3077 /*
3078 * trylock is necessary as folios may be getting freed
3079 * from IRQ or SoftIRQ context after an IO completion.
3080 */
3081 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
3082 if (unlikely(!pcp)) {
3083 free_one_page(zone, &folio->page, pfn,
3084 order, FPI_NONE);
3085 continue;
3086 }
3087 locked_zone = zone;
3088 }
3089
3090 /*
3091 * Non-isolated types over MIGRATE_PCPTYPES get added
3092 * to the MIGRATE_MOVABLE pcp list.
3093 */
3094 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
3095 migratetype = MIGRATE_MOVABLE;
3096
3097 trace_mm_page_free_batched(&folio->page);
3098 if (!free_frozen_page_commit(zone, pcp, &folio->page,
3099 migratetype, order, FPI_NONE, &UP_flags)) {
3100 pcp = NULL;
3101 locked_zone = NULL;
3102 }
3103 }
3104
3105 if (pcp)
3106 pcp_spin_unlock(pcp, UP_flags);
3107 folio_batch_reinit(folios);
3108 }
3109
3110 /*
3111 * split_page takes a non-compound higher-order page, and splits it into
3112  * n (1<<order) sub-pages: page[0..n-1]
3113 * Each sub-page must be freed individually.
3114 *
3115 * Note: this is probably too low level an operation for use in drivers.
3116 * Please consult with lkml before using this in your driver.
3117 */
3118 void split_page(struct page *page, unsigned int order)
3119 {
3120 int i;
3121
3122 VM_BUG_ON_PAGE(PageCompound(page), page);
3123 VM_BUG_ON_PAGE(!page_count(page), page);
3124
3125 for (i = 1; i < (1 << order); i++)
3126 set_page_refcounted(page + i);
3127 split_page_owner(page, order, 0);
3128 pgalloc_tag_split(page_folio(page), order, 0);
3129 split_page_memcg(page, order);
3130 }
3131 EXPORT_SYMBOL_GPL(split_page);
3132
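/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * after split_page(), every sub-page carries its own reference and must
 * be freed individually; freeing the head no longer frees the tails.
 */
static inline void example_keep_head_only(struct page *page,
					  unsigned int order)
{
	unsigned int i;

	split_page(page, order);
	/* Keep page[0]; release the remaining (1 << order) - 1 pages. */
	for (i = 1; i < (1U << order); i++)
		__free_page(page + i);
}
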
3133 int __isolate_free_page(struct page *page, unsigned int order)
3134 {
3135 struct zone *zone = page_zone(page);
3136 int mt = get_pageblock_migratetype(page);
3137
3138 if (!is_migrate_isolate(mt)) {
3139 unsigned long watermark;
3140 /*
3141 * Obey watermarks as if the page was being allocated. We can
3142 * emulate a high-order watermark check with a raised order-0
3143 * watermark, because we already know our high-order page
3144 * exists.
3145 */
3146 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3147 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3148 return 0;
3149 }
3150
3151 del_page_from_free_list(page, zone, order, mt);
3152
3153 /*
3154 	 * Set the pageblock's migratetype if the isolated page is at least
3155 	 * half of a pageblock.
3156 */
3157 if (order >= pageblock_order - 1) {
3158 struct page *endpage = page + (1 << order) - 1;
3159 for (; page < endpage; page += pageblock_nr_pages) {
3160 int mt = get_pageblock_migratetype(page);
3161 /*
3162 * Only change normal pageblocks (i.e., they can merge
3163 * with others)
3164 */
3165 if (migratetype_is_mergeable(mt))
3166 move_freepages_block(zone, page, mt,
3167 MIGRATE_MOVABLE);
3168 }
3169 }
3170
3171 return 1UL << order;
3172 }
3173
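/*
 * Illustrative sketch, not part of the allocator: the emulated
 * high-order watermark check in __isolate_free_page() above. The
 * high-order page is known to exist, so raising the order-0 watermark
 * by 1 << order asks whether the zone can afford to lose that many
 * free pages.
 */
static inline unsigned long example_emulated_wmark(unsigned long wmark_min,
						   unsigned int order)
{
	return wmark_min + (1UL << order);
}
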
3174 /**
3175 * __putback_isolated_page - Return a now-isolated page back where we got it
3176 * @page: Page that was isolated
3177 * @order: Order of the isolated page
3178 * @mt: The page's pageblock's migratetype
3179 *
3180 * This function is meant to return a page pulled from the free lists via
3181 * __isolate_free_page back to the free lists they were pulled from.
3182 */
3183 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3184 {
3185 struct zone *zone = page_zone(page);
3186
3187 /* zone lock should be held when this function is called */
3188 lockdep_assert_held(&zone->lock);
3189
3190 /* Return isolated page to tail of freelist. */
3191 __free_one_page(page, page_to_pfn(page), zone, order, mt,
3192 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3193 }
3194
3195 /*
3196 * Update NUMA hit/miss statistics
3197 */
3198 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3199 long nr_account)
3200 {
3201 #ifdef CONFIG_NUMA
3202 enum numa_stat_item local_stat = NUMA_LOCAL;
3203
3204 /* skip numa counters update if numa stats is disabled */
3205 if (!static_branch_likely(&vm_numa_stat_key))
3206 return;
3207
3208 if (zone_to_nid(z) != numa_node_id())
3209 local_stat = NUMA_OTHER;
3210
3211 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3212 __count_numa_events(z, NUMA_HIT, nr_account);
3213 else {
3214 __count_numa_events(z, NUMA_MISS, nr_account);
3215 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3216 }
3217 __count_numa_events(z, local_stat, nr_account);
3218 #endif
3219 }
3220
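/*
 * Illustrative sketch, not part of the allocator: the two independent
 * questions zone_statistics() above answers. Hit/miss compares against
 * the preferred zone's node; local/other compares against the node of
 * the CPU doing the allocation.
 */
static inline bool example_numa_hit(int zone_nid, int preferred_nid)
{
	return zone_nid == preferred_nid;	/* NUMA_HIT vs NUMA_MISS */
}

static inline bool example_numa_local(int zone_nid, int cpu_nid)
{
	return zone_nid == cpu_nid;		/* NUMA_LOCAL vs NUMA_OTHER */
}
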
3221 static __always_inline
3222 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
3223 unsigned int order, unsigned int alloc_flags,
3224 int migratetype)
3225 {
3226 struct page *page;
3227 unsigned long flags;
3228
3229 do {
3230 page = NULL;
3231 if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
3232 if (!spin_trylock_irqsave(&zone->lock, flags))
3233 return NULL;
3234 } else {
3235 spin_lock_irqsave(&zone->lock, flags);
3236 }
3237 if (alloc_flags & ALLOC_HIGHATOMIC)
3238 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3239 if (!page) {
3240 enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
3241
3242 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
3243
3244 /*
3245 * If the allocation fails, allow OOM handling and
3246 * order-0 (atomic) allocs access to HIGHATOMIC
3247 * reserves as failing now is worse than failing a
3248 * high-order atomic allocation in the future.
3249 */
3250 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK)))
3251 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3252
3253 if (!page) {
3254 spin_unlock_irqrestore(&zone->lock, flags);
3255 return NULL;
3256 }
3257 }
3258 spin_unlock_irqrestore(&zone->lock, flags);
3259 } while (check_new_pages(page, order));
3260
3261 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3262 zone_statistics(preferred_zone, zone, 1);
3263
3264 return page;
3265 }
3266
3267 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order)
3268 {
3269 int high, base_batch, batch, max_nr_alloc;
3270 int high_max, high_min;
3271
3272 base_batch = READ_ONCE(pcp->batch);
3273 high_min = READ_ONCE(pcp->high_min);
3274 high_max = READ_ONCE(pcp->high_max);
3275 high = pcp->high = clamp(pcp->high, high_min, high_max);
3276
3277 /* Check for PCP disabled or boot pageset */
3278 if (unlikely(high < base_batch))
3279 return 1;
3280
3281 if (order)
3282 batch = base_batch;
3283 else
3284 batch = (base_batch << pcp->alloc_factor);
3285
3286 /*
3287 	 * If we had a larger pcp->high, we could avoid allocating from the
3288 	 * zone.
3289 */
3290 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags))
3291 high = pcp->high = min(high + batch, high_max);
3292
3293 if (!order) {
3294 max_nr_alloc = max(high - pcp->count - base_batch, base_batch);
3295 /*
3296 * Double the number of pages allocated each time there is
3297 * subsequent allocation of order-0 pages without any freeing.
3298 */
3299 if (batch <= max_nr_alloc &&
3300 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX)
3301 pcp->alloc_factor++;
3302 batch = min(batch, max_nr_alloc);
3303 }
3304
3305 /*
3306 * Scale batch relative to order if batch implies free pages
3307 * can be stored on the PCP. Batch can be 1 for small zones or
3308 * for boot pagesets which should never store free pages as
3309 * the pages may belong to arbitrary zones.
3310 */
3311 if (batch > 1)
3312 batch = max(batch >> order, 2);
3313
3314 return batch;
3315 }
3316
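/*
 * Illustrative sketch, not part of the allocator: the exponential batch
 * growth for repeated order-0 refills in nr_pcp_alloc() above. Each
 * refill without an intervening free doubles the next batch, bounded by
 * @factor_max (CONFIG_PCP_BATCH_SCALE_MAX); frees halve the factor
 * again, see free_frozen_page_commit().
 */
static inline int example_alloc_batch(int base_batch, int *alloc_factor,
				      int factor_max)
{
	int batch = base_batch << *alloc_factor;

	if (*alloc_factor < factor_max)
		(*alloc_factor)++;	/* next refill takes twice as much */
	return batch;
}
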
3317 /* Remove page from the per-cpu list, caller must protect the list */
3318 static inline
3319 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3320 int migratetype,
3321 unsigned int alloc_flags,
3322 struct per_cpu_pages *pcp,
3323 struct list_head *list)
3324 {
3325 struct page *page;
3326
3327 do {
3328 if (list_empty(list)) {
3329 int batch = nr_pcp_alloc(pcp, zone, order);
3330 int alloced;
3331
3332 alloced = rmqueue_bulk(zone, order,
3333 batch, list,
3334 migratetype, alloc_flags);
3335
3336 pcp->count += alloced << order;
3337 if (unlikely(list_empty(list)))
3338 return NULL;
3339 }
3340
3341 page = list_first_entry(list, struct page, pcp_list);
3342 list_del(&page->pcp_list);
3343 pcp->count -= 1 << order;
3344 } while (check_new_pages(page, order));
3345
3346 return page;
3347 }
3348
3349 /* Lock and remove page from the per-cpu list */
3350 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3351 struct zone *zone, unsigned int order,
3352 int migratetype, unsigned int alloc_flags)
3353 {
3354 struct per_cpu_pages *pcp;
3355 struct list_head *list;
3356 struct page *page;
3357 unsigned long UP_flags;
3358
3359 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
3360 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
3361 if (!pcp)
3362 return NULL;
3363
3364 /*
3365 * On allocation, reduce the number of pages that are batch freed.
3366 * See nr_pcp_free() where free_factor is increased for subsequent
3367 * frees.
3368 */
3369 pcp->free_count >>= 1;
3370 list = &pcp->lists[order_to_pindex(migratetype, order)];
3371 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3372 pcp_spin_unlock(pcp, UP_flags);
3373 if (page) {
3374 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3375 zone_statistics(preferred_zone, zone, 1);
3376 }
3377 return page;
3378 }
3379
3380 /*
3381 * Allocate a page from the given zone.
3382 * Use pcplists for THP or "cheap" high-order allocations.
3383 */
3384
3385 /*
3386 * Do not instrument rmqueue() with KMSAN. This function may call
3387 * __msan_poison_alloca() through a call to set_pfnblock_migratetype().
3388 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
3389 * may call rmqueue() again, which will result in a deadlock.
3390 */
3391 __no_sanitize_memory
3392 static inline
3393 struct page *rmqueue(struct zone *preferred_zone,
3394 struct zone *zone, unsigned int order,
3395 gfp_t gfp_flags, unsigned int alloc_flags,
3396 int migratetype)
3397 {
3398 struct page *page;
3399
3400 if (likely(pcp_allowed_order(order))) {
3401 page = rmqueue_pcplist(preferred_zone, zone, order,
3402 migratetype, alloc_flags);
3403 if (likely(page))
3404 goto out;
3405 }
3406
3407 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
3408 migratetype);
3409
3410 out:
3411 /* Separate test+clear to avoid unnecessary atomics */
3412 if ((alloc_flags & ALLOC_KSWAPD) &&
3413 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
3414 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3415 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3416 }
3417
3418 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3419 return page;
3420 }
3421
3422 /*
3423 * Reserve the pageblock(s) surrounding an allocation request for
3424 * exclusive use of high-order atomic allocations if there are no
3425 * empty page blocks that contain a page with a suitable order
3426 */
3427 static void reserve_highatomic_pageblock(struct page *page, int order,
3428 struct zone *zone)
3429 {
3430 int mt;
3431 unsigned long max_managed, flags;
3432
3433 /*
3434 * Reserve a minimum of 1 pageblock and a maximum of roughly
3435 * 1% of a zone. If 1% of the zone amounts to less than a
3436 * pageblock, don't reserve any pageblocks at all.
3437 * The check is race-prone but harmless.
3438 */
3439 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
3440 return;
3441 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
3442 if (zone->nr_reserved_highatomic >= max_managed)
3443 return;
3444
3445 spin_lock_irqsave(&zone->lock, flags);
3446
3447 /* Recheck the nr_reserved_highatomic limit under the lock */
3448 if (zone->nr_reserved_highatomic >= max_managed)
3449 goto out_unlock;
3450
3451 /* Yoink! */
3452 mt = get_pageblock_migratetype(page);
3453 /* Only reserve normal pageblocks (i.e., they can merge with others) */
3454 if (!migratetype_is_mergeable(mt))
3455 goto out_unlock;
3456
3457 if (order < pageblock_order) {
3458 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
3459 goto out_unlock;
3460 zone->nr_reserved_highatomic += pageblock_nr_pages;
3461 } else {
3462 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
3463 zone->nr_reserved_highatomic += 1 << order;
3464 }
3465
3466 out_unlock:
3467 spin_unlock_irqrestore(&zone->lock, flags);
3468 }
3469
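/*
 * Illustrative sizing for the limits above (x86-64 numbers assumed:
 * 4K pages, pageblock_nr_pages = 512): a zone managing 1 GiB
 * (262144 pages) caps the highatomic reservation at
 * ALIGN(262144 / 100, 512) = 3072 pages, i.e. 6 pageblocks, while a
 * zone smaller than 100 pageblocks (~200 MiB) reserves nothing.
 */
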
3470 /*
3471 * Used when an allocation is about to fail under memory pressure. This
3472 * potentially hurts the reliability of high-order allocations when under
3473 * intense memory pressure but failed atomic allocations should be easier
3474 * to recover from than an OOM.
3475 *
3476 * If @force is true, try to unreserve pageblocks even though highatomic
3477 * pageblock is exhausted.
3478 */
3479 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
3480 bool force)
3481 {
3482 struct zonelist *zonelist = ac->zonelist;
3483 unsigned long flags;
3484 struct zoneref *z;
3485 struct zone *zone;
3486 struct page *page;
3487 int order;
3488 int ret;
3489
3490 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
3491 ac->nodemask) {
3492 /*
3493 * Preserve at least one pageblock unless memory pressure
3494 * is really high.
3495 */
3496 if (!force && zone->nr_reserved_highatomic <=
3497 pageblock_nr_pages)
3498 continue;
3499
3500 spin_lock_irqsave(&zone->lock, flags);
3501 for (order = 0; order < NR_PAGE_ORDERS; order++) {
3502 struct free_area *area = &(zone->free_area[order]);
3503 unsigned long size;
3504
3505 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
3506 if (!page)
3507 continue;
3508
3509 size = max(pageblock_nr_pages, 1UL << order);
3510 /*
3511 * It should never happen but changes to
3512 * locking could inadvertently allow a per-cpu
3513 * drain to add pages to MIGRATE_HIGHATOMIC
3514 * while unreserving, so be safe and watch for
3515 * underflows.
3516 */
3517 if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic))
3518 size = zone->nr_reserved_highatomic;
3519 zone->nr_reserved_highatomic -= size;
3520
3521 /*
3522 * Convert to ac->migratetype and avoid the normal
3523 * pageblock stealing heuristics. Minimally, the caller
3524 * is doing the work and needs the pages. More
3525 * importantly, if the block was always converted to
3526 * MIGRATE_UNMOVABLE or another type then the number
3527 * of pageblocks that cannot be completely freed
3528 * may increase.
3529 */
3530 if (order < pageblock_order)
3531 ret = move_freepages_block(zone, page,
3532 MIGRATE_HIGHATOMIC,
3533 ac->migratetype);
3534 else {
3535 move_to_free_list(page, zone, order,
3536 MIGRATE_HIGHATOMIC,
3537 ac->migratetype);
3538 change_pageblock_range(page, order,
3539 ac->migratetype);
3540 ret = 1;
3541 }
3542 /*
3543 * Reserving the block(s) already succeeded,
3544 * so this should not fail on zone boundaries.
3545 */
3546 WARN_ON_ONCE(ret == -1);
3547 if (ret > 0) {
3548 spin_unlock_irqrestore(&zone->lock, flags);
3549 return ret;
3550 }
3551 }
3552 spin_unlock_irqrestore(&zone->lock, flags);
3553 }
3554
3555 return false;
3556 }
3557
3558 static inline long __zone_watermark_unusable_free(struct zone *z,
3559 unsigned int order, unsigned int alloc_flags)
3560 {
3561 long unusable_free = (1 << order) - 1;
3562
3563 /*
3564 * If the caller does not have rights to reserves below the min
3565 * watermark then subtract the free pages reserved for highatomic.
3566 */
3567 if (likely(!(alloc_flags & ALLOC_RESERVES)))
3568 unusable_free += READ_ONCE(z->nr_free_highatomic);
3569
3570 #ifdef CONFIG_CMA
3571 /* If allocation can't use CMA areas don't use free CMA pages */
3572 if (!(alloc_flags & ALLOC_CMA))
3573 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3574 #endif
3575
3576 return unusable_free;
3577 }
3578
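/*
 * Worked example for the helper above (assumed numbers): an order-3
 * request without ALLOC_RESERVES or ALLOC_CMA, against a zone holding
 * 1024 highatomic and 2048 free CMA pages, treats
 * (1 << 3) - 1 + 1024 + 2048 = 3079 pages as unusable; the watermark
 * checks below subtract that from the free page count before comparing
 * against the mark.
 */
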
3579 /*
3580 * Return true if free base pages are above 'mark'. For high-order checks it
3581 * will return true if the order-0 watermark is reached and there is at least
3582 * one free page of a suitable size. Checking now avoids taking the zone lock
3583 * to check in the allocation paths if no pages are free.
3584 */
3585 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3586 int highest_zoneidx, unsigned int alloc_flags,
3587 long free_pages)
3588 {
3589 long min = mark;
3590 int o;
3591
3592 /* free_pages may go negative - that's OK */
3593 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3594
3595 if (unlikely(alloc_flags & ALLOC_RESERVES)) {
3596 /*
3597 * __GFP_HIGH allows access to 50% of the min reserve as well
3598 * as OOM.
3599 */
3600 if (alloc_flags & ALLOC_MIN_RESERVE) {
3601 min -= min / 2;
3602
3603 /*
3604 * Non-blocking allocations (e.g. GFP_ATOMIC) can
3605 * access more reserves than just __GFP_HIGH. Other
3606 * non-blocking allocation requests such as GFP_NOWAIT
3607 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get
3608 * access to the min reserve.
3609 */
3610 if (alloc_flags & ALLOC_NON_BLOCK)
3611 min -= min / 4;
3612 }
3613
3614 /*
3615 * OOM victims can try even harder than the normal reserve
3616 * users on the grounds that it's definitely going to be in
3617 * the exit path shortly and free memory. Any allocation it
3618 * makes during the free path will be small and short-lived.
3619 */
3620 if (alloc_flags & ALLOC_OOM)
3621 min -= min / 2;
3622 }
3623
3624 /*
3625 * Check watermarks for an order-0 allocation request. If these
3626 * are not met, then a high-order request also cannot go ahead
3627 * even if a suitable page happened to be free.
3628 */
3629 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3630 return false;
3631
3632 /* If this is an order-0 request then the watermark is fine */
3633 if (!order)
3634 return true;
3635
3636 /* For a high-order request, check at least one suitable page is free */
3637 for (o = order; o < NR_PAGE_ORDERS; o++) {
3638 struct free_area *area = &z->free_area[o];
3639 int mt;
3640
3641 if (!area->nr_free)
3642 continue;
3643
3644 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3645 if (!free_area_empty(area, mt))
3646 return true;
3647 }
3648
3649 #ifdef CONFIG_CMA
3650 if ((alloc_flags & ALLOC_CMA) &&
3651 !free_area_empty(area, MIGRATE_CMA)) {
3652 return true;
3653 }
3654 #endif
3655 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
3656 !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
3657 return true;
3658 }
3659 }
3660 return false;
3661 }
3662
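/*
 * Reserve scaling above, illustrated with an assumed min watermark of
 * 128 pages: __GFP_HIGH (ALLOC_MIN_RESERVE) lowers the bar to 64, a
 * non-blocking GFP_ATOMIC request (ALLOC_MIN_RESERVE|ALLOC_NON_BLOCK)
 * to 48, and ALLOC_OOM halves whatever remains, all on top of the
 * per-zone lowmem reserve.
 */
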
3663 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3664 int highest_zoneidx, unsigned int alloc_flags)
3665 {
3666 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3667 zone_page_state(z, NR_FREE_PAGES));
3668 }
3669
3670 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3671 unsigned long mark, int highest_zoneidx,
3672 unsigned int alloc_flags, gfp_t gfp_mask)
3673 {
3674 long free_pages;
3675
3676 free_pages = zone_page_state(z, NR_FREE_PAGES);
3677
3678 /*
3679 * Fast check for order-0 only. If this fails then the reserves
3680 * need to be calculated.
3681 */
3682 if (!order) {
3683 long usable_free;
3684 long reserved;
3685
3686 usable_free = free_pages;
3687 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
3688
3689 /* reserved may overestimate high-atomic reserves. */
3690 usable_free -= min(usable_free, reserved);
3691 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
3692 return true;
3693 }
3694
3695 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3696 free_pages))
3697 return true;
3698
3699 /*
3700 * Ignore watermark boosting for __GFP_HIGH order-0 allocations
3701 * when checking the min watermark. The min watermark is the
3702 * point where boosting is ignored so that kswapd is woken up
3703 * when below the low watermark.
3704 */
3705 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost
3706 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3707 mark = z->_watermark[WMARK_MIN];
3708 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3709 alloc_flags, free_pages);
3710 }
3711
3712 return false;
3713 }
3714
3715 #ifdef CONFIG_NUMA
3716 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
3717
3718 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3719 {
3720 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3721 node_reclaim_distance;
3722 }
3723 #else /* CONFIG_NUMA */
3724 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3725 {
3726 return true;
3727 }
3728 #endif /* CONFIG_NUMA */
3729
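/*
 * Example (distances assumed): with the default RECLAIM_DISTANCE of 30,
 * the local node (distance 10) and a directly attached peer (distance
 * 20) both allow node reclaim, while a node reported at distance 40
 * does not and the allocator falls back to other zones instead.
 */
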
3730 /*
3731 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3732 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3733 * premature use of a lower zone may cause lowmem pressure problems that
3734 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3735 * probably too small. It only makes sense to spread allocations to avoid
3736 * fragmentation between the Normal and DMA32 zones.
3737 */
3738 static inline unsigned int
3739 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3740 {
3741 unsigned int alloc_flags;
3742
3743 /*
3744 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3745 * to save a branch.
3746 */
3747 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3748
3749 if (defrag_mode) {
3750 alloc_flags |= ALLOC_NOFRAGMENT;
3751 return alloc_flags;
3752 }
3753
3754 #ifdef CONFIG_ZONE_DMA32
3755 if (!zone)
3756 return alloc_flags;
3757
3758 if (zone_idx(zone) != ZONE_NORMAL)
3759 return alloc_flags;
3760
3761 /*
3762 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3763 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3764 * on UMA that if Normal is populated then so is DMA32.
3765 */
3766 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3767 if (nr_online_nodes > 1 && !populated_zone(--zone))
3768 return alloc_flags;
3769
3770 alloc_flags |= ALLOC_NOFRAGMENT;
3771 #endif /* CONFIG_ZONE_DMA32 */
3772 return alloc_flags;
3773 }
3774
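/*
 * Example for the helper above: on a single-node x86-64 box with a
 * populated ZONE_DMA32, a GFP_KERNEL allocation preferring ZONE_NORMAL
 * starts with ALLOC_KSWAPD | ALLOC_NOFRAGMENT, steering fallbacks into
 * DMA32 before fragmenting Normal; get_page_from_freelist() drops the
 * flag again if every candidate zone turns out to be fragmented.
 */
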
3775 /* Must be called after current_gfp_context() which can change gfp_mask */
3776 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3777 unsigned int alloc_flags)
3778 {
3779 #ifdef CONFIG_CMA
3780 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3781 alloc_flags |= ALLOC_CMA;
3782 #endif
3783 return alloc_flags;
3784 }
3785
3786 /*
3787 * get_page_from_freelist goes through the zonelist trying to allocate
3788 * a page.
3789 */
3790 static struct page *
3791 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3792 const struct alloc_context *ac)
3793 {
3794 struct zoneref *z;
3795 struct zone *zone;
3796 struct pglist_data *last_pgdat = NULL;
3797 bool last_pgdat_dirty_ok = false;
3798 bool no_fallback;
3799 bool skip_kswapd_nodes = nr_online_nodes > 1;
3800 bool skipped_kswapd_nodes = false;
3801
3802 retry:
3803 /*
3804 * Scan zonelist, looking for a zone with enough free.
3805 * See also cpuset_current_node_allowed() comment in kernel/cgroup/cpuset.c.
3806 */
3807 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3808 z = ac->preferred_zoneref;
3809 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3810 ac->nodemask) {
3811 struct page *page;
3812 unsigned long mark;
3813
3814 if (cpusets_enabled() &&
3815 (alloc_flags & ALLOC_CPUSET) &&
3816 !__cpuset_zone_allowed(zone, gfp_mask))
3817 continue;
3818 /*
3819 * When allocating a page cache page for writing, we
3820 * want to get it from a node that is within its dirty
3821 * limit, such that no single node holds more than its
3822 * proportional share of globally allowed dirty pages.
3823 * The dirty limits take into account the node's
3824 * lowmem reserves and high watermark so that kswapd
3825 * should be able to balance it without having to
3826 * write pages from its LRU list.
3827 *
3828 * XXX: For now, allow allocations to potentially
3829 * exceed the per-node dirty limit in the slowpath
3830 * (spread_dirty_pages unset) before going into reclaim,
3831 * which is important when, on a NUMA setup, the allowed
3832 * nodes together are not big enough to reach the
3833 * global limit. The proper fix for these situations
3834 * will require awareness of nodes in the
3835 * dirty-throttling and the flusher threads.
3836 */
3837 if (ac->spread_dirty_pages) {
3838 if (last_pgdat != zone->zone_pgdat) {
3839 last_pgdat = zone->zone_pgdat;
3840 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
3841 }
3842
3843 if (!last_pgdat_dirty_ok)
3844 continue;
3845 }
3846
3847 if (no_fallback && !defrag_mode && nr_online_nodes > 1 &&
3848 zone != zonelist_zone(ac->preferred_zoneref)) {
3849 int local_nid;
3850
3851 /*
3852 * If moving to a remote node, retry but allow
3853 * fragmenting fallbacks. Locality is more important
3854 * than fragmentation avoidance.
3855 */
3856 local_nid = zonelist_node_idx(ac->preferred_zoneref);
3857 if (zone_to_nid(zone) != local_nid) {
3858 alloc_flags &= ~ALLOC_NOFRAGMENT;
3859 goto retry;
3860 }
3861 }
3862
3863 /*
3864 * If kswapd is already active on a node, keep looking
3865 * for other nodes that might be idle. This can happen
3866 * if another process has NUMA bindings and is causing
3867 * kswapd wakeups on only some nodes. Avoid accidental
3868 * "node_reclaim_mode"-like behavior in this case.
3869 */
3870 if (skip_kswapd_nodes &&
3871 !waitqueue_active(&zone->zone_pgdat->kswapd_wait)) {
3872 skipped_kswapd_nodes = true;
3873 continue;
3874 }
3875
3876 cond_accept_memory(zone, order, alloc_flags);
3877
3878 /*
3879 * Detect whether the number of free pages is below the high
3880 * watermark. If so, we will decrease pcp->high and free
3881 * PCP pages in the free path to reduce the possibility of
3882 * premature page reclaim. Detection is done here to
3883 * avoid doing that work in the hotter free path.
3884 */
3885 if (test_bit(ZONE_BELOW_HIGH, &zone->flags))
3886 goto check_alloc_wmark;
3887
3888 mark = high_wmark_pages(zone);
3889 if (zone_watermark_fast(zone, order, mark,
3890 ac->highest_zoneidx, alloc_flags,
3891 gfp_mask))
3892 goto try_this_zone;
3893 else
3894 set_bit(ZONE_BELOW_HIGH, &zone->flags);
3895
3896 check_alloc_wmark:
3897 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3898 if (!zone_watermark_fast(zone, order, mark,
3899 ac->highest_zoneidx, alloc_flags,
3900 gfp_mask)) {
3901 int ret;
3902
3903 if (cond_accept_memory(zone, order, alloc_flags))
3904 goto try_this_zone;
3905
3906 /*
3907 * Watermark failed for this zone, but see if we can
3908 * grow this zone if it contains deferred pages.
3909 */
3910 if (deferred_pages_enabled()) {
3911 if (_deferred_grow_zone(zone, order))
3912 goto try_this_zone;
3913 }
3914 /* Checked here to keep the fast path fast */
3915 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3916 if (alloc_flags & ALLOC_NO_WATERMARKS)
3917 goto try_this_zone;
3918
3919 if (!node_reclaim_enabled() ||
3920 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
3921 continue;
3922
3923 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3924 switch (ret) {
3925 case NODE_RECLAIM_NOSCAN:
3926 /* did not scan */
3927 continue;
3928 case NODE_RECLAIM_FULL:
3929 /* scanned but unreclaimable */
3930 continue;
3931 default:
3932 /* did we reclaim enough */
3933 if (zone_watermark_ok(zone, order, mark,
3934 ac->highest_zoneidx, alloc_flags))
3935 goto try_this_zone;
3936
3937 continue;
3938 }
3939 }
3940
3941 try_this_zone:
3942 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
3943 gfp_mask, alloc_flags, ac->migratetype);
3944 if (page) {
3945 prep_new_page(page, order, gfp_mask, alloc_flags);
3946
3947 /*
3948 * If this is a high-order atomic allocation then check
3949 * if the pageblock should be reserved for the future
3950 */
3951 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
3952 reserve_highatomic_pageblock(page, order, zone);
3953
3954 return page;
3955 } else {
3956 if (cond_accept_memory(zone, order, alloc_flags))
3957 goto try_this_zone;
3958
3959 /* Try again if zone has deferred pages */
3960 if (deferred_pages_enabled()) {
3961 if (_deferred_grow_zone(zone, order))
3962 goto try_this_zone;
3963 }
3964 }
3965 }
3966
3967 /*
3968 * If we skipped over nodes with active kswapds and found no
3969 * idle nodes, retry and place anywhere the watermarks permit.
3970 */
3971 if (skip_kswapd_nodes && skipped_kswapd_nodes) {
3972 skip_kswapd_nodes = false;
3973 goto retry;
3974 }
3975
3976 /*
3977 * It's possible on a UMA machine to get through all zones that are
3978 * fragmented. If avoiding fragmentation, reset and try again.
3979 */
3980 if (no_fallback && !defrag_mode) {
3981 alloc_flags &= ~ALLOC_NOFRAGMENT;
3982 goto retry;
3983 }
3984
3985 return NULL;
3986 }
3987
3988 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3989 {
3990 unsigned int filter = SHOW_MEM_FILTER_NODES;
3991
3992 /*
3993 * This documents exceptions given to allocations in certain
3994 * contexts that are allowed to allocate outside current's set
3995 * of allowed nodes.
3996 */
3997 if (!(gfp_mask & __GFP_NOMEMALLOC))
3998 if (tsk_is_oom_victim(current) ||
3999 (current->flags & (PF_MEMALLOC | PF_EXITING)))
4000 filter &= ~SHOW_MEM_FILTER_NODES;
4001 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
4002 filter &= ~SHOW_MEM_FILTER_NODES;
4003
4004 __show_mem(filter, nodemask, gfp_zone(gfp_mask));
4005 mem_cgroup_show_protected_memory(NULL);
4006 }
4007
4008 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4009 {
4010 struct va_format vaf;
4011 va_list args;
4012 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4013
4014 if ((gfp_mask & __GFP_NOWARN) ||
4015 !__ratelimit(&nopage_rs) ||
4016 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
4017 return;
4018
4019 va_start(args, fmt);
4020 vaf.fmt = fmt;
4021 vaf.va = &args;
4022 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4023 current->comm, &vaf, gfp_mask, &gfp_mask,
4024 nodemask_pr_args(nodemask));
4025 va_end(args);
4026
4027 cpuset_print_current_mems_allowed();
4028 pr_cont("\n");
4029 dump_stack();
4030 warn_alloc_show_mem(gfp_mask, nodemask);
4031 }
4032
4033 static inline struct page *
4034 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4035 unsigned int alloc_flags,
4036 const struct alloc_context *ac)
4037 {
4038 struct page *page;
4039
4040 page = get_page_from_freelist(gfp_mask, order,
4041 alloc_flags|ALLOC_CPUSET, ac);
4042 /*
4043 * Fall back to ignoring the cpuset restriction if our
4044 * nodes are depleted.
4045 */
4046 if (!page)
4047 page = get_page_from_freelist(gfp_mask, order,
4048 alloc_flags, ac);
4049 return page;
4050 }
4051
4052 static inline struct page *
4053 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4054 const struct alloc_context *ac, unsigned long *did_some_progress)
4055 {
4056 struct oom_control oc = {
4057 .zonelist = ac->zonelist,
4058 .nodemask = ac->nodemask,
4059 .memcg = NULL,
4060 .gfp_mask = gfp_mask,
4061 .order = order,
4062 };
4063 struct page *page;
4064
4065 *did_some_progress = 0;
4066
4067 /*
4068 * Acquire the oom lock. If that fails, somebody else is
4069 * making progress for us.
4070 */
4071 if (!mutex_trylock(&oom_lock)) {
4072 *did_some_progress = 1;
4073 schedule_timeout_uninterruptible(1);
4074 return NULL;
4075 }
4076
4077 /*
4078 * Go through the zonelist yet one more time, keep very high watermark
4079 * here, this is only to catch a parallel oom killing, we must fail if
4080 * we're still under heavy pressure. But make sure that this reclaim
4081 * attempt does not depend on a __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
4082 * allocation, which would never fail while oom_lock is already held.
4083 */
4084 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4085 ~__GFP_DIRECT_RECLAIM, order,
4086 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4087 if (page)
4088 goto out;
4089
4090 /* Coredumps can quickly deplete all memory reserves */
4091 if (current->flags & PF_DUMPCORE)
4092 goto out;
4093 /* The OOM killer will not help higher order allocs */
4094 if (order > PAGE_ALLOC_COSTLY_ORDER)
4095 goto out;
4096 /*
4097 * We have already exhausted all our reclaim opportunities without any
4098 * success so it is time to admit defeat. We will skip the OOM killer
4099 * because it is very likely that the caller has a more reasonable
4100 * fallback than shooting a random task.
4101 *
4102 * The OOM killer may not free memory on a specific node.
4103 */
4104 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4105 goto out;
4106 /* The OOM killer does not needlessly kill tasks for lowmem */
4107 if (ac->highest_zoneidx < ZONE_NORMAL)
4108 goto out;
4109 if (pm_suspended_storage())
4110 goto out;
4111 /*
4112 * XXX: GFP_NOFS allocations should rather fail than rely on
4113 * other requests to make forward progress.
4114 * We are in an unfortunate situation where out_of_memory cannot
4115 * do much for this context but let's try it to at least get
4116 * access to memory reserved if the current task is killed (see
4117 * out_of_memory). Once filesystems are ready to handle allocation
4118 * failures more gracefully we should just bail out here.
4119 */
4120
4121 /* Exhausted what can be done so it's blame time */
4122 if (out_of_memory(&oc) ||
4123 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
4124 *did_some_progress = 1;
4125
4126 /*
4127 * Help non-failing allocations by giving them access to memory
4128 * reserves
4129 */
4130 if (gfp_mask & __GFP_NOFAIL)
4131 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4132 ALLOC_NO_WATERMARKS, ac);
4133 }
4134 out:
4135 mutex_unlock(&oom_lock);
4136 return page;
4137 }
4138
4139 /*
4140 * Maximum number of compaction retries with progress before the OOM
4141 * killer is considered the only way to move forward.
4142 */
4143 #define MAX_COMPACT_RETRIES 16
4144
4145 #ifdef CONFIG_COMPACTION
4146 /* Try memory compaction for high-order allocations before reclaim */
4147 static struct page *
4148 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4149 unsigned int alloc_flags, const struct alloc_context *ac,
4150 enum compact_priority prio, enum compact_result *compact_result)
4151 {
4152 struct page *page = NULL;
4153 unsigned long pflags;
4154 unsigned int noreclaim_flag;
4155
4156 if (!order)
4157 return NULL;
4158
4159 psi_memstall_enter(&pflags);
4160 delayacct_compact_start();
4161 noreclaim_flag = memalloc_noreclaim_save();
4162
4163 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4164 prio, &page);
4165
4166 memalloc_noreclaim_restore(noreclaim_flag);
4167 psi_memstall_leave(&pflags);
4168 delayacct_compact_end();
4169
4170 if (*compact_result == COMPACT_SKIPPED)
4171 return NULL;
4172 /*
4173 * In at least one zone, compaction wasn't deferred or skipped, so let's
4174 * count a compaction stall
4175 */
4176 count_vm_event(COMPACTSTALL);
4177
4178 /* Prep a captured page if available */
4179 if (page)
4180 prep_new_page(page, order, gfp_mask, alloc_flags);
4181
4182 /* Try to get a page from the freelist if available */
4183 if (!page)
4184 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4185
4186 if (page) {
4187 struct zone *zone = page_zone(page);
4188
4189 zone->compact_blockskip_flush = false;
4190 compaction_defer_reset(zone, order, true);
4191 count_vm_event(COMPACTSUCCESS);
4192 return page;
4193 }
4194
4195 /*
4196 * It's bad if a compaction run occurs and fails. The most likely reason
4197 * is that pages exist, but not enough to satisfy watermarks.
4198 */
4199 count_vm_event(COMPACTFAIL);
4200
4201 cond_resched();
4202
4203 return NULL;
4204 }
4205
4206 static inline bool
4207 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4208 enum compact_result compact_result,
4209 enum compact_priority *compact_priority,
4210 int *compaction_retries)
4211 {
4212 int max_retries = MAX_COMPACT_RETRIES;
4213 int min_priority;
4214 bool ret = false;
4215 int retries = *compaction_retries;
4216 enum compact_priority priority = *compact_priority;
4217
4218 if (!order)
4219 return false;
4220
4221 if (fatal_signal_pending(current))
4222 return false;
4223
4224 /*
4225 * Compaction was skipped due to a lack of free order-0
4226 * migration targets. Continue if reclaim can help.
4227 */
4228 if (compact_result == COMPACT_SKIPPED) {
4229 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4230 goto out;
4231 }
4232
4233 /*
4234 * Compaction managed to coalesce some page blocks, but the
4235 * allocation failed presumably due to a race. Retry some.
4236 */
4237 if (compact_result == COMPACT_SUCCESS) {
4238 /*
4239 * !costly requests are much more important than
4240 * __GFP_RETRY_MAYFAIL costly ones because they are de
4241 * facto nofail and invoke the OOM killer to move on while
4242 * costly ones can fail and users are ready to cope with
4243 * that. The 1/4 retry budget is rather arbitrary but we would
4244 * need much more detailed feedback from compaction to
4245 * make a better decision.
4246 */
4247 if (order > PAGE_ALLOC_COSTLY_ORDER)
4248 max_retries /= 4;
4249
4250 if (++(*compaction_retries) <= max_retries) {
4251 ret = true;
4252 goto out;
4253 }
4254 }
4255
4256 /*
4257 * Compaction failed. Retry with increasing priority.
4258 */
4259 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4260 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4261
4262 if (*compact_priority > min_priority) {
4263 (*compact_priority)--;
4264 *compaction_retries = 0;
4265 ret = true;
4266 }
4267 out:
4268 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4269 return ret;
4270 }
4271 #else
4272 static inline struct page *
4273 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4274 unsigned int alloc_flags, const struct alloc_context *ac,
4275 enum compact_priority prio, enum compact_result *compact_result)
4276 {
4277 *compact_result = COMPACT_SKIPPED;
4278 return NULL;
4279 }
4280
4281 static inline bool
4282 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4283 enum compact_result compact_result,
4284 enum compact_priority *compact_priority,
4285 int *compaction_retries)
4286 {
4287 struct zone *zone;
4288 struct zoneref *z;
4289
4290 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4291 return false;
4292
4293 /*
4294 * There are setups with compaction disabled which would prefer to loop
4295 * inside the allocator rather than hit the oom killer prematurely.
4296 * Let's give them some hope and keep retrying while the order-0
4297 * watermarks are OK.
4298 */
4299 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4300 ac->highest_zoneidx, ac->nodemask) {
4301 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4302 ac->highest_zoneidx, alloc_flags))
4303 return true;
4304 }
4305 return false;
4306 }
4307 #endif /* CONFIG_COMPACTION */
4308
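/*
 * Retry budget above, illustrated: a costly (order > PAGE_ALLOC_COSTLY_ORDER,
 * i.e. order > 3) __GFP_RETRY_MAYFAIL request gets MAX_COMPACT_RETRIES / 4
 * = 4 retries per compaction priority before the priority is escalated,
 * whereas a non-costly request keeps the full budget of 16.
 */
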
4309 #ifdef CONFIG_LOCKDEP
4310 static struct lockdep_map __fs_reclaim_map =
4311 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4312
4313 static bool __need_reclaim(gfp_t gfp_mask)
4314 {
4315 /* no reclaim without waiting on it */
4316 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4317 return false;
4318
4319 /* this guy won't enter reclaim */
4320 if (current->flags & PF_MEMALLOC)
4321 return false;
4322
4323 if (gfp_mask & __GFP_NOLOCKDEP)
4324 return false;
4325
4326 return true;
4327 }
4328
4329 void __fs_reclaim_acquire(unsigned long ip)
4330 {
4331 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
4332 }
4333
4334 void __fs_reclaim_release(unsigned long ip)
4335 {
4336 lock_release(&__fs_reclaim_map, ip);
4337 }
4338
4339 void fs_reclaim_acquire(gfp_t gfp_mask)
4340 {
4341 gfp_mask = current_gfp_context(gfp_mask);
4342
4343 if (__need_reclaim(gfp_mask)) {
4344 if (gfp_mask & __GFP_FS)
4345 __fs_reclaim_acquire(_RET_IP_);
4346
4347 #ifdef CONFIG_MMU_NOTIFIER
4348 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4349 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4350 #endif
4351
4352 }
4353 }
4354 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4355
4356 void fs_reclaim_release(gfp_t gfp_mask)
4357 {
4358 gfp_mask = current_gfp_context(gfp_mask);
4359
4360 if (__need_reclaim(gfp_mask)) {
4361 if (gfp_mask & __GFP_FS)
4362 __fs_reclaim_release(_RET_IP_);
4363 }
4364 }
4365 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4366 #endif
4367
4368 /*
4369 * Zonelists may change due to hotplug during allocation. Detect when
4370 * zonelists have been rebuilt so the allocation can be retried. The reader
4371 * side does not lock and simply retries the allocation if the zonelist
4372 * changes. The writer side is protected by the embedded spin_lock.
4373 */
4374 static DEFINE_SEQLOCK(zonelist_update_seq);
4375
4376 static unsigned int zonelist_iter_begin(void)
4377 {
4378 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4379 return read_seqbegin(&zonelist_update_seq);
4380
4381 return 0;
4382 }
4383
4384 static unsigned int check_retry_zonelist(unsigned int seq)
4385 {
4386 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4387 return read_seqretry(&zonelist_update_seq, seq);
4388
4389 return seq;
4390 }
4391
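/*
 * Intended pairing, sketched (this mirrors how __alloc_pages_slowpath()
 * below uses the two helpers):
 *
 *	cookie = zonelist_iter_begin();
 *	... allocation attempts ...
 *	if (check_retry_zonelist(cookie))
 *		goto restart;
 *
 * Without CONFIG_MEMORY_HOTREMOVE both helpers reduce to constants and
 * the retry branch is optimized away.
 */
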
4392 /* Perform direct synchronous page reclaim */
4393 static unsigned long
4394 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4395 const struct alloc_context *ac)
4396 {
4397 unsigned int noreclaim_flag;
4398 unsigned long progress;
4399
4400 cond_resched();
4401
4402 /* We now go into synchronous reclaim */
4403 cpuset_memory_pressure_bump();
4404 fs_reclaim_acquire(gfp_mask);
4405 noreclaim_flag = memalloc_noreclaim_save();
4406
4407 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4408 ac->nodemask);
4409
4410 memalloc_noreclaim_restore(noreclaim_flag);
4411 fs_reclaim_release(gfp_mask);
4412
4413 cond_resched();
4414
4415 return progress;
4416 }
4417
4418 /* The really slow allocator path where we enter direct reclaim */
4419 static inline struct page *
4420 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4421 unsigned int alloc_flags, const struct alloc_context *ac,
4422 unsigned long *did_some_progress)
4423 {
4424 struct page *page = NULL;
4425 unsigned long pflags;
4426 bool drained = false;
4427
4428 psi_memstall_enter(&pflags);
4429 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4430 if (unlikely(!(*did_some_progress)))
4431 goto out;
4432
4433 retry:
4434 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4435
4436 /*
4437 * If an allocation failed after direct reclaim, it could be because
4438 * pages are pinned on the per-cpu lists or in high alloc reserves.
4439 * Shrink them and try again
4440 */
4441 if (!page && !drained) {
4442 unreserve_highatomic_pageblock(ac, false);
4443 drain_all_pages(NULL);
4444 drained = true;
4445 goto retry;
4446 }
4447 out:
4448 psi_memstall_leave(&pflags);
4449
4450 return page;
4451 }
4452
4453 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4454 const struct alloc_context *ac)
4455 {
4456 struct zoneref *z;
4457 struct zone *zone;
4458 pg_data_t *last_pgdat = NULL;
4459 enum zone_type highest_zoneidx = ac->highest_zoneidx;
4460 unsigned int reclaim_order;
4461
4462 if (defrag_mode)
4463 reclaim_order = max(order, pageblock_order);
4464 else
4465 reclaim_order = order;
4466
4467 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4468 ac->nodemask) {
4469 if (!managed_zone(zone))
4470 continue;
4471 if (last_pgdat == zone->zone_pgdat)
4472 continue;
4473 wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx);
4474 last_pgdat = zone->zone_pgdat;
4475 }
4476 }
4477
4478 static inline unsigned int
4479 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
4480 {
4481 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4482
4483 /*
4484 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE
4485 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4486 * to save two branches.
4487 */
4488 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
4489 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4490
4491 /*
4492 * The caller may dip into page reserves a bit more if the caller
4493 * cannot run direct reclaim, or if the caller has realtime scheduling
4494 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
4495 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
4496 */
4497 alloc_flags |= (__force int)
4498 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4499
4500 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
4501 /*
4502 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4503 * if it can't schedule.
4504 */
4505 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
4506 alloc_flags |= ALLOC_NON_BLOCK;
4507
4508 if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE))
4509 alloc_flags |= ALLOC_HIGHATOMIC;
4510 }
4511
4512 /*
4513 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
4514 * GFP_ATOMIC) rather than fail, see the comment for
4515 * cpuset_current_node_allowed().
4516 */
4517 if (alloc_flags & ALLOC_MIN_RESERVE)
4518 alloc_flags &= ~ALLOC_CPUSET;
4519 } else if (unlikely(rt_or_dl_task(current)) && in_task())
4520 alloc_flags |= ALLOC_MIN_RESERVE;
4521
4522 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4523
4524 if (defrag_mode)
4525 alloc_flags |= ALLOC_NOFRAGMENT;
4526
4527 return alloc_flags;
4528 }
4529
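/*
 * Illustrative mappings derived from the rules above: GFP_KERNEL yields
 * ALLOC_WMARK_MIN | ALLOC_CPUSET | ALLOC_KSWAPD, while an order-2
 * GFP_ATOMIC request yields ALLOC_WMARK_MIN | ALLOC_KSWAPD |
 * ALLOC_MIN_RESERVE | ALLOC_NON_BLOCK | ALLOC_HIGHATOMIC with
 * ALLOC_CPUSET cleared rather than failing on cpuset restrictions.
 */
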
4530 static bool oom_reserves_allowed(struct task_struct *tsk)
4531 {
4532 if (!tsk_is_oom_victim(tsk))
4533 return false;
4534
4535 /*
4536 * !MMU configurations don't have an oom reaper, so give access to
4537 * memory reserves only to the thread with TIF_MEMDIE set.
4538 */
4539 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4540 return false;
4541
4542 return true;
4543 }
4544
4545 /*
4546 * Distinguish requests which really need access to full memory
4547 * reserves from oom victims which can live with a portion of it
4548 */
4549 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4550 {
4551 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4552 return 0;
4553 if (gfp_mask & __GFP_MEMALLOC)
4554 return ALLOC_NO_WATERMARKS;
4555 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4556 return ALLOC_NO_WATERMARKS;
4557 if (!in_interrupt()) {
4558 if (current->flags & PF_MEMALLOC)
4559 return ALLOC_NO_WATERMARKS;
4560 else if (oom_reserves_allowed(current))
4561 return ALLOC_OOM;
4562 }
4563
4564 return 0;
4565 }
4566
4567 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4568 {
4569 return !!__gfp_pfmemalloc_flags(gfp_mask);
4570 }
4571
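/*
 * Examples of the priority ladder above (process context assumed):
 * a __GFP_MEMALLOC request or a PF_MEMALLOC task gets
 * ALLOC_NO_WATERMARKS, an OOM victim gets the smaller ALLOC_OOM
 * reserve, and __GFP_NOMEMALLOC vetoes both regardless of task state.
 */
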
4572 /*
4573 * Checks whether it makes sense to retry the reclaim to make a forward progress
4574 * for the given allocation request.
4575 *
4576 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4577 * without success, or when we couldn't even meet the watermark if we
4578 * reclaimed all remaining pages on the LRU lists.
4579 *
4580 * Returns true if a retry is viable or false to enter the oom path.
4581 */
4582 static inline bool
4583 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4584 struct alloc_context *ac, int alloc_flags,
4585 bool did_some_progress, int *no_progress_loops)
4586 {
4587 struct zone *zone;
4588 struct zoneref *z;
4589 bool ret = false;
4590
4591 /*
4592 * Costly allocations might have made progress, but this doesn't mean
4593 * their order will become available due to high fragmentation, so
4594 * always increment the no-progress counter for them.
4595 */
4596 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4597 *no_progress_loops = 0;
4598 else
4599 (*no_progress_loops)++;
4600
4601 if (*no_progress_loops > MAX_RECLAIM_RETRIES)
4602 goto out;
4603
4604
4605 /*
4606 * Keep reclaiming pages while there is a chance this will lead
4607 * somewhere. If none of the target zones can satisfy our allocation
4608 * request even if all reclaimable pages are considered then we are
4609 * screwed and have to go OOM.
4610 */
4611 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4612 ac->highest_zoneidx, ac->nodemask) {
4613 unsigned long available;
4614 unsigned long reclaimable;
4615 unsigned long min_wmark = min_wmark_pages(zone);
4616 bool wmark;
4617
4618 if (cpusets_enabled() &&
4619 (alloc_flags & ALLOC_CPUSET) &&
4620 !__cpuset_zone_allowed(zone, gfp_mask))
4621 continue;
4622
4623 available = reclaimable = zone_reclaimable_pages(zone);
4624 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4625
4626 /*
4627 * Would the allocation succeed if we reclaimed all
4628 * reclaimable pages?
4629 */
4630 wmark = __zone_watermark_ok(zone, order, min_wmark,
4631 ac->highest_zoneidx, alloc_flags, available);
4632 trace_reclaim_retry_zone(z, order, reclaimable,
4633 available, min_wmark, *no_progress_loops, wmark);
4634 if (wmark) {
4635 ret = true;
4636 break;
4637 }
4638 }
4639
4640 /*
4641 * Memory allocation/reclaim might be called from a WQ context and the
4642 * current implementation of the WQ concurrency control doesn't
4643 * recognize that a particular WQ is congested if the worker thread is
4644 * looping without ever sleeping. Therefore we have to do a short sleep
4645 * here rather than calling cond_resched().
4646 */
4647 if (current->flags & PF_WQ_WORKER)
4648 schedule_timeout_uninterruptible(1);
4649 else
4650 cond_resched();
4651 out:
4652 /* Before OOM, exhaust highatomic_reserve */
4653 if (!ret)
4654 return unreserve_highatomic_pageblock(ac, true);
4655
4656 return ret;
4657 }
4658
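/*
 * Example of the bound above: MAX_RECLAIM_RETRIES is 16, so a
 * non-costly allocation that keeps making progress resets the counter
 * each round, while 16 consecutive no-progress rounds (or a negative
 * watermark verdict for every eligible zone) send the caller towards
 * the OOM path after one last attempt to drain the highatomic reserves.
 */
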
4659 static inline bool
4660 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4661 {
4662 /*
4663 * It's possible that cpuset's mems_allowed and the nodemask from
4664 * mempolicy don't intersect. This should be normally dealt with by
4665 * policy_nodemask(), but it's possible to race with cpuset update in
4666 * such a way the check therein was true, and then it became false
4667 * before we got our cpuset_mems_cookie here.
4668 * This assumes that for all allocations, ac->nodemask can come only
4669 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4670 * when it does not intersect with the cpuset restrictions) or the
4671 * caller can deal with a violated nodemask.
4672 */
4673 if (cpusets_enabled() && ac->nodemask &&
4674 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4675 ac->nodemask = NULL;
4676 return true;
4677 }
4678
4679 /*
4680 * When updating a task's mems_allowed or mempolicy nodemask, it is
4681 * possible to race with parallel threads in such a way that our
4682 * allocation can fail while the mask is being updated. If we are about
4683 * to fail, check if the cpuset changed during allocation and if so,
4684 * retry.
4685 */
4686 if (read_mems_allowed_retry(cpuset_mems_cookie))
4687 return true;
4688
4689 return false;
4690 }
4691
4692 static inline struct page *
4693 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4694 struct alloc_context *ac)
4695 {
4696 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4697 bool can_compact = gfp_compaction_allowed(gfp_mask);
4698 bool nofail = gfp_mask & __GFP_NOFAIL;
4699 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4700 struct page *page = NULL;
4701 unsigned int alloc_flags;
4702 unsigned long did_some_progress;
4703 enum compact_priority compact_priority;
4704 enum compact_result compact_result;
4705 int compaction_retries;
4706 int no_progress_loops;
4707 unsigned int cpuset_mems_cookie;
4708 unsigned int zonelist_iter_cookie;
4709 int reserve_flags;
4710
4711 if (unlikely(nofail)) {
4712 /*
4713 * We don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM;
4714 * otherwise we may end up in a lockup.
4715 */
4716 WARN_ON_ONCE(!can_direct_reclaim);
4717 /*
4718 * A PF_MEMALLOC request from this context is rather bizarre
4719 * because we cannot reclaim anything and can only loop waiting
4720 * for somebody else to do the work for us.
4721 */
4722 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4723 }
4724
4725 restart:
4726 compaction_retries = 0;
4727 no_progress_loops = 0;
4728 compact_result = COMPACT_SKIPPED;
4729 compact_priority = DEF_COMPACT_PRIORITY;
4730 cpuset_mems_cookie = read_mems_allowed_begin();
4731 zonelist_iter_cookie = zonelist_iter_begin();
4732
4733 /*
4734 * The fast path uses conservative alloc_flags to succeed only until
4735 * kswapd needs to be woken up, and to avoid the cost of setting up
4736 * alloc_flags precisely. So we do that now.
4737 */
4738 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
4739
4740 /*
4741 * We need to recalculate the starting point for the zonelist iterator
4742 * because we might have used different nodemask in the fast path, or
4743 * there was a cpuset modification and we are retrying - otherwise we
4744 * could end up iterating over non-eligible zones endlessly.
4745 */
4746 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4747 ac->highest_zoneidx, ac->nodemask);
4748 if (!zonelist_zone(ac->preferred_zoneref))
4749 goto nopage;
4750
4751 /*
4752 * Check for insane configurations where the cpuset doesn't contain
4753 * any suitable zone to satisfy the request - e.g. non-movable
4754 * GFP_HIGHUSER allocations from MOVABLE nodes only.
4755 */
4756 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
4757 struct zoneref *z = first_zones_zonelist(ac->zonelist,
4758 ac->highest_zoneidx,
4759 &cpuset_current_mems_allowed);
4760 if (!zonelist_zone(z))
4761 goto nopage;
4762 }
4763
4764 if (alloc_flags & ALLOC_KSWAPD)
4765 wake_all_kswapds(order, gfp_mask, ac);
4766
4767 /*
4768 * The adjusted alloc_flags might result in immediate success, so try
4769 * that first
4770 */
4771 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4772 if (page)
4773 goto got_pg;
4774
4775 /*
4776 * For costly allocations, try direct compaction first, as it's likely
4777 * that we have enough base pages and don't need to reclaim. For non-
4778 * movable high-order allocations, do that as well, as compaction will
4779 * try to prevent permanent fragmentation by migrating from blocks of the
4780 * same migratetype.
4781 * Don't try this for allocations that are allowed to ignore
4782 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4783 */
4784 if (can_direct_reclaim && can_compact &&
4785 (costly_order ||
4786 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4787 && !gfp_pfmemalloc_allowed(gfp_mask)) {
4788 page = __alloc_pages_direct_compact(gfp_mask, order,
4789 alloc_flags, ac,
4790 INIT_COMPACT_PRIORITY,
4791 &compact_result);
4792 if (page)
4793 goto got_pg;
4794
4795 /*
4796 * Checks for costly allocations with __GFP_NORETRY, which
4797 * includes some THP page fault allocations
4798 */
4799 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4800 /*
4801 * If allocating entire pageblock(s) and compaction
4802 * failed because all zones are below their low watermarks,
4803 * or was prohibited because it recently failed at this
4804 * order, fail immediately unless the allocator has
4805 * requested compaction and reclaim retry.
4806 *
4807 * Reclaim is
4808 * - potentially very expensive because zones are far
4809 * below their low watermarks or this is part of very
4810 * bursty high order allocations,
4811 * - not guaranteed to help because isolate_freepages()
4812 * may not iterate over freed pages as part of its
4813 * linear scan, and
4814 * - unlikely to make entire pageblocks free on its
4815 * own.
4816 */
4817 if (compact_result == COMPACT_SKIPPED ||
4818 compact_result == COMPACT_DEFERRED)
4819 goto nopage;
4820
4821 /*
4822 * Looks like reclaim/compaction is worth trying, but
4823 * sync compaction could be very expensive, so keep
4824 * using async compaction.
4825 */
4826 compact_priority = INIT_COMPACT_PRIORITY;
4827 }
4828 }
4829
4830 retry:
4831 /*
4832 * Deal with possible cpuset update races or zonelist updates to avoid
4833 * infinite retries.
4834 */
4835 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4836 check_retry_zonelist(zonelist_iter_cookie))
4837 goto restart;
4838
4839 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4840 if (alloc_flags & ALLOC_KSWAPD)
4841 wake_all_kswapds(order, gfp_mask, ac);
4842
4843 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4844 if (reserve_flags)
4845 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4846 (alloc_flags & ALLOC_KSWAPD);
4847
4848 /*
4849 * Reset the nodemask and zonelist iterators if memory policies can be
4850 * ignored. These allocations are high priority and system rather than
4851 * user oriented.
4852 */
4853 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4854 ac->nodemask = NULL;
4855 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4856 ac->highest_zoneidx, ac->nodemask);
4857 }
4858
4859 /* Attempt with potentially adjusted zonelist and alloc_flags */
4860 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4861 if (page)
4862 goto got_pg;
4863
4864 /* Caller is not willing to reclaim, we can't balance anything */
4865 if (!can_direct_reclaim)
4866 goto nopage;
4867
4868 /* Avoid recursion of direct reclaim */
4869 if (current->flags & PF_MEMALLOC)
4870 goto nopage;
4871
4872 /* Try direct reclaim and then allocating */
4873 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4874 &did_some_progress);
4875 if (page)
4876 goto got_pg;
4877
4878 /* Try direct compaction and then allocating */
4879 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4880 compact_priority, &compact_result);
4881 if (page)
4882 goto got_pg;
4883
4884 /* Do not loop if specifically requested */
4885 if (gfp_mask & __GFP_NORETRY)
4886 goto nopage;
4887
4888 /*
4889 * Do not retry costly high order allocations unless they are
4890 * __GFP_RETRY_MAYFAIL and we can compact
4891 */
4892 if (costly_order && (!can_compact ||
4893 !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4894 goto nopage;
4895
4896 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4897 did_some_progress > 0, &no_progress_loops))
4898 goto retry;
4899
4900 /*
4901 * It doesn't make any sense to retry compaction if order-0
4902 * reclaim is not able to make any progress, because the current
4903 * implementation of compaction depends on a sufficient amount
4904 * of free memory (see __compaction_suitable).
4905 */
4906 if (did_some_progress > 0 && can_compact &&
4907 should_compact_retry(ac, order, alloc_flags,
4908 compact_result, &compact_priority,
4909 &compaction_retries))
4910 goto retry;
4911
4912 /* Reclaim/compaction failed to prevent the fallback */
4913 if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) {
4914 alloc_flags &= ~ALLOC_NOFRAGMENT;
4915 goto retry;
4916 }
4917
4918 /*
4919 * Deal with possible cpuset update races or zonelist updates to avoid
4920 * an unnecessary OOM kill.
4921 */
4922 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4923 check_retry_zonelist(zonelist_iter_cookie))
4924 goto restart;
4925
4926 /* Reclaim has failed us, start killing things */
4927 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4928 if (page)
4929 goto got_pg;
4930
4931 /* Avoid allocations with no watermarks from looping endlessly */
4932 if (tsk_is_oom_victim(current) &&
4933 (alloc_flags & ALLOC_OOM ||
4934 (gfp_mask & __GFP_NOMEMALLOC)))
4935 goto nopage;
4936
4937 /* Retry as long as the OOM killer is making progress */
4938 if (did_some_progress) {
4939 no_progress_loops = 0;
4940 goto retry;
4941 }
4942
4943 nopage:
4944 /*
4945 * Deal with possible cpuset update races or zonelist updates to avoid
4946 * an unnecessary OOM kill.
4947 */
4948 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4949 check_retry_zonelist(zonelist_iter_cookie))
4950 goto restart;
4951
4952 /*
4953 * Make sure that a __GFP_NOFAIL request doesn't leak out and
4954 * that we always retry.
4955 */
4956 if (unlikely(nofail)) {
4957 /*
4958 * Without direct reclaim we can't do anything to reclaim memory,
4959 * so we disregard these unreasonable nofail requests and still
4960 * return NULL.
4961 */
4962 if (!can_direct_reclaim)
4963 goto fail;
4964
4965 /*
4966 * Help non-failing allocations by giving some access to memory
4967 * reserves normally used for high priority non-blocking
4968 * allocations but do not use ALLOC_NO_WATERMARKS because this
4969 * could deplete whole memory reserves which would just make
4970 * the situation worse.
4971 */
4972 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4973 if (page)
4974 goto got_pg;
4975
4976 cond_resched();
4977 goto retry;
4978 }
4979 fail:
4980 warn_alloc(gfp_mask, ac->nodemask,
4981 "page allocation failure: order:%u", order);
4982 got_pg:
4983 return page;
4984 }
4985
4986 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4987 int preferred_nid, nodemask_t *nodemask,
4988 struct alloc_context *ac, gfp_t *alloc_gfp,
4989 unsigned int *alloc_flags)
4990 {
4991 ac->highest_zoneidx = gfp_zone(gfp_mask);
4992 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4993 ac->nodemask = nodemask;
4994 ac->migratetype = gfp_migratetype(gfp_mask);
4995
4996 if (cpusets_enabled()) {
4997 *alloc_gfp |= __GFP_HARDWALL;
4998 /*
4999 * In interrupt context the cpuset of the current task is
5000 * irrelevant, which means that any node is acceptable.
5001 */
5002 if (in_task() && !ac->nodemask)
5003 ac->nodemask = &cpuset_current_mems_allowed;
5004 else
5005 *alloc_flags |= ALLOC_CPUSET;
5006 }
5007
5008 might_alloc(gfp_mask);
5009
5010 /*
5011 * Don't invoke should_fail logic, since it may call
5012 * get_random_u32() and printk(), which need to take spinlocks.
5013 */
5014 if (!(*alloc_flags & ALLOC_TRYLOCK) &&
5015 should_fail_alloc_page(gfp_mask, order))
5016 return false;
5017
5018 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5019
5020 /* Dirty zone balancing only done in the fast path */
5021 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5022
5023 /*
5024 * The preferred zone is used for statistics but crucially it is
5025 * also used as the starting point for the zonelist iterator. It
5026 * may get reset for allocations that ignore memory policies.
5027 */
5028 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5029 ac->highest_zoneidx, ac->nodemask);
5030
5031 return true;
5032 }
5033
5034 /**
5035 * __alloc_pages_bulk - Allocate a number of order-0 pages to an array
5036 * @gfp: GFP flags for the allocation
5037 * @preferred_nid: The preferred NUMA node ID to allocate from
5038 * @nodemask: Set of nodes to allocate from, may be NULL
5039 * @nr_pages: The number of pages desired in the array
5040 * @page_array: Array to store the pages
5041 *
5042 * This is a batched version of the page allocator that attempts to allocate
5043 * @nr_pages quickly. Pages are added to @page_array.
5044 *
5045 * Note that only the elements in @page_array that were cleared to %NULL on
5046 * entry are populated with newly allocated pages. @nr_pages is the maximum
5047 * number of pages that will be stored in the array.
5048 *
5049 * Returns the number of pages in @page_array, including ones already
5050 * allocated on entry. This can be less than the number requested in @nr_pages,
5051 * but all empty slots are filled from the beginning. I.e., if all slots in
5052 * @page_array were set to %NULL on entry, the slots from 0 to the return value
5053 * - 1 will be filled.
5054 */
5055 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
5056 nodemask_t *nodemask, int nr_pages,
5057 struct page **page_array)
5058 {
5059 struct page *page;
5060 unsigned long UP_flags;
5061 struct zone *zone;
5062 struct zoneref *z;
5063 struct per_cpu_pages *pcp;
5064 struct list_head *pcp_list;
5065 struct alloc_context ac;
5066 gfp_t alloc_gfp;
5067 unsigned int alloc_flags = ALLOC_WMARK_LOW;
5068 int nr_populated = 0, nr_account = 0;
5069
5070 /*
5071 * Skip populated array elements to determine if any pages need
5072 * to be allocated before disabling IRQs.
5073 */
5074 while (nr_populated < nr_pages && page_array[nr_populated])
5075 nr_populated++;
5076
5077 /* No pages requested? */
5078 if (unlikely(nr_pages <= 0))
5079 goto out;
5080
5081 /* Already populated array? */
5082 if (unlikely(nr_pages - nr_populated == 0))
5083 goto out;
5084
5085 /* Bulk allocator does not support memcg accounting. */
5086 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
5087 goto failed;
5088
5089 /* Use the single page allocator for one page. */
5090 if (nr_pages - nr_populated == 1)
5091 goto failed;
5092
5093 #ifdef CONFIG_PAGE_OWNER
5094 /*
5095 * PAGE_OWNER may recurse into the allocator to allocate space to
5096 * save the stack with pagesets.lock held. Releasing/reacquiring
5097 * removes much of the performance benefit of bulk allocation, so
5098 * force the caller to allocate one page at a time; that performs
5099 * about as well as adding the complexity to the bulk allocator.
5100 */
5101 if (static_branch_unlikely(&page_owner_inited))
5102 goto failed;
5103 #endif
5104
5105 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
5106 gfp &= gfp_allowed_mask;
5107 alloc_gfp = gfp;
5108 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5109 goto out;
5110 gfp = alloc_gfp;
5111
5112 /* Find an allowed local zone that meets the low watermark. */
5113 z = ac.preferred_zoneref;
5114 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
5115 unsigned long mark;
5116
5117 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5118 !__cpuset_zone_allowed(zone, gfp)) {
5119 continue;
5120 }
5121
5122 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
5123 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
5124 goto failed;
5125 }
5126
5127 cond_accept_memory(zone, 0, alloc_flags);
5128 retry_this_zone:
5129 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
5130 if (zone_watermark_fast(zone, 0, mark,
5131 zonelist_zone_idx(ac.preferred_zoneref),
5132 alloc_flags, gfp)) {
5133 break;
5134 }
5135
5136 if (cond_accept_memory(zone, 0, alloc_flags))
5137 goto retry_this_zone;
5138
5139 /* Try again if zone has deferred pages */
5140 if (deferred_pages_enabled()) {
5141 if (_deferred_grow_zone(zone, 0))
5142 goto retry_this_zone;
5143 }
5144 }
5145
5146 /*
5147 * If there are no allowed local zones that meet the watermarks then
5148 * try to allocate a single page and reclaim if necessary.
5149 */
5150 if (unlikely(!zone))
5151 goto failed;
5152
5153 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
5154 pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
5155 if (!pcp)
5156 goto failed;
5157
5158 /* Attempt the batch allocation */
5159 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
5160 while (nr_populated < nr_pages) {
5161
5162 /* Skip existing pages */
5163 if (page_array[nr_populated]) {
5164 nr_populated++;
5165 continue;
5166 }
5167
5168 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
5169 pcp, pcp_list);
5170 if (unlikely(!page)) {
5171 /* Try and allocate at least one page */
5172 if (!nr_account) {
5173 pcp_spin_unlock(pcp, UP_flags);
5174 goto failed;
5175 }
5176 break;
5177 }
5178 nr_account++;
5179
5180 prep_new_page(page, 0, gfp, 0);
5181 set_page_refcounted(page);
5182 page_array[nr_populated++] = page;
5183 }
5184
5185 pcp_spin_unlock(pcp, UP_flags);
5186
5187 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5188 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
5189
5190 out:
5191 return nr_populated;
5192
5193 failed:
5194 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
5195 if (page)
5196 page_array[nr_populated++] = page;
5197 goto out;
5198 }
5199 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
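
/*
 * Usage sketch (hypothetical caller, not taken from this file), assuming
 * the alloc_pages_bulk() wrapper that feeds alloc_pages_bulk_noprof()
 * with a local preferred node and a NULL nodemask:
 *
 *	struct page *pages[8] = { NULL };
 *	unsigned long i, filled;
 *
 *	filled = alloc_pages_bulk(GFP_KERNEL, ARRAY_SIZE(pages), pages);
 *	for (i = 0; i < filled; i++)
 *		consume_page(pages[i]);		(hypothetical consumer)
 *
 * On partial success, slots 0..filled-1 hold pages; the caller can retry
 * for the remainder or proceed with what it got.
 */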
5200
5201 /*
5202 * This is the 'heart' of the zoned buddy allocator.
5203 */
5204 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
5205 int preferred_nid, nodemask_t *nodemask)
5206 {
5207 struct page *page;
5208 unsigned int alloc_flags = ALLOC_WMARK_LOW;
5209 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
5210 struct alloc_context ac = { };
5211
5212 /*
5213 * There are several places where we assume that the order value is sane
5214 * so bail out early if the request is out of bounds.
5215 */
5216 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
5217 return NULL;
5218
5219 gfp &= gfp_allowed_mask;
5220 /*
5221 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5222 * resp. GFP_NOIO which has to be inherited for all allocation requests
5223 * from a particular context which has been marked by
5224 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
5225 * movable zones are not used during allocation.
5226 */
5227 gfp = current_gfp_context(gfp);
5228 alloc_gfp = gfp;
5229 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5230 &alloc_gfp, &alloc_flags))
5231 return NULL;
5232
5233 /*
5234 * Forbid the first pass from falling back to types that fragment
5235 * memory until all local zones are considered.
5236 */
5237 alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
5238
5239 /* First allocation attempt */
5240 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5241 if (likely(page))
5242 goto out;
5243
5244 alloc_gfp = gfp;
5245 ac.spread_dirty_pages = false;
5246
5247 /*
5248 * Restore the original nodemask if it was potentially replaced with
5249 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5250 */
5251 ac.nodemask = nodemask;
5252
5253 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5254
5255 out:
5256 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
5257 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5258 free_frozen_pages(page, order);
5259 page = NULL;
5260 }
5261
5262 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5263 kmsan_alloc_page(page, order, alloc_gfp);
5264
5265 return page;
5266 }
5267 EXPORT_SYMBOL(__alloc_frozen_pages_noprof);
5268
5269 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
5270 int preferred_nid, nodemask_t *nodemask)
5271 {
5272 struct page *page;
5273
5274 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask);
5275 if (page)
5276 set_page_refcounted(page);
5277 return page;
5278 }
5279 EXPORT_SYMBOL(__alloc_pages_noprof);
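
/*
 * Usage sketch (illustrative only): the common single-page pattern built
 * on top of this via the alloc_pages() wrapper:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 *	if (page) {
 *		void *va = page_address(page);	(lowmem pages only)
 *		memset(va, 0, PAGE_SIZE);
 *		__free_pages(page, 0);
 *	}
 */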
5280
5281 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
5282 nodemask_t *nodemask)
5283 {
5284 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order,
5285 preferred_nid, nodemask);
5286 return page_rmappable_folio(page);
5287 }
5288 EXPORT_SYMBOL(__folio_alloc_noprof);
5289
5290 /*
5291 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5292 * address cannot represent highmem pages. Use alloc_pages and then kmap if
5293 * you need to access high mem.
5294 */
5295 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order)
5296 {
5297 struct page *page;
5298
5299 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order);
5300 if (!page)
5301 return 0;
5302 return (unsigned long) page_address(page);
5303 }
5304 EXPORT_SYMBOL(get_free_pages_noprof);
5305
5306 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask)
5307 {
5308 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0);
5309 }
5310 EXPORT_SYMBOL(get_zeroed_page_noprof);
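
/*
 * Usage sketch (illustrative only) for the address-based helpers above:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 1);	(2 pages)
 *	unsigned long zpg = get_zeroed_page(GFP_KERNEL);
 *
 *	if (buf)
 *		free_pages(buf, 1);
 *	if (zpg)
 *		free_page(zpg);
 *
 * Note that these pair with free_pages()/free_page(), which take a
 * virtual address, not with the struct page based __free_pages().
 */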
5311
5312 static void ___free_pages(struct page *page, unsigned int order,
5313 fpi_t fpi_flags)
5314 {
5315 /* get PageHead before we drop reference */
5316 int head = PageHead(page);
5317 /* get alloc tag in case the page is released by others */
5318 struct alloc_tag *tag = pgalloc_tag_get(page);
5319
5320 if (put_page_testzero(page))
5321 __free_frozen_pages(page, order, fpi_flags);
5322 else if (!head) {
5323 pgalloc_tag_sub_pages(tag, (1 << order) - 1);
5324 while (order-- > 0) {
5325 /*
5326 * The "tail" pages of this non-compound high-order
5327 * page will have no code tags, so to avoid warnings
5328 * mark them as empty.
5329 */
5330 clear_page_tag_ref(page + (1 << order));
5331 __free_frozen_pages(page + (1 << order), order,
5332 fpi_flags);
5333 }
5334 }
5335 }
5336
5337 /**
5338 * __free_pages - Free pages allocated with alloc_pages().
5339 * @page: The page pointer returned from alloc_pages().
5340 * @order: The order of the allocation.
5341 *
5342 * This function can free multi-page allocations that are not compound
5343 * pages. It does not check that the @order passed in matches that of
5344 * the allocation, so it is easy to leak memory. Freeing more memory
5345 * than was allocated will probably emit a warning.
5346 *
5347 * If the last reference to this page is speculative, it will be released
5348 * by put_page() which only frees the first page of a non-compound
5349 * allocation. To prevent the remaining pages from being leaked, we free
5350 * the subsequent pages here. If you want to use the page's reference
5351 * count to decide when to free the allocation, you should allocate a
5352 * compound page, and use put_page() instead of __free_pages().
5353 *
5354 * Context: May be called in interrupt context or while holding a normal
5355 * spinlock, but not in NMI context or while holding a raw spinlock.
5356 */
5357 void __free_pages(struct page *page, unsigned int order)
5358 {
5359 ___free_pages(page, order, FPI_NONE);
5360 }
5361 EXPORT_SYMBOL(__free_pages);
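
/*
 * Illustration of the distinction documented above (hypothetical
 * callers): a non-compound high-order allocation must be freed with
 * __free_pages(), while a refcount-driven lifetime wants a compound
 * page and put_page():
 *
 *	page = alloc_pages(GFP_KERNEL, 2);		(4 loose pages)
 *	__free_pages(page, 2);
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);	(compound page)
 *	put_page(page);					(frees all 4)
 */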
5362
5363 /*
5364 * Can be called while holding a raw_spin_lock or from IRQ and NMI context,
5365 * for any page type (not only those that came from alloc_pages_nolock)
5366 */
5367 void free_pages_nolock(struct page *page, unsigned int order)
5368 {
5369 ___free_pages(page, order, FPI_TRYLOCK);
5370 }
5371
5372 /**
5373 * free_pages - Free pages allocated with __get_free_pages().
5374 * @addr: The virtual address tied to a page returned from __get_free_pages().
5375 * @order: The order of the allocation.
5376 *
5377 * This function behaves the same as __free_pages(). Use this function
5378 * to free pages when you only have a valid virtual address. If you have
5379 * the page, call __free_pages() instead.
5380 */
5381 void free_pages(unsigned long addr, unsigned int order)
5382 {
5383 if (addr != 0) {
5384 VM_BUG_ON(!virt_addr_valid((void *)addr));
5385 __free_pages(virt_to_page((void *)addr), order);
5386 }
5387 }
5388
5389 EXPORT_SYMBOL(free_pages);
5390
5391 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5392 size_t size)
5393 {
5394 if (addr) {
5395 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
5396 struct page *page = virt_to_page((void *)addr);
5397 struct page *last = page + nr;
5398
5399 split_page_owner(page, order, 0);
5400 pgalloc_tag_split(page_folio(page), order, 0);
5401 split_page_memcg(page, order);
5402 while (page < --last)
5403 set_page_refcounted(last);
5404
5405 last = page + (1UL << order);
5406 for (page += nr; page < last; page++)
5407 __free_pages_ok(page, 0, FPI_TO_TAIL);
5408 }
5409 return (void *)addr;
5410 }
5411
5412 /**
5413 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5414 * @size: the number of bytes to allocate
5415 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5416 *
5417 * This function is similar to alloc_pages(), except that it allocates the
5418 * minimum number of pages to satisfy the request. alloc_pages() can only
5419 * allocate memory in power-of-two pages.
5420 *
5421 * This function is also limited by MAX_PAGE_ORDER.
5422 *
5423 * Memory allocated by this function must be released by free_pages_exact().
5424 *
5425 * Return: pointer to the allocated area or %NULL in case of error.
5426 */
5427 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask)
5428 {
5429 unsigned int order = get_order(size);
5430 unsigned long addr;
5431
5432 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5433 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5434
5435 addr = get_free_pages_noprof(gfp_mask, order);
5436 return make_alloc_exact(addr, order, size);
5437 }
5438 EXPORT_SYMBOL(alloc_pages_exact_noprof);
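
/*
 * Usage sketch (sizes illustrative, 4KiB pages): a 48KiB request rounds
 * up to an order-4 (64KiB) allocation internally, but make_alloc_exact()
 * gives back the four trailing pages, so only 12 pages stay allocated:
 *
 *	void *buf = alloc_pages_exact(48 * 1024, GFP_KERNEL);
 *
 *	if (buf)
 *		free_pages_exact(buf, 48 * 1024);
 */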
5439
5440 /**
5441 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5442 * pages on a node.
5443 * @nid: the preferred node ID where memory should be allocated
5444 * @size: the number of bytes to allocate
5445 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5446 *
5447 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5448 * back.
5449 *
5450 * Return: pointer to the allocated area or %NULL in case of error.
5451 */
5452 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask)
5453 {
5454 unsigned int order = get_order(size);
5455 struct page *p;
5456
5457 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5458 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5459
5460 p = alloc_pages_node_noprof(nid, gfp_mask, order);
5461 if (!p)
5462 return NULL;
5463 return make_alloc_exact((unsigned long)page_address(p), order, size);
5464 }
5465
5466 /**
5467 * free_pages_exact - release memory allocated via alloc_pages_exact()
5468 * @virt: the value returned by alloc_pages_exact.
5469 * @size: size of allocation, same value as passed to alloc_pages_exact().
5470 *
5471 * Release the memory allocated by a previous call to alloc_pages_exact.
5472 */
5473 void free_pages_exact(void *virt, size_t size)
5474 {
5475 unsigned long addr = (unsigned long)virt;
5476 unsigned long end = addr + PAGE_ALIGN(size);
5477
5478 while (addr < end) {
5479 free_page(addr);
5480 addr += PAGE_SIZE;
5481 }
5482 }
5483 EXPORT_SYMBOL(free_pages_exact);
5484
5485 /**
5486 * nr_free_zone_pages - count number of pages beyond high watermark
5487 * @offset: The zone index of the highest zone
5488 *
5489 * nr_free_zone_pages() counts the number of pages which are beyond the
5490 * high watermark within all zones at or below a given zone index. For each
5491 * zone, the number of pages is calculated as:
5492 *
5493 * nr_free_zone_pages = managed_pages - high_pages
5494 *
5495 * Return: number of pages beyond high watermark.
5496 */
5497 static unsigned long nr_free_zone_pages(int offset)
5498 {
5499 struct zoneref *z;
5500 struct zone *zone;
5501
5502 /* Just pick one node, since fallback list is circular */
5503 unsigned long sum = 0;
5504
5505 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5506
5507 for_each_zone_zonelist(zone, z, zonelist, offset) {
5508 unsigned long size = zone_managed_pages(zone);
5509 unsigned long high = high_wmark_pages(zone);
5510 if (size > high)
5511 sum += size - high;
5512 }
5513
5514 return sum;
5515 }
5516
5517 /**
5518 * nr_free_buffer_pages - count number of pages beyond high watermark
5519 *
5520 * nr_free_buffer_pages() counts the number of pages which are beyond the high
5521 * watermark within ZONE_DMA and ZONE_NORMAL.
5522 *
5523 * Return: number of pages beyond high watermark within ZONE_DMA and
5524 * ZONE_NORMAL.
5525 */
5526 unsigned long nr_free_buffer_pages(void)
5527 {
5528 return nr_free_zone_pages(gfp_zone(GFP_USER));
5529 }
5530 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
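
/*
 * Worked example (numbers illustrative): a zone with 1,000,000 managed
 * pages and a high watermark of 20,000 pages contributes
 * 1,000,000 - 20,000 = 980,000 pages to the sum; a zone at or below its
 * high watermark contributes nothing.
 */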
5531
5532 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5533 {
5534 zoneref->zone = zone;
5535 zoneref->zone_idx = zone_idx(zone);
5536 }
5537
5538 /*
5539 * Builds allocation fallback zone lists.
5540 *
5541 * Add all populated zones of a node to the zonelist.
5542 */
5543 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
5544 {
5545 struct zone *zone;
5546 enum zone_type zone_type = MAX_NR_ZONES;
5547 int nr_zones = 0;
5548
5549 do {
5550 zone_type--;
5551 zone = pgdat->node_zones + zone_type;
5552 if (populated_zone(zone)) {
5553 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
5554 check_highest_zone(zone_type);
5555 }
5556 } while (zone_type);
5557
5558 return nr_zones;
5559 }
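
/*
 * Example (hypothetical node layout): because the loop above walks
 * zone_type downwards, a node with populated ZONE_NORMAL and ZONE_DMA32
 * yields zonerefs ordered { NORMAL, DMA32 }, so the zonelist iterator
 * prefers the highest suitable zone and falls back toward ZONE_DMA.
 */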
5560
5561 #ifdef CONFIG_NUMA
5562
5563 static int __parse_numa_zonelist_order(char *s)
5564 {
5565 /*
5566 * We used to support different zonelist modes but they turned
5567 * out to be just not useful. Let's keep the warning in place
5568 * if somebody still uses the cmd line parameter so that we do
5569 * not fail it silently
5570 */
5571 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5572 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
5573 return -EINVAL;
5574 }
5575 return 0;
5576 }
5577
5578 static char numa_zonelist_order[] = "Node";
5579 #define NUMA_ZONELIST_ORDER_LEN 16
5580 /*
5581 * sysctl handler for numa_zonelist_order
5582 */
5583 static int numa_zonelist_order_handler(const struct ctl_table *table, int write,
5584 void *buffer, size_t *length, loff_t *ppos)
5585 {
5586 if (write)
5587 return __parse_numa_zonelist_order(buffer);
5588 return proc_dostring(table, write, buffer, length, ppos);
5589 }
5590
5591 static int node_load[MAX_NUMNODES];
5592
5593 /**
5594 * find_next_best_node - find the next node that should appear in a given node's fallback list
5595 * @node: node whose fallback list we're appending
5596 * @used_node_mask: nodemask_t of already used nodes
5597 *
5598 * We use a number of factors to determine which is the next node that should
5599 * appear on a given node's fallback list. The node should not have appeared
5600 * already in @node's fallback list, and it should be the next closest node
5601 * according to the distance array (which contains arbitrary distance values
5602 * from each node to each node in the system), and should also prefer nodes
5603 * with no CPUs, since presumably they'll have very little allocation pressure
5604 * on them otherwise.
5605 *
5606 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5607 */
5608 int find_next_best_node(int node, nodemask_t *used_node_mask)
5609 {
5610 int n, val;
5611 int min_val = INT_MAX;
5612 int best_node = NUMA_NO_NODE;
5613
5614 /*
5615 * Use the local node if we haven't already, but for memoryless local
5616 * node, we should skip it and fall back to other nodes.
5617 */
5618 if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) {
5619 node_set(node, *used_node_mask);
5620 return node;
5621 }
5622
5623 for_each_node_state(n, N_MEMORY) {
5624
5625 /* Don't want a node to appear more than once */
5626 if (node_isset(n, *used_node_mask))
5627 continue;
5628
5629 /* Use the distance array to find the distance */
5630 val = node_distance(node, n);
5631
5632 /* Penalize nodes under us ("prefer the next node") */
5633 val += (n < node);
5634
5635 /* Give preference to headless and unused nodes */
5636 if (!cpumask_empty(cpumask_of_node(n)))
5637 val += PENALTY_FOR_NODE_WITH_CPUS;
5638
5639 /* Slight preference for less loaded node */
5640 val *= MAX_NUMNODES;
5641 val += node_load[n];
5642
5643 if (val < min_val) {
5644 min_val = val;
5645 best_node = n;
5646 }
5647 }
5648
5649 if (best_node >= 0)
5650 node_set(best_node, *used_node_mask);
5651
5652 return best_node;
5653 }
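
/*
 * Worked example (distances illustrative): node 0 chooses between two
 * equally distant candidates, node 1 (has CPUs) and node 2 (memory
 * only), with node_load[] all zero and assuming the default
 * PENALTY_FOR_NODE_WITH_CPUS of 1:
 *
 *	node 1: val = (20 + 0 + 1) * MAX_NUMNODES + 0
 *	node 2: val = (20 + 0 + 0) * MAX_NUMNODES + 0
 *
 * Node 2 scores lower and is picked first, implementing the stated
 * preference for headless nodes.
 */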
5654
5655
5656 /*
5657 * Build zonelists ordered by node and zones within node.
5658 * This results in maximum locality--normal zone overflows into local
5659 * DMA zone, if any--but risks exhausting DMA zone.
5660 */
5661 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5662 unsigned nr_nodes)
5663 {
5664 struct zoneref *zonerefs;
5665 int i;
5666
5667 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5668
5669 for (i = 0; i < nr_nodes; i++) {
5670 int nr_zones;
5671
5672 pg_data_t *node = NODE_DATA(node_order[i]);
5673
5674 nr_zones = build_zonerefs_node(node, zonerefs);
5675 zonerefs += nr_zones;
5676 }
5677 zonerefs->zone = NULL;
5678 zonerefs->zone_idx = 0;
5679 }
5680
5681 /*
5682 * Build __GFP_THISNODE zonelists
5683 */
5684 static void build_thisnode_zonelists(pg_data_t *pgdat)
5685 {
5686 struct zoneref *zonerefs;
5687 int nr_zones;
5688
5689 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5690 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5691 zonerefs += nr_zones;
5692 zonerefs->zone = NULL;
5693 zonerefs->zone_idx = 0;
5694 }
5695
5696 static void build_zonelists(pg_data_t *pgdat)
5697 {
5698 static int node_order[MAX_NUMNODES];
5699 int node, nr_nodes = 0;
5700 nodemask_t used_mask = NODE_MASK_NONE;
5701 int local_node, prev_node;
5702
5703 /* NUMA-aware ordering of nodes */
5704 local_node = pgdat->node_id;
5705 prev_node = local_node;
5706
5707 memset(node_order, 0, sizeof(node_order));
5708 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5709 /*
5710 * We don't want to pressure a particular node, so add a
5711 * penalty to the first node in the same distance group
5712 * to make it round-robin.
5713 */
5714 if (node_distance(local_node, node) !=
5715 node_distance(local_node, prev_node))
5716 node_load[node] += 1;
5717
5718 node_order[nr_nodes++] = node;
5719 prev_node = node;
5720 }
5721
5722 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5723 build_thisnode_zonelists(pgdat);
5724 pr_info("Fallback order for Node %d: ", local_node);
5725 for (node = 0; node < nr_nodes; node++)
5726 pr_cont("%d ", node_order[node]);
5727 pr_cont("\n");
5728 }
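
/*
 * Example output (hypothetical two-socket box where node 1 is closer to
 * node 0 than node 2 is):
 *
 *	Fallback order for Node 0: 0 1 2
 *	Fallback order for Node 2: 2 1 0
 */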
5729
5730 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5731 /*
5732 * Return node id of node used for "local" allocations.
5733 * I.e., first node id of first zone in arg node's generic zonelist.
5734 * Used for initializing percpu 'numa_mem', which is used primarily
5735 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5736 */
5737 int local_memory_node(int node)
5738 {
5739 struct zoneref *z;
5740
5741 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5742 gfp_zone(GFP_KERNEL),
5743 NULL);
5744 return zonelist_node_idx(z);
5745 }
5746 #endif
5747
5748 static void setup_min_unmapped_ratio(void);
5749 static void setup_min_slab_ratio(void);
5750 #else /* CONFIG_NUMA */
5751
5752 static void build_zonelists(pg_data_t *pgdat)
5753 {
5754 struct zoneref *zonerefs;
5755 int nr_zones;
5756
5757 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5758 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5759 zonerefs += nr_zones;
5760
5761 zonerefs->zone = NULL;
5762 zonerefs->zone_idx = 0;
5763 }
5764
5765 #endif /* CONFIG_NUMA */
5766
5767 /*
5768 * Boot pageset table. One per cpu which is going to be used for all
5769 * zones and all nodes. The parameters will be set in such a way
5770 * that an item put on a list will immediately be handed over to
5771 * the buddy list. This is safe since pageset manipulation is done
5772 * with interrupts disabled.
5773 *
5774 * The boot_pagesets must be kept even after bootup is complete for
5775 * unused processors and/or zones. They do play a role for bootstrapping
5776 * hotplugged processors.
5777 *
5778 * zoneinfo_show() and maybe other functions do
5779 * not check if the processor is online before following the pageset pointer.
5780 * Other parts of the kernel may not check if the zone is available.
5781 */
5782 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
5783 /* These effectively disable the pcplists in the boot pageset completely */
5784 #define BOOT_PAGESET_HIGH 0
5785 #define BOOT_PAGESET_BATCH 1
5786 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
5787 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
5788
5789 static void __build_all_zonelists(void *data)
5790 {
5791 int nid;
5792 int __maybe_unused cpu;
5793 pg_data_t *self = data;
5794 unsigned long flags;
5795
5796 /*
5797 * The zonelist_update_seq must be acquired with irqsave because the
5798 * reader can be invoked from IRQ with GFP_ATOMIC.
5799 */
5800 write_seqlock_irqsave(&zonelist_update_seq, flags);
5801 /*
5802 * Also disable synchronous printk() to prevent any printk() from
5803 * trying to hold port->lock, because
5804 * tty_insert_flip_string_and_push_buffer() on another CPU might be
5805 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
5806 */
5807 printk_deferred_enter();
5808
5809 #ifdef CONFIG_NUMA
5810 memset(node_load, 0, sizeof(node_load));
5811 #endif
5812
5813 /*
5814 * This node is hotadded and no memory is yet present. So just
5815 * building zonelists is fine - no need to touch other nodes.
5816 */
5817 if (self && !node_online(self->node_id)) {
5818 build_zonelists(self);
5819 } else {
5820 /*
5821 * All possible nodes have pgdat preallocated
5822 * in free_area_init
5823 */
5824 for_each_node(nid) {
5825 pg_data_t *pgdat = NODE_DATA(nid);
5826
5827 build_zonelists(pgdat);
5828 }
5829
5830 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5831 /*
5832 * We now know the "local memory node" for each node--
5833 * i.e., the node of the first zone in the generic zonelist.
5834 * Set up numa_mem percpu variable for on-line cpus. During
5835 * boot, only the boot cpu should be on-line; we'll init the
5836 * secondary cpus' numa_mem as they come on-line. During
5837 * node/memory hotplug, we'll fixup all on-line cpus.
5838 */
5839 for_each_online_cpu(cpu)
5840 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5841 #endif
5842 }
5843
5844 printk_deferred_exit();
5845 write_sequnlock_irqrestore(&zonelist_update_seq, flags);
5846 }
5847
5848 static noinline void __init
5849 build_all_zonelists_init(void)
5850 {
5851 int cpu;
5852
5853 __build_all_zonelists(NULL);
5854
5855 /*
5856 * Initialize the boot_pagesets that are going to be used
5857 * for bootstrapping processors. The real pagesets for
5858 * each zone will be allocated later when the per cpu
5859 * allocator is available.
5860 *
5861 * boot_pagesets are used also for bootstrapping offline
5862 * cpus if the system is already booted because the pagesets
5863 * are needed to initialize allocators on a specific cpu too.
5864 * F.e. the percpu allocator needs the page allocator which
5865 * needs the percpu allocator in order to allocate its pagesets
5866 * (a chicken-egg dilemma).
5867 */
5868 for_each_possible_cpu(cpu)
5869 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
5870
5871 mminit_verify_zonelist();
5872 cpuset_init_current_mems_allowed();
5873 }
5874
5875 /*
5876 * unless system_state == SYSTEM_BOOTING.
5877 *
5878 * __ref due to call of __init annotated helper build_all_zonelists_init
5879 * [protected by SYSTEM_BOOTING].
5880 */
5881 void __ref build_all_zonelists(pg_data_t *pgdat)
5882 {
5883 unsigned long vm_total_pages;
5884
5885 if (system_state == SYSTEM_BOOTING) {
5886 build_all_zonelists_init();
5887 } else {
5888 __build_all_zonelists(pgdat);
5889 /* cpuset refresh routine should be here */
5890 }
5891 /* Get the number of free pages beyond high watermark in all zones. */
5892 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
5893 /*
5894 * Disable grouping by mobility if the number of pages in the
5895 * system is too low to allow the mechanism to work. It would be
5896 * more accurate, but expensive to check per-zone. This check is
5897 * made on memory-hotadd so a system can start with mobility
5898 * disabled and enable it later
5899 */
5900 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5901 page_group_by_mobility_disabled = 1;
5902 else
5903 page_group_by_mobility_disabled = 0;
5904
5905 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
5906 nr_online_nodes,
5907 str_off_on(page_group_by_mobility_disabled),
5908 vm_total_pages);
5909 #ifdef CONFIG_NUMA
5910 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5911 #endif
5912 }
5913
5914 static int zone_batchsize(struct zone *zone)
5915 {
5916 #ifdef CONFIG_MMU
5917 int batch;
5918
5919 /*
5920 * The number of pages to batch allocate is either ~0.025%
5921 * of the zone or 256KB, whichever is smaller. The batch
5922 * size is striking a balance between allocation latency
5923 * and zone lock contention.
5924 */
5925 batch = min(zone_managed_pages(zone) >> 12, SZ_256K / PAGE_SIZE);
5926 if (batch <= 1)
5927 return 1;
5928
5929 /*
5930 * Clamp the batch to a 2^n - 1 value. Having a power
5931 * of 2 value was found to be more likely to have
5932 * suboptimal cache aliasing properties in some cases.
5933 *
5934 * For example if 2 tasks are alternately allocating
5935 * batches of pages, one task can end up with a lot
5936 * of pages of one half of the possible page colors
5937 * and the other with pages of the other colors.
5938 */
5939 batch = rounddown_pow_of_two(batch + batch/2) - 1;
5940
5941 return batch;
5942
5943 #else
5944 /* The deferral and batching of frees should be suppressed under NOMMU
5945 * conditions.
5946 *
5947 * The problem is that NOMMU needs to be able to allocate large chunks
5948 * of contiguous memory as there's no hardware page translation to
5949 * assemble apparent contiguous memory from discontiguous pages.
5950 *
5951 * Queueing large contiguous runs of pages for batching, however,
5952 * causes the pages to actually be freed in smaller chunks. As there
5953 * can be a significant delay between the individual batches being
5954 * recycled, this leads to the once large chunks of space being
5955 * fragmented and becoming unavailable for high-order allocations.
5956 */
5957 return 1;
5958 #endif
5959 }
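
/*
 * Worked example (4KiB pages): a zone managing 4GiB holds 1,048,576
 * pages, so
 *
 *	batch = min(1048576 >> 12, SZ_256K / 4096) = min(256, 64) = 64
 *	batch = rounddown_pow_of_two(64 + 64/2) - 1 = 64 - 1 = 63
 *
 * giving a per-CPU batch of 63 pages.
 */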
5960
5961 static int percpu_pagelist_high_fraction;
5962 static int zone_highsize(struct zone *zone, int batch, int cpu_online,
5963 int high_fraction)
5964 {
5965 #ifdef CONFIG_MMU
5966 int high;
5967 int nr_split_cpus;
5968 unsigned long total_pages;
5969
5970 if (!high_fraction) {
5971 /*
5972 * By default, the high value of the pcp is based on the zone
5973 * low watermark so that if they are full then background
5974 * reclaim will not be started prematurely.
5975 */
5976 total_pages = low_wmark_pages(zone);
5977 } else {
5978 /*
5979 * If percpu_pagelist_high_fraction is configured, the high
5980 * value is based on a fraction of the managed pages in the
5981 * zone.
5982 */
5983 total_pages = zone_managed_pages(zone) / high_fraction;
5984 }
5985
5986 /*
5987 * Split the high value across all online CPUs local to the zone. Note
5988 * that early in boot CPUs may not be online yet, and that during
5989 * CPU hotplug the cpumask is not yet updated when a CPU is being
5990 * onlined. For memory nodes that have no CPUs, split the high value
5991 * across all online CPUs to mitigate the risk that reclaim is triggered
5992 * prematurely due to pages stored on pcp lists.
5993 */
5994 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
5995 if (!nr_split_cpus)
5996 nr_split_cpus = num_online_cpus();
5997 high = total_pages / nr_split_cpus;
5998
5999 /*
6000 * Ensure high is at least batch*4. The multiple is based on the
6001 * historical relationship between high and batch.
6002 */
6003 high = max(high, batch << 2);
6004
6005 return high;
6006 #else
6007 return 0;
6008 #endif
6009 }
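
/*
 * Worked example (values illustrative): with auto-tuning (high_fraction
 * == 0), a zone whose low watermark is 12800 pages on a node with 16
 * online CPUs gets high = 12800 / 16 = 800 pages per CPU; with a batch
 * of 63, the batch << 2 = 252 floor does not kick in.
 */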
6010
6011 /*
6012 * pcp->high and pcp->batch values are related and generally batch is lower
6013 * than high. They are also related to pcp->count such that count is lower
6014 * than high, and as soon as it reaches high, the pcplist is flushed.
6015 *
6016 * However, guaranteeing these relations at all times would require e.g. write
6017 * barriers here but also careful usage of read barriers at the read side, and
6018 * thus be prone to error and bad for performance. Hence the update only prevents
6019 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max
6020 * should ensure they can cope with those fields changing asynchronously, and
6021 * fully trust only the pcp->count field on the local CPU with interrupts
6022 * disabled.
6023 *
6024 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
6025 * outside of boot time (or some other assurance that no concurrent updaters
6026 * exist).
6027 */
6028 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min,
6029 unsigned long high_max, unsigned long batch)
6030 {
6031 WRITE_ONCE(pcp->batch, batch);
6032 WRITE_ONCE(pcp->high_min, high_min);
6033 WRITE_ONCE(pcp->high_max, high_max);
6034 }
6035
6036 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
6037 {
6038 int pindex;
6039
6040 memset(pcp, 0, sizeof(*pcp));
6041 memset(pzstats, 0, sizeof(*pzstats));
6042
6043 spin_lock_init(&pcp->lock);
6044 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
6045 INIT_LIST_HEAD(&pcp->lists[pindex]);
6046
6047 /*
6048 * Set batch and high values safe for a boot pageset. A true percpu
6049 * pageset's initialization will update them subsequently. Here we don't
6050 * need to be as careful as pageset_update() as nobody can access the
6051 * pageset yet.
6052 */
6053 pcp->high_min = BOOT_PAGESET_HIGH;
6054 pcp->high_max = BOOT_PAGESET_HIGH;
6055 pcp->batch = BOOT_PAGESET_BATCH;
6056 }
6057
6058 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min,
6059 unsigned long high_max, unsigned long batch)
6060 {
6061 struct per_cpu_pages *pcp;
6062 int cpu;
6063
6064 for_each_possible_cpu(cpu) {
6065 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6066 pageset_update(pcp, high_min, high_max, batch);
6067 }
6068 }
6069
6070 /*
6071 * Calculate and set new high and batch values for all per-cpu pagesets of a
6072 * zone based on the zone's size.
6073 */
6074 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
6075 {
6076 int new_high_min, new_high_max, new_batch;
6077
6078 new_batch = zone_batchsize(zone);
6079 if (percpu_pagelist_high_fraction) {
6080 new_high_min = zone_highsize(zone, new_batch, cpu_online,
6081 percpu_pagelist_high_fraction);
6082 /*
6083 * PCP high is tuned manually, disable auto-tuning via
6084 * setting high_min and high_max to the manual value.
6085 */
6086 new_high_max = new_high_min;
6087 } else {
6088 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0);
6089 new_high_max = zone_highsize(zone, new_batch, cpu_online,
6090 MIN_PERCPU_PAGELIST_HIGH_FRACTION);
6091 }
6092
6093 if (zone->pageset_high_min == new_high_min &&
6094 zone->pageset_high_max == new_high_max &&
6095 zone->pageset_batch == new_batch)
6096 return;
6097
6098 zone->pageset_high_min = new_high_min;
6099 zone->pageset_high_max = new_high_max;
6100 zone->pageset_batch = new_batch;
6101
6102 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max,
6103 new_batch);
6104 }
6105
6106 void __meminit setup_zone_pageset(struct zone *zone)
6107 {
6108 int cpu;
6109
6110 /* Size may be 0 on !SMP && !NUMA */
6111 if (sizeof(struct per_cpu_zonestat) > 0)
6112 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
6113
6114 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
6115 for_each_possible_cpu(cpu) {
6116 struct per_cpu_pages *pcp;
6117 struct per_cpu_zonestat *pzstats;
6118
6119 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6120 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6121 per_cpu_pages_init(pcp, pzstats);
6122 }
6123
6124 zone_set_pageset_high_and_batch(zone, 0);
6125 }
6126
6127 /*
6128 * The zone indicated has a new number of managed_pages; batch sizes and percpu
6129 * page high values need to be recalculated.
6130 */
6131 static void zone_pcp_update(struct zone *zone, int cpu_online)
6132 {
6133 mutex_lock(&pcp_batch_high_lock);
6134 zone_set_pageset_high_and_batch(zone, cpu_online);
6135 mutex_unlock(&pcp_batch_high_lock);
6136 }
6137
6138 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
6139 {
6140 struct per_cpu_pages *pcp;
6141 struct cpu_cacheinfo *cci;
6142 unsigned long UP_flags;
6143
6144 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6145 cci = get_cpu_cacheinfo(cpu);
6146 /*
6147 * If the CPU's data cache slice is large enough, "pcp->batch"
6148 * pages can be preserved in the PCP before draining it for
6149 * consecutive high-order page freeing without allocation.
6150 * This can reduce zone lock contention without hurting
6151 * cache-hot page sharing.
6152 */
6153 pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
6154 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
6155 pcp->flags |= PCPF_FREE_HIGH_BATCH;
6156 else
6157 pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
6158 pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
6159 }
6160
6161 void setup_pcp_cacheinfo(unsigned int cpu)
6162 {
6163 struct zone *zone;
6164
6165 for_each_populated_zone(zone)
6166 zone_pcp_update_cacheinfo(zone, cpu);
6167 }
6168
6169 /*
6170 * Allocate per cpu pagesets and initialize them.
6171 * Before this call only boot pagesets were available.
6172 */
6173 void __init setup_per_cpu_pageset(void)
6174 {
6175 struct pglist_data *pgdat;
6176 struct zone *zone;
6177 int __maybe_unused cpu;
6178
6179 for_each_populated_zone(zone)
6180 setup_zone_pageset(zone);
6181
6182 #ifdef CONFIG_NUMA
6183 /*
6184 * Unpopulated zones continue using the boot pagesets.
6185 * The numa stats for these pagesets need to be reset.
6186 * Otherwise, they will end up skewing the stats of
6187 * the nodes these zones are associated with.
6188 */
6189 for_each_possible_cpu(cpu) {
6190 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
6191 memset(pzstats->vm_numa_event, 0,
6192 sizeof(pzstats->vm_numa_event));
6193 }
6194 #endif
6195
6196 for_each_online_pgdat(pgdat)
6197 pgdat->per_cpu_nodestats =
6198 alloc_percpu(struct per_cpu_nodestat);
6199 }
6200
6201 __meminit void zone_pcp_init(struct zone *zone)
6202 {
6203 /*
6204 * per cpu subsystem is not up at this point. The following code
6205 * relies on the ability of the linker to provide the
6206 * offset of a (static) per cpu variable into the per cpu area.
6207 */
6208 zone->per_cpu_pageset = &boot_pageset;
6209 zone->per_cpu_zonestats = &boot_zonestats;
6210 zone->pageset_high_min = BOOT_PAGESET_HIGH;
6211 zone->pageset_high_max = BOOT_PAGESET_HIGH;
6212 zone->pageset_batch = BOOT_PAGESET_BATCH;
6213
6214 if (populated_zone(zone))
6215 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
6216 zone->present_pages, zone_batchsize(zone));
6217 }
6218
6219 static void setup_per_zone_lowmem_reserve(void);
6220
6221 void adjust_managed_page_count(struct page *page, long count)
6222 {
6223 atomic_long_add(count, &page_zone(page)->managed_pages);
6224 totalram_pages_add(count);
6225 setup_per_zone_lowmem_reserve();
6226 }
6227 EXPORT_SYMBOL(adjust_managed_page_count);
6228
6229 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
6230 {
6231 void *pos;
6232 unsigned long pages = 0;
6233
6234 start = (void *)PAGE_ALIGN((unsigned long)start);
6235 end = (void *)((unsigned long)end & PAGE_MASK);
6236 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
6237 struct page *page = virt_to_page(pos);
6238 void *direct_map_addr;
6239
6240 /*
6241 * 'direct_map_addr' might be different from 'pos'
6242 * because some architectures' virt_to_page()
6243 * work with aliases. Getting the direct map
6244 * address ensures that we get a _writeable_
6245 * alias for the memset().
6246 */
6247 direct_map_addr = page_address(page);
6248 /*
6249 * Perform a kasan-unchecked memset() since this memory
6250 * has not been initialized.
6251 */
6252 direct_map_addr = kasan_reset_tag(direct_map_addr);
6253 if ((unsigned int)poison <= 0xFF)
6254 memset(direct_map_addr, poison, PAGE_SIZE);
6255
6256 free_reserved_page(page);
6257 }
6258
6259 if (pages && s)
6260 pr_info("Freeing %s memory: %ldK\n", s, K(pages));
6261
6262 return pages;
6263 }
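
/*
 * Usage sketch: the pattern used by helpers such as
 * free_initmem_default() in include/linux/mm.h to hand the kernel's
 * __init sections back to the buddy allocator (the label string here is
 * illustrative):
 *
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 *
 * A poison value outside 0x00-0xFF (e.g. -1) skips the memset().
 */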
6264
6265 void free_reserved_page(struct page *page)
6266 {
6267 clear_page_tag_ref(page);
6268 ClearPageReserved(page);
6269 init_page_count(page);
6270 __free_page(page);
6271 adjust_managed_page_count(page, 1);
6272 }
6273 EXPORT_SYMBOL(free_reserved_page);
6274
6275 static int page_alloc_cpu_dead(unsigned int cpu)
6276 {
6277 struct zone *zone;
6278
6279 lru_add_drain_cpu(cpu);
6280 mlock_drain_remote(cpu);
6281 drain_pages(cpu);
6282
6283 /*
6284 * Spill the event counters of the dead processor
6285 * into the current processors event counters.
6286 * This artificially elevates the count of the current
6287 * processor.
6288 */
6289 vm_events_fold_cpu(cpu);
6290
6291 /*
6292 * Zero the differential counters of the dead processor
6293 * so that the vm statistics are consistent.
6294 *
6295 * This is only okay since the processor is dead and cannot
6296 * race with what we are doing.
6297 */
6298 cpu_vm_stats_fold(cpu);
6299
6300 for_each_populated_zone(zone)
6301 zone_pcp_update(zone, 0);
6302
6303 return 0;
6304 }
6305
6306 static int page_alloc_cpu_online(unsigned int cpu)
6307 {
6308 struct zone *zone;
6309
6310 for_each_populated_zone(zone)
6311 zone_pcp_update(zone, 1);
6312 return 0;
6313 }
6314
6315 void __init page_alloc_init_cpuhp(void)
6316 {
6317 int ret;
6318
6319 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
6320 "mm/page_alloc:pcp",
6321 page_alloc_cpu_online,
6322 page_alloc_cpu_dead);
6323 WARN_ON(ret < 0);
6324 }
6325
6326 /*
6327 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
6328 * or min_free_kbytes changes.
6329 */
6330 static void calculate_totalreserve_pages(void)
6331 {
6332 struct pglist_data *pgdat;
6333 unsigned long reserve_pages = 0;
6334 enum zone_type i, j;
6335
6336 for_each_online_pgdat(pgdat) {
6337
6338 pgdat->totalreserve_pages = 0;
6339
6340 for (i = 0; i < MAX_NR_ZONES; i++) {
6341 struct zone *zone = pgdat->node_zones + i;
6342 long max = 0;
6343 unsigned long managed_pages = zone_managed_pages(zone);
6344
6345 /*
6346 * lowmem_reserve[j] is monotonically non-decreasing
6347 * in j for a given zone (see
6348 * setup_per_zone_lowmem_reserve()). The maximum
6349 * valid reserve lives at the highest index with a
6350 * non-zero value, so scan backwards and stop at the
6351 * first hit.
6352 */
6353 for (j = MAX_NR_ZONES - 1; j > i; j--) {
6354 if (!zone->lowmem_reserve[j])
6355 continue;
6356
6357 max = zone->lowmem_reserve[j];
6358 break;
6359 }
6360 /* we treat the high watermark as reserved pages. */
6361 max += high_wmark_pages(zone);
6362
6363 max = min_t(unsigned long, max, managed_pages);
6364
6365 pgdat->totalreserve_pages += max;
6366
6367 reserve_pages += max;
6368 }
6369 }
6370 totalreserve_pages = reserve_pages;
6371 trace_mm_calculate_totalreserve_pages(totalreserve_pages);
6372 }
6373
6374 /*
6375 * setup_per_zone_lowmem_reserve - called whenever
6376 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
6377 * has a correct pages reserved value, so an adequate number of
6378 * pages are left in the zone after a successful __alloc_pages().
6379 */
6380 static void setup_per_zone_lowmem_reserve(void)
6381 {
6382 struct pglist_data *pgdat;
6383 enum zone_type i, j;
6384 /*
6385 * For a given zone node_zones[i], lowmem_reserve[j] (j > i)
6386 * represents how many pages in zone i must effectively be kept
6387 * in reserve when deciding whether an allocation class that is
6388 * allowed to allocate from zones up to j may fall back into
6389 * zone i.
6390 *
6391 * As j increases, the allocation class can use a strictly larger
6392 * set of fallback zones and therefore must not be allowed to
6393 * deplete low zones more aggressively than a less flexible one.
6394 * As a result, lowmem_reserve[j] is required to be monotonically
6395 * non-decreasing in j for each zone i. Callers such as
6396 * calculate_totalreserve_pages() rely on this monotonicity when
6397 * selecting the maximum reserve entry.
6398 */
6399 for_each_online_pgdat(pgdat) {
6400 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
6401 struct zone *zone = &pgdat->node_zones[i];
6402 int ratio = sysctl_lowmem_reserve_ratio[i];
6403 bool clear = !ratio || !zone_managed_pages(zone);
6404 unsigned long managed_pages = 0;
6405
6406 for (j = i + 1; j < MAX_NR_ZONES; j++) {
6407 struct zone *upper_zone = &pgdat->node_zones[j];
6408
6409 managed_pages += zone_managed_pages(upper_zone);
6410
6411 if (clear)
6412 zone->lowmem_reserve[j] = 0;
6413 else
6414 zone->lowmem_reserve[j] = managed_pages / ratio;
6415 trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone,
6416 zone->lowmem_reserve[j]);
6417 }
6418 }
6419 }
6420
6421 /* update totalreserve_pages */
6422 calculate_totalreserve_pages();
6423 }
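
/*
 * Worked example (sizes illustrative): with a lowmem_reserve_ratio of
 * 256 for ZONE_DMA32 and 1,048,576 managed pages of ZONE_NORMAL above
 * it, DMA32's lowmem_reserve[ZONE_NORMAL] becomes 1048576 / 256 = 4096
 * pages: an allocation allowed to use up to ZONE_NORMAL may only dip
 * into DMA32 while DMA32 stays 4096 pages above its normal watermark.
 */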
6424
6425 static void __setup_per_zone_wmarks(void)
6426 {
6427 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6428 unsigned long lowmem_pages = 0;
6429 struct zone *zone;
6430 unsigned long flags;
6431
6432 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */
6433 for_each_zone(zone) {
6434 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
6435 lowmem_pages += zone_managed_pages(zone);
6436 }
6437
6438 for_each_zone(zone) {
6439 u64 tmp;
6440
6441 spin_lock_irqsave(&zone->lock, flags);
6442 tmp = (u64)pages_min * zone_managed_pages(zone);
6443 tmp = div64_ul(tmp, lowmem_pages);
6444 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
6445 /*
6446 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6447 * need highmem and movable zones pages, so cap pages_min
6448 * to a small value here.
6449 *
6450 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
6451 * deltas control async page reclaim, and so should
6452 * not be capped for highmem and movable zones.
6453 */
6454 unsigned long min_pages;
6455
6456 min_pages = zone_managed_pages(zone) / 1024;
6457 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
6458 zone->_watermark[WMARK_MIN] = min_pages;
6459 } else {
6460 /*
6461 * If it's a lowmem zone, reserve a number of pages
6462 * proportionate to the zone's size.
6463 */
6464 zone->_watermark[WMARK_MIN] = tmp;
6465 }
6466
6467 /*
6468 * Set the kswapd watermarks distance according to the
6469 * scale factor in proportion to available memory, but
6470 * ensure a minimum size on small systems.
6471 */
6472 tmp = max_t(u64, tmp >> 2,
6473 mult_frac(zone_managed_pages(zone),
6474 watermark_scale_factor, 10000));
6475
6476 zone->watermark_boost = 0;
6477 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
6478 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
6479 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
6480 trace_mm_setup_per_zone_wmarks(zone);
6481
6482 spin_unlock_irqrestore(&zone->lock, flags);
6483 }
6484
6485 /* update totalreserve_pages */
6486 calculate_totalreserve_pages();
6487 }
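
/*
 * Worked example (illustrative, 4KiB pages): min_free_kbytes = 67584
 * gives pages_min = 67584 >> 2 = 16896. A lowmem zone holding all of
 * lowmem gets WMARK_MIN = 16896; with watermark_scale_factor = 10 and
 * 4,194,304 managed pages the inter-watermark gap is
 *
 *	max(16896 >> 2, 4194304 * 10 / 10000) = max(4224, 4194) = 4224
 *
 * so WMARK_LOW = 21120 and WMARK_HIGH = 25344.
 */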
6488
6489 /**
6490 * setup_per_zone_wmarks - called when min_free_kbytes changes
6491 * or when memory is hot-{added|removed}
6492 *
6493 * Ensures that the watermark[min,low,high] values for each zone are set
6494 * correctly with respect to min_free_kbytes.
6495 */
6496 void setup_per_zone_wmarks(void)
6497 {
6498 struct zone *zone;
6499 static DEFINE_SPINLOCK(lock);
6500
6501 spin_lock(&lock);
6502 __setup_per_zone_wmarks();
6503 spin_unlock(&lock);
6504
6505 /*
6506 * The watermark sizes have changed, so update the pcpu batch
6507 * and high limits or the limits may be inappropriate.
6508 */
6509 for_each_zone(zone)
6510 zone_pcp_update(zone, 0);
6511 }
6512
6513 /*
6514 * Initialise min_free_kbytes.
6515 *
6516 * For small machines we want it small (128k min). For large machines
6517 * we want it large (256MB max). But it is not linear, because network
6518 * bandwidth does not increase linearly with machine size. We use
6519 *
6520 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
6521 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
6522 *
6523 * which yields
6524 *
6525 * 16MB: 512k
6526 * 32MB: 724k
6527 * 64MB: 1024k
6528 * 128MB: 1448k
6529 * 256MB: 2048k
6530 * 512MB: 2896k
6531 * 1024MB: 4096k
6532 * 2048MB: 5792k
6533 * 4096MB: 8192k
6534 * 8192MB: 11584k
6535 * 16384MB: 16384k
6536 */
6537 void calculate_min_free_kbytes(void)
6538 {
6539 unsigned long lowmem_kbytes;
6540 int new_min_free_kbytes;
6541
6542 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
6543 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
6544
6545 if (new_min_free_kbytes > user_min_free_kbytes)
6546 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
6547 else
6548 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6549 new_min_free_kbytes, user_min_free_kbytes);
6550
6551 }
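
/*
 * Worked example matching the table above: 16GiB of lowmem is
 * lowmem_kbytes = 16,777,216, so
 *
 *	min_free_kbytes = int_sqrt(16777216 * 16) = int_sqrt(268435456)
 *			= 16384
 *
 * i.e. the "16384MB: 16384k" row, within the clamp range [128, 262144].
 */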
6552
6553 int __meminit init_per_zone_wmark_min(void)
6554 {
6555 calculate_min_free_kbytes();
6556 setup_per_zone_wmarks();
6557 refresh_zone_stat_thresholds();
6558 setup_per_zone_lowmem_reserve();
6559
6560 #ifdef CONFIG_NUMA
6561 setup_min_unmapped_ratio();
6562 setup_min_slab_ratio();
6563 #endif
6564
6565 khugepaged_min_free_kbytes_update();
6566
6567 return 0;
6568 }
6569 postcore_initcall(init_per_zone_wmark_min)
6570
6571 /*
6572 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
6573 * that we can call two helper functions whenever min_free_kbytes
6574 * changes.
6575 */
6576 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write,
6577 void *buffer, size_t *length, loff_t *ppos)
6578 {
6579 int rc;
6580
6581 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6582 if (rc)
6583 return rc;
6584
6585 if (write) {
6586 user_min_free_kbytes = min_free_kbytes;
6587 setup_per_zone_wmarks();
6588 }
6589 return 0;
6590 }
6591
6592 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write,
6593 void *buffer, size_t *length, loff_t *ppos)
6594 {
6595 int rc;
6596
6597 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6598 if (rc)
6599 return rc;
6600
6601 if (write)
6602 setup_per_zone_wmarks();
6603
6604 return 0;
6605 }
6606
6607 #ifdef CONFIG_NUMA
6608 static void setup_min_unmapped_ratio(void)
6609 {
6610 pg_data_t *pgdat;
6611 struct zone *zone;
6612
6613 for_each_online_pgdat(pgdat)
6614 pgdat->min_unmapped_pages = 0;
6615
6616 for_each_zone(zone)
6617 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
6618 sysctl_min_unmapped_ratio) / 100;
6619 }
6620
6621
6622 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write,
6623 void *buffer, size_t *length, loff_t *ppos)
6624 {
6625 int rc;
6626
6627 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6628 if (rc)
6629 return rc;
6630
6631 setup_min_unmapped_ratio();
6632
6633 return 0;
6634 }
6635
6636 static void setup_min_slab_ratio(void)
6637 {
6638 pg_data_t *pgdat;
6639 struct zone *zone;
6640
6641 for_each_online_pgdat(pgdat)
6642 pgdat->min_slab_pages = 0;
6643
6644 for_each_zone(zone)
6645 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
6646 sysctl_min_slab_ratio) / 100;
6647 }
6648
6649 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write,
6650 void *buffer, size_t *length, loff_t *ppos)
6651 {
6652 int rc;
6653
6654 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6655 if (rc)
6656 return rc;
6657
6658 setup_min_slab_ratio();
6659
6660 return 0;
6661 }
6662 #endif
6663
6664 /*
6665 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6666 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6667 * whenever sysctl_lowmem_reserve_ratio changes.
6668 *
6669 * The reserve ratio obviously has absolutely no relation to the
6670 * minimum watermarks. The lowmem reserve ratio can only make sense
6671 * as a function of the boot-time zone sizes.
6672 */
6673 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table,
6674 int write, void *buffer, size_t *length, loff_t *ppos)
6675 {
6676 int i;
6677
6678 proc_dointvec_minmax(table, write, buffer, length, ppos);
6679
6680 for (i = 0; i < MAX_NR_ZONES; i++) {
6681 if (sysctl_lowmem_reserve_ratio[i] < 1)
6682 sysctl_lowmem_reserve_ratio[i] = 0;
6683 }
6684
6685 setup_per_zone_lowmem_reserve();
6686 return 0;
6687 }
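
/*
* The sysctl takes one value per zone; e.g., on a typical x86-64 system:
*
*    # cat /proc/sys/vm/lowmem_reserve_ratio
*    256     256     32      0
*
* An entry written as less than 1 is clamped to 0 above, which
* setup_per_zone_lowmem_reserve() treats as "no reserve" for that zone.
*/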
6688
6689 /*
6690 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
6691 * cpu. It is the fraction of total pages in each zone that a hot per cpu
6692 * pagelist can have before it gets flushed back to the buddy allocator.
6693 */
6694 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table,
6695 int write, void *buffer, size_t *length, loff_t *ppos)
6696 {
6697 struct zone *zone;
6698 int old_percpu_pagelist_high_fraction;
6699 int ret;
6700
6701 /*
6702 * Avoid using pcp_batch_high_lock for reads as the value is read
6703 * atomically and a race with offlining is harmless.
6704 */
6705
6706 if (!write)
6707 return proc_dointvec_minmax(table, write, buffer, length, ppos);
6708
6709 mutex_lock(&pcp_batch_high_lock);
6710 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
6711
6712 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
6713 if (ret < 0)
6714 goto out;
6715
6716 /* Sanity checking to avoid pcp imbalance */
6717 if (percpu_pagelist_high_fraction &&
6718 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
6719 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
6720 ret = -EINVAL;
6721 goto out;
6722 }
6723
6724 /* No change? */
6725 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
6726 goto out;
6727
6728 for_each_populated_zone(zone)
6729 zone_set_pageset_high_and_batch(zone, 0);
6730 out:
6731 mutex_unlock(&pcp_batch_high_lock);
6732 return ret;
6733 }
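
/*
* Worked example (illustrative sizes): for a zone managing 1,048,576 pages
* (4GiB with 4KiB pages), writing the minimum accepted fraction of 8 caps
* the zone's total pcp pages at roughly 1,048,576 / 8 = 131,072, which
* zone_set_pageset_high_and_batch() spreads across the online CPUs.
* Writing 0 reverts to the kernel's own pcp->high heuristic.
*/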
6734
6735 static const struct ctl_table page_alloc_sysctl_table[] = {
6736 {
6737 .procname = "min_free_kbytes",
6738 .data = &min_free_kbytes,
6739 .maxlen = sizeof(min_free_kbytes),
6740 .mode = 0644,
6741 .proc_handler = min_free_kbytes_sysctl_handler,
6742 .extra1 = SYSCTL_ZERO,
6743 },
6744 {
6745 .procname = "watermark_boost_factor",
6746 .data = &watermark_boost_factor,
6747 .maxlen = sizeof(watermark_boost_factor),
6748 .mode = 0644,
6749 .proc_handler = proc_dointvec_minmax,
6750 .extra1 = SYSCTL_ZERO,
6751 },
6752 {
6753 .procname = "watermark_scale_factor",
6754 .data = &watermark_scale_factor,
6755 .maxlen = sizeof(watermark_scale_factor),
6756 .mode = 0644,
6757 .proc_handler = watermark_scale_factor_sysctl_handler,
6758 .extra1 = SYSCTL_ONE,
6759 .extra2 = SYSCTL_THREE_THOUSAND,
6760 },
6761 {
6762 .procname = "defrag_mode",
6763 .data = &defrag_mode,
6764 .maxlen = sizeof(defrag_mode),
6765 .mode = 0644,
6766 .proc_handler = proc_dointvec_minmax,
6767 .extra1 = SYSCTL_ZERO,
6768 .extra2 = SYSCTL_ONE,
6769 },
6770 {
6771 .procname = "percpu_pagelist_high_fraction",
6772 .data = &percpu_pagelist_high_fraction,
6773 .maxlen = sizeof(percpu_pagelist_high_fraction),
6774 .mode = 0644,
6775 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler,
6776 .extra1 = SYSCTL_ZERO,
6777 },
6778 {
6779 .procname = "lowmem_reserve_ratio",
6780 .data = &sysctl_lowmem_reserve_ratio,
6781 .maxlen = sizeof(sysctl_lowmem_reserve_ratio),
6782 .mode = 0644,
6783 .proc_handler = lowmem_reserve_ratio_sysctl_handler,
6784 },
6785 #ifdef CONFIG_NUMA
6786 {
6787 .procname = "numa_zonelist_order",
6788 .data = &numa_zonelist_order,
6789 .maxlen = NUMA_ZONELIST_ORDER_LEN,
6790 .mode = 0644,
6791 .proc_handler = numa_zonelist_order_handler,
6792 },
6793 {
6794 .procname = "min_unmapped_ratio",
6795 .data = &sysctl_min_unmapped_ratio,
6796 .maxlen = sizeof(sysctl_min_unmapped_ratio),
6797 .mode = 0644,
6798 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler,
6799 .extra1 = SYSCTL_ZERO,
6800 .extra2 = SYSCTL_ONE_HUNDRED,
6801 },
6802 {
6803 .procname = "min_slab_ratio",
6804 .data = &sysctl_min_slab_ratio,
6805 .maxlen = sizeof(sysctl_min_slab_ratio),
6806 .mode = 0644,
6807 .proc_handler = sysctl_min_slab_ratio_sysctl_handler,
6808 .extra1 = SYSCTL_ZERO,
6809 .extra2 = SYSCTL_ONE_HUNDRED,
6810 },
6811 #endif
6812 };
6813
6814 void __init page_alloc_sysctl_init(void)
6815 {
6816 register_sysctl_init("vm", page_alloc_sysctl_table);
6817 }
6818
6819 #ifdef CONFIG_CONTIG_ALLOC
6820 /* Usage: See admin-guide/dynamic-debug-howto.rst */
6821 static void alloc_contig_dump_pages(struct list_head *page_list)
6822 {
6823 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
6824
6825 if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
6826 struct page *page;
6827
6828 dump_stack();
6829 list_for_each_entry(page, page_list, lru)
6830 dump_page(page, "migration failure");
6831 }
6832 }
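
/*
* The dump is gated by dynamic debug and can be enabled at runtime, e.g.
* (sketch; see the howto referenced above for the full syntax):
*
*    # echo 'func alloc_contig_dump_pages +p' > \
*        /sys/kernel/debug/dynamic_debug/control
*/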
6833
6834 /* [start, end) must belong to a single zone. */
6835 static int __alloc_contig_migrate_range(struct compact_control *cc,
6836 unsigned long start, unsigned long end)
6837 {
6838 /* This function is based on compact_zone() from compaction.c. */
6839 unsigned int nr_reclaimed;
6840 unsigned long pfn = start;
6841 unsigned int tries = 0;
6842 int ret = 0;
6843 struct migration_target_control mtc = {
6844 .nid = zone_to_nid(cc->zone),
6845 .gfp_mask = cc->gfp_mask,
6846 .reason = MR_CONTIG_RANGE,
6847 };
6848
6849 lru_cache_disable();
6850
6851 while (pfn < end || !list_empty(&cc->migratepages)) {
6852 if (fatal_signal_pending(current)) {
6853 ret = -EINTR;
6854 break;
6855 }
6856
6857 if (list_empty(&cc->migratepages)) {
6858 cc->nr_migratepages = 0;
6859 ret = isolate_migratepages_range(cc, pfn, end);
6860 if (ret && ret != -EAGAIN)
6861 break;
6862 pfn = cc->migrate_pfn;
6863 tries = 0;
6864 } else if (++tries == 5) {
6865 ret = -EBUSY;
6866 break;
6867 }
6868
6869 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6870 &cc->migratepages);
6871 cc->nr_migratepages -= nr_reclaimed;
6872
6873 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
6874 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
6875
6876 /*
6877 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
6878 * to keep retrying on this error, so do the same here.
6879 */
6880 if (ret == -ENOMEM)
6881 break;
6882 }
6883
6884 lru_cache_enable();
6885 if (ret < 0) {
6886 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
6887 alloc_contig_dump_pages(&cc->migratepages);
6888 putback_movable_pages(&cc->migratepages);
6889 }
6890
6891 return (ret < 0) ? ret : 0;
6892 }
6893
6894 static void split_free_pages(struct list_head *list, gfp_t gfp_mask)
6895 {
6896 int order;
6897
6898 for (order = 0; order < NR_PAGE_ORDERS; order++) {
6899 struct page *page, *next;
6900 int nr_pages = 1 << order;
6901
6902 list_for_each_entry_safe(page, next, &list[order], lru) {
6903 int i;
6904
6905 post_alloc_hook(page, order, gfp_mask);
6906 set_page_refcounted(page);
6907 if (!order)
6908 continue;
6909
6910 split_page(page, order);
6911
6912 /* Add all subpages to the order-0 head, in sequence. */
6913 list_del(&page->lru);
6914 for (i = 0; i < nr_pages; i++)
6915 list_add_tail(&page[i].lru, &list[0]);
6916 }
6917 }
6918 }
6919
6920 static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
6921 {
6922 const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
6923 const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
6924 __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO;
6925 const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
6926
6927 /*
6928 * We are given the range to allocate; node, mobility and placement
6929 * hints are irrelevant at this point. We'll simply ignore them.
6930 */
6931 gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE |
6932 __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE);
6933
6934 /*
6935 * We only support most reclaim flags (but not NOFAIL/NORETRY), and
6936 * selected action flags.
6937 */
6938 if (gfp_mask & ~(reclaim_mask | action_mask))
6939 return -EINVAL;
6940
6941 /*
6942 * Flags to control page compaction/migration/reclaim, to free up our
6943 * page range. Migratable pages are movable, __GFP_MOVABLE is implied
6944 * for them.
6945 *
6946 * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that
6947 * so as not to degrade callers.
6948 */
6949 *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) |
6950 __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
6951 return 0;
6952 }
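
/*
* For instance, GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) passes the
* check above, alone or combined with action flags such as __GFP_NOWARN or
* __GFP_COMP, while GFP_KERNEL | __GFP_NOFAIL fails it with -EINVAL since
* __GFP_NOFAIL is in neither reclaim_mask nor action_mask.
*/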
6953
6954 /**
6955 * alloc_contig_range() -- tries to allocate given range of pages
6956 * @start: start PFN to allocate
6957 * @end: one-past-the-last PFN to allocate
6958 * @alloc_flags: allocation information
6959 * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some
6960 * action and reclaim modifiers are supported. Reclaim modifiers
6961 * control allocation behavior during compaction/migration/reclaim.
6962 *
6963 * The PFN range does not have to be pageblock aligned. The PFN range must
6964 * belong to a single zone.
6965 *
6966 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
6967 * pageblocks in the range. Once isolated, the pageblocks should not
6968 * be modified by others.
6969 *
6970 * Return: zero on success or negative error code. On success all
6971 * pages whose PFN is in [start, end) are allocated for the caller and
6972 * need to be freed with free_contig_range().
6973 */
6974 int alloc_contig_range_noprof(unsigned long start, unsigned long end,
6975 acr_flags_t alloc_flags, gfp_t gfp_mask)
6976 {
6977 const unsigned int order = ilog2(end - start);
6978 unsigned long outer_start, outer_end;
6979 int ret = 0;
6980
6981 struct compact_control cc = {
6982 .nr_migratepages = 0,
6983 .order = -1,
6984 .zone = page_zone(pfn_to_page(start)),
6985 .mode = MIGRATE_SYNC,
6986 .ignore_skip_hint = true,
6987 .no_set_skip_hint = true,
6988 .alloc_contig = true,
6989 };
6990 INIT_LIST_HEAD(&cc.migratepages);
6991 enum pb_isolate_mode mode = (alloc_flags & ACR_FLAGS_CMA) ?
6992 PB_ISOLATE_MODE_CMA_ALLOC :
6993 PB_ISOLATE_MODE_OTHER;
6994
6995 /*
6996 * In contrast to the buddy, we allow for orders here that exceed
6997 * MAX_PAGE_ORDER, so we must manually make sure that we are not
6998 * exceeding the maximum folio order.
6999 */
7000 if (WARN_ON_ONCE((gfp_mask & __GFP_COMP) && order > MAX_FOLIO_ORDER))
7001 return -EINVAL;
7002
7003 gfp_mask = current_gfp_context(gfp_mask);
7004 if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
7005 return -EINVAL;
7006
7007 /*
7008 * What we do here is we mark all pageblocks in range as
7009 * MIGRATE_ISOLATE. Because pageblock and max order pages may
7010 * have different sizes, and due to the way the page allocator
7011 * works, start_isolate_page_range() has special handling for this.
7012 *
7013 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
7014 * migrate the pages from an unaligned range (ie. pages that
7015 * we are interested in). This will put all the pages in
7016 * range back to page allocator as MIGRATE_ISOLATE.
7017 *
7018 * When this is done, we take the pages in range from page
7019 * allocator removing them from the buddy system. This way
7020 * page allocator will never consider using them.
7021 *
7022 * This lets us mark the pageblocks back as
7023 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
7024 * aligned range but not in the unaligned, original range are
7025 * put back to page allocator so that buddy can use them.
7026 */
7027
7028 ret = start_isolate_page_range(start, end, mode);
7029 if (ret)
7030 goto done;
7031
7032 drain_all_pages(cc.zone);
7033
7034 /*
7035 * In case of -EBUSY, we'd like to know which page causes the problem.
7036 * So, just fall through. test_pages_isolated() has a tracepoint
7037 * which will report the busy page.
7038 *
7039 * It is possible that busy pages could become available before
7040 * the call to test_pages_isolated, and the range will actually be
7041 * allocated. So, if we fall through, be sure to clear ret so that
7042 * -EBUSY is not accidentally used or returned to caller.
7043 */
7044 ret = __alloc_contig_migrate_range(&cc, start, end);
7045 if (ret && ret != -EBUSY)
7046 goto done;
7047
7048 /*
7049 * When in-use hugetlb pages are migrated, they may simply be released
7050 * back into the free hugepage pool instead of being returned to the
7051 * buddy system. After the migration of in-use huge pages is completed,
7052 * we will invoke replace_free_hugepage_folios() to ensure that these
7053 * hugepages are properly released to the buddy system.
7054 */
7055 ret = replace_free_hugepage_folios(start, end);
7056 if (ret)
7057 goto done;
7058
7059 /*
7060 * Pages from [start, end) are within pageblock_nr_pages aligned
7061 * blocks that are marked as MIGRATE_ISOLATE. What's
7062 * more, all pages in [start, end) are free in page allocator.
7063 * What we are going to do is to allocate all pages from
7064 * [start, end) (that is remove them from page allocator).
7065 *
7066 * The only problem is that pages at the beginning and at the
7067 * end of the interesting range may not be aligned with pages that
7068 * the page allocator holds, ie. they can be part of higher order
7069 * pages. Because of this, we reserve the bigger range and
7070 * once this is done free the pages we are not interested in.
7071 *
7072 * We don't have to hold zone->lock here because the pages are
7073 * isolated thus they won't get removed from buddy.
7074 */
7075 outer_start = find_large_buddy(start);
7076
7077 /* Make sure the range is really isolated. */
7078 if (test_pages_isolated(outer_start, end, mode)) {
7079 ret = -EBUSY;
7080 goto done;
7081 }
7082
7083 /* Grab isolated pages from freelists. */
7084 outer_end = isolate_freepages_range(&cc, outer_start, end);
7085 if (!outer_end) {
7086 ret = -EBUSY;
7087 goto done;
7088 }
7089
7090 if (!(gfp_mask & __GFP_COMP)) {
7091 split_free_pages(cc.freepages, gfp_mask);
7092
7093 /* Free head and tail (if any) */
7094 if (start != outer_start)
7095 free_contig_range(outer_start, start - outer_start);
7096 if (end != outer_end)
7097 free_contig_range(end, outer_end - end);
7098 } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) {
7099 struct page *head = pfn_to_page(start);
7100
7101 check_new_pages(head, order);
7102 prep_new_page(head, order, gfp_mask, 0);
7103 set_page_refcounted(head);
7104 } else {
7105 ret = -EINVAL;
7106 WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",
7107 start, end, outer_start, outer_end);
7108 }
7109 done:
7110 undo_isolate_page_range(start, end);
7111 return ret;
7112 }
7113 EXPORT_SYMBOL(alloc_contig_range_noprof);
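
/*
* Minimal usage sketch (hypothetical caller; start_pfn and nr are assumed
* to lie within a single zone):
*
*    int err = alloc_contig_range(start_pfn, start_pfn + nr,
*                                 ACR_FLAGS_NONE, GFP_KERNEL);
*    if (!err)
*        free_contig_range(start_pfn, nr);
*/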
7114
7115 static int __alloc_contig_pages(unsigned long start_pfn,
7116 unsigned long nr_pages, gfp_t gfp_mask)
7117 {
7118 unsigned long end_pfn = start_pfn + nr_pages;
7119
7120 return alloc_contig_range_noprof(start_pfn, end_pfn, ACR_FLAGS_NONE,
7121 gfp_mask);
7122 }
7123
7124 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
7125 unsigned long nr_pages)
7126 {
7127 unsigned long i, end_pfn = start_pfn + nr_pages;
7128 struct page *page;
7129
7130 for (i = start_pfn; i < end_pfn; i++) {
7131 page = pfn_to_online_page(i);
7132 if (!page)
7133 return false;
7134
7135 if (page_zone(page) != z)
7136 return false;
7137
7138 if (PageReserved(page))
7139 return false;
7140
7141 if (PageHuge(page))
7142 return false;
7143 }
7144 return true;
7145 }
7146
7147 static bool zone_spans_last_pfn(const struct zone *zone,
7148 unsigned long start_pfn, unsigned long nr_pages)
7149 {
7150 unsigned long last_pfn = start_pfn + nr_pages - 1;
7151
7152 return zone_spans_pfn(zone, last_pfn);
7153 }
7154
7155 /**
7156 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
7157 * @nr_pages: Number of contiguous pages to allocate
7158 * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some
7159 * action and reclaim modifiers are supported. Reclaim modifiers
7160 * control allocation behavior during compaction/migration/reclaim.
7161 * @nid: Target node
7162 * @nodemask: Mask for other possible nodes
7163 *
7164 * This routine is a wrapper around alloc_contig_range(). It scans over zones
7165 * on an applicable zonelist to find a contiguous pfn range which can then be
7166 * tried for allocation with alloc_contig_range(). This routine is intended
7167 * for allocation requests which cannot be fulfilled with the buddy allocator.
7168 *
7169 * The allocated memory is always aligned to a page boundary. If nr_pages is a
7170 * power of two, then the allocated range is also guaranteed to be aligned to
7171 * nr_pages (e.g. a 1GB request would be aligned to 1GB).
7172 *
7173 * Allocated pages can be freed with free_contig_range() or by manually calling
7174 * __free_page() on each allocated page.
7175 *
7176 * Return: pointer to contiguous pages on success, or NULL if not successful.
7177 */
7178 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
7179 int nid, nodemask_t *nodemask)
7180 {
7181 unsigned long ret, pfn, flags;
7182 struct zonelist *zonelist;
7183 struct zone *zone;
7184 struct zoneref *z;
7185
7186 zonelist = node_zonelist(nid, gfp_mask);
7187 for_each_zone_zonelist_nodemask(zone, z, zonelist,
7188 gfp_zone(gfp_mask), nodemask) {
7189 spin_lock_irqsave(&zone->lock, flags);
7190
7191 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
7192 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
7193 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
7194 /*
7195 * We release the zone lock here because
7196 * alloc_contig_range() will also lock the zone
7197 * at some point. If there's an allocation
7198 * spinning on this lock, it may win the race
7199 * and cause alloc_contig_range() to fail...
7200 */
7201 spin_unlock_irqrestore(&zone->lock, flags);
7202 ret = __alloc_contig_pages(pfn, nr_pages,
7203 gfp_mask);
7204 if (!ret)
7205 return pfn_to_page(pfn);
7206 spin_lock_irqsave(&zone->lock, flags);
7207 }
7208 pfn += nr_pages;
7209 }
7210 spin_unlock_irqrestore(&zone->lock, flags);
7211 }
7212 return NULL;
7213 }
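
/*
* Usage sketch (hypothetical caller): grab 1GiB of physically contiguous
* memory from the local node, then release it:
*
*    unsigned long nr = SZ_1G / PAGE_SIZE;
*    struct page *page = alloc_contig_pages(nr, GFP_KERNEL,
*                                           numa_node_id(), NULL);
*
*    if (page)
*        free_contig_range(page_to_pfn(page), nr);
*/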
7214 #endif /* CONFIG_CONTIG_ALLOC */
7215
7216 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
7217 {
7218 unsigned long count = 0;
7219 struct folio *folio = pfn_folio(pfn);
7220
7221 if (folio_test_large(folio)) {
7222 int expected = folio_nr_pages(folio);
7223
7224 if (nr_pages == expected)
7225 folio_put(folio);
7226 else
7227 WARN(true, "PFN %lu: nr_pages %lu != expected %d\n",
7228 pfn, nr_pages, expected);
7229 return;
7230 }
7231
7232 for (; nr_pages--; pfn++) {
7233 struct page *page = pfn_to_page(pfn);
7234
7235 count += page_count(page) != 1;
7236 __free_page(page);
7237 }
7238 WARN(count != 0, "%lu pages are still in use!\n", count);
7239 }
7240 EXPORT_SYMBOL(free_contig_range);
7241
7242 /*
7243 * Effectively disable pcplists for the zone by setting the high limit to 0
7244 * and draining all cpus. A concurrent page freeing on another CPU that's about
7245 * to put the page on pcplist will either finish before the drain and the page
7246 * will be drained, or observe the new high limit and skip the pcplist.
7247 *
7248 * Must be paired with a call to zone_pcp_enable().
7249 */
7250 void zone_pcp_disable(struct zone *zone)
7251 {
7252 mutex_lock(&pcp_batch_high_lock);
7253 __zone_set_pageset_high_and_batch(zone, 0, 0, 1);
7254 __drain_all_pages(zone, true);
7255 }
7256
7257 void zone_pcp_enable(struct zone *zone)
7258 {
7259 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min,
7260 zone->pageset_high_max, zone->pageset_batch);
7261 mutex_unlock(&pcp_batch_high_lock);
7262 }
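
/*
* Callers bracket work that must not race with the pcplists, e.g. (sketch):
*
*    zone_pcp_disable(zone);
*    ... operate on pages that must stay off the pcplists ...
*    zone_pcp_enable(zone);
*/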
7263
7264 void zone_pcp_reset(struct zone *zone)
7265 {
7266 int cpu;
7267 struct per_cpu_zonestat *pzstats;
7268
7269 if (zone->per_cpu_pageset != &boot_pageset) {
7270 for_each_online_cpu(cpu) {
7271 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
7272 drain_zonestat(zone, pzstats);
7273 }
7274 free_percpu(zone->per_cpu_pageset);
7275 zone->per_cpu_pageset = &boot_pageset;
7276 if (zone->per_cpu_zonestats != &boot_zonestats) {
7277 free_percpu(zone->per_cpu_zonestats);
7278 zone->per_cpu_zonestats = &boot_zonestats;
7279 }
7280 }
7281 }
7282
7283 #ifdef CONFIG_MEMORY_HOTREMOVE
7284 /*
7285 * All pages in the range must be in a single zone, must not contain holes,
7286 * must span full sections, and must be isolated before calling this function.
7287 *
7288 * Returns the number of managed (non-PageOffline()) pages in the range: the
7289 * number of pages for which memory offlining code must adjust managed page
7290 * counters using adjust_managed_page_count().
7291 */
7292 unsigned long __offline_isolated_pages(unsigned long start_pfn,
7293 unsigned long end_pfn)
7294 {
7295 unsigned long already_offline = 0, flags;
7296 unsigned long pfn = start_pfn;
7297 struct page *page;
7298 struct zone *zone;
7299 unsigned int order;
7300
7301 offline_mem_sections(pfn, end_pfn);
7302 zone = page_zone(pfn_to_page(pfn));
7303 spin_lock_irqsave(&zone->lock, flags);
7304 while (pfn < end_pfn) {
7305 page = pfn_to_page(pfn);
7306 /*
7307 * The HWPoisoned page may not be in the buddy system, and
7308 * page_count() is not 0.
7309 */
7310 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7311 pfn++;
7312 continue;
7313 }
7314 /*
7315 * At this point all remaining PageOffline() pages have a
7316 * reference count of 0 and can simply be skipped.
7317 */
7318 if (PageOffline(page)) {
7319 BUG_ON(page_count(page));
7320 BUG_ON(PageBuddy(page));
7321 already_offline++;
7322 pfn++;
7323 continue;
7324 }
7325
7326 BUG_ON(page_count(page));
7327 BUG_ON(!PageBuddy(page));
7328 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE);
7329 order = buddy_order(page);
7330 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE);
7331 pfn += (1 << order);
7332 }
7333 spin_unlock_irqrestore(&zone->lock, flags);
7334
7335 return end_pfn - start_pfn - already_offline;
7336 }
7337 #endif
7338
7339 /*
7340 * This function returns a stable result only if called under zone lock.
7341 */
7342 bool is_free_buddy_page(const struct page *page)
7343 {
7344 unsigned long pfn = page_to_pfn(page);
7345 unsigned int order;
7346
7347 for (order = 0; order < NR_PAGE_ORDERS; order++) {
7348 const struct page *head = page - (pfn & ((1 << order) - 1));
7349
7350 if (PageBuddy(head) &&
7351 buddy_order_unsafe(head) >= order)
7352 break;
7353 }
7354
7355 return order <= MAX_PAGE_ORDER;
7356 }
7357 EXPORT_SYMBOL(is_free_buddy_page);
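
/*
* For a stable answer, hold the zone lock around the check (sketch):
*
*    spin_lock_irqsave(&zone->lock, flags);
*    free = is_free_buddy_page(page);
*    spin_unlock_irqrestore(&zone->lock, flags);
*/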
7358
7359 #ifdef CONFIG_MEMORY_FAILURE
7360 static inline void add_to_free_list(struct page *page, struct zone *zone,
7361 unsigned int order, int migratetype,
7362 bool tail)
7363 {
7364 __add_to_free_list(page, zone, order, migratetype, tail);
7365 account_freepages(zone, 1 << order, migratetype);
7366 }
7367
7368 /*
7369 * Break down a higher-order page into sub-pages, and keep our target out
7370 * of the buddy allocator.
7371 */
7372 static void break_down_buddy_pages(struct zone *zone, struct page *page,
7373 struct page *target, int low, int high,
7374 int migratetype)
7375 {
7376 unsigned long size = 1 << high;
7377 struct page *current_buddy;
7378
7379 while (high > low) {
7380 high--;
7381 size >>= 1;
7382
7383 if (target >= &page[size]) {
7384 current_buddy = page;
7385 page = page + size;
7386 } else {
7387 current_buddy = page + size;
7388 }
7389
7390 if (set_page_guard(zone, current_buddy, high))
7391 continue;
7392
7393 add_to_free_list(current_buddy, zone, high, migratetype, false);
7394 set_buddy_order(current_buddy, high);
7395 }
7396 }
7397
7398 /*
7399 * Take a page that will be marked as poisoned off the buddy allocator.
7400 */
7401 bool take_page_off_buddy(struct page *page)
7402 {
7403 struct zone *zone = page_zone(page);
7404 unsigned long pfn = page_to_pfn(page);
7405 unsigned long flags;
7406 unsigned int order;
7407 bool ret = false;
7408
7409 spin_lock_irqsave(&zone->lock, flags);
7410 for (order = 0; order < NR_PAGE_ORDERS; order++) {
7411 struct page *page_head = page - (pfn & ((1 << order) - 1));
7412 int page_order = buddy_order(page_head);
7413
7414 if (PageBuddy(page_head) && page_order >= order) {
7415 unsigned long pfn_head = page_to_pfn(page_head);
7416 int migratetype = get_pfnblock_migratetype(page_head,
7417 pfn_head);
7418
7419 del_page_from_free_list(page_head, zone, page_order,
7420 migratetype);
7421 break_down_buddy_pages(zone, page_head, page, 0,
7422 page_order, migratetype);
7423 SetPageHWPoisonTakenOff(page);
7424 ret = true;
7425 break;
7426 }
7427 if (page_count(page_head) > 0)
7428 break;
7429 }
7430 spin_unlock_irqrestore(&zone->lock, flags);
7431 return ret;
7432 }
7433
7434 /*
7435 * Cancel takeoff done by take_page_off_buddy().
7436 */
7437 bool put_page_back_buddy(struct page *page)
7438 {
7439 struct zone *zone = page_zone(page);
7440 unsigned long flags;
7441 bool ret = false;
7442
7443 spin_lock_irqsave(&zone->lock, flags);
7444 if (put_page_testzero(page)) {
7445 unsigned long pfn = page_to_pfn(page);
7446 int migratetype = get_pfnblock_migratetype(page, pfn);
7447
7448 ClearPageHWPoisonTakenOff(page);
7449 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
7450 if (TestClearPageHWPoison(page)) {
7451 ret = true;
7452 }
7453 }
7454 spin_unlock_irqrestore(&zone->lock, flags);
7455
7456 return ret;
7457 }
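
/*
* Pairing sketch (cf. mm/memory-failure.c; "need_to_undo" is a placeholder
* for whatever condition aborts the poisoning):
*
*    if (take_page_off_buddy(page)) {
*        ... handle the poisoned page ...
*        if (need_to_undo)
*            put_page_back_buddy(page);
*    }
*/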
7458 #endif
7459
7460 #ifdef CONFIG_ZONE_DMA
7461 bool has_managed_dma(void)
7462 {
7463 struct pglist_data *pgdat;
7464
7465 for_each_online_pgdat(pgdat) {
7466 struct zone *zone = &pgdat->node_zones[ZONE_DMA];
7467
7468 if (managed_zone(zone))
7469 return true;
7470 }
7471 return false;
7472 }
7473 #endif /* CONFIG_ZONE_DMA */
7474
7475 #ifdef CONFIG_UNACCEPTED_MEMORY
7476
7477 static bool lazy_accept = true;
7478
7479 static int __init accept_memory_parse(char *p)
7480 {
7481 if (!strcmp(p, "lazy")) {
7482 lazy_accept = true;
7483 return 0;
7484 } else if (!strcmp(p, "eager")) {
7485 lazy_accept = false;
7486 return 0;
7487 } else {
7488 return -EINVAL;
7489 }
7490 }
7491 early_param("accept_memory", accept_memory_parse);
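
/*
* Chosen on the kernel command line, e.g.:
*
*    accept_memory=lazy     accept pages on first use (default)
*    accept_memory=eager    accept all memory during boot
*/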
7492
7493 static bool page_contains_unaccepted(struct page *page, unsigned int order)
7494 {
7495 phys_addr_t start = page_to_phys(page);
7496
7497 return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
7498 }
7499
7500 static void __accept_page(struct zone *zone, unsigned long *flags,
7501 struct page *page)
7502 {
7503 list_del(&page->lru);
7504 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
7505 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
7506 __ClearPageUnaccepted(page);
7507 spin_unlock_irqrestore(&zone->lock, *flags);
7508
7509 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
7510
7511 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
7512 }
7513
accept_page(struct page * page)7514 void accept_page(struct page *page)
7515 {
7516 struct zone *zone = page_zone(page);
7517 unsigned long flags;
7518
7519 spin_lock_irqsave(&zone->lock, flags);
7520 if (!PageUnaccepted(page)) {
7521 spin_unlock_irqrestore(&zone->lock, flags);
7522 return;
7523 }
7524
7525 /* Unlocks zone->lock */
7526 __accept_page(zone, &flags, page);
7527 }
7528
7529 static bool try_to_accept_memory_one(struct zone *zone)
7530 {
7531 unsigned long flags;
7532 struct page *page;
7533
7534 spin_lock_irqsave(&zone->lock, flags);
7535 page = list_first_entry_or_null(&zone->unaccepted_pages,
7536 struct page, lru);
7537 if (!page) {
7538 spin_unlock_irqrestore(&zone->lock, flags);
7539 return false;
7540 }
7541
7542 /* Unlocks zone->lock */
7543 __accept_page(zone, &flags, page);
7544
7545 return true;
7546 }
7547
7548 static bool cond_accept_memory(struct zone *zone, unsigned int order,
7549 int alloc_flags)
7550 {
7551 long to_accept, wmark;
7552 bool ret = false;
7553
7554 if (list_empty(&zone->unaccepted_pages))
7555 return false;
7556
7557 /* Bailout, since try_to_accept_memory_one() needs to take a lock */
7558 if (alloc_flags & ALLOC_TRYLOCK)
7559 return false;
7560
7561 wmark = promo_wmark_pages(zone);
7562
7563 /*
7564 * Watermarks have not been initialized yet.
7565 *
7566 * Accept one MAX_ORDER page to ensure progress.
7567 */
7568 if (!wmark)
7569 return try_to_accept_memory_one(zone);
7570
7571 /* How much to accept to get to promo watermark? */
7572 to_accept = wmark -
7573 (zone_page_state(zone, NR_FREE_PAGES) -
7574 __zone_watermark_unusable_free(zone, order, 0) -
7575 zone_page_state(zone, NR_UNACCEPTED));
7576
7577 while (to_accept > 0) {
7578 if (!try_to_accept_memory_one(zone))
7579 break;
7580 ret = true;
7581 to_accept -= MAX_ORDER_NR_PAGES;
7582 }
7583
7584 return ret;
7585 }
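
/*
* Worked example (illustrative numbers): with a promo watermark of 20,000
* pages, NR_FREE_PAGES == 24,000, an unusable-free estimate of 1,000 and
* NR_UNACCEPTED == 8,000, to_accept = 20,000 - (24,000 - 1,000 - 8,000) =
* 5,000, so the loop accepts MAX_ORDER blocks until at least that many
* pages have been accepted or the unaccepted list runs dry.
*/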
7586
7587 static bool __free_unaccepted(struct page *page)
7588 {
7589 struct zone *zone = page_zone(page);
7590 unsigned long flags;
7591
7592 if (!lazy_accept)
7593 return false;
7594
7595 spin_lock_irqsave(&zone->lock, flags);
7596 list_add_tail(&page->lru, &zone->unaccepted_pages);
7597 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
7598 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
7599 __SetPageUnaccepted(page);
7600 spin_unlock_irqrestore(&zone->lock, flags);
7601
7602 return true;
7603 }
7604
7605 #else
7606
7607 static bool page_contains_unaccepted(struct page *page, unsigned int order)
7608 {
7609 return false;
7610 }
7611
7612 static bool cond_accept_memory(struct zone *zone, unsigned int order,
7613 int alloc_flags)
7614 {
7615 return false;
7616 }
7617
7618 static bool __free_unaccepted(struct page *page)
7619 {
7620 BUILD_BUG();
7621 return false;
7622 }
7623
7624 #endif /* CONFIG_UNACCEPTED_MEMORY */
7625
7626 struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
7627 {
7628 /*
7629 * Do not specify __GFP_DIRECT_RECLAIM, since direct reclaim is not allowed.
7630 * Do not specify __GFP_KSWAPD_RECLAIM either, since wake up of kswapd
7631 * is not safe in arbitrary context.
7632 *
7633 * These two are the conditions for gfpflags_allow_spinning() being true.
7634 *
7635 * Specify __GFP_NOWARN since failing alloc_pages_nolock() is not a reason
7636 * to warn. Also, warning would trigger printk() which is unsafe from
7637 * various contexts. We cannot use printk_deferred_enter() to mitigate,
7638 * since the running context is unknown.
7639 *
7640 * Specify __GFP_ZERO to make sure that call to kmsan_alloc_page() below
7641 * is safe in any context. Also zeroing the page is mandatory for
7642 * BPF use cases.
7643 *
7644 * Though __GFP_NOMEMALLOC is not checked in the code path below,
7645 * specify it here to highlight that alloc_pages_nolock()
7646 * doesn't want to deplete reserves.
7647 */
7648 gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC | __GFP_COMP
7649 | gfp_flags;
7650 unsigned int alloc_flags = ALLOC_TRYLOCK;
7651 struct alloc_context ac = { };
7652 struct page *page;
7653
7654 VM_WARN_ON_ONCE(gfp_flags & ~__GFP_ACCOUNT);
7655 /*
7656 * In PREEMPT_RT spin_trylock() will call raw_spin_lock() which is
7657 * unsafe in NMI. If spin_trylock() is called from hard IRQ the current
7658 * task may be waiting for one rt_spin_lock, but rt_spin_trylock() will
7659 * mark the task as the owner of another rt_spin_lock which will
7660 * confuse PI logic, so return immediately if called from hard IRQ or
7661 * NMI.
7662 *
7663 * Note that the irqs_disabled() case is ok. This function can be called
7664 * from a raw_spin_lock_irqsave() region.
7665 */
7666 if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
7667 return NULL;
7668 if (!pcp_allowed_order(order))
7669 return NULL;
7670
7671 /* Bailout, since _deferred_grow_zone() needs to take a lock */
7672 if (deferred_pages_enabled())
7673 return NULL;
7674
7675 if (nid == NUMA_NO_NODE)
7676 nid = numa_node_id();
7677
7678 prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac,
7679 &alloc_gfp, &alloc_flags);
7680
7681 /*
7682 * Best effort allocation from percpu free list.
7683 * If it's empty attempt to spin_trylock zone->lock.
7684 */
7685 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
7686
7687 /* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */
7688
7689 if (memcg_kmem_online() && page && (gfp_flags & __GFP_ACCOUNT) &&
7690 unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
7691 __free_frozen_pages(page, order, FPI_TRYLOCK);
7692 page = NULL;
7693 }
7694 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
7695 kmsan_alloc_page(page, order, alloc_gfp);
7696 return page;
7697 }
7698 /**
7699 * alloc_pages_nolock - opportunistic reentrant allocation from any context
7700 * @gfp_flags: GFP flags. Only __GFP_ACCOUNT allowed.
7701 * @nid: node to allocate from
7702 * @order: allocation order size
7703 *
7704 * Allocates pages of a given order from the given node. This is safe to
7705 * call from any context (from atomic, NMI, and also reentrant
7706 * allocator -> tracepoint -> alloc_pages_nolock_noprof).
7707 * Allocation is best effort and expected to fail easily, so nobody should
7708 * rely on its success. Failures are not reported via warn_alloc().
7709 * See the always-fail conditions below.
7710 *
7711 * Return: allocated page or NULL on failure. NULL does not mean EBUSY or EAGAIN.
7712 * It means ENOMEM. There is no reason to call it again and expect !NULL.
7713 */
7714 struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
7715 {
7716 struct page *page;
7717
7718 page = alloc_frozen_pages_nolock_noprof(gfp_flags, nid, order);
7719 if (page)
7720 set_page_refcounted(page);
7721 return page;
7722 }
7723 EXPORT_SYMBOL_GPL(alloc_pages_nolock_noprof);
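
/*
* Usage sketch (hypothetical caller in a context where neither sleeping nor
* spinning on zone->lock is allowed):
*
*    struct page *page = alloc_pages_nolock(__GFP_ACCOUNT, NUMA_NO_NODE, 0);
*
*    if (!page)
*        return -ENOMEM;    (best effort: do not retry)
*/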
7724