xref: /linux/mm/internal.h (revision beb69e81724634063b9dbae4bc79e2e011fdeeb1)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* internal.h: mm/ internal definitions
3  *
4  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 #ifndef __MM_INTERNAL_H
8 #define __MM_INTERNAL_H
9 
10 #include <linux/fs.h>
11 #include <linux/khugepaged.h>
12 #include <linux/mm.h>
13 #include <linux/mm_inline.h>
14 #include <linux/pagemap.h>
15 #include <linux/pagewalk.h>
16 #include <linux/rmap.h>
17 #include <linux/swap.h>
18 #include <linux/swapops.h>
19 #include <linux/swap_cgroup.h>
20 #include <linux/tracepoint-defs.h>
21 
22 /* Internal core VMA manipulation functions. */
23 #include "vma.h"
24 
25 struct folio_batch;
26 
27 /*
28  * Maintains state across a page table move. The operation assumes both source
29  * and destination VMAs already exist and are specified by the user.
30  *
31  * Partial moves are permitted, but the old and new ranges must both reside
32  * within a VMA.
33  *
34  * The mmap lock must be held in write mode, and VMA write locks must be held
35  * on any VMA that is visible.
36  *
37  * Use the PAGETABLE_MOVE() macro to initialise this struct.
38  *
39  * The old_addr and new_addr fields are updated as the page table move is
40  * executed.
41  *
42  * NOTE: The page table move is effected by reading from [old_addr, old_end),
43  * and old_addr may be updated for better page table alignment, so len_in
44  * represents the length of the range being copied as specified by the user.
45  */
46 struct pagetable_move_control {
47 	struct vm_area_struct *old; /* Source VMA. */
48 	struct vm_area_struct *new; /* Destination VMA. */
49 	unsigned long old_addr; /* Address from which the move begins. */
50 	unsigned long old_end; /* Exclusive address at which old range ends. */
51 	unsigned long new_addr; /* Address to move page tables to. */
52 	unsigned long len_in; /* Bytes to remap specified by user. */
53 
54 	bool need_rmap_locks; /* Do rmap locks need to be taken? */
55 	bool for_stack; /* Is this an early temp stack being moved? */
56 };
57 
58 #define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
59 	struct pagetable_move_control name = {				\
60 		.old = old_,						\
61 		.new = new_,						\
62 		.old_addr = old_addr_,					\
63 		.old_end = (old_addr_) + (len_),			\
64 		.new_addr = new_addr_,					\
65 		.len_in = len_,						\
66 	}
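
/*
 * A minimal initialisation sketch (hypothetical caller, for illustration
 * only; old_vma, new_vma, old_addr, new_addr and len stand in for values a
 * mremap() implementation would already hold):
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 *
 *	moved = move_page_tables(&pmc);
 */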
67 
68 /*
69  * The set of flags that only affect watermark checking and reclaim
70  * behaviour. This is used by the MM to obey the caller constraints
71  * about IO, FS and watermark checking while ignoring placement
72  * hints such as HIGHMEM usage.
73  */
74 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
75 			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
76 			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
77 			__GFP_NOLOCKDEP)
78 
79 /* The GFP flags allowed during early boot */
80 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
81 
82 /* Control allocation cpuset and node placement constraints */
83 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
84 
85 /* Do not use these with a slab allocator */
86 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
87 
88 /*
89  * Unlike WARN_ON_ONCE(), no warning will be issued
90  * when __GFP_NOWARN is specified.
91  */
92 #define WARN_ON_ONCE_GFP(cond, gfp)	({				\
93 	static bool __section(".data..once") __warned;			\
94 	int __ret_warn_once = !!(cond);					\
95 									\
96 	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
97 		__warned = true;					\
98 		WARN_ON(1);						\
99 	}								\
100 	unlikely(__ret_warn_once);					\
101 })
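
/*
 * Typical use (sketch, mirroring the allocator's own callers):
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 *
 * The warning fires at most once, and never when __GFP_NOWARN is passed.
 */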
102 
103 void page_writeback_init(void);
104 
105 /*
106  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
107  * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
108  * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
109  * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
110  */
111 #define ENTIRELY_MAPPED		0x800000
112 #define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)
113 
114 /*
115  * Flags passed to __show_mem() and show_free_areas() to suppress output in
116  * various contexts.
117  */
118 #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
119 
120 /*
121  * How many individual pages have an elevated _mapcount.  Excludes
122  * the folio's entire_mapcount.
123  *
124  * Don't use this function outside of debugging code.
125  */
126 static inline int folio_nr_pages_mapped(const struct folio *folio)
127 {
128 	if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
129 		return -1;
130 	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
131 }
132 
133 /*
134  * Retrieve the first entry of a folio based on a provided entry within the
135  * folio. We cannot rely on folio->swap as there is no guarantee that it has
136  * been initialized. Used for calling arch_swap_restore().
137  */
138 static inline swp_entry_t folio_swap(swp_entry_t entry,
139 		const struct folio *folio)
140 {
141 	swp_entry_t swap = {
142 		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
143 	};
144 
145 	return swap;
146 }
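
/*
 * Illustrative arithmetic (hypothetical values): for a 16-page folio, an
 * entry with val 0x1234 yields a first-entry val of
 * ALIGN_DOWN(0x1234, 16) == 0x1230.
 */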
147 
148 static inline void *folio_raw_mapping(const struct folio *folio)
149 {
150 	unsigned long mapping = (unsigned long)folio->mapping;
151 
152 	return (void *)(mapping & ~FOLIO_MAPPING_FLAGS);
153 }
154 
155 /*
156  * This is a file-backed mapping, and is about to be memory mapped - invoke its
157  * mmap hook and safely handle error conditions. On error, the VMA's hooks
158  * are reset to dummy ops so that no further hooks are invoked.
159  *
160  * @file: File which backs the mapping.
161  * @vma:  VMA which we are mapping.
162  *
163  * Returns: 0 if success, error otherwise.
164  */
165 static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
166 {
167 	int err = call_mmap(file, vma);
168 
169 	if (likely(!err))
170 		return 0;
171 
172 	/*
173 	 * OK, we tried to call the file hook for mmap(), but an error
174 	 * arose. The mapping is in an inconsistent state and we must not invoke
175 	 * any further hooks on it.
176 	 */
177 	vma->vm_ops = &vma_dummy_vm_ops;
178 
179 	return err;
180 }
181 
182 /*
183  * If the VMA has a close hook then close it, and since closing it might leave
184  * it in an inconsistent state which makes the use of any hooks suspect, clear
185  * them down by installing dummy empty hooks.
186  */
187 static inline void vma_close(struct vm_area_struct *vma)
188 {
189 	if (vma->vm_ops && vma->vm_ops->close) {
190 		vma->vm_ops->close(vma);
191 
192 		/*
193 		 * The mapping is in an inconsistent state, and no further hooks
194 		 * may be invoked upon it.
195 		 */
196 		vma->vm_ops = &vma_dummy_vm_ops;
197 	}
198 }
199 
200 #ifdef CONFIG_MMU
201 
202 /* Flags for folio_pte_batch(). */
203 typedef int __bitwise fpb_t;
204 
205 /* Compare PTEs respecting the dirty bit. */
206 #define FPB_RESPECT_DIRTY		((__force fpb_t)BIT(0))
207 
208 /* Compare PTEs respecting the soft-dirty bit. */
209 #define FPB_RESPECT_SOFT_DIRTY		((__force fpb_t)BIT(1))
210 
211 /*
212  * Merge PTE write bits: if any PTE in the batch is writable, modify the
213  * PTE at @ptentp to be writable.
214  */
215 #define FPB_MERGE_WRITE			((__force fpb_t)BIT(2))
216 
217 /*
218  * Merge PTE young and dirty bits: if any PTE in the batch is young or dirty,
219  * modify the PTE at @ptentp to be young or dirty, respectively.
220  */
221 #define FPB_MERGE_YOUNG_DIRTY		((__force fpb_t)BIT(3))
222 
223 static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
224 {
225 	if (!(flags & FPB_RESPECT_DIRTY))
226 		pte = pte_mkclean(pte);
227 	if (likely(!(flags & FPB_RESPECT_SOFT_DIRTY)))
228 		pte = pte_clear_soft_dirty(pte);
229 	return pte_wrprotect(pte_mkold(pte));
230 }
231 
232 /**
233  * folio_pte_batch_flags - detect a PTE batch for a large folio
234  * @folio: The large folio to detect a PTE batch for.
235  * @vma: The VMA. Only relevant with FPB_MERGE_WRITE, otherwise can be NULL.
236  * @ptep: Page table pointer for the first entry.
237  * @ptentp: Pointer to a COPY of the first page table entry whose flags this
238  *	    function updates based on @flags if appropriate.
239  * @max_nr: The maximum number of table entries to consider.
240  * @flags: Flags to modify the PTE batch semantics.
241  *
242  * Detect a PTE batch: consecutive (present) PTEs that map consecutive
243  * pages of the same large folio in a single VMA and a single page table.
244  *
245  * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
246  * the accessed bit, writable bit, dirty bit (unless FPB_RESPECT_DIRTY is set)
247  * and soft-dirty bit (unless FPB_RESPECT_SOFT_DIRTY is set).
248  *
249  * @ptep must map any page of the folio. max_nr must be at least one and
250  * must be limited by the caller so scanning cannot exceed a single VMA and
251  * a single page table.
252  *
253  * Depending on the FPB_MERGE_* flags, the pte stored at @ptentp will
254  * be updated: it's crucial that a pointer to a COPY of the first
255  * page table entry, obtained through ptep_get(), is provided as @ptentp.
256  *
257  * This function will be inlined to optimize based on the input parameters;
258  * consider using folio_pte_batch() instead if applicable.
259  *
260  * Return: the number of table entries in the batch.
261  */
262 static inline unsigned int folio_pte_batch_flags(struct folio *folio,
263 		struct vm_area_struct *vma, pte_t *ptep, pte_t *ptentp,
264 		unsigned int max_nr, fpb_t flags)
265 {
266 	bool any_writable = false, any_young = false, any_dirty = false;
267 	pte_t expected_pte, pte = *ptentp;
268 	unsigned int nr, cur_nr;
269 
270 	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
271 	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
272 	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
273 	/*
274 	 * Ensure this is a pointer to a copy not a pointer into a page table.
275 	 * If this is a stack value, it won't be a valid virtual address, but
276 	 * that's fine because it also cannot be pointing into the page table.
277 	 */
278 	VM_WARN_ON(virt_addr_valid(ptentp) && PageTable(virt_to_page(ptentp)));
279 
280 	/* Limit max_nr to the actual remaining PFNs in the folio we could batch. */
281 	max_nr = min_t(unsigned long, max_nr,
282 		       folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));
283 
284 	nr = pte_batch_hint(ptep, pte);
285 	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
286 	ptep = ptep + nr;
287 
288 	while (nr < max_nr) {
289 		pte = ptep_get(ptep);
290 
291 		if (!pte_same(__pte_batch_clear_ignored(pte, flags), expected_pte))
292 			break;
293 
294 		if (flags & FPB_MERGE_WRITE)
295 			any_writable |= pte_write(pte);
296 		if (flags & FPB_MERGE_YOUNG_DIRTY) {
297 			any_young |= pte_young(pte);
298 			any_dirty |= pte_dirty(pte);
299 		}
300 
301 		cur_nr = pte_batch_hint(ptep, pte);
302 		expected_pte = pte_advance_pfn(expected_pte, cur_nr);
303 		ptep += cur_nr;
304 		nr += cur_nr;
305 	}
306 
307 	if (any_writable)
308 		*ptentp = pte_mkwrite(*ptentp, vma);
309 	if (any_young)
310 		*ptentp = pte_mkyoung(*ptentp);
311 	if (any_dirty)
312 		*ptentp = pte_mkdirty(*ptentp);
313 
314 	return min(nr, max_nr);
315 }
316 
317 unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
318 		unsigned int max_nr);
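
/*
 * A minimal sketch of the batching pattern (hypothetical caller, for
 * illustration only; assumes the PTE lock is held and max_nr is clamped to
 * the current VMA and page table):
 *
 *	pte_t pte = ptep_get(ptep);
 *	unsigned int nr = folio_pte_batch(folio, ptep, pte, max_nr);
 *
 * All nr consecutive PTEs then map consecutive pages of the same large
 * folio and can be processed as one unit.
 */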
319 
320 /**
321  * pte_move_swp_offset - Move the swap entry offset field of a swap pte
322  *	 forward or backward by delta
323  * @pte: The initial pte state; is_swap_pte(pte) must be true and
324  *	 non_swap_entry() must be false.
325  * @delta: The direction and the offset we are moving; forward if delta
326  *	 is positive; backward if delta is negative
327  *
328  * Moves the swap offset, while maintaining all other fields, including
329  * swap type, and any swp pte bits. The resulting pte is returned.
330  */
331 static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
332 {
333 	swp_entry_t entry = pte_to_swp_entry(pte);
334 	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
335 						   (swp_offset(entry) + delta)));
336 
337 	if (pte_swp_soft_dirty(pte))
338 		new = pte_swp_mksoft_dirty(new);
339 	if (pte_swp_exclusive(pte))
340 		new = pte_swp_mkexclusive(new);
341 	if (pte_swp_uffd_wp(pte))
342 		new = pte_swp_mkuffd_wp(new);
343 
344 	return new;
345 }
346 
347 
348 /**
349  * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
350  * @pte: The initial pte state; is_swap_pte(pte) must be true and
351  *	 non_swap_entry() must be false.
352  *
353  * Increments the swap offset, while maintaining all other fields, including
354  * swap type, and any swp pte bits. The resulting pte is returned.
355  */
356 static inline pte_t pte_next_swp_offset(pte_t pte)
357 {
358 	return pte_move_swp_offset(pte, 1);
359 }
360 
361 /**
362  * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
363  * @start_ptep: Page table pointer for the first entry.
364  * @max_nr: The maximum number of table entries to consider.
365  * @pte: Page table entry for the first entry.
366  *
367  * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
368  * containing swap entries all with consecutive offsets and targeting the same
369  * swap type, all with matching swp pte bits.
370  *
371  * max_nr must be at least one and must be limited by the caller so scanning
372  * cannot exceed a single page table.
373  *
374  * Return: the number of table entries in the batch.
375  */
376 static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
377 {
378 	pte_t expected_pte = pte_next_swp_offset(pte);
379 	const pte_t *end_ptep = start_ptep + max_nr;
380 	swp_entry_t entry = pte_to_swp_entry(pte);
381 	pte_t *ptep = start_ptep + 1;
382 	unsigned short cgroup_id;
383 
384 	VM_WARN_ON(max_nr < 1);
385 	VM_WARN_ON(!is_swap_pte(pte));
386 	VM_WARN_ON(non_swap_entry(entry));
387 
388 	cgroup_id = lookup_swap_cgroup_id(entry);
389 	while (ptep < end_ptep) {
390 		pte = ptep_get(ptep);
391 
392 		if (!pte_same(pte, expected_pte))
393 			break;
394 		if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
395 			break;
396 		expected_pte = pte_next_swp_offset(expected_pte);
397 		ptep++;
398 	}
399 
400 	return ptep - start_ptep;
401 }
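
/*
 * Illustrative example (hypothetical values): if the PTE at start_ptep
 * holds swap offset 42 and the next PTEs hold offsets 43 and 44 with
 * matching swp pte bits and cgroup id, swap_pte_batch(start_ptep, 8, pte)
 * returns 3.
 */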
402 #endif /* CONFIG_MMU */
403 
404 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
405 						int nr_throttled);
406 static inline void acct_reclaim_writeback(struct folio *folio)
407 {
408 	pg_data_t *pgdat = folio_pgdat(folio);
409 	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
410 
411 	if (nr_throttled)
412 		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
413 }
414 
415 static inline void wake_throttle_isolated(pg_data_t *pgdat)
416 {
417 	wait_queue_head_t *wqh;
418 
419 	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
420 	if (waitqueue_active(wqh))
421 		wake_up(wqh);
422 }
423 
424 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
425 static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
426 {
427 	vm_fault_t ret = __vmf_anon_prepare(vmf);
428 
429 	if (unlikely(ret & VM_FAULT_RETRY))
430 		vma_end_read(vmf->vma);
431 	return ret;
432 }
433 
434 vm_fault_t do_swap_page(struct vm_fault *vmf);
435 void folio_rotate_reclaimable(struct folio *folio);
436 bool __folio_end_writeback(struct folio *folio);
437 void deactivate_file_folio(struct folio *folio);
438 void folio_activate(struct folio *folio);
439 
440 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
441 		   struct vm_area_struct *start_vma, unsigned long floor,
442 		   unsigned long ceiling, bool mm_wr_locked);
443 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
444 
445 struct zap_details;
446 void unmap_page_range(struct mmu_gather *tlb,
447 			     struct vm_area_struct *vma,
448 			     unsigned long addr, unsigned long end,
449 			     struct zap_details *details);
450 void zap_page_range_single_batched(struct mmu_gather *tlb,
451 		struct vm_area_struct *vma, unsigned long addr,
452 		unsigned long size, struct zap_details *details);
453 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
454 			   gfp_t gfp);
455 
456 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *);
457 void force_page_cache_ra(struct readahead_control *, unsigned long nr);
458 static inline void force_page_cache_readahead(struct address_space *mapping,
459 		struct file *file, pgoff_t index, unsigned long nr_to_read)
460 {
461 	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
462 	force_page_cache_ra(&ractl, nr_to_read);
463 }
464 
465 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
466 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
467 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
468 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
469 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
470 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
471 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
472 		loff_t end);
473 long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
474 unsigned long mapping_try_invalidate(struct address_space *mapping,
475 		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
476 
477 /**
478  * folio_evictable - Test whether a folio is evictable.
479  * @folio: The folio to test.
480  *
481  * Test whether @folio is evictable -- i.e., should be placed on
482  * active/inactive lists vs unevictable list.
483  *
484  * Reasons folio might not be evictable:
485  * 1. folio's mapping marked unevictable
486  * 2. One of the pages in the folio is part of an mlocked VMA
487  */
488 static inline bool folio_evictable(struct folio *folio)
489 {
490 	bool ret;
491 
492 	/* Prevent address_space of inode and swap cache from being freed */
493 	rcu_read_lock();
494 	ret = !mapping_unevictable(folio_mapping(folio)) &&
495 			!folio_test_mlocked(folio);
496 	rcu_read_unlock();
497 	return ret;
498 }
499 
500 /*
501  * Turn a non-refcounted page (->_refcount == 0) into refcounted with
502  * a count of one.
503  */
504 static inline void set_page_refcounted(struct page *page)
505 {
506 	VM_BUG_ON_PAGE(PageTail(page), page);
507 	VM_BUG_ON_PAGE(page_ref_count(page), page);
508 	set_page_count(page, 1);
509 }
510 
511 /*
512  * Return true if a folio needs ->release_folio() calling upon it.
513  */
514 static inline bool folio_needs_release(struct folio *folio)
515 {
516 	struct address_space *mapping = folio_mapping(folio);
517 
518 	return folio_has_private(folio) ||
519 		(mapping && mapping_release_always(mapping));
520 }
521 
522 extern unsigned long highest_memmap_pfn;
523 
524 /*
525  * Maximum number of reclaim retries without progress before the OOM
526  * killer is considered the only way forward.
527  */
528 #define MAX_RECLAIM_RETRIES 16
529 
530 /*
531  * in mm/vmscan.c:
532  */
533 bool folio_isolate_lru(struct folio *folio);
534 void folio_putback_lru(struct folio *folio);
535 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
536 #ifdef CONFIG_NUMA
537 int user_proactive_reclaim(char *buf,
538 			   struct mem_cgroup *memcg, pg_data_t *pgdat);
539 #else
540 static inline int user_proactive_reclaim(char *buf,
541 			   struct mem_cgroup *memcg, pg_data_t *pgdat)
542 {
543 	return 0;
544 }
545 #endif
546 
547 /*
548  * in mm/rmap.c:
549  */
550 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
551 
552 /*
553  * in mm/page_alloc.c
554  */
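/* Convert a number of pages to an amount in KiB. */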
555 #define K(x) ((x) << (PAGE_SHIFT-10))
556 
557 extern char * const zone_names[MAX_NR_ZONES];
558 
559 /* perform sanity checks on struct pages being allocated or freed */
560 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
561 
562 extern int min_free_kbytes;
563 extern int defrag_mode;
564 
565 void setup_per_zone_wmarks(void);
566 void calculate_min_free_kbytes(void);
567 int __meminit init_per_zone_wmark_min(void);
568 void page_alloc_sysctl_init(void);
569 
570 /*
571  * Structure for holding the mostly immutable allocation parameters passed
572  * between functions involved in allocations, including the alloc_pages*
573  * family of functions.
574  *
575  * nodemask, migratetype and highest_zoneidx are initialized only once in
576  * __alloc_pages() and then never change.
577  *
578  * zonelist, preferred_zone and highest_zoneidx are set first in
579  * __alloc_pages() for the fast path, and might be later changed
580  * in __alloc_pages_slowpath(). All other functions pass the whole structure
581  * by a const pointer.
582  */
583 struct alloc_context {
584 	struct zonelist *zonelist;
585 	nodemask_t *nodemask;
586 	struct zoneref *preferred_zoneref;
587 	int migratetype;
588 
589 	/*
590 	 * highest_zoneidx represents the highest usable zone index of
591 	 * the allocation request. Due to the nature of the zones,
592 	 * memory in zones lower than highest_zoneidx will be
593 	 * protected by lowmem_reserve[highest_zoneidx].
594 	 *
595 	 * highest_zoneidx is also used by reclaim/compaction to limit
596 	 * the target zone, since zones higher than this index cannot
597 	 * be used for this allocation request.
598 	 */
599 	enum zone_type highest_zoneidx;
600 	bool spread_dirty_pages;
601 };
602 
603 /*
604  * This function returns the order of a free page in the buddy system. In
605  * general, page_zone(page)->lock must be held by the caller to prevent the
606  * page from being allocated in parallel and returning garbage as the order.
607  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
608  * page cannot be allocated or merged in parallel. Alternatively, it must
609  * handle invalid values gracefully, and use buddy_order_unsafe() below.
610  */
611 static inline unsigned int buddy_order(struct page *page)
612 {
613 	/* PageBuddy() must be checked by the caller */
614 	return page_private(page);
615 }
616 
617 /*
618  * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
619  * PageBuddy() should be checked first by the caller to minimize race window,
620  * and invalid values must be handled gracefully.
621  *
622  * READ_ONCE is used so that if the caller assigns the result into a local
623  * variable and e.g. tests it for valid range before using, the compiler cannot
624  * decide to remove the variable and inline the page_private(page) multiple
625  * times, potentially observing different values in the tests and the actual
626  * use of the result.
627  */
628 #define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
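
/*
 * Sketch of the intended pattern (hypothetical caller, for illustration
 * only):
 *
 *	unsigned int order = buddy_order_unsafe(page);
 *
 *	if (order < NR_PAGE_ORDERS)
 *		do_something(order);
 *
 * Thanks to READ_ONCE(), the value tested is the value used, even though
 * the page may change under us.
 */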
629 
630 /*
631  * This function checks whether a page is free && is the buddy of @page;
632  * we can coalesce a page and its buddy if
633  * (a) the buddy is not in a hole (check before calling!) &&
634  * (b) the buddy is in the buddy system &&
635  * (c) a page and its buddy have the same order &&
636  * (d) a page and its buddy are in the same zone.
637  *
638  * For recording whether a page is in the buddy system, we set PageBuddy.
639  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
640  *
641  * For recording page's order, we use page_private(page).
642  */
643 static inline bool page_is_buddy(struct page *page, struct page *buddy,
644 				 unsigned int order)
645 {
646 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
647 		return false;
648 
649 	if (buddy_order(buddy) != order)
650 		return false;
651 
652 	/*
653 	 * zone check is done late to avoid uselessly calculating
654 	 * zone/node ids for pages that could never merge.
655 	 */
656 	if (page_zone_id(page) != page_zone_id(buddy))
657 		return false;
658 
659 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
660 
661 	return true;
662 }
663 
664 /*
665  * Locate the struct page for both the matching buddy in our
666  * pair (buddy1) and the combined O(n+1) page they form (page).
667  *
668  * 1) Any buddy B1 will have an order O twin B2 which satisfies
669  * the following equation:
670  *     B2 = B1 ^ (1 << O)
671  * For example, if the starting buddy (buddy2) is #8 its order
672  * 1 buddy is #10:
673  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
674  *
675  * 2) Any buddy B will have an order O+1 parent P which
676  * satisfies the following equation:
677  *     P = B & ~(1 << O)
678  *
679  * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
680  */
681 static inline unsigned long
682 __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
683 {
684 	return page_pfn ^ (1 << order);
685 }
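
/*
 * Worked example of the equations above (illustrative values): for pfn 8 at
 * order 1, __find_buddy_pfn(8, 1) == 8 ^ 2 == 10, and the order-2 parent of
 * both 8 and 10 is 8 & ~2 == 10 & ~2 == 8.
 */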
686 
687 /*
688  * Find the buddy of @page and validate it.
689  * @page: The input page
690  * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
691  *       function is used in the performance-critical __free_one_page().
692  * @order: The order of the page
693  * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
694  *             page_to_pfn().
695  *
696  * The found buddy can be a non-PageBuddy page, be outside @page's zone, or
697  * have an order different from @page's. Validation is necessary before using it.
698  *
699  * Return: the found buddy page or NULL if not found.
700  */
701 static inline struct page *find_buddy_page_pfn(struct page *page,
702 			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
703 {
704 	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
705 	struct page *buddy;
706 
707 	buddy = page + (__buddy_pfn - pfn);
708 	if (buddy_pfn)
709 		*buddy_pfn = __buddy_pfn;
710 
711 	if (page_is_buddy(page, buddy, order))
712 		return buddy;
713 	return NULL;
714 }
715 
716 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
717 				unsigned long end_pfn, struct zone *zone);
718 
719 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
720 				unsigned long end_pfn, struct zone *zone)
721 {
722 	if (zone->contiguous)
723 		return pfn_to_page(start_pfn);
724 
725 	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
726 }
727 
728 void set_zone_contiguous(struct zone *zone);
729 bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
730 			   unsigned long nr_pages);
731 
732 static inline void clear_zone_contiguous(struct zone *zone)
733 {
734 	zone->contiguous = false;
735 }
736 
737 extern int __isolate_free_page(struct page *page, unsigned int order);
738 extern void __putback_isolated_page(struct page *page, unsigned int order,
739 				    int mt);
740 extern void memblock_free_pages(struct page *page, unsigned long pfn,
741 					unsigned int order);
742 extern void __free_pages_core(struct page *page, unsigned int order,
743 		enum meminit_context context);
744 
745 /*
746  * This will have no effect, other than possibly generating a warning, if the
747  * caller passes in a non-large folio.
748  */
749 static inline void folio_set_order(struct folio *folio, unsigned int order)
750 {
751 	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
752 		return;
753 
754 	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
755 #ifdef NR_PAGES_IN_LARGE_FOLIO
756 	folio->_nr_pages = 1U << order;
757 #endif
758 }
759 
760 bool __folio_unqueue_deferred_split(struct folio *folio);
761 static inline bool folio_unqueue_deferred_split(struct folio *folio)
762 {
763 	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
764 		return false;
765 
766 	/*
767 	 * At this point, there is no one trying to add the folio to
768 	 * deferred_list. If folio is not in deferred_list, it's safe
769 	 * to check without acquiring the split_queue_lock.
770 	 */
771 	if (data_race(list_empty(&folio->_deferred_list)))
772 		return false;
773 
774 	return __folio_unqueue_deferred_split(folio);
775 }
776 
777 static inline struct folio *page_rmappable_folio(struct page *page)
778 {
779 	struct folio *folio = (struct folio *)page;
780 
781 	if (folio && folio_test_large(folio))
782 		folio_set_large_rmappable(folio);
783 	return folio;
784 }
785 
786 static inline void prep_compound_head(struct page *page, unsigned int order)
787 {
788 	struct folio *folio = (struct folio *)page;
789 
790 	folio_set_order(folio, order);
791 	atomic_set(&folio->_large_mapcount, -1);
792 	if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
793 		atomic_set(&folio->_nr_pages_mapped, 0);
794 	if (IS_ENABLED(CONFIG_MM_ID)) {
795 		folio->_mm_ids = 0;
796 		folio->_mm_id_mapcount[0] = -1;
797 		folio->_mm_id_mapcount[1] = -1;
798 	}
799 	if (IS_ENABLED(CONFIG_64BIT) || order > 1) {
800 		atomic_set(&folio->_pincount, 0);
801 		atomic_set(&folio->_entire_mapcount, -1);
802 	}
803 	if (order > 1)
804 		INIT_LIST_HEAD(&folio->_deferred_list);
805 }
806 
807 static inline void prep_compound_tail(struct page *head, int tail_idx)
808 {
809 	struct page *p = head + tail_idx;
810 
811 	p->mapping = TAIL_MAPPING;
812 	set_compound_head(p, head);
813 	set_page_private(p, 0);
814 }
815 
816 void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
817 extern bool free_pages_prepare(struct page *page, unsigned int order);
818 
819 extern int user_min_free_kbytes;
820 
821 struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
822 		nodemask_t *);
823 #define __alloc_frozen_pages(...) \
824 	alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
825 void free_frozen_pages(struct page *page, unsigned int order);
826 void free_unref_folios(struct folio_batch *fbatch);
827 
828 #ifdef CONFIG_NUMA
829 struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
830 #else
831 static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
832 {
833 	return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
834 }
835 #endif
836 
837 #define alloc_frozen_pages(...) \
838 	alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
839 
840 extern void zone_pcp_reset(struct zone *zone);
841 extern void zone_pcp_disable(struct zone *zone);
842 extern void zone_pcp_enable(struct zone *zone);
843 extern void zone_pcp_init(struct zone *zone);
844 
845 extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
846 			  phys_addr_t min_addr,
847 			  int nid, bool exact_nid);
848 
849 void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
850 		unsigned long, enum meminit_context, struct vmem_altmap *, int,
851 		bool);
852 
853 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
854 
855 /*
856  * in mm/compaction.c
857  */
858 /*
859  * compact_control is used to track pages being migrated and the free pages
860  * they are being migrated to during memory compaction. The free_pfn starts
861  * at the end of a zone and migrate_pfn begins at the start. Movable pages
862  * are moved to the end of a zone during a compaction run and the run
863  * completes when free_pfn <= migrate_pfn.
864  */
865 struct compact_control {
866 	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
867 	struct list_head migratepages;	/* List of pages being migrated */
868 	unsigned int nr_freepages;	/* Number of isolated free pages */
869 	unsigned int nr_migratepages;	/* Number of pages to migrate */
870 	unsigned long free_pfn;		/* isolate_freepages search base */
871 	/*
872 	 * Acts as an in/out parameter to page isolation for migration.
873 	 * isolate_migratepages uses it as a search base.
874 	 * isolate_migratepages_block will update the value to the next pfn
875 	 * after the last isolated one.
876 	 */
877 	unsigned long migrate_pfn;
878 	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
879 	struct zone *zone;
880 	unsigned long total_migrate_scanned;
881 	unsigned long total_free_scanned;
882 	unsigned short fast_search_fail;/* failures to use free list searches */
883 	short search_order;		/* order to start a fast search at */
884 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
885 	int order;			/* order a direct compactor needs */
886 	int migratetype;		/* migratetype of direct compactor */
887 	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
888 	const int highest_zoneidx;	/* zone index of a direct compactor */
889 	enum migrate_mode mode;		/* Async or sync migration mode */
890 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
891 	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
892 	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
893 	bool direct_compaction;		/* False from kcompactd or /proc/... */
894 	bool proactive_compaction;	/* kcompactd proactive compaction */
895 	bool whole_zone;		/* Whole zone should/has been scanned */
896 	bool contended;			/* Signal lock contention */
897 	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
898 					 * when there are potentially transient
899 					 * isolation or migration failures to
900 					 * ensure forward progress.
901 					 */
902 	bool alloc_contig;		/* alloc_contig_range allocation */
903 };
904 
905 /*
906  * Used in direct compaction when a page should be taken from the freelists
907  * immediately when one is created during the free path.
908  */
909 struct capture_control {
910 	struct compact_control *cc;
911 	struct page *page;
912 };
913 
914 unsigned long
915 isolate_freepages_range(struct compact_control *cc,
916 			unsigned long start_pfn, unsigned long end_pfn);
917 int
918 isolate_migratepages_range(struct compact_control *cc,
919 			   unsigned long low_pfn, unsigned long end_pfn);
920 
921 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
922 void init_cma_reserved_pageblock(struct page *page);
923 
924 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
925 
926 struct cma;
927 
928 #ifdef CONFIG_CMA
929 void *cma_reserve_early(struct cma *cma, unsigned long size);
930 void init_cma_pageblock(struct page *page);
931 #else
932 static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
933 {
934 	return NULL;
935 }
936 static inline void init_cma_pageblock(struct page *page)
937 {
938 }
939 #endif
940 
941 
942 int find_suitable_fallback(struct free_area *area, unsigned int order,
943 			   int migratetype, bool claimable);
944 
945 static inline bool free_area_empty(struct free_area *area, int migratetype)
946 {
947 	return list_empty(&area->free_list[migratetype]);
948 }
949 
950 /* mm/util.c */
951 struct anon_vma *folio_anon_vma(const struct folio *folio);
952 
953 #ifdef CONFIG_MMU
954 void unmap_mapping_folio(struct folio *folio);
955 extern long populate_vma_page_range(struct vm_area_struct *vma,
956 		unsigned long start, unsigned long end, int *locked);
957 extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
958 		unsigned long end, bool write, int *locked);
959 extern bool mlock_future_ok(struct mm_struct *mm, vm_flags_t vm_flags,
960 			       unsigned long bytes);
961 
962 /*
963  * NOTE: This function can't tell whether the folio is "fully mapped" in the
964  * range.
965  * "Fully mapped" means all the pages of the folio are associated with the
966  * page table of the range, while this function just checks whether the folio
967  * range falls within [start, end). The caller needs to check the page
968  * table itself if it cares about the page table association.
969  *
970  * Typical usage (like mlock or madvise) is:
971  * The caller knows at least one page of the folio is associated with the
972  * page table of the VMA, and that [start, end) intersects the VMA range.
973  * To know whether the folio is fully associated with the range, it first
974  * calls this function to check whether the folio is within the range, then
975  * checks the page table to know whether the folio is fully mapped to it.
976  */
977 static inline bool
978 folio_within_range(struct folio *folio, struct vm_area_struct *vma,
979 		unsigned long start, unsigned long end)
980 {
981 	pgoff_t pgoff, addr;
982 	unsigned long vma_pglen = vma_pages(vma);
983 
984 	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
985 	if (start > end)
986 		return false;
987 
988 	if (start < vma->vm_start)
989 		start = vma->vm_start;
990 
991 	if (end > vma->vm_end)
992 		end = vma->vm_end;
993 
994 	pgoff = folio_pgoff(folio);
995 
996 	/* if folio start address is not in vma range */
997 	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
998 		return false;
999 
1000 	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1001 
1002 	return !(addr < start || end - addr < folio_size(folio));
1003 }
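
/*
 * Sketch of the two-step check described in the NOTE above (hypothetical
 * caller, for illustration only; check_ptes_fully_mapped() stands in for
 * the caller's own page table walk):
 *
 *	if (!folio_within_range(folio, vma, start, end))
 *		return false;
 *	return check_ptes_fully_mapped(folio, vma);
 */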
1004 
1005 static inline bool
1006 folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
1007 {
1008 	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
1009 }
1010 
1011 /*
1012  * mlock_vma_folio() and munlock_vma_folio():
1013  * should be called with vma's mmap_lock held for read or write,
1014  * under page table lock for the pte/pmd being added or removed.
1015  *
1016  * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
1017  * the end of folio_remove_rmap_*(); but new anon folios are managed by
1018  * folio_add_lru_vma() calling mlock_new_folio().
1019  */
1020 void mlock_folio(struct folio *folio);
1021 static inline void mlock_vma_folio(struct folio *folio,
1022 				struct vm_area_struct *vma)
1023 {
1024 	/*
1025 	 * The VM_SPECIAL check here serves two purposes.
1026 	 * 1) VM_IO check prevents migration from double-counting during mlock.
1027 	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
1028 	 *    is never left set on a VM_SPECIAL vma, there is an interval while
1029 	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
1030 	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
1031 	 */
1032 	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
1033 		mlock_folio(folio);
1034 }
1035 
1036 void munlock_folio(struct folio *folio);
1037 static inline void munlock_vma_folio(struct folio *folio,
1038 					struct vm_area_struct *vma)
1039 {
1040 	/*
1041 	 * Always munlock when this function is called. Ideally, we should
1042 	 * only munlock if some page of the folio is unmapped from the VMA,
1043 	 * leaving the folio not fully mapped to the VMA.
1044 	 *
1045 	 * But it's not easy to confirm that's the situation. So we always
1046 	 * munlock the folio and let page reclaim correct it if that's
1047 	 * wrong.
1048 	 */
1049 	if (unlikely(vma->vm_flags & VM_LOCKED))
1050 		munlock_folio(folio);
1051 }
1052 
1053 void mlock_new_folio(struct folio *folio);
1054 bool need_mlock_drain(int cpu);
1055 void mlock_drain_local(void);
1056 void mlock_drain_remote(int cpu);
1057 
1058 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
1059 
1060 /**
1061  * vma_address - Find the virtual address a page range is mapped at
1062  * @vma: The vma which maps this object.
1063  * @pgoff: The page offset within its object.
1064  * @nr_pages: The number of pages to consider.
1065  *
1066  * If any page in this range is mapped by this VMA, return the first address
1067  * where any of these pages appear.  Otherwise, return -EFAULT.
1068  */
1069 static inline unsigned long vma_address(const struct vm_area_struct *vma,
1070 		pgoff_t pgoff, unsigned long nr_pages)
1071 {
1072 	unsigned long address;
1073 
1074 	if (pgoff >= vma->vm_pgoff) {
1075 		address = vma->vm_start +
1076 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1077 		/* Check for address beyond vma (or wrapped through 0?) */
1078 		if (address < vma->vm_start || address >= vma->vm_end)
1079 			address = -EFAULT;
1080 	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
1081 		/* Test above avoids possibility of wrap to 0 on 32-bit */
1082 		address = vma->vm_start;
1083 	} else {
1084 		address = -EFAULT;
1085 	}
1086 	return address;
1087 }
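
/*
 * Worked example (illustrative values, 4kB pages): for a VMA with
 * vm_start == 0x10000 and vm_pgoff == 4, pgoff 6 maps at
 * 0x10000 + ((6 - 4) << PAGE_SHIFT) == 0x12000.
 */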
1088 
1089 /*
1090  * Then at what user virtual address will none of the range be found in vma?
1091  * Assumes that vma_address() already returned a good starting address.
1092  */
1093 static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
1094 {
1095 	struct vm_area_struct *vma = pvmw->vma;
1096 	pgoff_t pgoff;
1097 	unsigned long address;
1098 
1099 	/* Common case, plus ->pgoff is invalid for KSM */
1100 	if (pvmw->nr_pages == 1)
1101 		return pvmw->address + PAGE_SIZE;
1102 
1103 	pgoff = pvmw->pgoff + pvmw->nr_pages;
1104 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1105 	/* Check for address beyond vma (or wrapped through 0?) */
1106 	if (address < vma->vm_start || address > vma->vm_end)
1107 		address = vma->vm_end;
1108 	return address;
1109 }
1110 
1111 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
1112 						    struct file *fpin)
1113 {
1114 	int flags = vmf->flags;
1115 
1116 	if (fpin)
1117 		return fpin;
1118 
1119 	/*
1120 	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
1121 	 * anything, so we only pin the file and drop the mmap_lock if
1122 	 * FAULT_FLAG_ALLOW_RETRY is set and this is the first attempt.
1123 	 */
1124 	if (fault_flag_allow_retry_first(flags) &&
1125 	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
1126 		fpin = get_file(vmf->vma->vm_file);
1127 		release_fault_lock(vmf);
1128 	}
1129 	return fpin;
1130 }
1131 #else /* !CONFIG_MMU */
1132 static inline void unmap_mapping_folio(struct folio *folio) { }
1133 static inline void mlock_new_folio(struct folio *folio) { }
1134 static inline bool need_mlock_drain(int cpu) { return false; }
1135 static inline void mlock_drain_local(void) { }
1136 static inline void mlock_drain_remote(int cpu) { }
1137 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
1138 {
1139 }
1140 #endif /* !CONFIG_MMU */
1141 
1142 /* Memory initialisation debug and verification */
1143 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1144 DECLARE_STATIC_KEY_TRUE(deferred_pages);
1145 
1146 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
1147 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1148 
1149 void init_deferred_page(unsigned long pfn, int nid);
1150 
1151 enum mminit_level {
1152 	MMINIT_WARNING,
1153 	MMINIT_VERIFY,
1154 	MMINIT_TRACE
1155 };
1156 
1157 #ifdef CONFIG_DEBUG_MEMORY_INIT
1158 
1159 extern int mminit_loglevel;
1160 
1161 #define mminit_dprintk(level, prefix, fmt, arg...) \
1162 do { \
1163 	if (level < mminit_loglevel) { \
1164 		if (level <= MMINIT_WARNING) \
1165 			pr_warn("mminit::" prefix " " fmt, ##arg);	\
1166 		else \
1167 			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
1168 	} \
1169 } while (0)
1170 
1171 extern void mminit_verify_pageflags_layout(void);
1172 extern void mminit_verify_zonelist(void);
1173 #else
1174 
1175 static inline void mminit_dprintk(enum mminit_level level,
1176 				const char *prefix, const char *fmt, ...)
1177 {
1178 }
1179 
1180 static inline void mminit_verify_pageflags_layout(void)
1181 {
1182 }
1183 
1184 static inline void mminit_verify_zonelist(void)
1185 {
1186 }
1187 #endif /* CONFIG_DEBUG_MEMORY_INIT */
1188 
1189 #define NODE_RECLAIM_NOSCAN	-2
1190 #define NODE_RECLAIM_FULL	-1
1191 #define NODE_RECLAIM_SOME	0
1192 #define NODE_RECLAIM_SUCCESS	1
1193 
1194 #ifdef CONFIG_NUMA
1195 extern int node_reclaim_mode;
1196 
1197 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
1198 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
1199 #else
1200 #define node_reclaim_mode 0
1201 
1202 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
1203 				unsigned int order)
1204 {
1205 	return NODE_RECLAIM_NOSCAN;
1206 }
1207 static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
1208 {
1209 	return NUMA_NO_NODE;
1210 }
1211 #endif
1212 
1213 static inline bool node_reclaim_enabled(void)
1214 {
1215 	/* Is any node_reclaim_mode bit set? */
1216 	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
1217 }
1218 
1219 /*
1220  * mm/memory-failure.c
1221  */
1222 #ifdef CONFIG_MEMORY_FAILURE
1223 int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
1224 void shake_folio(struct folio *folio);
1225 extern int hwpoison_filter(struct page *p);
1226 
1227 extern u32 hwpoison_filter_dev_major;
1228 extern u32 hwpoison_filter_dev_minor;
1229 extern u64 hwpoison_filter_flags_mask;
1230 extern u64 hwpoison_filter_flags_value;
1231 extern u64 hwpoison_filter_memcg;
1232 extern u32 hwpoison_filter_enable;
1233 #define MAGIC_HWPOISON	0x48575053U	/* HWPS */
1234 void SetPageHWPoisonTakenOff(struct page *page);
1235 void ClearPageHWPoisonTakenOff(struct page *page);
1236 bool take_page_off_buddy(struct page *page);
1237 bool put_page_back_buddy(struct page *page);
1238 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
1239 void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
1240 		     struct vm_area_struct *vma, struct list_head *to_kill,
1241 		     unsigned long ksm_addr);
1242 unsigned long page_mapped_in_vma(const struct page *page,
1243 		struct vm_area_struct *vma);
1244 
1245 #else
1246 static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
1247 {
1248 	return -EBUSY;
1249 }
1250 #endif
1251 
1252 extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
1253         unsigned long, unsigned long,
1254         unsigned long, unsigned long);
1255 
1256 extern void set_pageblock_order(void);
1257 unsigned long reclaim_pages(struct list_head *folio_list);
1258 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1259 					    struct list_head *folio_list);
1260 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1261 #define ALLOC_WMARK_MIN		WMARK_MIN
1262 #define ALLOC_WMARK_LOW		WMARK_LOW
1263 #define ALLOC_WMARK_HIGH	WMARK_HIGH
1264 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1265 
1266 /* Mask to get the watermark bits */
1267 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1268 
1269 /*
1270  * Only MMU archs have async oom victim reclaim - aka the oom_reaper - so we
1271  * cannot assume that reduced access to memory reserves is sufficient for
1272  * !MMU.
1273  */
1274 #ifdef CONFIG_MMU
1275 #define ALLOC_OOM		0x08
1276 #else
1277 #define ALLOC_OOM		ALLOC_NO_WATERMARKS
1278 #endif
1279 
1280 #define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
1281 				       * to 25% of the min watermark or
1282 				       * 62.5% if __GFP_HIGH is set.
1283 				       */
1284 #define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
1285 				       * of the min watermark.
1286 				       */
1287 #define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
1288 #define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
1289 #ifdef CONFIG_ZONE_DMA32
1290 #define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
1291 #else
1292 #define ALLOC_NOFRAGMENT	  0x0
1293 #endif
1294 #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
1295 #define ALLOC_TRYLOCK		0x400 /* Only use spin_trylock in allocation path */
1296 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
1297 
1298 /* Flags that allow allocations below the min watermark. */
1299 #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
1300 
1301 enum ttu_flags;
1302 struct tlbflush_unmap_batch;
1303 
1304 
1305 /*
1306  * Only for MM-internal work items which do not depend on
1307  * any allocations or locks which might depend on allocations.
1308  */
1309 extern struct workqueue_struct *mm_percpu_wq;
1310 
1311 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1312 void try_to_unmap_flush(void);
1313 void try_to_unmap_flush_dirty(void);
1314 void flush_tlb_batched_pending(struct mm_struct *mm);
1315 #else
1316 static inline void try_to_unmap_flush(void)
1317 {
1318 }
1319 static inline void try_to_unmap_flush_dirty(void)
1320 {
1321 }
1322 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
1323 {
1324 }
1325 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
1326 
1327 extern const struct trace_print_flags pageflag_names[];
1328 extern const struct trace_print_flags vmaflag_names[];
1329 extern const struct trace_print_flags gfpflag_names[];
1330 
1331 static inline bool is_migrate_highatomic(enum migratetype migratetype)
1332 {
1333 	return migratetype == MIGRATE_HIGHATOMIC;
1334 }
1335 
1336 void setup_zone_pageset(struct zone *zone);
1337 
1338 struct migration_target_control {
1339 	int nid;		/* preferred node id */
1340 	nodemask_t *nmask;
1341 	gfp_t gfp_mask;
1342 	enum migrate_reason reason;
1343 };
1344 
1345 /*
1346  * mm/filemap.c
1347  */
1348 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
1349 			      struct folio *folio, loff_t fpos, size_t size);
1350 
1351 /*
1352  * mm/vmalloc.c
1353  */
1354 #ifdef CONFIG_MMU
1355 void __init vmalloc_init(void);
1356 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1357                 pgprot_t prot, struct page **pages, unsigned int page_shift);
1358 unsigned int get_vm_area_page_order(struct vm_struct *vm);
1359 #else
1360 static inline void vmalloc_init(void)
1361 {
1362 }
1363 
1364 static inline
1365 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1366                 pgprot_t prot, struct page **pages, unsigned int page_shift)
1367 {
1368 	return -EINVAL;
1369 }
1370 #endif
1371 
1372 int __must_check __vmap_pages_range_noflush(unsigned long addr,
1373 			       unsigned long end, pgprot_t prot,
1374 			       struct page **pages, unsigned int page_shift);
1375 
1376 void vunmap_range_noflush(unsigned long start, unsigned long end);
1377 
1378 void __vunmap_range_noflush(unsigned long start, unsigned long end);
1379 
1380 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
1381 		      unsigned long addr, int *flags, bool writable,
1382 		      int *last_cpupid);
1383 
1384 void free_zone_device_folio(struct folio *folio);
1385 int migrate_device_coherent_folio(struct folio *folio);
1386 
1387 struct vm_struct *__get_vm_area_node(unsigned long size,
1388 				     unsigned long align, unsigned long shift,
1389 				     vm_flags_t vm_flags, unsigned long start,
1390 				     unsigned long end, int node, gfp_t gfp_mask,
1391 				     const void *caller);
1392 
1393 /*
1394  * mm/gup.c
1395  */
1396 int __must_check try_grab_folio(struct folio *folio, int refs,
1397 				unsigned int flags);
1398 
1399 /*
1400  * mm/huge_memory.c
1401  */
1402 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1403 	       pud_t *pud, bool write);
1404 void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1405 	       pmd_t *pmd, bool write);
1406 
1407 /*
1408  * Parses a size string with memory suffixes into its page order. Useful for
1409  * parsing kernel parameters.
1410  */
1411 static inline int get_order_from_str(const char *size_str,
1412 				     unsigned long valid_orders)
1413 {
1414 	unsigned long size;
1415 	char *endptr;
1416 	int order;
1417 
1418 	size = memparse(size_str, &endptr);
1419 
1420 	if (!is_power_of_2(size))
1421 		return -EINVAL;
1422 	order = get_order(size);
1423 	if (BIT(order) & ~valid_orders)
1424 		return -EINVAL;
1425 
1426 	return order;
1427 }
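
/*
 * Example (illustrative, assuming 4kB pages): get_order_from_str("2M", mask)
 * returns 9 if BIT(9) is set in mask, since 2MB is 512 == 1 << 9 pages.
 */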
1428 
1429 enum {
1430 	/* mark page accessed */
1431 	FOLL_TOUCH = 1 << 16,
1432 	/* a retry, previous pass started an IO */
1433 	FOLL_TRIED = 1 << 17,
1434 	/* we are working on non-current tsk/mm */
1435 	FOLL_REMOTE = 1 << 18,
1436 	/* pages must be released via unpin_user_page */
1437 	FOLL_PIN = 1 << 19,
1438 	/* gup_fast: prevent fall-back to slow gup */
1439 	FOLL_FAST_ONLY = 1 << 20,
1440 	/* allow unlocking the mmap lock */
1441 	FOLL_UNLOCKABLE = 1 << 21,
1442 	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
1443 	FOLL_MADV_POPULATE = 1 << 22,
1444 };
1445 
1446 #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
1447 			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
1448 			    FOLL_MADV_POPULATE)
1449 
1450 /*
1451  * Indicates for which pages that are write-protected in the page table,
1452  * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
1453  * GUP pin will remain consistent with the pages mapped into the page tables
1454  * of the MM.
1455  *
1456  * Temporary unmapping of PageAnonExclusive() pages or clearing of
1457  * PageAnonExclusive() has to protect against concurrent GUP:
1458  * * Ordinary GUP: Using the PT lock
1459  * * GUP-fast and fork(): mm->write_protect_seq
1460  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1461  *    folio_try_share_anon_rmap_*()
1462  *
1463  * Must be called with the (sub)page that's actually referenced via the
1464  * page table entry, which might not necessarily be the head page for a
1465  * PTE-mapped THP.
1466  *
1467  * If the vma is NULL, we're coming from the GUP-fast path and might have
1468  * to fallback to the slow path just to lookup the vma.
1469  */
1470 static inline bool gup_must_unshare(struct vm_area_struct *vma,
1471 				    unsigned int flags, struct page *page)
1472 {
1473 	/*
1474 	 * FOLL_WRITE is implicitly handled correctly as the page table entry
1475 	 * has to be writable -- and if it references (part of) an anonymous
1476 	 * folio, that part is required to be marked exclusive.
1477 	 */
1478 	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
1479 		return false;
1480 	/*
1481 	 * Note: PageAnon(page) is stable until the page is actually getting
1482 	 * freed.
1483 	 */
1484 	if (!PageAnon(page)) {
1485 		/*
1486 		 * We only care about R/O long-term pinning: R/O short-term
1487 		 * pinning does not have the semantics to observe successive
1488 		 * changes through the process page tables.
1489 		 */
1490 		if (!(flags & FOLL_LONGTERM))
1491 			return false;
1492 
1493 		/* We really need the vma ... */
1494 		if (!vma)
1495 			return true;
1496 
1497 		/*
1498 		 * ... because we only care about writable private ("COW")
1499 		 * mappings where we have to break COW early.
1500 		 */
1501 		return is_cow_mapping(vma->vm_flags);
1502 	}
1503 
1504 	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
1505 	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
1506 		smp_rmb();
1507 
1508 	/*
1509 	 * Note that KSM pages cannot be exclusive, and consequently,
1510 	 * cannot get pinned.
1511 	 */
1512 	return !PageAnonExclusive(page);
1513 }
1514 
1515 extern bool mirrored_kernelcore;
1516 bool memblock_has_mirror(void);
1517 void memblock_free_all(void);
1518 
1519 static __always_inline void vma_set_range(struct vm_area_struct *vma,
1520 					  unsigned long start, unsigned long end,
1521 					  pgoff_t pgoff)
1522 {
1523 	vma->vm_start = start;
1524 	vma->vm_end = end;
1525 	vma->vm_pgoff = pgoff;
1526 }
1527 
1528 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1529 {
1530 	/*
1531 	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
1532 	 * enablements, because when soft-dirty is not compiled in,
1533 	 * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY)
1534 	 * will always be true.
1535 	 */
1536 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
1537 		return false;
1538 
1539 	/*
1540 	 * Soft-dirty is kind of special: its tracking is enabled when the
1541 	 * VM_SOFTDIRTY vma flag is NOT set.
1542 	 */
1543 	return !(vma->vm_flags & VM_SOFTDIRTY);
1544 }
1545 
1546 static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
1547 {
1548 	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
1549 }
1550 
1551 static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
1552 {
1553 	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
1554 }
1555 
1556 void __meminit __init_single_page(struct page *page, unsigned long pfn,
1557 				unsigned long zone, int nid);
1558 void __meminit __init_page_from_nid(unsigned long pfn, int nid);
1559 
1560 /* shrinker related functions */
1561 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
1562 			  int priority);
1563 
1564 #ifdef CONFIG_SHRINKER_DEBUG
1565 static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
1566 			struct shrinker *shrinker, const char *fmt, va_list ap)
1567 {
1568 	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
1569 
1570 	return shrinker->name ? 0 : -ENOMEM;
1571 }
1572 
1573 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1574 {
1575 	kfree_const(shrinker->name);
1576 	shrinker->name = NULL;
1577 }
1578 
1579 extern int shrinker_debugfs_add(struct shrinker *shrinker);
1580 extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1581 					      int *debugfs_id);
1582 extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1583 				    int debugfs_id);
1584 #else /* CONFIG_SHRINKER_DEBUG */
1585 static inline int shrinker_debugfs_add(struct shrinker *shrinker)
1586 {
1587 	return 0;
1588 }
1589 static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
1590 					      const char *fmt, va_list ap)
1591 {
1592 	return 0;
1593 }
1594 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1595 {
1596 }
1597 static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1598 						     int *debugfs_id)
1599 {
1600 	*debugfs_id = -1;
1601 	return NULL;
1602 }
1603 static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1604 					   int debugfs_id)
1605 {
1606 }
1607 #endif /* CONFIG_SHRINKER_DEBUG */
1608 
1609 /* Only track the nodes of mappings with shadow entries */
1610 void workingset_update_node(struct xa_node *node);
1611 extern struct list_lru shadow_nodes;
1612 #define mapping_set_update(xas, mapping) do {			\
1613 	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {	\
1614 		xas_set_update(xas, workingset_update_node);	\
1615 		xas_set_lru(xas, &shadow_nodes);		\
1616 	}							\
1617 } while (0)
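
/*
 * Sketch of typical use (for illustration; mirrors the page cache callers):
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *
 *	mapping_set_update(&xas, mapping);
 */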
1618 
1619 /* mremap.c */
1620 unsigned long move_page_tables(struct pagetable_move_control *pmc);
1621 
1622 #ifdef CONFIG_UNACCEPTED_MEMORY
1623 void accept_page(struct page *page);
1624 #else /* CONFIG_UNACCEPTED_MEMORY */
1625 static inline void accept_page(struct page *page)
1626 {
1627 }
1628 #endif /* CONFIG_UNACCEPTED_MEMORY */
1629 
1630 /* pagewalk.c */
1631 int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
1632 		unsigned long end, const struct mm_walk_ops *ops,
1633 		void *private);
1634 int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
1635 			  unsigned long end, const struct mm_walk_ops *ops,
1636 			  pgd_t *pgd, void *private);
1637 
1638 /* pt_reclaim.c */
1639 bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval);
1640 void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb,
1641 	      pmd_t pmdval);
1642 void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
1643 		     struct mmu_gather *tlb);
1644 
1645 #ifdef CONFIG_PT_RECLAIM
1646 bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
1647 			   struct zap_details *details);
1648 #else
1649 static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
1650 					 struct zap_details *details)
1651 {
1652 	return false;
1653 }
1654 #endif /* CONFIG_PT_RECLAIM */
1655 
1656 void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm);
1657 int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm);
1658 
1659 #endif	/* __MM_INTERNAL_H */
1660