xref: /linux/mm/internal.h (revision 2831fa8b8bcf1083f9526aa0c41fafb0796cf874)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* internal.h: mm/ internal definitions
3  *
4  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 #ifndef __MM_INTERNAL_H
8 #define __MM_INTERNAL_H
9 
10 #include <linux/fs.h>
11 #include <linux/khugepaged.h>
12 #include <linux/mm.h>
13 #include <linux/mm_inline.h>
14 #include <linux/pagemap.h>
15 #include <linux/pagewalk.h>
16 #include <linux/rmap.h>
17 #include <linux/swap.h>
18 #include <linux/leafops.h>
19 #include <linux/swap_cgroup.h>
20 #include <linux/tracepoint-defs.h>
21 
22 /* Internal core VMA manipulation functions. */
23 #include "vma.h"
24 
25 struct folio_batch;
26 
27 /*
28  * Maintains state across a page table move. The operation assumes both source
29  * and destination VMAs already exist and are specified by the user.
30  *
31  * Partial moves are permitted, but the old and new ranges must both reside
32  * within a VMA.
33  *
34  * The mmap lock must be held for write, and VMA write locks must be held on
35  * any VMA that is visible.
36  *
37  * Use the PAGETABLE_MOVE() macro to initialise this struct.
38  *
39  * The old_addr and new_addr fields are updated as the page table move is
40  * executed.
41  *
42  * NOTE: The page table move is performed by reading from [old_addr, old_end),
43  * and old_addr may be updated for better page table alignment, so len_in
44  * represents the length of the range being copied as specified by the user.
45  */
46 struct pagetable_move_control {
47 	struct vm_area_struct *old; /* Source VMA. */
48 	struct vm_area_struct *new; /* Destination VMA. */
49 	unsigned long old_addr; /* Address from which the move begins. */
50 	unsigned long old_end; /* Exclusive address at which old range ends. */
51 	unsigned long new_addr; /* Address to move page tables to. */
52 	unsigned long len_in; /* Bytes to remap specified by user. */
53 
54 	bool need_rmap_locks; /* Do rmap locks need to be taken? */
55 	bool for_stack; /* Is this an early temp stack being moved? */
56 };
57 
58 #define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
59 	struct pagetable_move_control name = {				\
60 		.old = old_,						\
61 		.new = new_,						\
62 		.old_addr = old_addr_,					\
63 		.old_end = (old_addr_) + (len_),			\
64 		.new_addr = new_addr_,					\
65 		.len_in = len_,						\
66 	}
67 
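/*
 * Editor's note: a minimal usage sketch (assuming the caller already holds the
 * mmap write lock and the relevant VMA write locks; 'old_vma', 'new_vma',
 * 'old_addr', 'new_addr' and 'len' are hypothetical locals):
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 *
 *	moved = move_page_tables(&pmc);
 *	if (moved < len)
 *		... handle a partial move ...
 */
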
68 /*
69  * The set of flags that only affect watermark checking and reclaim
70  * behaviour. This is used by the MM to obey the caller constraints
71  * about IO, FS and watermark checking while ignoring placement
72  * hints such as HIGHMEM usage.
73  */
74 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
75 			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
76 			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
77 			__GFP_NOLOCKDEP)
78 
79 /* The GFP flags allowed during early boot */
80 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
81 
82 /* Control allocation cpuset and node placement constraints */
83 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
84 
85 /* Do not use these with a slab allocator */
86 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
87 
88 /*
89  * Different from WARN_ON_ONCE(), no warning will be issued
90  * when we specify __GFP_NOWARN.
91  */
92 #define WARN_ON_ONCE_GFP(cond, gfp)	({				\
93 	static bool __section(".data..once") __warned;			\
94 	int __ret_warn_once = !!(cond);					\
95 									\
96 	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
97 		__warned = true;					\
98 		WARN_ON(1);						\
99 	}								\
100 	unlikely(__ret_warn_once);					\
101 })
102 
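/*
 * Editor's note: an illustrative use in an allocation path (sketch only; the
 * condition shown is arbitrary):
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 *
 * The warning fires at most once and is suppressed entirely for __GFP_NOWARN
 * requests, while the condition's value is still returned to the caller.
 */
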
103 void page_writeback_init(void);
104 
105 /*
106  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
107  * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
108  * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
109  * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
110  */
111 #define ENTIRELY_MAPPED		0x800000
112 #define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)
113 
114 /*
115  * Flags passed to __show_mem() and show_free_areas() to suppress output in
116  * various contexts.
117  */
118 #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
119 
120 /*
121  * How many individual pages have an elevated _mapcount.  Excludes
122  * the folio's entire_mapcount.
123  *
124  * Don't use this function outside of debugging code.
125  */
126 static inline int folio_nr_pages_mapped(const struct folio *folio)
127 {
128 	if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
129 		return -1;
130 	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
131 }
132 
133 /*
134  * Retrieve the first entry of a folio based on a provided entry within the
135  * folio. We cannot rely on folio->swap as there is no guarantee that it has
136  * been initialized. Used for calling arch_swap_restore()
137  */
138 static inline swp_entry_t folio_swap(swp_entry_t entry,
139 		const struct folio *folio)
140 {
141 	swp_entry_t swap = {
142 		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
143 	};
144 
145 	return swap;
146 }
147 
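/*
 * Editor's note, a worked example: folio_swap() relies on the folio's swap
 * entries occupying naturally aligned consecutive offsets, so for an order-4
 * folio (16 pages) an entry at offset 0x123 belongs to the folio whose first
 * entry sits at offset 0x120; ALIGN_DOWN() on the raw entry value recovers
 * that first entry.
 */
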
148 static inline void *folio_raw_mapping(const struct folio *folio)
149 {
150 	unsigned long mapping = (unsigned long)folio->mapping;
151 
152 	return (void *)(mapping & ~FOLIO_MAPPING_FLAGS);
153 }
154 
155 /*
156  * This is a file-backed mapping, and is about to be memory mapped - invoke its
157  * mmap hook and safely handle error conditions. On error, the VMA's hooks are
158  * replaced with dummy ops so that no further hooks can be invoked on it.
159  *
160  * @file: File which backs the mapping.
161  * @vma:  VMA which we are mapping.
162  *
163  * Returns: 0 on success, an error code otherwise.
164  */
165 static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
166 {
167 	int err = vfs_mmap(file, vma);
168 
169 	if (likely(!err))
170 		return 0;
171 
172 	/*
173 	 * OK, we tried to call the file hook for mmap(), but an error
174 	 * arose. The mapping is in an inconsistent state and we must not invoke
175 	 * any further hooks on it.
176 	 */
177 	vma->vm_ops = &vma_dummy_vm_ops;
178 
179 	return err;
180 }
181 
182 /*
183  * If the VMA has a close hook then close it, and since closing it might leave
184  * it in an inconsistent state which makes the use of any hooks suspect, clear
185  * them down by installing dummy empty hooks.
186  */
187 static inline void vma_close(struct vm_area_struct *vma)
188 {
189 	if (vma->vm_ops && vma->vm_ops->close) {
190 		vma->vm_ops->close(vma);
191 
192 		/*
193 		 * The mapping is in an inconsistent state, and no further hooks
194 		 * may be invoked upon it.
195 		 */
196 		vma->vm_ops = &vma_dummy_vm_ops;
197 	}
198 }
199 
200 #ifdef CONFIG_MMU
201 
202 /* Flags for folio_pte_batch(). */
203 typedef int __bitwise fpb_t;
204 
205 /* Compare PTEs respecting the dirty bit. */
206 #define FPB_RESPECT_DIRTY		((__force fpb_t)BIT(0))
207 
208 /* Compare PTEs respecting the soft-dirty bit. */
209 #define FPB_RESPECT_SOFT_DIRTY		((__force fpb_t)BIT(1))
210 
211 /* Compare PTEs respecting the writable bit. */
212 #define FPB_RESPECT_WRITE		((__force fpb_t)BIT(2))
213 
214 /*
215  * Merge PTE write bits: if any PTE in the batch is writable, modify the
216  * PTE at @ptentp to be writable.
217  */
218 #define FPB_MERGE_WRITE			((__force fpb_t)BIT(3))
219 
220 /*
221  * Merge PTE young and dirty bits: if any PTE in the batch is young or dirty,
222  * modify the PTE at @ptentp to be young or dirty, respectively.
223  */
224 #define FPB_MERGE_YOUNG_DIRTY		((__force fpb_t)BIT(4))
225 
226 static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
227 {
228 	if (!(flags & FPB_RESPECT_DIRTY))
229 		pte = pte_mkclean(pte);
230 	if (likely(!(flags & FPB_RESPECT_SOFT_DIRTY)))
231 		pte = pte_clear_soft_dirty(pte);
232 	if (likely(!(flags & FPB_RESPECT_WRITE)))
233 		pte = pte_wrprotect(pte);
234 	return pte_mkold(pte);
235 }
236 
237 /**
238  * folio_pte_batch_flags - detect a PTE batch for a large folio
239  * @folio: The large folio to detect a PTE batch for.
240  * @vma: The VMA. Only relevant with FPB_MERGE_WRITE, otherwise can be NULL.
241  * @ptep: Page table pointer for the first entry.
242  * @ptentp: Pointer to a COPY of the first page table entry whose flags this
243  *	    function updates based on @flags if appropriate.
244  * @max_nr: The maximum number of table entries to consider.
245  * @flags: Flags to modify the PTE batch semantics.
246  *
247  * Detect a PTE batch: consecutive (present) PTEs that map consecutive
248  * pages of the same large folio in a single VMA and a single page table.
249  *
250  * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
251  * the accessed bit, writable bit, dirty bit (unless FPB_RESPECT_DIRTY is set)
252  * and soft-dirty bit (unless FPB_RESPECT_SOFT_DIRTY is set).
253  *
254  * @ptep must map any page of the folio. max_nr must be at least one and
255  * must be limited by the caller so scanning cannot exceed a single VMA and
256  * a single page table.
257  *
258  * Depending on the FPB_MERGE_* flags, the pte stored at @ptentp will
259  * be updated: it's crucial that a pointer to a COPY of the first
260  * page table entry, obtained through ptep_get(), is provided as @ptentp.
261  *
262  * This function will be inlined to optimize based on the input parameters;
263  * consider using folio_pte_batch() instead if applicable.
264  *
265  * Return: the number of table entries in the batch.
266  */
267 static inline unsigned int folio_pte_batch_flags(struct folio *folio,
268 		struct vm_area_struct *vma, pte_t *ptep, pte_t *ptentp,
269 		unsigned int max_nr, fpb_t flags)
270 {
271 	bool any_writable = false, any_young = false, any_dirty = false;
272 	pte_t expected_pte, pte = *ptentp;
273 	unsigned int nr, cur_nr;
274 
275 	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
276 	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
277 	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
278 	/*
279 	 * Ensure this is a pointer to a copy not a pointer into a page table.
280 	 * If this is a stack value, it won't be a valid virtual address, but
281 	 * that's fine because it also cannot be pointing into the page table.
282 	 */
283 	VM_WARN_ON(virt_addr_valid(ptentp) && PageTable(virt_to_page(ptentp)));
284 
285 	/* Limit max_nr to the actual remaining PFNs in the folio we could batch. */
286 	max_nr = min_t(unsigned long, max_nr,
287 		       folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));
288 
289 	nr = pte_batch_hint(ptep, pte);
290 	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
291 	ptep = ptep + nr;
292 
293 	while (nr < max_nr) {
294 		pte = ptep_get(ptep);
295 
296 		if (!pte_same(__pte_batch_clear_ignored(pte, flags), expected_pte))
297 			break;
298 
299 		if (flags & FPB_MERGE_WRITE)
300 			any_writable |= pte_write(pte);
301 		if (flags & FPB_MERGE_YOUNG_DIRTY) {
302 			any_young |= pte_young(pte);
303 			any_dirty |= pte_dirty(pte);
304 		}
305 
306 		cur_nr = pte_batch_hint(ptep, pte);
307 		expected_pte = pte_advance_pfn(expected_pte, cur_nr);
308 		ptep += cur_nr;
309 		nr += cur_nr;
310 	}
311 
312 	if (any_writable)
313 		*ptentp = pte_mkwrite(*ptentp, vma);
314 	if (any_young)
315 		*ptentp = pte_mkyoung(*ptentp);
316 	if (any_dirty)
317 		*ptentp = pte_mkdirty(*ptentp);
318 
319 	return min(nr, max_nr);
320 }
321 
322 unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
323 		unsigned int max_nr);
324 
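/*
 * Editor's note: an illustrative caller of folio_pte_batch_flags() (sketch
 * only; 'folio', 'vma', 'ptep' and 'max_nr' are assumed to be set up under the
 * page table lock, with 'max_nr' bounded to the VMA and page table):
 *
 *	pte_t ptent = ptep_get(ptep);
 *	unsigned int nr;
 *
 *	nr = folio_pte_batch_flags(folio, vma, ptep, &ptent, max_nr,
 *				   FPB_MERGE_YOUNG_DIRTY);
 *
 * Afterwards 'ptent' reflects the merged young/dirty bits and the caller can
 * process 'nr' PTEs as one batch.
 */
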
325 /**
326  * pte_move_swp_offset - Move the swap entry offset field of a swap pte
327  *	 forward or backward by delta
328  * @pte: The initial pte state; must be a swap entry
329  * @delta: The direction and the offset we are moving; forward if delta
330  *	 is positive; backward if delta is negative
331  *
332  * Moves the swap offset, while maintaining all other fields, including
333  * swap type, and any swp pte bits. The resulting pte is returned.
334  */
335 static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
336 {
337 	const softleaf_t entry = softleaf_from_pte(pte);
338 	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
339 						   (swp_offset(entry) + delta)));
340 
341 	if (pte_swp_soft_dirty(pte))
342 		new = pte_swp_mksoft_dirty(new);
343 	if (pte_swp_exclusive(pte))
344 		new = pte_swp_mkexclusive(new);
345 	if (pte_swp_uffd_wp(pte))
346 		new = pte_swp_mkuffd_wp(new);
347 
348 	return new;
349 }
350 
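/*
 * Editor's note, a small example: given a swap pte for (type 2, offset 0x40)
 * with the swp uffd-wp bit set, pte_move_swp_offset(pte, 3) produces a swap
 * pte for (type 2, offset 0x43) with the uffd-wp bit still set;
 * pte_next_swp_offset() below is simply the delta == 1 case.
 */
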
351 
352 /**
353  * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
354  * @pte: The initial pte state; must be a swap entry.
355  *
356  * Increments the swap offset, while maintaining all other fields, including
357  * swap type, and any swp pte bits. The resulting pte is returned.
358  */
359 static inline pte_t pte_next_swp_offset(pte_t pte)
360 {
361 	return pte_move_swp_offset(pte, 1);
362 }
363 
364 /**
365  * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
366  * @start_ptep: Page table pointer for the first entry.
367  * @max_nr: The maximum number of table entries to consider.
368  * @pte: Page table entry for the first entry.
369  *
370  * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
371  * containing swap entries all with consecutive offsets and targeting the same
372  * swap type, all with matching swp pte bits.
373  *
374  * max_nr must be at least one and must be limited by the caller so scanning
375  * cannot exceed a single page table.
376  *
377  * Return: the number of table entries in the batch.
378  */
379 static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
380 {
381 	pte_t expected_pte = pte_next_swp_offset(pte);
382 	const pte_t *end_ptep = start_ptep + max_nr;
383 	const softleaf_t entry = softleaf_from_pte(pte);
384 	pte_t *ptep = start_ptep + 1;
385 	unsigned short cgroup_id;
386 
387 	VM_WARN_ON(max_nr < 1);
388 	VM_WARN_ON(!softleaf_is_swap(entry));
389 
390 	cgroup_id = lookup_swap_cgroup_id(entry);
391 	while (ptep < end_ptep) {
392 		softleaf_t entry;
393 
394 		pte = ptep_get(ptep);
395 
396 		if (!pte_same(pte, expected_pte))
397 			break;
398 		entry = softleaf_from_pte(pte);
399 		if (lookup_swap_cgroup_id(entry) != cgroup_id)
400 			break;
401 		expected_pte = pte_next_swp_offset(expected_pte);
402 		ptep++;
403 	}
404 
405 	return ptep - start_ptep;
406 }
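
/*
 * Editor's note: an illustrative caller of swap_pte_batch() (sketch only;
 * 'start_ptep' points at the first swap PTE under the page table lock and
 * 'max_nr' is bounded to the page table):
 *
 *	pte_t pte = ptep_get(start_ptep);
 *	int nr = swap_pte_batch(start_ptep, max_nr, pte);
 *
 * The 'nr' consecutive swap entries can then be freed or otherwise handled
 * as one batch.
 */
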
407 #endif /* CONFIG_MMU */
408 
409 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
410 						int nr_throttled);
411 static inline void acct_reclaim_writeback(struct folio *folio)
412 {
413 	pg_data_t *pgdat = folio_pgdat(folio);
414 	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
415 
416 	if (nr_throttled)
417 		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
418 }
419 
420 static inline void wake_throttle_isolated(pg_data_t *pgdat)
421 {
422 	wait_queue_head_t *wqh;
423 
424 	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
425 	if (waitqueue_active(wqh))
426 		wake_up(wqh);
427 }
428 
429 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
430 static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
431 {
432 	vm_fault_t ret = __vmf_anon_prepare(vmf);
433 
434 	if (unlikely(ret & VM_FAULT_RETRY))
435 		vma_end_read(vmf->vma);
436 	return ret;
437 }
438 
439 vm_fault_t do_swap_page(struct vm_fault *vmf);
440 void folio_rotate_reclaimable(struct folio *folio);
441 bool __folio_end_writeback(struct folio *folio);
442 void deactivate_file_folio(struct folio *folio);
443 void folio_activate(struct folio *folio);
444 
445 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
446 		   struct vm_area_struct *start_vma, unsigned long floor,
447 		   unsigned long ceiling, bool mm_wr_locked);
448 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
449 
450 struct zap_details;
451 void unmap_page_range(struct mmu_gather *tlb,
452 			     struct vm_area_struct *vma,
453 			     unsigned long addr, unsigned long end,
454 			     struct zap_details *details);
455 void zap_page_range_single_batched(struct mmu_gather *tlb,
456 		struct vm_area_struct *vma, unsigned long addr,
457 		unsigned long size, struct zap_details *details);
458 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
459 			   gfp_t gfp);
460 
461 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *);
462 void force_page_cache_ra(struct readahead_control *, unsigned long nr);
463 static inline void force_page_cache_readahead(struct address_space *mapping,
464 		struct file *file, pgoff_t index, unsigned long nr_to_read)
465 {
466 	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
467 	force_page_cache_ra(&ractl, nr_to_read);
468 }
469 
470 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
471 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
472 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
473 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
474 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
475 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
476 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
477 		loff_t end);
478 long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
479 unsigned long mapping_try_invalidate(struct address_space *mapping,
480 		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
481 
482 /**
483  * folio_evictable - Test whether a folio is evictable.
484  * @folio: The folio to test.
485  *
486  * Test whether @folio is evictable -- i.e., should be placed on
487  * active/inactive lists vs unevictable list.
488  *
489  * Reasons folio might not be evictable:
490  * 1. folio's mapping marked unevictable
491  * 2. One of the pages in the folio is part of an mlocked VMA
492  */
493 static inline bool folio_evictable(struct folio *folio)
494 {
495 	bool ret;
496 
497 	/* Prevent address_space of inode and swap cache from being freed */
498 	rcu_read_lock();
499 	ret = !mapping_unevictable(folio_mapping(folio)) &&
500 			!folio_test_mlocked(folio);
501 	rcu_read_unlock();
502 	return ret;
503 }
504 
505 /*
506  * Turn a non-refcounted page (->_refcount == 0) into refcounted with
507  * a count of one.
508  */
509 static inline void set_page_refcounted(struct page *page)
510 {
511 	VM_BUG_ON_PAGE(PageTail(page), page);
512 	VM_BUG_ON_PAGE(page_ref_count(page), page);
513 	set_page_count(page, 1);
514 }
515 
516 /*
517  * Return true if a folio needs ->release_folio() calling upon it.
518  */
519 static inline bool folio_needs_release(struct folio *folio)
520 {
521 	struct address_space *mapping = folio_mapping(folio);
522 
523 	return folio_has_private(folio) ||
524 		(mapping && mapping_release_always(mapping));
525 }
526 
527 extern unsigned long highest_memmap_pfn;
528 
529 /*
530  * Maximum number of reclaim retries without progress before the OOM
531  * killer is considered the only way forward.
532  */
533 #define MAX_RECLAIM_RETRIES 16
534 
535 /*
536  * in mm/vmscan.c:
537  */
538 bool folio_isolate_lru(struct folio *folio);
539 void folio_putback_lru(struct folio *folio);
540 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
541 int user_proactive_reclaim(char *buf,
542 			   struct mem_cgroup *memcg, pg_data_t *pgdat);
543 
544 /*
545  * in mm/rmap.c:
546  */
547 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
548 
549 /*
550  * in mm/page_alloc.c
551  */
552 #define K(x) ((x) << (PAGE_SHIFT-10))
553 
554 extern char * const zone_names[MAX_NR_ZONES];
555 
556 /* perform sanity checks on struct pages being allocated or freed */
557 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
558 
559 extern int min_free_kbytes;
560 extern int defrag_mode;
561 
562 void setup_per_zone_wmarks(void);
563 void calculate_min_free_kbytes(void);
564 int __meminit init_per_zone_wmark_min(void);
565 void page_alloc_sysctl_init(void);
566 
567 /*
568  * Structure for holding the mostly immutable allocation parameters passed
569  * between functions involved in allocations, including the alloc_pages*
570  * family of functions.
571  *
572  * nodemask, migratetype and highest_zoneidx are initialized only once in
573  * __alloc_pages() and then never change.
574  *
575  * zonelist, preferred_zone and highest_zoneidx are set first in
576  * __alloc_pages() for the fast path, and might be later changed
577  * in __alloc_pages_slowpath(). All other functions pass the whole structure
578  * by a const pointer.
579  */
580 struct alloc_context {
581 	struct zonelist *zonelist;
582 	nodemask_t *nodemask;
583 	struct zoneref *preferred_zoneref;
584 	int migratetype;
585 
586 	/*
587 	 * highest_zoneidx represents highest usable zone index of
588 	 * the allocation request. Due to the nature of the zone,
589 	 * memory on lower zone than the highest_zoneidx will be
590 	 * protected by lowmem_reserve[highest_zoneidx].
591 	 *
592 	 * highest_zoneidx is also used by reclaim/compaction to limit
593 	 * the target zone since higher zone than this index cannot be
594 	 * usable for this allocation request.
595 	 */
596 	enum zone_type highest_zoneidx;
597 	bool spread_dirty_pages;
598 };
599 
600 /*
601  * This function returns the order of a free page in the buddy system. In
602  * general, page_zone(page)->lock must be held by the caller to prevent the
603  * page from being allocated in parallel and returning garbage as the order.
604  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
605  * page cannot be allocated or merged in parallel. Alternatively, it must
606  * handle invalid values gracefully, and use buddy_order_unsafe() below.
607  */
608 static inline unsigned int buddy_order(struct page *page)
609 {
610 	/* PageBuddy() must be checked by the caller */
611 	return page_private(page);
612 }
613 
614 /*
615  * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
616  * PageBuddy() should be checked first by the caller to minimize race window,
617  * and invalid values must be handled gracefully.
618  *
619  * READ_ONCE is used so that if the caller assigns the result into a local
620  * variable and e.g. tests it for valid range before using, the compiler cannot
621  * decide to remove the variable and inline the page_private(page) multiple
622  * times, potentially observing different values in the tests and the actual
623  * use of the result.
624  */
625 #define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
626 
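/*
 * Editor's note: the lockless pattern described above, as a sketch (the
 * surrounding logic is hypothetical):
 *
 *	if (PageBuddy(page)) {
 *		unsigned int order = buddy_order_unsafe(page);
 *
 *		if (order <= MAX_PAGE_ORDER)
 *			... 'order' may still be stale; treat it as a hint ...
 *	}
 */
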
627 /*
628  * This function checks whether a page is free && is the buddy
629  * This function checks whether a page is free && is the buddy.
630  * We can coalesce a page and its buddy if
631  * (b) the buddy is in the buddy system &&
632  * (c) a page and its buddy have the same order &&
633  * (d) a page and its buddy are in the same zone.
634  *
635  * For recording whether a page is in the buddy system, we set PageBuddy.
636  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
637  *
638  * For recording page's order, we use page_private(page).
639  */
640 static inline bool page_is_buddy(struct page *page, struct page *buddy,
641 				 unsigned int order)
642 {
643 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
644 		return false;
645 
646 	if (buddy_order(buddy) != order)
647 		return false;
648 
649 	/*
650 	 * zone check is done late to avoid uselessly calculating
651 	 * zone/node ids for pages that could never merge.
652 	 */
653 	if (page_zone_id(page) != page_zone_id(buddy))
654 		return false;
655 
656 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
657 
658 	return true;
659 }
660 
661 /*
662  * Locate the struct page for both the matching buddy in our
663  * pair (buddy1) and the combined O(n+1) page they form (page).
664  *
665  * 1) Any buddy B1 will have an order O twin B2 which satisfies
666  * the following equation:
667  *     B2 = B1 ^ (1 << O)
668  * For example, if the starting buddy (buddy2) is #8 its order
669  * 1 buddy is #10:
670  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
671  *
672  * 2) Any buddy B will have an order O+1 parent P which
673  * satisfies the following equation:
674  *     P = B & ~(1 << O)
675  *
676  * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
677  */
678 static inline unsigned long
679 __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
680 {
681 	return page_pfn ^ (1 << order);
682 }
683 
684 /*
685  * Find the buddy of @page and validate it.
686  * @page: The input page
687  * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
688  *       function is used in the performance-critical __free_one_page().
689  * @order: The order of the page
690  * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
691  *             page_to_pfn().
692  *
693  * The found buddy can be non-PageBuddy, outside @page's zone, or of an order
694  * different from @page's. Validation is necessary before using it.
695  *
696  * Return: the found buddy page or NULL if not found.
697  */
698 static inline struct page *find_buddy_page_pfn(struct page *page,
699 			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
700 {
701 	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
702 	struct page *buddy;
703 
704 	buddy = page + (__buddy_pfn - pfn);
705 	if (buddy_pfn)
706 		*buddy_pfn = __buddy_pfn;
707 
708 	if (page_is_buddy(page, buddy, order))
709 		return buddy;
710 	return NULL;
711 }
712 
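/*
 * Editor's note: an illustrative lookup during merging in the free path
 * (sketch only; assumes zone->lock is held and 'pfn'/'order' describe the page
 * being freed):
 *
 *	unsigned long buddy_pfn;
 *	struct page *buddy;
 *
 *	buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *	if (!buddy)
 *		... no mergeable buddy at this order ...
 */
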
713 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
714 				unsigned long end_pfn, struct zone *zone);
715 
716 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
717 				unsigned long end_pfn, struct zone *zone)
718 {
719 	if (zone->contiguous)
720 		return pfn_to_page(start_pfn);
721 
722 	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
723 }
724 
725 void set_zone_contiguous(struct zone *zone);
726 bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
727 			   unsigned long nr_pages);
728 
729 static inline void clear_zone_contiguous(struct zone *zone)
730 {
731 	zone->contiguous = false;
732 }
733 
734 extern int __isolate_free_page(struct page *page, unsigned int order);
735 extern void __putback_isolated_page(struct page *page, unsigned int order,
736 				    int mt);
737 extern void memblock_free_pages(struct page *page, unsigned long pfn,
738 					unsigned int order);
739 extern void __free_pages_core(struct page *page, unsigned int order,
740 		enum meminit_context context);
741 
742 /*
743  * This will have no effect, other than possibly generating a warning, if the
744  * caller passes in a non-large folio.
745  */
746 static inline void folio_set_order(struct folio *folio, unsigned int order)
747 {
748 	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
749 		return;
750 	VM_WARN_ON_ONCE(order > MAX_FOLIO_ORDER);
751 
752 	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
753 #ifdef NR_PAGES_IN_LARGE_FOLIO
754 	folio->_nr_pages = 1U << order;
755 #endif
756 }
757 
758 bool __folio_unqueue_deferred_split(struct folio *folio);
759 static inline bool folio_unqueue_deferred_split(struct folio *folio)
760 {
761 	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
762 		return false;
763 
764 	/*
765 	 * At this point, there is no one trying to add the folio to
766 	 * deferred_list. If folio is not in deferred_list, it's safe
767 	 * to check without acquiring the split_queue_lock.
768 	 */
769 	if (data_race(list_empty(&folio->_deferred_list)))
770 		return false;
771 
772 	return __folio_unqueue_deferred_split(folio);
773 }
774 
775 static inline struct folio *page_rmappable_folio(struct page *page)
776 {
777 	struct folio *folio = (struct folio *)page;
778 
779 	if (folio && folio_test_large(folio))
780 		folio_set_large_rmappable(folio);
781 	return folio;
782 }
783 
784 static inline void prep_compound_head(struct page *page, unsigned int order)
785 {
786 	struct folio *folio = (struct folio *)page;
787 
788 	folio_set_order(folio, order);
789 	atomic_set(&folio->_large_mapcount, -1);
790 	if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
791 		atomic_set(&folio->_nr_pages_mapped, 0);
792 	if (IS_ENABLED(CONFIG_MM_ID)) {
793 		folio->_mm_ids = 0;
794 		folio->_mm_id_mapcount[0] = -1;
795 		folio->_mm_id_mapcount[1] = -1;
796 	}
797 	if (IS_ENABLED(CONFIG_64BIT) || order > 1) {
798 		atomic_set(&folio->_pincount, 0);
799 		atomic_set(&folio->_entire_mapcount, -1);
800 	}
801 	if (order > 1)
802 		INIT_LIST_HEAD(&folio->_deferred_list);
803 }
804 
805 static inline void prep_compound_tail(struct page *head, int tail_idx)
806 {
807 	struct page *p = head + tail_idx;
808 
809 	p->mapping = TAIL_MAPPING;
810 	set_compound_head(p, head);
811 	set_page_private(p, 0);
812 }
813 
814 void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
815 extern bool free_pages_prepare(struct page *page, unsigned int order);
816 
817 extern int user_min_free_kbytes;
818 
819 struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
820 		nodemask_t *);
821 #define __alloc_frozen_pages(...) \
822 	alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
823 void free_frozen_pages(struct page *page, unsigned int order);
824 void free_unref_folios(struct folio_batch *fbatch);
825 
826 #ifdef CONFIG_NUMA
827 struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
828 #else
829 static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
830 {
831 	return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
832 }
833 #endif
834 
835 #define alloc_frozen_pages(...) \
836 	alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
837 
838 struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
839 #define alloc_frozen_pages_nolock(...) \
840 	alloc_hooks(alloc_frozen_pages_nolock_noprof(__VA_ARGS__))
841 void free_frozen_pages_nolock(struct page *page, unsigned int order);
842 
843 extern void zone_pcp_reset(struct zone *zone);
844 extern void zone_pcp_disable(struct zone *zone);
845 extern void zone_pcp_enable(struct zone *zone);
846 extern void zone_pcp_init(struct zone *zone);
847 
848 extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
849 			  phys_addr_t min_addr,
850 			  int nid, bool exact_nid);
851 
852 void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
853 		unsigned long, enum meminit_context, struct vmem_altmap *, int,
854 		bool);
855 
856 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
857 
858 /*
859  * in mm/compaction.c
860  */
861 /*
862  * compact_control is used to track pages being migrated and the free pages
863  * they are being migrated to during memory compaction. The free_pfn starts
864  * at the end of a zone and migrate_pfn begins at the start. Movable pages
865  * are moved to the end of a zone during a compaction run and the run
866  * completes when free_pfn <= migrate_pfn
867  */
868 struct compact_control {
869 	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
870 	struct list_head migratepages;	/* List of pages being migrated */
871 	unsigned int nr_freepages;	/* Number of isolated free pages */
872 	unsigned int nr_migratepages;	/* Number of pages to migrate */
873 	unsigned long free_pfn;		/* isolate_freepages search base */
874 	/*
875 	 * Acts as an in/out parameter to page isolation for migration.
876 	 * isolate_migratepages uses it as a search base.
877 	 * isolate_migratepages_block will update the value to the next pfn
878 	 * after the last isolated one.
879 	 */
880 	unsigned long migrate_pfn;
881 	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
882 	struct zone *zone;
883 	unsigned long total_migrate_scanned;
884 	unsigned long total_free_scanned;
885 	unsigned short fast_search_fail;/* failures to use free list searches */
886 	short search_order;		/* order to start a fast search at */
887 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
888 	int order;			/* order a direct compactor needs */
889 	int migratetype;		/* migratetype of direct compactor */
890 	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
891 	const int highest_zoneidx;	/* zone index of a direct compactor */
892 	enum migrate_mode mode;		/* Async or sync migration mode */
893 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
894 	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
895 	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
896 	bool direct_compaction;		/* False from kcompactd or /proc/... */
897 	bool proactive_compaction;	/* kcompactd proactive compaction */
898 	bool whole_zone;		/* Whole zone should/has been scanned */
899 	bool contended;			/* Signal lock contention */
900 	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
901 					 * when there are potentially transient
902 					 * isolation or migration failures to
903 					 * ensure forward progress.
904 					 */
905 	bool alloc_contig;		/* alloc_contig_range allocation */
906 };
907 
908 /*
909  * Used in direct compaction when a page should be taken from the freelists
910  * immediately when one is created during the free path.
911  */
912 struct capture_control {
913 	struct compact_control *cc;
914 	struct page *page;
915 };
916 
917 unsigned long
918 isolate_freepages_range(struct compact_control *cc,
919 			unsigned long start_pfn, unsigned long end_pfn);
920 int
921 isolate_migratepages_range(struct compact_control *cc,
922 			   unsigned long low_pfn, unsigned long end_pfn);
923 
924 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
925 void init_cma_reserved_pageblock(struct page *page);
926 
927 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
928 
929 struct cma;
930 
931 #ifdef CONFIG_CMA
932 void *cma_reserve_early(struct cma *cma, unsigned long size);
933 void init_cma_pageblock(struct page *page);
934 #else
935 static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
936 {
937 	return NULL;
938 }
939 static inline void init_cma_pageblock(struct page *page)
940 {
941 }
942 #endif
943 
944 
945 int find_suitable_fallback(struct free_area *area, unsigned int order,
946 			   int migratetype, bool claimable);
947 
948 static inline bool free_area_empty(struct free_area *area, int migratetype)
949 {
950 	return list_empty(&area->free_list[migratetype]);
951 }
952 
953 /* mm/util.c */
954 struct anon_vma *folio_anon_vma(const struct folio *folio);
955 
956 #ifdef CONFIG_MMU
957 void unmap_mapping_folio(struct folio *folio);
958 extern long populate_vma_page_range(struct vm_area_struct *vma,
959 		unsigned long start, unsigned long end, int *locked);
960 extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
961 		unsigned long end, bool write, int *locked);
962 bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags,
963 		unsigned long bytes);
964 
965 /*
966  * NOTE: This function can't tell whether the folio is "fully mapped" in the
967  * range.
968  * "fully mapped" means all the pages of the folio are associated with the
969  * page tables of the range, while this function just checks whether the folio
970  * range is within the range [start, end). The caller needs to do the page
971  * table check itself if it cares about the page table association.
972  *
973  * Typical usage (like mlock or madvise) is:
974  * The caller knows at least one page of the folio is associated with the page
975  * tables of the VMA and the range [start, end) intersects the VMA range. It wants
976  * to know whether the folio is fully associated with the range. It calls
977  * this function to check whether the folio is in the range first. Then checks
978  * the page table to know whether the folio is fully mapped to the range.
979  */
980 static inline bool
981 folio_within_range(struct folio *folio, struct vm_area_struct *vma,
982 		unsigned long start, unsigned long end)
983 {
984 	pgoff_t pgoff, addr;
985 	unsigned long vma_pglen = vma_pages(vma);
986 
987 	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
988 	if (start > end)
989 		return false;
990 
991 	if (start < vma->vm_start)
992 		start = vma->vm_start;
993 
994 	if (end > vma->vm_end)
995 		end = vma->vm_end;
996 
997 	pgoff = folio_pgoff(folio);
998 
999 	/* if folio start address is not in vma range */
1000 	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
1001 		return false;
1002 
1003 	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1004 
1005 	return !(addr < start || end - addr < folio_size(folio));
1006 }
1007 
1008 static inline bool
1009 folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
1010 {
1011 	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
1012 }
1013 
1014 /*
1015  * mlock_vma_folio() and munlock_vma_folio():
1016  * should be called with vma's mmap_lock held for read or write,
1017  * under page table lock for the pte/pmd being added or removed.
1018  *
1019  * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
1020  * the end of folio_remove_rmap_*(); but new anon folios are managed by
1021  * folio_add_lru_vma() calling mlock_new_folio().
1022  */
1023 void mlock_folio(struct folio *folio);
1024 static inline void mlock_vma_folio(struct folio *folio,
1025 				struct vm_area_struct *vma)
1026 {
1027 	/*
1028 	 * The VM_SPECIAL check here serves two purposes.
1029 	 * 1) VM_IO check prevents migration from double-counting during mlock.
1030 	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
1031 	 *    is never left set on a VM_SPECIAL vma, there is an interval while
1032 	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
1033 	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
1034 	 */
1035 	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
1036 		mlock_folio(folio);
1037 }
1038 
1039 void munlock_folio(struct folio *folio);
1040 static inline void munlock_vma_folio(struct folio *folio,
1041 					struct vm_area_struct *vma)
1042 {
1043 	/*
1044 	 * Always munlock when this function is called. Ideally, we should only
1045 	 * munlock if some page of the folio has been unmapped from the VMA,
1046 	 * leaving the folio no longer fully mapped to the VMA.
1047 	 *
1048 	 * But it's not easy to confirm that's the situation. So we
1049 	 * always munlock the folio and page reclaim will correct it
1050 	 * if it's wrong.
1051 	 */
1052 	if (unlikely(vma->vm_flags & VM_LOCKED))
1053 		munlock_folio(folio);
1054 }
1055 
1056 void mlock_new_folio(struct folio *folio);
1057 bool need_mlock_drain(int cpu);
1058 void mlock_drain_local(void);
1059 void mlock_drain_remote(int cpu);
1060 
1061 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
1062 
1063 /**
1064  * vma_address - Find the virtual address a page range is mapped at
1065  * @vma: The vma which maps this object.
1066  * @pgoff: The page offset within its object.
1067  * @nr_pages: The number of pages to consider.
1068  *
1069  * If any page in this range is mapped by this VMA, return the first address
1070  * where any of these pages appear.  Otherwise, return -EFAULT.
1071  */
1072 static inline unsigned long vma_address(const struct vm_area_struct *vma,
1073 		pgoff_t pgoff, unsigned long nr_pages)
1074 {
1075 	unsigned long address;
1076 
1077 	if (pgoff >= vma->vm_pgoff) {
1078 		address = vma->vm_start +
1079 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1080 		/* Check for address beyond vma (or wrapped through 0?) */
1081 		if (address < vma->vm_start || address >= vma->vm_end)
1082 			address = -EFAULT;
1083 	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
1084 		/* Test above avoids possibility of wrap to 0 on 32-bit */
1085 		address = vma->vm_start;
1086 	} else {
1087 		address = -EFAULT;
1088 	}
1089 	return address;
1090 }
1091 
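/*
 * Editor's note, a worked example with 4KiB pages: for vma->vm_start ==
 * 0x100000, vma->vm_pgoff == 0x10 and a single page at pgoff 0x12,
 * vma_address() returns 0x100000 + ((0x12 - 0x10) << PAGE_SHIFT) == 0x102000,
 * provided that address still lies below vma->vm_end.
 */
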
1092 /*
1093  * Then at what user virtual address will none of the range be found in vma?
1094  * Assumes that vma_address() already returned a good starting address.
1095  */
1096 static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
1097 {
1098 	struct vm_area_struct *vma = pvmw->vma;
1099 	pgoff_t pgoff;
1100 	unsigned long address;
1101 
1102 	/* Common case, plus ->pgoff is invalid for KSM */
1103 	if (pvmw->nr_pages == 1)
1104 		return pvmw->address + PAGE_SIZE;
1105 
1106 	pgoff = pvmw->pgoff + pvmw->nr_pages;
1107 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1108 	/* Check for address beyond vma (or wrapped through 0?) */
1109 	if (address < vma->vm_start || address > vma->vm_end)
1110 		address = vma->vm_end;
1111 	return address;
1112 }
1113 
1114 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
1115 						    struct file *fpin)
1116 {
1117 	int flags = vmf->flags;
1118 
1119 	if (fpin)
1120 		return fpin;
1121 
1122 	/*
1123 	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
1124 	 * anything, so we only pin the file and drop the mmap_lock if only
1125 	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
1126 	 */
1127 	if (fault_flag_allow_retry_first(flags) &&
1128 	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
1129 		fpin = get_file(vmf->vma->vm_file);
1130 		release_fault_lock(vmf);
1131 	}
1132 	return fpin;
1133 }
1134 #else /* !CONFIG_MMU */
1135 static inline void unmap_mapping_folio(struct folio *folio) { }
1136 static inline void mlock_new_folio(struct folio *folio) { }
1137 static inline bool need_mlock_drain(int cpu) { return false; }
1138 static inline void mlock_drain_local(void) { }
1139 static inline void mlock_drain_remote(int cpu) { }
1140 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
1141 {
1142 }
1143 #endif /* !CONFIG_MMU */
1144 
1145 /* Memory initialisation debug and verification */
1146 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1147 DECLARE_STATIC_KEY_TRUE(deferred_pages);
1148 
1149 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
1150 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1151 
1152 void init_deferred_page(unsigned long pfn, int nid);
1153 
1154 enum mminit_level {
1155 	MMINIT_WARNING,
1156 	MMINIT_VERIFY,
1157 	MMINIT_TRACE
1158 };
1159 
1160 #ifdef CONFIG_DEBUG_MEMORY_INIT
1161 
1162 extern int mminit_loglevel;
1163 
1164 #define mminit_dprintk(level, prefix, fmt, arg...) \
1165 do { \
1166 	if (level < mminit_loglevel) { \
1167 		if (level <= MMINIT_WARNING) \
1168 			pr_warn("mminit::" prefix " " fmt, ##arg);	\
1169 		else \
1170 			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
1171 	} \
1172 } while (0)
1173 
1174 extern void mminit_verify_pageflags_layout(void);
1175 extern void mminit_verify_zonelist(void);
1176 #else
1177 
1178 static inline void mminit_dprintk(enum mminit_level level,
1179 				const char *prefix, const char *fmt, ...)
1180 {
1181 }
1182 
1183 static inline void mminit_verify_pageflags_layout(void)
1184 {
1185 }
1186 
1187 static inline void mminit_verify_zonelist(void)
1188 {
1189 }
1190 #endif /* CONFIG_DEBUG_MEMORY_INIT */
1191 
1192 #define NODE_RECLAIM_NOSCAN	-2
1193 #define NODE_RECLAIM_FULL	-1
1194 #define NODE_RECLAIM_SOME	0
1195 #define NODE_RECLAIM_SUCCESS	1
1196 
1197 #ifdef CONFIG_NUMA
1198 extern int node_reclaim_mode;
1199 
1200 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
1201 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
1202 #else
1203 #define node_reclaim_mode 0
1204 
1205 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
1206 				unsigned int order)
1207 {
1208 	return NODE_RECLAIM_NOSCAN;
1209 }
1210 static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
1211 {
1212 	return NUMA_NO_NODE;
1213 }
1214 #endif
1215 
1216 static inline bool node_reclaim_enabled(void)
1217 {
1218 	/* Is any node_reclaim_mode bit set? */
1219 	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
1220 }
1221 
1222 /*
1223  * mm/memory-failure.c
1224  */
1225 #ifdef CONFIG_MEMORY_FAILURE
1226 int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
1227 void shake_folio(struct folio *folio);
1228 typedef int hwpoison_filter_func_t(struct page *p);
1229 void hwpoison_filter_register(hwpoison_filter_func_t *filter);
1230 void hwpoison_filter_unregister(void);
1231 
1232 #define MAGIC_HWPOISON	0x48575053U	/* HWPS */
1233 void SetPageHWPoisonTakenOff(struct page *page);
1234 void ClearPageHWPoisonTakenOff(struct page *page);
1235 bool take_page_off_buddy(struct page *page);
1236 bool put_page_back_buddy(struct page *page);
1237 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
1238 void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
1239 		     struct vm_area_struct *vma, struct list_head *to_kill,
1240 		     unsigned long ksm_addr);
1241 unsigned long page_mapped_in_vma(const struct page *page,
1242 		struct vm_area_struct *vma);
1243 
1244 #else
1245 static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
1246 {
1247 	return -EBUSY;
1248 }
1249 #endif
1250 
1251 extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
1252         unsigned long, unsigned long,
1253         unsigned long, unsigned long);
1254 
1255 extern void set_pageblock_order(void);
1256 unsigned long reclaim_pages(struct list_head *folio_list);
1257 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1258 					    struct list_head *folio_list);
1259 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1260 #define ALLOC_WMARK_MIN		WMARK_MIN
1261 #define ALLOC_WMARK_LOW		WMARK_LOW
1262 #define ALLOC_WMARK_HIGH	WMARK_HIGH
1263 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1264 
1265 /* Mask to get the watermark bits */
1266 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1267 
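/*
 * Editor's note: since the ALLOC_WMARK_* bits are an index, the selected
 * watermark is typically looked up along the lines of (sketch only):
 *
 *	mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 */
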
1268 /*
1269  * Only MMU archs have async OOM victim reclaim (aka the oom_reaper), so we
1270  * cannot assume that reduced access to memory reserves is sufficient for
1271  * !MMU.
1272  */
1273 #ifdef CONFIG_MMU
1274 #define ALLOC_OOM		0x08
1275 #else
1276 #define ALLOC_OOM		ALLOC_NO_WATERMARKS
1277 #endif
1278 
1279 #define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
1280 				       * to 25% of the min watermark or
1281 				       * 62.5% if __GFP_HIGH is set.
1282 				       */
1283 #define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
1284 				       * of the min watermark.
1285 				       */
1286 #define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
1287 #define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
1288 #ifdef CONFIG_ZONE_DMA32
1289 #define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
1290 #else
1291 #define ALLOC_NOFRAGMENT	  0x0
1292 #endif
1293 #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
1294 #define ALLOC_TRYLOCK		0x400 /* Only use spin_trylock in allocation path */
1295 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
1296 
1297 /* Flags that allow allocations below the min watermark. */
1298 #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
1299 
1300 enum ttu_flags;
1301 struct tlbflush_unmap_batch;
1302 
1303 
1304 /*
1305  * only for MM internal work items which do not depend on
1306  * any allocations or locks which might depend on allocations
1307  */
1308 extern struct workqueue_struct *mm_percpu_wq;
1309 
1310 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1311 void try_to_unmap_flush(void);
1312 void try_to_unmap_flush_dirty(void);
1313 void flush_tlb_batched_pending(struct mm_struct *mm);
1314 #else
1315 static inline void try_to_unmap_flush(void)
1316 {
1317 }
1318 static inline void try_to_unmap_flush_dirty(void)
1319 {
1320 }
1321 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
1322 {
1323 }
1324 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
1325 
1326 extern const struct trace_print_flags pageflag_names[];
1327 extern const struct trace_print_flags vmaflag_names[];
1328 extern const struct trace_print_flags gfpflag_names[];
1329 
1330 void setup_zone_pageset(struct zone *zone);
1331 
1332 struct migration_target_control {
1333 	int nid;		/* preferred node id */
1334 	nodemask_t *nmask;
1335 	gfp_t gfp_mask;
1336 	enum migrate_reason reason;
1337 };
1338 
1339 /*
1340  * mm/filemap.c
1341  */
1342 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
1343 			      struct folio *folio, loff_t fpos, size_t size);
1344 
1345 /*
1346  * mm/vmalloc.c
1347  */
1348 #ifdef CONFIG_MMU
1349 void __init vmalloc_init(void);
1350 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1351 	pgprot_t prot, struct page **pages, unsigned int page_shift, gfp_t gfp_mask);
1352 unsigned int get_vm_area_page_order(struct vm_struct *vm);
1353 #else
1354 static inline void vmalloc_init(void)
1355 {
1356 }
1357 
1358 static inline
1359 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1360 	pgprot_t prot, struct page **pages, unsigned int page_shift, gfp_t gfp_mask)
1361 {
1362 	return -EINVAL;
1363 }
1364 #endif
1365 
1366 int __must_check __vmap_pages_range_noflush(unsigned long addr,
1367 			       unsigned long end, pgprot_t prot,
1368 			       struct page **pages, unsigned int page_shift);
1369 
1370 void vunmap_range_noflush(unsigned long start, unsigned long end);
1371 
1372 void __vunmap_range_noflush(unsigned long start, unsigned long end);
1373 
1374 static inline bool vma_is_single_threaded_private(struct vm_area_struct *vma)
1375 {
1376 	if (vma->vm_flags & VM_SHARED)
1377 		return false;
1378 
1379 	return atomic_read(&vma->vm_mm->mm_users) == 1;
1380 }
1381 
1382 #ifdef CONFIG_NUMA_BALANCING
1383 bool folio_can_map_prot_numa(struct folio *folio, struct vm_area_struct *vma,
1384 		bool is_private_single_threaded);
1385 
1386 #else
1387 static inline bool folio_can_map_prot_numa(struct folio *folio,
1388 		struct vm_area_struct *vma, bool is_private_single_threaded)
1389 {
1390 	return false;
1391 }
1392 #endif
1393 
1394 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
1395 		      unsigned long addr, int *flags, bool writable,
1396 		      int *last_cpupid);
1397 
1398 void free_zone_device_folio(struct folio *folio);
1399 int migrate_device_coherent_folio(struct folio *folio);
1400 
1401 struct vm_struct *__get_vm_area_node(unsigned long size,
1402 				     unsigned long align, unsigned long shift,
1403 				     unsigned long vm_flags, unsigned long start,
1404 				     unsigned long end, int node, gfp_t gfp_mask,
1405 				     const void *caller);
1406 
1407 /*
1408  * mm/gup.c
1409  */
1410 int __must_check try_grab_folio(struct folio *folio, int refs,
1411 				unsigned int flags);
1412 
1413 /*
1414  * mm/huge_memory.c
1415  */
1416 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1417 	       pud_t *pud, bool write);
1418 bool touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1419 	       pmd_t *pmd, bool write);
1420 
1421 /*
1422  * Parses a string with mem suffixes into its order. Useful to parse kernel
1423  * parameters.
1424  */
1425 static inline int get_order_from_str(const char *size_str,
1426 				     unsigned long valid_orders)
1427 {
1428 	unsigned long size;
1429 	char *endptr;
1430 	int order;
1431 
1432 	size = memparse(size_str, &endptr);
1433 
1434 	if (!is_power_of_2(size))
1435 		return -EINVAL;
1436 	order = get_order(size);
1437 	if (BIT(order) & ~valid_orders)
1438 		return -EINVAL;
1439 
1440 	return order;
1441 }
1442 
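/*
 * Editor's note, a worked example with 4KiB pages: get_order_from_str("2M",
 * valid_orders) parses 2MiB and yields order 9, accepted only if BIT(9) is in
 * 'valid_orders'; "3M" is rejected because it is not a power of two.
 */
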
1443 enum {
1444 	/* mark page accessed */
1445 	FOLL_TOUCH = 1 << 16,
1446 	/* a retry, previous pass started an IO */
1447 	FOLL_TRIED = 1 << 17,
1448 	/* we are working on non-current tsk/mm */
1449 	FOLL_REMOTE = 1 << 18,
1450 	/* pages must be released via unpin_user_page */
1451 	FOLL_PIN = 1 << 19,
1452 	/* gup_fast: prevent fall-back to slow gup */
1453 	FOLL_FAST_ONLY = 1 << 20,
1454 	/* allow unlocking the mmap lock */
1455 	FOLL_UNLOCKABLE = 1 << 21,
1456 	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
1457 	FOLL_MADV_POPULATE = 1 << 22,
1458 };
1459 
1460 #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
1461 			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
1462 			    FOLL_MADV_POPULATE)
1463 
1464 /*
1465  * Indicates, for pages that are write-protected in the page table,
1466  * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
1467  * GUP pin will remain consistent with the pages mapped into the page tables
1468  * of the MM.
1469  *
1470  * Temporary unmapping of PageAnonExclusive() pages or clearing of
1471  * PageAnonExclusive() has to protect against concurrent GUP:
1472  * * Ordinary GUP: Using the PT lock
1473  * * GUP-fast and fork(): mm->write_protect_seq
1474  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1475  *    folio_try_share_anon_rmap_*()
1476  *
1477  * Must be called with the (sub)page that's actually referenced via the
1478  * page table entry, which might not necessarily be the head page for a
1479  * PTE-mapped THP.
1480  *
1481  * If the vma is NULL, we're coming from the GUP-fast path and might have
1482  * to fallback to the slow path just to lookup the vma.
1483  */
1484 static inline bool gup_must_unshare(struct vm_area_struct *vma,
1485 				    unsigned int flags, struct page *page)
1486 {
1487 	/*
1488 	 * FOLL_WRITE is implicitly handled correctly as the page table entry
1489 	 * has to be writable -- and if it references (part of) an anonymous
1490 	 * folio, that part is required to be marked exclusive.
1491 	 */
1492 	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
1493 		return false;
1494 	/*
1495 	 * Note: PageAnon(page) is stable until the page is actually getting
1496 	 * freed.
1497 	 */
1498 	if (!PageAnon(page)) {
1499 		/*
1500 		 * We only care about R/O long-term pinning: R/O short-term
1501 		 * pinning does not have the semantics to observe successive
1502 		 * changes through the process page tables.
1503 		 */
1504 		if (!(flags & FOLL_LONGTERM))
1505 			return false;
1506 
1507 		/* We really need the vma ... */
1508 		if (!vma)
1509 			return true;
1510 
1511 		/*
1512 		 * ... because we only care about writable private ("COW")
1513 		 * mappings where we have to break COW early.
1514 		 */
1515 		return is_cow_mapping(vma->vm_flags);
1516 	}
1517 
1518 	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
1519 	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
1520 		smp_rmb();
1521 
1522 	/*
1523 	 * Note that KSM pages cannot be exclusive, and consequently,
1524 	 * cannot get pinned.
1525 	 */
1526 	return !PageAnonExclusive(page);
1527 }
1528 
1529 extern bool mirrored_kernelcore;
1530 bool memblock_has_mirror(void);
1531 void memblock_free_all(void);
1532 
1533 static __always_inline void vma_set_range(struct vm_area_struct *vma,
1534 					  unsigned long start, unsigned long end,
1535 					  pgoff_t pgoff)
1536 {
1537 	vma->vm_start = start;
1538 	vma->vm_end = end;
1539 	vma->vm_pgoff = pgoff;
1540 }
1541 
1542 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1543 {
1544 	/*
1545 	 * NOTE: we must check this before checking VM_SOFTDIRTY on soft-dirty
1546 	 * enablements, because when soft-dirty is not compiled in,
1547 	 * VM_SOFTDIRTY is defined as 0x0, and then !(vm_flags & VM_SOFTDIRTY)
1548 	 * would be constantly true.
1549 	 */
1550 	if (!pgtable_supports_soft_dirty())
1551 		return false;
1552 
1553 	/*
1554 	 * Soft-dirty is kind of special: its tracking is enabled when the
1555 	 * VM_SOFTDIRTY vma flag is *not* set.
1556 	 */
1557 	return !(vma->vm_flags & VM_SOFTDIRTY);
1558 }
1559 
1560 static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
1561 {
1562 	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
1563 }
1564 
1565 static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
1566 {
1567 	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
1568 }
1569 
1570 void __meminit __init_single_page(struct page *page, unsigned long pfn,
1571 				unsigned long zone, int nid);
1572 void __meminit __init_page_from_nid(unsigned long pfn, int nid);
1573 
1574 /* shrinker related functions */
1575 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
1576 			  int priority);
1577 
1578 int shmem_add_to_page_cache(struct folio *folio,
1579 			    struct address_space *mapping,
1580 			    pgoff_t index, void *expected, gfp_t gfp);
1581 int shmem_inode_acct_blocks(struct inode *inode, long pages);
1582 bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped);
1583 
1584 #ifdef CONFIG_SHRINKER_DEBUG
1585 static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
1586 			struct shrinker *shrinker, const char *fmt, va_list ap)
1587 {
1588 	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
1589 
1590 	return shrinker->name ? 0 : -ENOMEM;
1591 }
1592 
1593 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1594 {
1595 	kfree_const(shrinker->name);
1596 	shrinker->name = NULL;
1597 }
1598 
1599 extern int shrinker_debugfs_add(struct shrinker *shrinker);
1600 extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1601 					      int *debugfs_id);
1602 extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1603 				    int debugfs_id);
1604 #else /* CONFIG_SHRINKER_DEBUG */
1605 static inline int shrinker_debugfs_add(struct shrinker *shrinker)
1606 {
1607 	return 0;
1608 }
1609 static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
1610 					      const char *fmt, va_list ap)
1611 {
1612 	return 0;
1613 }
1614 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1615 {
1616 }
1617 static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1618 						     int *debugfs_id)
1619 {
1620 	*debugfs_id = -1;
1621 	return NULL;
1622 }
1623 static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1624 					   int debugfs_id)
1625 {
1626 }
1627 #endif /* CONFIG_SHRINKER_DEBUG */
1628 
1629 /* Only track the nodes of mappings with shadow entries */
1630 void workingset_update_node(struct xa_node *node);
1631 extern struct list_lru shadow_nodes;
1632 #define mapping_set_update(xas, mapping) do {			\
1633 	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {	\
1634 		xas_set_update(xas, workingset_update_node);	\
1635 		xas_set_lru(xas, &shadow_nodes);		\
1636 	}							\
1637 } while (0)
1638 
1639 /* mremap.c */
1640 unsigned long move_page_tables(struct pagetable_move_control *pmc);
1641 
1642 #ifdef CONFIG_UNACCEPTED_MEMORY
1643 void accept_page(struct page *page);
1644 #else /* CONFIG_UNACCEPTED_MEMORY */
1645 static inline void accept_page(struct page *page)
1646 {
1647 }
1648 #endif /* CONFIG_UNACCEPTED_MEMORY */
1649 
1650 /* pagewalk.c */
1651 int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
1652 		unsigned long end, const struct mm_walk_ops *ops,
1653 		void *private);
1654 int walk_page_range_vma_unsafe(struct vm_area_struct *vma, unsigned long start,
1655 		unsigned long end, const struct mm_walk_ops *ops,
1656 		void *private);
1657 int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
1658 			  unsigned long end, const struct mm_walk_ops *ops,
1659 			  pgd_t *pgd, void *private);
1660 
1661 /* pt_reclaim.c */
1662 bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval);
1663 void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb,
1664 	      pmd_t pmdval);
1665 void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
1666 		     struct mmu_gather *tlb);
1667 
1668 #ifdef CONFIG_PT_RECLAIM
1669 bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
1670 			   struct zap_details *details);
1671 #else
1672 static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
1673 					 struct zap_details *details)
1674 {
1675 	return false;
1676 }
1677 #endif /* CONFIG_PT_RECLAIM */
1678 
1679 void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm);
1680 int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm);
1681 
1682 void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn);
1683 int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
1684 		unsigned long pfn, unsigned long size, pgprot_t pgprot);
1685 
1686 static inline void io_remap_pfn_range_prepare(struct vm_area_desc *desc,
1687 		unsigned long orig_pfn, unsigned long size)
1688 {
1689 	const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
1690 
1691 	return remap_pfn_range_prepare(desc, pfn);
1692 }
1693 
1694 static inline int io_remap_pfn_range_complete(struct vm_area_struct *vma,
1695 		unsigned long addr, unsigned long orig_pfn, unsigned long size,
1696 		pgprot_t orig_prot)
1697 {
1698 	const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
1699 	const pgprot_t prot = pgprot_decrypted(orig_prot);
1700 
1701 	return remap_pfn_range_complete(vma, addr, pfn, size, prot);
1702 }
1703 
1704 #endif	/* __MM_INTERNAL_H */
1705