xref: /linux/mm/internal.h (revision a58f3dcf20ea9e7e968ee8369fd782bbb53dff73)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* internal.h: mm/ internal definitions
3  *
4  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 #ifndef __MM_INTERNAL_H
8 #define __MM_INTERNAL_H
9 
10 #include <linux/fs.h>
11 #include <linux/khugepaged.h>
12 #include <linux/mm.h>
13 #include <linux/mm_inline.h>
14 #include <linux/pagemap.h>
15 #include <linux/pagewalk.h>
16 #include <linux/rmap.h>
17 #include <linux/swap.h>
18 #include <linux/swapops.h>
19 #include <linux/swap_cgroup.h>
20 #include <linux/tracepoint-defs.h>
21 
22 /* Internal core VMA manipulation functions. */
23 #include "vma.h"
24 
25 struct folio_batch;
26 
27 /*
28  * The set of flags that only affect watermark checking and reclaim
29  * behaviour. This is used by the MM to obey the caller constraints
30  * about IO, FS and watermark checking while ignoring placement
31  * hints such as HIGHMEM usage.
32  */
33 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
34 			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
35 			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
36 			__GFP_NOLOCKDEP)
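/*
 * Illustrative sketch (not from this file): an internal allocation path
 * would typically combine the caller's reclaim constraints with its own
 * placement flags, e.g.
 *
 *	gfp_t gfp = (caller_gfp & GFP_RECLAIM_MASK) | __GFP_HIGHMEM;
 *
 * honouring the caller's IO/FS/watermark behaviour while overriding
 * placement hints.
 */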
37 
38 /* The GFP flags allowed during early boot */
39 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
40 
41 /* Control allocation cpuset and node placement constraints */
42 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
43 
44 /* Do not use these with a slab allocator */
45 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
46 
47 /*
48  * Unlike WARN_ON_ONCE(), no warning will be issued
49  * when __GFP_NOWARN is specified.
50  */
51 #define WARN_ON_ONCE_GFP(cond, gfp)	({				\
52 	static bool __section(".data..once") __warned;			\
53 	int __ret_warn_once = !!(cond);					\
54 									\
55 	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
56 		__warned = true;					\
57 		WARN_ON(1);						\
58 	}								\
59 	unlikely(__ret_warn_once);					\
60 })
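/*
 * Hypothetical call site sketch: warn (at most once) on an unexpected
 * allocation failure, but stay silent when the caller passed __GFP_NOWARN:
 *
 *	if (WARN_ON_ONCE_GFP(!page, gfp))
 *		return NULL;
 */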
61 
62 void page_writeback_init(void);
63 
64 /*
65  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
66  * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
67  * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
68  * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
69  */
70 #define ENTIRELY_MAPPED		0x800000
71 #define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)
72 
73 /*
74  * Flags passed to __show_mem() and show_free_areas() to suppress output in
75  * various contexts.
76  */
77 #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
78 
79 /*
80  * How many individual pages have an elevated _mapcount.  Excludes
81  * the folio's entire_mapcount.
82  *
83  * Don't use this function outside of debugging code.
84  */
85 static inline int folio_nr_pages_mapped(const struct folio *folio)
86 {
87 	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
88 }
89 
90 /*
91  * Retrieve the first entry of a folio based on a provided entry within the
92  * folio. We cannot rely on folio->swap as there is no guarantee that it has
93  * been initialized. Used for calling arch_swap_restore()
94  */
95 static inline swp_entry_t folio_swap(swp_entry_t entry,
96 		const struct folio *folio)
97 {
98 	swp_entry_t swap = {
99 		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
100 	};
101 
102 	return swap;
103 }
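/*
 * Worked example (assuming naturally aligned swap entries): for an order-4
 * folio (16 pages), an entry with val 0x1234 yields the folio's first entry,
 * since ALIGN_DOWN(0x1234, 16) == 0x1230.
 */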
104 
105 static inline void *folio_raw_mapping(const struct folio *folio)
106 {
107 	unsigned long mapping = (unsigned long)folio->mapping;
108 
109 	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
110 }
111 
112 /*
113  * This is a file-backed mapping, and is about to be memory mapped - invoke its
114  * mmap hook and safely handle error conditions. On error, VMA hooks will be
115  * mutated.
116  *
117  * @file: File which backs the mapping.
118  * @vma:  VMA which we are mapping.
119  *
120  * Returns: 0 if success, error otherwise.
121  */
122 static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
123 {
124 	int err = call_mmap(file, vma);
125 
126 	if (likely(!err))
127 		return 0;
128 
129 	/*
130 	 * OK, we tried to call the file hook for mmap(), but an error
131 	 * arose. The mapping is in an inconsistent state and we must not invoke
132 	 * any further hooks on it.
133 	 */
134 	vma->vm_ops = &vma_dummy_vm_ops;
135 
136 	return err;
137 }
138 
139 /*
140  * If the VMA has a close hook then close it, and since closing it might leave
141  * it in an inconsistent state which makes the use of any hooks suspect, clear
142  * them down by installing dummy empty hooks.
143  */
144 static inline void vma_close(struct vm_area_struct *vma)
145 {
146 	if (vma->vm_ops && vma->vm_ops->close) {
147 		vma->vm_ops->close(vma);
148 
149 		/*
150 		 * The mapping is in an inconsistent state, and no further hooks
151 		 * may be invoked upon it.
152 		 */
153 		vma->vm_ops = &vma_dummy_vm_ops;
154 	}
155 }
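/*
 * Hypothetical sketch of how the two helpers above pair up in a mapping
 * path: mmap_file() is invoked while establishing the mapping, and on a
 * later failure the VMA can be torn down via vma_close(), which is safe
 * even after mmap_file() failed since both install the dummy hooks:
 *
 *	error = mmap_file(file, vma);
 *	if (error)
 *		goto cleanup;	// cleanup path may call vma_close(vma)
 */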
156 
157 #ifdef CONFIG_MMU
158 
159 /* Flags for folio_pte_batch(). */
160 typedef int __bitwise fpb_t;
161 
162 /* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
163 #define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))
164 
165 /* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
166 #define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))
167 
168 static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
169 {
170 	if (flags & FPB_IGNORE_DIRTY)
171 		pte = pte_mkclean(pte);
172 	if (flags & FPB_IGNORE_SOFT_DIRTY)
173 		pte = pte_clear_soft_dirty(pte);
174 	return pte_wrprotect(pte_mkold(pte));
175 }
176 
177 /**
178  * folio_pte_batch - detect a PTE batch for a large folio
179  * @folio: The large folio to detect a PTE batch for.
180  * @addr: The user virtual address the first page is mapped at.
181  * @start_ptep: Page table pointer for the first entry.
182  * @pte: Page table entry for the first page.
183  * @max_nr: The maximum number of table entries to consider.
184  * @flags: Flags to modify the PTE batch semantics.
185  * @any_writable: Optional pointer to indicate whether any entry except the
186  *		  first one is writable.
187  * @any_young: Optional pointer to indicate whether any entry except the
188  *		  first one is young.
189  * @any_dirty: Optional pointer to indicate whether any entry except the
190  *		  first one is dirty.
191  *
192  * Detect a PTE batch: consecutive (present) PTEs that map consecutive
193  * pages of the same large folio.
194  *
195  * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
196  * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
197  * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
198  *
199  * start_ptep must map any page of the folio. max_nr must be at least one and
200  * must be limited by the caller so scanning cannot exceed a single page table.
201  *
202  * Return: the number of table entries in the batch.
203  */
204 static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
205 		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
206 		bool *any_writable, bool *any_young, bool *any_dirty)
207 {
208 	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
209 	const pte_t *end_ptep = start_ptep + max_nr;
210 	pte_t expected_pte, *ptep;
211 	bool writable, young, dirty;
212 	int nr;
213 
214 	if (any_writable)
215 		*any_writable = false;
216 	if (any_young)
217 		*any_young = false;
218 	if (any_dirty)
219 		*any_dirty = false;
220 
221 	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
222 	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
223 	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
224 
225 	nr = pte_batch_hint(start_ptep, pte);
226 	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
227 	ptep = start_ptep + nr;
228 
229 	while (ptep < end_ptep) {
230 		pte = ptep_get(ptep);
231 		if (any_writable)
232 			writable = !!pte_write(pte);
233 		if (any_young)
234 			young = !!pte_young(pte);
235 		if (any_dirty)
236 			dirty = !!pte_dirty(pte);
237 		pte = __pte_batch_clear_ignored(pte, flags);
238 
239 		if (!pte_same(pte, expected_pte))
240 			break;
241 
242 		/*
243 		 * Stop immediately once we reach the end of the folio. In
244 		 * corner cases the next PFN might fall into a different
245 		 * folio.
246 		 */
247 		if (pte_pfn(pte) >= folio_end_pfn)
248 			break;
249 
250 		if (any_writable)
251 			*any_writable |= writable;
252 		if (any_young)
253 			*any_young |= young;
254 		if (any_dirty)
255 			*any_dirty |= dirty;
256 
257 		nr = pte_batch_hint(ptep, pte);
258 		expected_pte = pte_advance_pfn(expected_pte, nr);
259 		ptep += nr;
260 	}
261 
262 	return min(ptep - start_ptep, max_nr);
263 }
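/*
 * Hypothetical caller sketch, run under the PTE lock with max_nr clamped so
 * the scan stays within the current page table:
 *
 *	nr = folio_pte_batch(folio, addr, ptep, ptep_get(ptep), max_nr,
 *			     FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY,
 *			     NULL, NULL, NULL);
 *	// nr consecutive PTEs map consecutive pages of this large folio
 */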
264 
265 /**
266  * pte_move_swp_offset - Move the swap entry offset field of a swap pte
267  *	 forward or backward by delta
268  * @pte: The initial pte state; is_swap_pte(pte) must be true and
269  *	 non_swap_entry() must be false.
270  * @delta: The direction and the offset we are moving; forward if delta
271  *	 is positive; backward if delta is negative
272  *
273  * Moves the swap offset, while maintaining all other fields, including
274  * swap type, and any swp pte bits. The resulting pte is returned.
275  */
276 static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
277 {
278 	swp_entry_t entry = pte_to_swp_entry(pte);
279 	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
280 						   (swp_offset(entry) + delta)));
281 
282 	if (pte_swp_soft_dirty(pte))
283 		new = pte_swp_mksoft_dirty(new);
284 	if (pte_swp_exclusive(pte))
285 		new = pte_swp_mkexclusive(new);
286 	if (pte_swp_uffd_wp(pte))
287 		new = pte_swp_mkuffd_wp(new);
288 
289 	return new;
290 }
291 
292 
293 /**
294  * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
295  * @pte: The initial pte state; is_swap_pte(pte) must be true and
296  *	 non_swap_entry() must be false.
297  *
298  * Increments the swap offset, while maintaining all other fields, including
299  * swap type, and any swp pte bits. The resulting pte is returned.
300  */
301 static inline pte_t pte_next_swp_offset(pte_t pte)
302 {
303 	return pte_move_swp_offset(pte, 1);
304 }
305 
306 /**
307  * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
308  * @start_ptep: Page table pointer for the first entry.
309  * @max_nr: The maximum number of table entries to consider.
310  * @pte: Page table entry for the first entry.
311  *
312  * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
313  * containing swap entries all with consecutive offsets and targeting the same
314  * swap type, all with matching swp pte bits.
315  *
316  * max_nr must be at least one and must be limited by the caller so scanning
317  * cannot exceed a single page table.
318  *
319  * Return: the number of table entries in the batch.
320  */
321 static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
322 {
323 	pte_t expected_pte = pte_next_swp_offset(pte);
324 	const pte_t *end_ptep = start_ptep + max_nr;
325 	swp_entry_t entry = pte_to_swp_entry(pte);
326 	pte_t *ptep = start_ptep + 1;
327 	unsigned short cgroup_id;
328 
329 	VM_WARN_ON(max_nr < 1);
330 	VM_WARN_ON(!is_swap_pte(pte));
331 	VM_WARN_ON(non_swap_entry(entry));
332 
333 	cgroup_id = lookup_swap_cgroup_id(entry);
334 	while (ptep < end_ptep) {
335 		pte = ptep_get(ptep);
336 
337 		if (!pte_same(pte, expected_pte))
338 			break;
339 		if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
340 			break;
341 		expected_pte = pte_next_swp_offset(expected_pte);
342 		ptep++;
343 	}
344 
345 	return ptep - start_ptep;
346 }
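/*
 * Hypothetical caller sketch: determine how many consecutive swap entries
 * can be processed as one batch, e.g. when zapping a range:
 *
 *	entry = pte_to_swp_entry(pte);
 *	nr = swap_pte_batch(ptep, max_nr, pte);
 *	// entry, entry + 1, ..., entry + nr - 1 share type and swp pte bits
 */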
347 #endif /* CONFIG_MMU */
348 
349 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
350 						int nr_throttled);
351 static inline void acct_reclaim_writeback(struct folio *folio)
352 {
353 	pg_data_t *pgdat = folio_pgdat(folio);
354 	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
355 
356 	if (nr_throttled)
357 		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
358 }
359 
360 static inline void wake_throttle_isolated(pg_data_t *pgdat)
361 {
362 	wait_queue_head_t *wqh;
363 
364 	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
365 	if (waitqueue_active(wqh))
366 		wake_up(wqh);
367 }
368 
369 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
370 static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
371 {
372 	vm_fault_t ret = __vmf_anon_prepare(vmf);
373 
374 	if (unlikely(ret & VM_FAULT_RETRY))
375 		vma_end_read(vmf->vma);
376 	return ret;
377 }
378 
379 vm_fault_t do_swap_page(struct vm_fault *vmf);
380 void folio_rotate_reclaimable(struct folio *folio);
381 bool __folio_end_writeback(struct folio *folio);
382 void deactivate_file_folio(struct folio *folio);
383 void folio_activate(struct folio *folio);
384 
385 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
386 		   struct vm_area_struct *start_vma, unsigned long floor,
387 		   unsigned long ceiling, bool mm_wr_locked);
388 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
389 
390 struct zap_details;
391 void unmap_page_range(struct mmu_gather *tlb,
392 			     struct vm_area_struct *vma,
393 			     unsigned long addr, unsigned long end,
394 			     struct zap_details *details);
395 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
396 			   gfp_t gfp);
397 
398 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
399 		unsigned int order);
400 void force_page_cache_ra(struct readahead_control *, unsigned long nr);
401 static inline void force_page_cache_readahead(struct address_space *mapping,
402 		struct file *file, pgoff_t index, unsigned long nr_to_read)
403 {
404 	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
405 	force_page_cache_ra(&ractl, nr_to_read);
406 }
407 
408 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
409 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
410 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
411 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
412 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
413 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
414 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
415 		loff_t end);
416 long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
417 unsigned long mapping_try_invalidate(struct address_space *mapping,
418 		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
419 
420 /**
421  * folio_evictable - Test whether a folio is evictable.
422  * @folio: The folio to test.
423  *
424  * Test whether @folio is evictable -- i.e., should be placed on
425  * active/inactive lists vs unevictable list.
426  *
427  * Reasons folio might not be evictable:
428  * 1. folio's mapping marked unevictable
429  * 2. One of the pages in the folio is part of an mlocked VMA
430  */
431 static inline bool folio_evictable(struct folio *folio)
432 {
433 	bool ret;
434 
435 	/* Prevent address_space of inode and swap cache from being freed */
436 	rcu_read_lock();
437 	ret = !mapping_unevictable(folio_mapping(folio)) &&
438 			!folio_test_mlocked(folio);
439 	rcu_read_unlock();
440 	return ret;
441 }
442 
443 /*
444  * Turn a non-refcounted page (->_refcount == 0) into refcounted with
445  * a count of one.
446  */
447 static inline void set_page_refcounted(struct page *page)
448 {
449 	VM_BUG_ON_PAGE(PageTail(page), page);
450 	VM_BUG_ON_PAGE(page_ref_count(page), page);
451 	set_page_count(page, 1);
452 }
453 
454 /*
455  * Return true if a folio needs ->release_folio() calling upon it.
456  */
457 static inline bool folio_needs_release(struct folio *folio)
458 {
459 	struct address_space *mapping = folio_mapping(folio);
460 
461 	return folio_has_private(folio) ||
462 		(mapping && mapping_release_always(mapping));
463 }
464 
465 extern unsigned long highest_memmap_pfn;
466 
467 /*
468  * Maximum number of reclaim retries without progress before the OOM
469  * killer is considered the only way forward.
470  */
471 #define MAX_RECLAIM_RETRIES 16
472 
473 /*
474  * in mm/vmscan.c:
475  */
476 bool folio_isolate_lru(struct folio *folio);
477 void folio_putback_lru(struct folio *folio);
478 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
479 
480 /*
481  * in mm/rmap.c:
482  */
483 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
484 
485 /*
486  * in mm/page_alloc.c
487  */
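/* Convert a count of pages to KiB: x << (PAGE_SHIFT - 10). */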
488 #define K(x) ((x) << (PAGE_SHIFT-10))
489 
490 extern char * const zone_names[MAX_NR_ZONES];
491 
492 /* perform sanity checks on struct pages being allocated or freed */
493 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
494 
495 extern int min_free_kbytes;
496 
497 void setup_per_zone_wmarks(void);
498 void calculate_min_free_kbytes(void);
499 int __meminit init_per_zone_wmark_min(void);
500 void page_alloc_sysctl_init(void);
501 
502 /*
503  * Structure for holding the mostly immutable allocation parameters passed
504  * between functions involved in allocations, including the alloc_pages*
505  * family of functions.
506  *
507  * nodemask, migratetype and highest_zoneidx are initialized only once in
508  * __alloc_pages() and then never change.
509  *
510  * zonelist, preferred_zone and highest_zoneidx are set first in
511  * __alloc_pages() for the fast path, and might be later changed
512  * in __alloc_pages_slowpath(). All other functions pass the whole structure
513  * by a const pointer.
514  */
515 struct alloc_context {
516 	struct zonelist *zonelist;
517 	nodemask_t *nodemask;
518 	struct zoneref *preferred_zoneref;
519 	int migratetype;
520 
521 	/*
522 	 * highest_zoneidx represents the highest usable zone index of
523 	 * the allocation request. Due to the nature of the zone,
524 	 * memory in zones lower than highest_zoneidx will be
525 	 * protected by lowmem_reserve[highest_zoneidx].
526 	 *
527 	 * highest_zoneidx is also used by reclaim/compaction to limit
528 	 * the target zone, since zones higher than this index cannot
529 	 * be used for this allocation request.
530 	 */
531 	enum zone_type highest_zoneidx;
532 	bool spread_dirty_pages;
533 };
534 
535 /*
536  * This function returns the order of a free page in the buddy system. In
537  * general, page_zone(page)->lock must be held by the caller to prevent the
538  * page from being allocated in parallel and returning garbage as the order.
539  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
540  * page cannot be allocated or merged in parallel. Alternatively, it must
541  * handle invalid values gracefully, and use buddy_order_unsafe() below.
542  */
543 static inline unsigned int buddy_order(struct page *page)
544 {
545 	/* PageBuddy() must be checked by the caller */
546 	return page_private(page);
547 }
548 
549 /*
550  * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
551  * PageBuddy() should be checked first by the caller to minimize race window,
552  * and invalid values must be handled gracefully.
553  *
554  * READ_ONCE is used so that if the caller assigns the result into a local
555  * variable and e.g. tests it for valid range before using, the compiler cannot
556  * decide to remove the variable and inline the page_private(page) multiple
557  * times, potentially observing different values in the tests and the actual
558  * use of the result.
559  */
560 #define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
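/*
 * Sketch of the intended pattern (hypothetical caller): read once into a
 * local, validate, then use only the validated local:
 *
 *	unsigned int order = buddy_order_unsafe(page);
 *
 *	if (order <= MAX_PAGE_ORDER)
 *		use(order);	// same value that was range-checked
 */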
561 
562 /*
563  * This function checks whether a page is free && is the buddy of @page.
564  * We can coalesce a page and its buddy if
565  * (a) the buddy is not in a hole (check before calling!) &&
566  * (b) the buddy is in the buddy system &&
567  * (c) a page and its buddy have the same order &&
568  * (d) a page and its buddy are in the same zone.
569  *
570  * For recording whether a page is in the buddy system, we set PageBuddy.
571  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
572  *
573  * For recording page's order, we use page_private(page).
574  */
575 static inline bool page_is_buddy(struct page *page, struct page *buddy,
576 				 unsigned int order)
577 {
578 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
579 		return false;
580 
581 	if (buddy_order(buddy) != order)
582 		return false;
583 
584 	/*
585 	 * zone check is done late to avoid uselessly calculating
586 	 * zone/node ids for pages that could never merge.
587 	 */
588 	if (page_zone_id(page) != page_zone_id(buddy))
589 		return false;
590 
591 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
592 
593 	return true;
594 }
595 
596 /*
597  * Locate the struct page for both the matching buddy in our
598  * pair (buddy1) and the combined O(n+1) page they form (page).
599  *
600  * 1) Any buddy B1 will have an order O twin B2 which satisfies
601  * the following equation:
602  *     B2 = B1 ^ (1 << O)
603  * For example, if the starting buddy (buddy2) is #8 its order
604  * 1 buddy is #10:
605  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
606  *
607  * 2) Any buddy B will have an order O+1 parent P which
608  * satisfies the following equation:
609  *     P = B & ~(1 << O)
610  *
611  * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
612  */
613 static inline unsigned long
614 __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
615 {
616 	return page_pfn ^ (1 << order);
617 }
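/*
 * Continuing the worked example above: merging buddies #8 and #10 at order 1
 * gives the order-2 parent P = 10 & ~(1 << 1) = 8, i.e. the combined page
 * starts at pfn 8.
 */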
618 
619 /*
620  * Find the buddy of @page and validate it.
621  * @page: The input page
622  * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
623  *       function is used in the performance-critical __free_one_page().
624  * @order: The order of the page
625  * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
626  *             page_to_pfn().
627  *
628  * The found buddy can be a non-PageBuddy page, outside @page's zone, or of a
629  * different order than @page, so validation is necessary before using it.
630  *
631  * Return: the found buddy page or NULL if not found.
632  */
633 static inline struct page *find_buddy_page_pfn(struct page *page,
634 			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
635 {
636 	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
637 	struct page *buddy;
638 
639 	buddy = page + (__buddy_pfn - pfn);
640 	if (buddy_pfn)
641 		*buddy_pfn = __buddy_pfn;
642 
643 	if (page_is_buddy(page, buddy, order))
644 		return buddy;
645 	return NULL;
646 }
647 
648 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
649 				unsigned long end_pfn, struct zone *zone);
650 
651 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
652 				unsigned long end_pfn, struct zone *zone)
653 {
654 	if (zone->contiguous)
655 		return pfn_to_page(start_pfn);
656 
657 	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
658 }
659 
660 void set_zone_contiguous(struct zone *zone);
661 bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
662 			   unsigned long nr_pages);
663 
664 static inline void clear_zone_contiguous(struct zone *zone)
665 {
666 	zone->contiguous = false;
667 }
668 
669 extern int __isolate_free_page(struct page *page, unsigned int order);
670 extern void __putback_isolated_page(struct page *page, unsigned int order,
671 				    int mt);
672 extern void memblock_free_pages(struct page *page, unsigned long pfn,
673 					unsigned int order);
674 extern void __free_pages_core(struct page *page, unsigned int order,
675 		enum meminit_context context);
676 
677 /*
678  * This will have no effect, other than possibly generating a warning, if the
679  * caller passes in a non-large folio.
680  */
681 static inline void folio_set_order(struct folio *folio, unsigned int order)
682 {
683 	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
684 		return;
685 
686 	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
687 #ifdef CONFIG_64BIT
688 	folio->_folio_nr_pages = 1U << order;
689 #endif
690 }
691 
692 bool __folio_unqueue_deferred_split(struct folio *folio);
693 static inline bool folio_unqueue_deferred_split(struct folio *folio)
694 {
695 	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
696 		return false;
697 
698 	/*
699 	 * At this point, there is no one trying to add the folio to
700 	 * deferred_list. If folio is not in deferred_list, it's safe
701 	 * to check without acquiring the split_queue_lock.
702 	 */
703 	if (data_race(list_empty(&folio->_deferred_list)))
704 		return false;
705 
706 	return __folio_unqueue_deferred_split(folio);
707 }
708 
709 static inline struct folio *page_rmappable_folio(struct page *page)
710 {
711 	struct folio *folio = (struct folio *)page;
712 
713 	if (folio && folio_test_large(folio))
714 		folio_set_large_rmappable(folio);
715 	return folio;
716 }
717 
718 static inline void prep_compound_head(struct page *page, unsigned int order)
719 {
720 	struct folio *folio = (struct folio *)page;
721 
722 	folio_set_order(folio, order);
723 	atomic_set(&folio->_large_mapcount, -1);
724 	atomic_set(&folio->_entire_mapcount, -1);
725 	atomic_set(&folio->_nr_pages_mapped, 0);
726 	atomic_set(&folio->_pincount, 0);
727 	if (order > 1)
728 		INIT_LIST_HEAD(&folio->_deferred_list);
729 }
730 
731 static inline void prep_compound_tail(struct page *head, int tail_idx)
732 {
733 	struct page *p = head + tail_idx;
734 
735 	p->mapping = TAIL_MAPPING;
736 	set_compound_head(p, head);
737 	set_page_private(p, 0);
738 }
739 
740 extern void prep_compound_page(struct page *page, unsigned int order);
741 
742 void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
743 extern bool free_pages_prepare(struct page *page, unsigned int order);
744 
745 extern int user_min_free_kbytes;
746 
747 struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
748 		nodemask_t *);
749 #define __alloc_frozen_pages(...) \
750 	alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
751 void free_frozen_pages(struct page *page, unsigned int order);
752 void free_unref_folios(struct folio_batch *fbatch);
753 
754 #ifdef CONFIG_NUMA
755 struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
756 #else
757 static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
758 {
759 	return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
760 }
761 #endif
762 
763 #define alloc_frozen_pages(...) \
764 	alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
765 
766 extern void zone_pcp_reset(struct zone *zone);
767 extern void zone_pcp_disable(struct zone *zone);
768 extern void zone_pcp_enable(struct zone *zone);
769 extern void zone_pcp_init(struct zone *zone);
770 
771 extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
772 			  phys_addr_t min_addr,
773 			  int nid, bool exact_nid);
774 
775 void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
776 		unsigned long, enum meminit_context, struct vmem_altmap *, int);
777 
778 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
779 
780 /*
781  * in mm/compaction.c
782  */
783 /*
784  * compact_control is used to track pages being migrated and the free pages
785  * they are being migrated to during memory compaction. The free_pfn starts
786  * at the end of a zone and migrate_pfn begins at the start. Movable pages
787  * are moved to the end of a zone during a compaction run and the run
788  * completes when free_pfn <= migrate_pfn
789  */
790 struct compact_control {
791 	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
792 	struct list_head migratepages;	/* List of pages being migrated */
793 	unsigned int nr_freepages;	/* Number of isolated free pages */
794 	unsigned int nr_migratepages;	/* Number of pages to migrate */
795 	unsigned long free_pfn;		/* isolate_freepages search base */
796 	/*
797 	 * Acts as an in/out parameter to page isolation for migration.
798 	 * isolate_migratepages uses it as a search base.
799 	 * isolate_migratepages_block will update the value to the next pfn
800 	 * after the last isolated one.
801 	 */
802 	unsigned long migrate_pfn;
803 	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
804 	struct zone *zone;
805 	unsigned long total_migrate_scanned;
806 	unsigned long total_free_scanned;
807 	unsigned short fast_search_fail;/* failures to use free list searches */
808 	short search_order;		/* order to start a fast search at */
809 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
810 	int order;			/* order a direct compactor needs */
811 	int migratetype;		/* migratetype of direct compactor */
812 	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
813 	const int highest_zoneidx;	/* zone index of a direct compactor */
814 	enum migrate_mode mode;		/* Async or sync migration mode */
815 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
816 	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
817 	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
818 	bool direct_compaction;		/* False from kcompactd or /proc/... */
819 	bool proactive_compaction;	/* kcompactd proactive compaction */
820 	bool whole_zone;		/* Whole zone should/has been scanned */
821 	bool contended;			/* Signal lock contention */
822 	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
823 					 * when there are potentially transient
824 					 * isolation or migration failures to
825 					 * ensure forward progress.
826 					 */
827 	bool alloc_contig;		/* alloc_contig_range allocation */
828 };
829 
830 /*
831  * Used in direct compaction when a page should be taken from the freelists
832  * immediately when one is created during the free path.
833  */
834 struct capture_control {
835 	struct compact_control *cc;
836 	struct page *page;
837 };
838 
839 unsigned long
840 isolate_freepages_range(struct compact_control *cc,
841 			unsigned long start_pfn, unsigned long end_pfn);
842 int
843 isolate_migratepages_range(struct compact_control *cc,
844 			   unsigned long low_pfn, unsigned long end_pfn);
845 
846 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
847 void init_cma_reserved_pageblock(struct page *page);
848 
849 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
850 
851 struct cma;
852 
853 #ifdef CONFIG_CMA
854 void *cma_reserve_early(struct cma *cma, unsigned long size);
855 void init_cma_pageblock(struct page *page);
856 #else
857 static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
858 {
859 	return NULL;
860 }
861 static inline void init_cma_pageblock(struct page *page)
862 {
863 }
864 #endif
865 
866 
867 int find_suitable_fallback(struct free_area *area, unsigned int order,
868 			int migratetype, bool only_stealable, bool *can_steal);
869 
870 static inline bool free_area_empty(struct free_area *area, int migratetype)
871 {
872 	return list_empty(&area->free_list[migratetype]);
873 }
874 
875 /* mm/util.c */
876 struct anon_vma *folio_anon_vma(const struct folio *folio);
877 
878 #ifdef CONFIG_MMU
879 void unmap_mapping_folio(struct folio *folio);
880 extern long populate_vma_page_range(struct vm_area_struct *vma,
881 		unsigned long start, unsigned long end, int *locked);
882 extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
883 		unsigned long end, bool write, int *locked);
884 extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
885 			       unsigned long bytes);
886 
887 /*
888  * NOTE: This function can't tell whether the folio is "fully mapped" in the
889  * range.
890  * "fully mapped" means all the pages of the folio are associated with the
891  * page table of the range, while this function just checks whether the folio
892  * range is within the range [start, end). The caller needs to check the page
893  * table itself if it cares about the page table association.
894  *
895  * Typical usage (like mlock or madvise) is:
896  * The caller knows at least one page of the folio is associated with the
897  * page table of the VMA, and the range [start, end) intersects the VMA
898  * range. To learn whether the folio is fully associated with the range, the
899  * caller first calls this function to check whether the folio is within the
900  * range, then checks the page table to see whether the folio is fully mapped.
901  */
902 static inline bool
903 folio_within_range(struct folio *folio, struct vm_area_struct *vma,
904 		unsigned long start, unsigned long end)
905 {
906 	pgoff_t pgoff, addr;
907 	unsigned long vma_pglen = vma_pages(vma);
908 
909 	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
910 	if (start > end)
911 		return false;
912 
913 	if (start < vma->vm_start)
914 		start = vma->vm_start;
915 
916 	if (end > vma->vm_end)
917 		end = vma->vm_end;
918 
919 	pgoff = folio_pgoff(folio);
920 
921 	/* if folio start address is not in vma range */
922 	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
923 		return false;
924 
925 	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
926 
927 	return !(addr < start || end - addr < folio_size(folio));
928 }
929 
930 static inline bool
931 folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
932 {
933 	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
934 }
935 
936 /*
937  * mlock_vma_folio() and munlock_vma_folio():
938  * should be called with vma's mmap_lock held for read or write,
939  * under page table lock for the pte/pmd being added or removed.
940  *
941  * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
942  * the end of folio_remove_rmap_*(); but new anon folios are managed by
943  * folio_add_lru_vma() calling mlock_new_folio().
944  */
945 void mlock_folio(struct folio *folio);
946 static inline void mlock_vma_folio(struct folio *folio,
947 				struct vm_area_struct *vma)
948 {
949 	/*
950 	 * The VM_SPECIAL check here serves two purposes.
951 	 * 1) VM_IO check prevents migration from double-counting during mlock.
952 	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
953 	 *    is never left set on a VM_SPECIAL vma, there is an interval while
954 	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
955 	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
956 	 */
957 	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
958 		mlock_folio(folio);
959 }
960 
961 void munlock_folio(struct folio *folio);
962 static inline void munlock_vma_folio(struct folio *folio,
963 					struct vm_area_struct *vma)
964 {
965 	/*
966 	 * Always munlock when this function is called. Ideally, we would
967 	 * only munlock if some page of the folio is unmapped from the VMA,
968 	 * leaving the folio no longer fully mapped to it.
969 	 *
970 	 * But it's not easy to confirm that's the situation, so we always
971 	 * munlock the folio and let page reclaim correct it if that was
972 	 * wrong.
973 	 */
974 	if (unlikely(vma->vm_flags & VM_LOCKED))
975 		munlock_folio(folio);
976 }
977 
978 void mlock_new_folio(struct folio *folio);
979 bool need_mlock_drain(int cpu);
980 void mlock_drain_local(void);
981 void mlock_drain_remote(int cpu);
982 
983 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
984 
985 /**
986  * vma_address - Find the virtual address a page range is mapped at
987  * @vma: The vma which maps this object.
988  * @pgoff: The page offset within its object.
989  * @nr_pages: The number of pages to consider.
990  *
991  * If any page in this range is mapped by this VMA, return the first address
992  * where any of these pages appear.  Otherwise, return -EFAULT.
993  */
994 static inline unsigned long vma_address(const struct vm_area_struct *vma,
995 		pgoff_t pgoff, unsigned long nr_pages)
996 {
997 	unsigned long address;
998 
999 	if (pgoff >= vma->vm_pgoff) {
1000 		address = vma->vm_start +
1001 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1002 		/* Check for address beyond vma (or wrapped through 0?) */
1003 		if (address < vma->vm_start || address >= vma->vm_end)
1004 			address = -EFAULT;
1005 	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
1006 		/* Test above avoids possibility of wrap to 0 on 32-bit */
1007 		address = vma->vm_start;
1008 	} else {
1009 		address = -EFAULT;
1010 	}
1011 	return address;
1012 }
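/*
 * Worked example (assuming 4kB pages): with vm_start == 0x100000 and
 * vm_pgoff == 0x10, a single page at pgoff 0x12 maps at
 * 0x100000 + ((0x12 - 0x10) << PAGE_SHIFT) == 0x102000.
 */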
1013 
1014 /*
1015  * Then at what user virtual address will none of the range be found in vma?
1016  * Assumes that vma_address() already returned a good starting address.
1017  */
1018 static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
1019 {
1020 	struct vm_area_struct *vma = pvmw->vma;
1021 	pgoff_t pgoff;
1022 	unsigned long address;
1023 
1024 	/* Common case, plus ->pgoff is invalid for KSM */
1025 	if (pvmw->nr_pages == 1)
1026 		return pvmw->address + PAGE_SIZE;
1027 
1028 	pgoff = pvmw->pgoff + pvmw->nr_pages;
1029 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1030 	/* Check for address beyond vma (or wrapped through 0?) */
1031 	if (address < vma->vm_start || address > vma->vm_end)
1032 		address = vma->vm_end;
1033 	return address;
1034 }
1035 
1036 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
1037 						    struct file *fpin)
1038 {
1039 	int flags = vmf->flags;
1040 
1041 	if (fpin)
1042 		return fpin;
1043 
1044 	/*
1045 	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
1046 	 * anything, so we only pin the file and drop the mmap_lock if only
1047 	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
1048 	 */
1049 	if (fault_flag_allow_retry_first(flags) &&
1050 	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
1051 		fpin = get_file(vmf->vma->vm_file);
1052 		release_fault_lock(vmf);
1053 	}
1054 	return fpin;
1055 }
1056 #else /* !CONFIG_MMU */
1057 static inline void unmap_mapping_folio(struct folio *folio) { }
1058 static inline void mlock_new_folio(struct folio *folio) { }
1059 static inline bool need_mlock_drain(int cpu) { return false; }
1060 static inline void mlock_drain_local(void) { }
1061 static inline void mlock_drain_remote(int cpu) { }
1062 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
1063 {
1064 }
1065 #endif /* !CONFIG_MMU */
1066 
1067 /* Memory initialisation debug and verification */
1068 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1069 DECLARE_STATIC_KEY_TRUE(deferred_pages);
1070 
1071 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
1072 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1073 
1074 enum mminit_level {
1075 	MMINIT_WARNING,
1076 	MMINIT_VERIFY,
1077 	MMINIT_TRACE
1078 };
1079 
1080 #ifdef CONFIG_DEBUG_MEMORY_INIT
1081 
1082 extern int mminit_loglevel;
1083 
1084 #define mminit_dprintk(level, prefix, fmt, arg...) \
1085 do { \
1086 	if (level < mminit_loglevel) { \
1087 		if (level <= MMINIT_WARNING) \
1088 			pr_warn("mminit::" prefix " " fmt, ##arg);	\
1089 		else \
1090 			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
1091 	} \
1092 } while (0)
1093 
1094 extern void mminit_verify_pageflags_layout(void);
1095 extern void mminit_verify_zonelist(void);
1096 #else
1097 
1098 static inline void mminit_dprintk(enum mminit_level level,
1099 				const char *prefix, const char *fmt, ...)
1100 {
1101 }
1102 
1103 static inline void mminit_verify_pageflags_layout(void)
1104 {
1105 }
1106 
1107 static inline void mminit_verify_zonelist(void)
1108 {
1109 }
1110 #endif /* CONFIG_DEBUG_MEMORY_INIT */
1111 
1112 #define NODE_RECLAIM_NOSCAN	-2
1113 #define NODE_RECLAIM_FULL	-1
1114 #define NODE_RECLAIM_SOME	0
1115 #define NODE_RECLAIM_SUCCESS	1
1116 
1117 #ifdef CONFIG_NUMA
1118 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
1119 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
1120 #else
1121 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
1122 				unsigned int order)
1123 {
1124 	return NODE_RECLAIM_NOSCAN;
1125 }
1126 static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
1127 {
1128 	return NUMA_NO_NODE;
1129 }
1130 #endif
1131 
1132 /*
1133  * mm/memory-failure.c
1134  */
1135 #ifdef CONFIG_MEMORY_FAILURE
1136 int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
1137 void shake_folio(struct folio *folio);
1138 extern int hwpoison_filter(struct page *p);
1139 
1140 extern u32 hwpoison_filter_dev_major;
1141 extern u32 hwpoison_filter_dev_minor;
1142 extern u64 hwpoison_filter_flags_mask;
1143 extern u64 hwpoison_filter_flags_value;
1144 extern u64 hwpoison_filter_memcg;
1145 extern u32 hwpoison_filter_enable;
1146 #define MAGIC_HWPOISON	0x48575053U	/* HWPS */
1147 void SetPageHWPoisonTakenOff(struct page *page);
1148 void ClearPageHWPoisonTakenOff(struct page *page);
1149 bool take_page_off_buddy(struct page *page);
1150 bool put_page_back_buddy(struct page *page);
1151 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
1152 void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
1153 		     struct vm_area_struct *vma, struct list_head *to_kill,
1154 		     unsigned long ksm_addr);
1155 unsigned long page_mapped_in_vma(const struct page *page,
1156 		struct vm_area_struct *vma);
1157 
1158 #else
1159 static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
1160 {
1161 	return -EBUSY;
1162 }
1163 #endif
1164 
1165 extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
1166         unsigned long, unsigned long,
1167         unsigned long, unsigned long);
1168 
1169 extern void set_pageblock_order(void);
1170 struct folio *alloc_migrate_folio(struct folio *src, unsigned long private);
1171 unsigned long reclaim_pages(struct list_head *folio_list);
1172 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1173 					    struct list_head *folio_list);
1174 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1175 #define ALLOC_WMARK_MIN		WMARK_MIN
1176 #define ALLOC_WMARK_LOW		WMARK_LOW
1177 #define ALLOC_WMARK_HIGH	WMARK_HIGH
1178 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1179 
1180 /* Mask to get the watermark bits */
1181 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
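/*
 * Sketch of how the index is used (hypothetical caller): mask out the
 * watermark bits and index the zone's watermarks:
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 */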
1182 
1183 /*
1184  * Only MMU archs have async oom victim reclaim - aka oom_reaper - so we
1185  * cannot assume that reduced access to memory reserves is sufficient for
1186  * !MMU.
1187  */
1188 #ifdef CONFIG_MMU
1189 #define ALLOC_OOM		0x08
1190 #else
1191 #define ALLOC_OOM		ALLOC_NO_WATERMARKS
1192 #endif
1193 
1194 #define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
1195 				       * to 25% of the min watermark or
1196 				       * 62.5% if __GFP_HIGH is set.
1197 				       */
1198 #define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
1199 				       * of the min watermark.
1200 				       */
1201 #define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
1202 #define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
1203 #ifdef CONFIG_ZONE_DMA32
1204 #define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
1205 #else
1206 #define ALLOC_NOFRAGMENT	  0x0
1207 #endif
1208 #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
1209 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
1210 
1211 /* Flags that allow allocations below the min watermark. */
1212 #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
1213 
1214 enum ttu_flags;
1215 struct tlbflush_unmap_batch;
1216 
1217 
1218 /*
1219  * only for MM internal work items which do not depend on
1220  * any allocations or locks which might depend on allocations
1221  */
1222 extern struct workqueue_struct *mm_percpu_wq;
1223 
1224 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1225 void try_to_unmap_flush(void);
1226 void try_to_unmap_flush_dirty(void);
1227 void flush_tlb_batched_pending(struct mm_struct *mm);
1228 #else
1229 static inline void try_to_unmap_flush(void)
1230 {
1231 }
1232 static inline void try_to_unmap_flush_dirty(void)
1233 {
1234 }
1235 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
1236 {
1237 }
1238 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
1239 
1240 extern const struct trace_print_flags pageflag_names[];
1241 extern const struct trace_print_flags vmaflag_names[];
1242 extern const struct trace_print_flags gfpflag_names[];
1243 
1244 static inline bool is_migrate_highatomic(enum migratetype migratetype)
1245 {
1246 	return migratetype == MIGRATE_HIGHATOMIC;
1247 }
1248 
1249 void setup_zone_pageset(struct zone *zone);
1250 
1251 struct migration_target_control {
1252 	int nid;		/* preferred node id */
1253 	nodemask_t *nmask;
1254 	gfp_t gfp_mask;
1255 	enum migrate_reason reason;
1256 };
1257 
1258 /*
1259  * mm/filemap.c
1260  */
1261 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
1262 			      struct folio *folio, loff_t fpos, size_t size);
1263 
1264 /*
1265  * mm/vmalloc.c
1266  */
1267 #ifdef CONFIG_MMU
1268 void __init vmalloc_init(void);
1269 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1270                 pgprot_t prot, struct page **pages, unsigned int page_shift);
1271 unsigned int get_vm_area_page_order(struct vm_struct *vm);
1272 #else
1273 static inline void vmalloc_init(void)
1274 {
1275 }
1276 
1277 static inline
1278 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1279                 pgprot_t prot, struct page **pages, unsigned int page_shift)
1280 {
1281 	return -EINVAL;
1282 }
1283 #endif
1284 
1285 int __must_check __vmap_pages_range_noflush(unsigned long addr,
1286 			       unsigned long end, pgprot_t prot,
1287 			       struct page **pages, unsigned int page_shift);
1288 
1289 void vunmap_range_noflush(unsigned long start, unsigned long end);
1290 
1291 void __vunmap_range_noflush(unsigned long start, unsigned long end);
1292 
1293 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
1294 		      unsigned long addr, int *flags, bool writable,
1295 		      int *last_cpupid);
1296 
1297 void free_zone_device_folio(struct folio *folio);
1298 int migrate_device_coherent_folio(struct folio *folio);
1299 
1300 struct vm_struct *__get_vm_area_node(unsigned long size,
1301 				     unsigned long align, unsigned long shift,
1302 				     unsigned long flags, unsigned long start,
1303 				     unsigned long end, int node, gfp_t gfp_mask,
1304 				     const void *caller);
1305 
1306 /*
1307  * mm/gup.c
1308  */
1309 int __must_check try_grab_folio(struct folio *folio, int refs,
1310 				unsigned int flags);
1311 
1312 /*
1313  * mm/huge_memory.c
1314  */
1315 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1316 	       pud_t *pud, bool write);
1317 void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1318 	       pmd_t *pmd, bool write);
1319 
1320 /*
1321  * Parses a string with mem suffixes into its order. Useful to parse kernel
1322  * parameters.
1323  */
1324 static inline int get_order_from_str(const char *size_str,
1325 				     unsigned long valid_orders)
1326 {
1327 	unsigned long size;
1328 	char *endptr;
1329 	int order;
1330 
1331 	size = memparse(size_str, &endptr);
1332 
1333 	if (!is_power_of_2(size))
1334 		return -EINVAL;
1335 	order = get_order(size);
1336 	if (BIT(order) & ~valid_orders)
1337 		return -EINVAL;
1338 
1339 	return order;
1340 }
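/*
 * Example: with 4kB pages, "2M" parses to order 9 (2MB == 512 pages), and is
 * accepted only if BIT(9) is set in @valid_orders; "3M" fails with -EINVAL
 * because it is not a power of two.
 */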
1341 
1342 enum {
1343 	/* mark page accessed */
1344 	FOLL_TOUCH = 1 << 16,
1345 	/* a retry, previous pass started an IO */
1346 	FOLL_TRIED = 1 << 17,
1347 	/* we are working on non-current tsk/mm */
1348 	FOLL_REMOTE = 1 << 18,
1349 	/* pages must be released via unpin_user_page */
1350 	FOLL_PIN = 1 << 19,
1351 	/* gup_fast: prevent fall-back to slow gup */
1352 	FOLL_FAST_ONLY = 1 << 20,
1353 	/* allow unlocking the mmap lock */
1354 	FOLL_UNLOCKABLE = 1 << 21,
1355 	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
1356 	FOLL_MADV_POPULATE = 1 << 22,
1357 };
1358 
1359 #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
1360 			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
1361 			    FOLL_MADV_POPULATE)
1362 
1363 /*
1364  * Indicates for which pages that are write-protected in the page table,
1365  * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
1366  * GUP pin will remain consistent with the pages mapped into the page tables
1367  * of the MM.
1368  *
1369  * Temporary unmapping of PageAnonExclusive() pages or clearing of
1370  * PageAnonExclusive() has to protect against concurrent GUP:
1371  * * Ordinary GUP: Using the PT lock
1372  * * GUP-fast and fork(): mm->write_protect_seq
1373  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1374  *    folio_try_share_anon_rmap_*()
1375  *
1376  * Must be called with the (sub)page that's actually referenced via the
1377  * page table entry, which might not necessarily be the head page for a
1378  * PTE-mapped THP.
1379  *
1380  * If the vma is NULL, we're coming from the GUP-fast path and might have
1381  * to fall back to the slow path just to look up the vma.
1382  */
1383 static inline bool gup_must_unshare(struct vm_area_struct *vma,
1384 				    unsigned int flags, struct page *page)
1385 {
1386 	/*
1387 	 * FOLL_WRITE is implicitly handled correctly as the page table entry
1388 	 * has to be writable -- and if it references (part of) an anonymous
1389 	 * folio, that part is required to be marked exclusive.
1390 	 */
1391 	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
1392 		return false;
1393 	/*
1394 	 * Note: PageAnon(page) is stable until the page is actually getting
1395 	 * freed.
1396 	 */
1397 	if (!PageAnon(page)) {
1398 		/*
1399 		 * We only care about R/O long-term pinning: R/O short-term
1400 		 * pinning does not have the semantics to observe successive
1401 		 * changes through the process page tables.
1402 		 */
1403 		if (!(flags & FOLL_LONGTERM))
1404 			return false;
1405 
1406 		/* We really need the vma ... */
1407 		if (!vma)
1408 			return true;
1409 
1410 		/*
1411 		 * ... because we only care about writable private ("COW")
1412 		 * mappings where we have to break COW early.
1413 		 */
1414 		return is_cow_mapping(vma->vm_flags);
1415 	}
1416 
1417 	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
1418 	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
1419 		smp_rmb();
1420 
1421 	/*
1422 	 * Note that KSM pages cannot be exclusive, and consequently,
1423 	 * cannot get pinned.
1424 	 */
1425 	return !PageAnonExclusive(page);
1426 }
1427 
1428 extern bool mirrored_kernelcore;
1429 extern bool memblock_has_mirror(void);
1430 
1431 static __always_inline void vma_set_range(struct vm_area_struct *vma,
1432 					  unsigned long start, unsigned long end,
1433 					  pgoff_t pgoff)
1434 {
1435 	vma->vm_start = start;
1436 	vma->vm_end = end;
1437 	vma->vm_pgoff = pgoff;
1438 }
1439 
1440 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1441 {
1442 	/*
1443 	 * NOTE: we must check this before testing VM_SOFTDIRTY, because when
1444 	 * soft-dirty is not compiled in, VM_SOFTDIRTY is defined as 0x0, so
1445 	 * !(vm_flags & VM_SOFTDIRTY) would be constantly true.
1447 	 */
1448 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
1449 		return false;
1450 
1451 	/*
1452 	 * Soft-dirty is kind of special: its tracking is enabled when the
1453 	 * VM_SOFTDIRTY vma flag is not set.
1454 	 */
1455 	return !(vma->vm_flags & VM_SOFTDIRTY);
1456 }
1457 
1458 static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
1459 {
1460 	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
1461 }
1462 
1463 static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
1464 {
1465 	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
1466 }
1467 
1468 void __meminit __init_single_page(struct page *page, unsigned long pfn,
1469 				unsigned long zone, int nid);
1470 void __meminit __init_reserved_page_zone(unsigned long pfn, int nid);
1471 
1472 /* shrinker related functions */
1473 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
1474 			  int priority);
1475 
1476 #ifdef CONFIG_SHRINKER_DEBUG
1477 static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
1478 			struct shrinker *shrinker, const char *fmt, va_list ap)
1479 {
1480 	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
1481 
1482 	return shrinker->name ? 0 : -ENOMEM;
1483 }
1484 
1485 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1486 {
1487 	kfree_const(shrinker->name);
1488 	shrinker->name = NULL;
1489 }
1490 
1491 extern int shrinker_debugfs_add(struct shrinker *shrinker);
1492 extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1493 					      int *debugfs_id);
1494 extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1495 				    int debugfs_id);
1496 #else /* CONFIG_SHRINKER_DEBUG */
1497 static inline int shrinker_debugfs_add(struct shrinker *shrinker)
1498 {
1499 	return 0;
1500 }
1501 static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
1502 					      const char *fmt, va_list ap)
1503 {
1504 	return 0;
1505 }
1506 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1507 {
1508 }
1509 static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1510 						     int *debugfs_id)
1511 {
1512 	*debugfs_id = -1;
1513 	return NULL;
1514 }
1515 static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1516 					   int debugfs_id)
1517 {
1518 }
1519 #endif /* CONFIG_SHRINKER_DEBUG */
1520 
1521 /* Only track the nodes of mappings with shadow entries */
1522 void workingset_update_node(struct xa_node *node);
1523 extern struct list_lru shadow_nodes;
1524 #define mapping_set_update(xas, mapping) do {			\
1525 	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {	\
1526 		xas_set_update(xas, workingset_update_node);	\
1527 		xas_set_lru(xas, &shadow_nodes);		\
1528 	}							\
1529 } while (0)
1530 
1531 /* mremap.c */
1532 unsigned long move_page_tables(struct vm_area_struct *vma,
1533 	unsigned long old_addr, struct vm_area_struct *new_vma,
1534 	unsigned long new_addr, unsigned long len,
1535 	bool need_rmap_locks, bool for_stack);
1536 
1537 #ifdef CONFIG_UNACCEPTED_MEMORY
1538 void accept_page(struct page *page);
1539 #else /* CONFIG_UNACCEPTED_MEMORY */
1540 static inline void accept_page(struct page *page)
1541 {
1542 }
1543 #endif /* CONFIG_UNACCEPTED_MEMORY */
1544 
1545 /* pagewalk.c */
1546 int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
1547 		unsigned long end, const struct mm_walk_ops *ops,
1548 		void *private);
1549 
1550 /* pt_reclaim.c */
1551 bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval);
1552 void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb,
1553 	      pmd_t pmdval);
1554 void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
1555 		     struct mmu_gather *tlb);
1556 
1557 #ifdef CONFIG_PT_RECLAIM
1558 bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
1559 			   struct zap_details *details);
1560 #else
1561 static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
1562 					 struct zap_details *details)
1563 {
1564 	return false;
1565 }
1566 #endif /* CONFIG_PT_RECLAIM */
1567 
1568 
1569 #endif	/* __MM_INTERNAL_H */
1570