xref: /linux/mm/internal.h (revision c5288cda69ee2d8607f5026bd599a5cebf0ee783)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* internal.h: mm/ internal definitions
3  *
4  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 #ifndef __MM_INTERNAL_H
8 #define __MM_INTERNAL_H
9 
10 #include <linux/fs.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/rmap.h>
14 #include <linux/swap.h>
15 #include <linux/swapops.h>
16 #include <linux/tracepoint-defs.h>
17 
18 struct folio_batch;
19 
20 /*
21  * The set of flags that only affect watermark checking and reclaim
22  * behaviour. This is used by the MM to obey the caller constraints
23  * about IO, FS and watermark checking while ignoring placement
24  * hints such as HIGHMEM usage.
25  */
26 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
27 			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
28 			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
29 			__GFP_NOLOCKDEP)
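
/*
 * Illustrative sketch (not a quote of a specific call site; "caller_gfp" is
 * a made-up name): an internal allocation that must honour the caller's
 * reclaim constraints, but not its placement hints, can mask the incoming
 * flags first:
 *
 *	gfp_t gfp = caller_gfp & GFP_RECLAIM_MASK;
 */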
30 
31 /* The GFP flags allowed during early boot */
32 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
33 
34 /* Control allocation cpuset and node placement constraints */
35 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
36 
37 /* Do not use these with a slab allocator */
38 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
39 
40 /*
41  * Unlike WARN_ON_ONCE(), no warning is issued when the caller
42  * specifies __GFP_NOWARN.
43  */
44 #define WARN_ON_ONCE_GFP(cond, gfp)	({				\
45 	static bool __section(".data.once") __warned;			\
46 	int __ret_warn_once = !!(cond);					\
47 									\
48 	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
49 		__warned = true;					\
50 		WARN_ON(1);						\
51 	}								\
52 	unlikely(__ret_warn_once);					\
53 })
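
/*
 * Illustrative use (a sketch, not a quote of a real call site): allocation
 * paths can gate a one-time sanity check on the request's gfp flags so that
 * __GFP_NOWARN requests stay silent:
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 */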
54 
55 void page_writeback_init(void);
56 
57 /*
58  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
59  * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
60  * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
61  * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
62  */
63 #define ENTIRELY_MAPPED		0x800000
64 #define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)
65 
66 /*
67  * Flags passed to __show_mem() and show_free_areas() to suppress output in
68  * various contexts.
69  */
70 #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
71 
72 /*
73  * How many individual pages have an elevated _mapcount.  Excludes
74  * the folio's entire_mapcount.
75  *
76  * Don't use this function outside of debugging code.
77  */
78 static inline int folio_nr_pages_mapped(const struct folio *folio)
79 {
80 	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
81 }
82 
83 /*
84  * Retrieve the first entry of a folio based on a provided entry within the
85  * folio. We cannot rely on folio->swap as there is no guarantee that it has
86  * been initialized. Used for calling arch_swap_restore()
87  */
88 static inline swp_entry_t folio_swap(swp_entry_t entry,
89 		const struct folio *folio)
90 {
91 	swp_entry_t swap = {
92 		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
93 	};
94 
95 	return swap;
96 }
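
/*
 * Worked example (illustrative): swap entries of a folio are naturally
 * aligned, so for an order-2 folio (4 pages) an entry with val 0x1007 (the
 * folio's fourth page) aligns down to 0x1004, the entry of the folio's
 * first page.
 */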
97 
98 static inline void *folio_raw_mapping(const struct folio *folio)
99 {
100 	unsigned long mapping = (unsigned long)folio->mapping;
101 
102 	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
103 }
104 
105 #ifdef CONFIG_MMU
106 
107 /* Flags for folio_pte_batch(). */
108 typedef int __bitwise fpb_t;
109 
110 /* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
111 #define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))
112 
113 /* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
114 #define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))
115 
116 static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
117 {
118 	if (flags & FPB_IGNORE_DIRTY)
119 		pte = pte_mkclean(pte);
120 	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
121 		pte = pte_clear_soft_dirty(pte);
122 	return pte_wrprotect(pte_mkold(pte));
123 }
124 
125 /**
126  * folio_pte_batch - detect a PTE batch for a large folio
127  * @folio: The large folio to detect a PTE batch for.
128  * @addr: The user virtual address the first page is mapped at.
129  * @start_ptep: Page table pointer for the first entry.
130  * @pte: Page table entry for the first page.
131  * @max_nr: The maximum number of table entries to consider.
132  * @flags: Flags to modify the PTE batch semantics.
133  * @any_writable: Optional pointer to indicate whether any entry except the
134  *		  first one is writable.
135  * @any_young: Optional pointer to indicate whether any entry except the
136  *		  first one is young.
137  * @any_dirty: Optional pointer to indicate whether any entry except the
138  *		  first one is dirty.
139  *
140  * Detect a PTE batch: consecutive (present) PTEs that map consecutive
141  * pages of the same large folio.
142  *
143  * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
144  * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
145  * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
146  *
147  * start_ptep must map any page of the folio. max_nr must be at least one and
148  * must be limited by the caller so scanning cannot exceed a single page table.
149  *
150  * Return: the number of table entries in the batch.
151  */
152 static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
153 		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
154 		bool *any_writable, bool *any_young, bool *any_dirty)
155 {
156 	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
157 	const pte_t *end_ptep = start_ptep + max_nr;
158 	pte_t expected_pte, *ptep;
159 	bool writable, young, dirty;
160 	int nr;
161 
162 	if (any_writable)
163 		*any_writable = false;
164 	if (any_young)
165 		*any_young = false;
166 	if (any_dirty)
167 		*any_dirty = false;
168 
169 	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
170 	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
171 	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
172 
173 	nr = pte_batch_hint(start_ptep, pte);
174 	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
175 	ptep = start_ptep + nr;
176 
177 	while (ptep < end_ptep) {
178 		pte = ptep_get(ptep);
179 		if (any_writable)
180 			writable = !!pte_write(pte);
181 		if (any_young)
182 			young = !!pte_young(pte);
183 		if (any_dirty)
184 			dirty = !!pte_dirty(pte);
185 		pte = __pte_batch_clear_ignored(pte, flags);
186 
187 		if (!pte_same(pte, expected_pte))
188 			break;
189 
190 		/*
191 		 * Stop immediately once we reach the end of the folio. In
192 		 * corner cases the next PFN might fall into a different
193 		 * folio.
194 		 */
195 		if (pte_pfn(pte) >= folio_end_pfn)
196 			break;
197 
198 		if (any_writable)
199 			*any_writable |= writable;
200 		if (any_young)
201 			*any_young |= young;
202 		if (any_dirty)
203 			*any_dirty |= dirty;
204 
205 		nr = pte_batch_hint(ptep, pte);
206 		expected_pte = pte_advance_pfn(expected_pte, nr);
207 		ptep += nr;
208 	}
209 
210 	return min(ptep - start_ptep, max_nr);
211 }
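
/*
 * Usage sketch (illustrative; "ptep"/"ptent" and the bookkeeping are made
 * up): a caller scanning a page table under the PTE lock can consume the
 * PTEs of a large folio in one step rather than page by page:
 *
 *	nr = folio_pte_batch(folio, addr, ptep, ptent, max_nr,
 *			     FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY,
 *			     NULL, NULL, NULL);
 *	addr += nr * PAGE_SIZE;
 *	ptep += nr;
 */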
212 
213 /**
214  * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
215  * @pte: The initial pte state; is_swap_pte(pte) must be true and
216  *	 non_swap_entry() must be false.
217  *
218  * Increments the swap offset, while maintaining all other fields, including
219  * swap type, and any swp pte bits. The resulting pte is returned.
220  */
221 static inline pte_t pte_next_swp_offset(pte_t pte)
222 {
223 	swp_entry_t entry = pte_to_swp_entry(pte);
224 	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
225 						   (swp_offset(entry) + 1)));
226 
227 	if (pte_swp_soft_dirty(pte))
228 		new = pte_swp_mksoft_dirty(new);
229 	if (pte_swp_exclusive(pte))
230 		new = pte_swp_mkexclusive(new);
231 	if (pte_swp_uffd_wp(pte))
232 		new = pte_swp_mkuffd_wp(new);
233 
234 	return new;
235 }
236 
237 /**
238  * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
239  * @start_ptep: Page table pointer for the first entry.
240  * @max_nr: The maximum number of table entries to consider.
241  * @pte: Page table entry for the first entry.
242  *
243  * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
244  * containing swap entries all with consecutive offsets and targeting the same
245  * swap type, all with matching swp pte bits.
246  *
247  * max_nr must be at least one and must be limited by the caller so scanning
248  * cannot exceed a single page table.
249  *
250  * Return: the number of table entries in the batch.
251  */
252 static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
253 {
254 	pte_t expected_pte = pte_next_swp_offset(pte);
255 	const pte_t *end_ptep = start_ptep + max_nr;
256 	pte_t *ptep = start_ptep + 1;
257 
258 	VM_WARN_ON(max_nr < 1);
259 	VM_WARN_ON(!is_swap_pte(pte));
260 	VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte)));
261 
262 	while (ptep < end_ptep) {
263 		pte = ptep_get(ptep);
264 
265 		if (!pte_same(pte, expected_pte))
266 			break;
267 
268 		expected_pte = pte_next_swp_offset(expected_pte);
269 		ptep++;
270 	}
271 
272 	return ptep - start_ptep;
273 }
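
/*
 * Usage sketch (illustrative): a zap/free path holding the PTE lock can
 * handle a run of consecutive swap entries at once:
 *
 *	nr = swap_pte_batch(ptep, max_nr, ptent);
 *	... free the nr swap entries starting at ptent, then advance
 *	    ptep and addr by nr entries/pages ...
 */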
274 #endif /* CONFIG_MMU */
275 
276 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
277 						int nr_throttled);
278 static inline void acct_reclaim_writeback(struct folio *folio)
279 {
280 	pg_data_t *pgdat = folio_pgdat(folio);
281 	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
282 
283 	if (nr_throttled)
284 		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
285 }
286 
287 static inline void wake_throttle_isolated(pg_data_t *pgdat)
288 {
289 	wait_queue_head_t *wqh;
290 
291 	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
292 	if (waitqueue_active(wqh))
293 		wake_up(wqh);
294 }
295 
296 vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
297 vm_fault_t do_swap_page(struct vm_fault *vmf);
298 void folio_rotate_reclaimable(struct folio *folio);
299 bool __folio_end_writeback(struct folio *folio);
300 void deactivate_file_folio(struct folio *folio);
301 void folio_activate(struct folio *folio);
302 
303 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
304 		   struct vm_area_struct *start_vma, unsigned long floor,
305 		   unsigned long ceiling, bool mm_wr_locked);
306 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
307 
308 struct zap_details;
309 void unmap_page_range(struct mmu_gather *tlb,
310 			     struct vm_area_struct *vma,
311 			     unsigned long addr, unsigned long end,
312 			     struct zap_details *details);
313 
314 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
315 		unsigned int order);
316 void force_page_cache_ra(struct readahead_control *, unsigned long nr);
317 static inline void force_page_cache_readahead(struct address_space *mapping,
318 		struct file *file, pgoff_t index, unsigned long nr_to_read)
319 {
320 	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
321 	force_page_cache_ra(&ractl, nr_to_read);
322 }
323 
324 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
325 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
326 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
327 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
328 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
329 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
330 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
331 		loff_t end);
332 long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
333 unsigned long mapping_try_invalidate(struct address_space *mapping,
334 		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
335 
336 /**
337  * folio_evictable - Test whether a folio is evictable.
338  * @folio: The folio to test.
339  *
340  * Test whether @folio is evictable -- i.e., should be placed on
341  * active/inactive lists vs unevictable list.
342  *
343  * Reasons folio might not be evictable:
344  * 1. folio's mapping marked unevictable
345  * 2. One of the pages in the folio is part of an mlocked VMA
346  */
347 static inline bool folio_evictable(struct folio *folio)
348 {
349 	bool ret;
350 
351 	/* Prevent address_space of inode and swap cache from being freed */
352 	rcu_read_lock();
353 	ret = !mapping_unevictable(folio_mapping(folio)) &&
354 			!folio_test_mlocked(folio);
355 	rcu_read_unlock();
356 	return ret;
357 }
358 
359 /*
360  * Turn a non-refcounted page (->_refcount == 0) into refcounted with
361  * a count of one.
362  */
363 static inline void set_page_refcounted(struct page *page)
364 {
365 	VM_BUG_ON_PAGE(PageTail(page), page);
366 	VM_BUG_ON_PAGE(page_ref_count(page), page);
367 	set_page_count(page, 1);
368 }
369 
370 /*
371  * Return true if a folio needs ->release_folio() calling upon it.
372  */
373 static inline bool folio_needs_release(struct folio *folio)
374 {
375 	struct address_space *mapping = folio_mapping(folio);
376 
377 	return folio_has_private(folio) ||
378 		(mapping && mapping_release_always(mapping));
379 }
380 
381 extern unsigned long highest_memmap_pfn;
382 
383 /*
384  * Maximum number of reclaim retries without progress before the OOM
385  * killer is considered the only way forward.
386  */
387 #define MAX_RECLAIM_RETRIES 16
388 
389 /*
390  * in mm/vmscan.c:
391  */
392 bool isolate_lru_page(struct page *page);
393 bool folio_isolate_lru(struct folio *folio);
394 void putback_lru_page(struct page *page);
395 void folio_putback_lru(struct folio *folio);
396 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
397 
398 /*
399  * in mm/rmap.c:
400  */
401 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
402 
403 /*
404  * in mm/page_alloc.c
405  */
406 #define K(x) ((x) << (PAGE_SHIFT-10))
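
/*
 * Worked example (illustrative): K() converts a page count to KiB; with
 * 4KiB pages (PAGE_SHIFT == 12), K(100) == 100 << 2 == 400.
 */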
407 
408 extern char * const zone_names[MAX_NR_ZONES];
409 
410 /* perform sanity checks on struct pages being allocated or freed */
411 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
412 
413 extern int min_free_kbytes;
414 
415 void setup_per_zone_wmarks(void);
416 void calculate_min_free_kbytes(void);
417 int __meminit init_per_zone_wmark_min(void);
418 void page_alloc_sysctl_init(void);
419 
420 /*
421  * Structure for holding the mostly immutable allocation parameters passed
422  * between functions involved in allocations, including the alloc_pages*
423  * family of functions.
424  *
425  * nodemask, migratetype and highest_zoneidx are initialized only once in
426  * __alloc_pages() and then never change.
427  *
428  * zonelist, preferred_zone and highest_zoneidx are set first in
429  * __alloc_pages() for the fast path, and might be later changed
430  * in __alloc_pages_slowpath(). All other functions pass the whole structure
431  * by a const pointer.
432  */
433 struct alloc_context {
434 	struct zonelist *zonelist;
435 	nodemask_t *nodemask;
436 	struct zoneref *preferred_zoneref;
437 	int migratetype;
438 
439 	/*
440 	 * highest_zoneidx represents highest usable zone index of
441 	 * the allocation request. Due to the nature of the zone,
442 	 * memory on lower zone than the highest_zoneidx will be
443 	 * protected by lowmem_reserve[highest_zoneidx].
444 	 *
445 	 * highest_zoneidx is also used by reclaim/compaction to limit
446 	 * the target zone since higher zone than this index cannot be
447 	 * usable for this allocation request.
448 	 */
449 	enum zone_type highest_zoneidx;
450 	bool spread_dirty_pages;
451 };
452 
453 /*
454  * This function returns the order of a free page in the buddy system. In
455  * general, page_zone(page)->lock must be held by the caller to prevent the
456  * page from being allocated in parallel and returning garbage as the order.
457  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
458  * page cannot be allocated or merged in parallel. Alternatively, it must
459  * handle invalid values gracefully, and use buddy_order_unsafe() below.
460  */
461 static inline unsigned int buddy_order(struct page *page)
462 {
463 	/* PageBuddy() must be checked by the caller */
464 	return page_private(page);
465 }
466 
467 /*
468  * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
469  * PageBuddy() should be checked first by the caller to minimize race window,
470  * and invalid values must be handled gracefully.
471  *
472  * READ_ONCE is used so that if the caller assigns the result into a local
473  * variable and e.g. tests it for valid range before using, the compiler cannot
474  * decide to remove the variable and inline the page_private(page) multiple
475  * times, potentially observing different values in the tests and the actual
476  * use of the result.
477  */
478 #define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
479 
480 /*
481  * This function checks whether a page is free && is the buddy of another
482  * page. We can coalesce a page and its buddy if
483  * (a) the buddy is not in a hole (check before calling!) &&
484  * (b) the buddy is in the buddy system &&
485  * (c) a page and its buddy have the same order &&
486  * (d) a page and its buddy are in the same zone.
487  *
488  * For recording whether a page is in the buddy system, we set PageBuddy.
489  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
490  *
491  * For recording page's order, we use page_private(page).
492  */
493 static inline bool page_is_buddy(struct page *page, struct page *buddy,
494 				 unsigned int order)
495 {
496 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
497 		return false;
498 
499 	if (buddy_order(buddy) != order)
500 		return false;
501 
502 	/*
503 	 * zone check is done late to avoid uselessly calculating
504 	 * zone/node ids for pages that could never merge.
505 	 */
506 	if (page_zone_id(page) != page_zone_id(buddy))
507 		return false;
508 
509 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
510 
511 	return true;
512 }
513 
514 /*
515  * Locate the struct page for both the matching buddy in our
516  * pair (buddy1) and the combined O(n+1) page they form (page).
517  *
518  * 1) Any buddy B1 will have an order O twin B2 which satisfies
519  * the following equation:
520  *     B2 = B1 ^ (1 << O)
521  * For example, if the starting buddy (buddy2) is #8 its order
522  * 1 buddy is #10:
523  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
524  *
525  * 2) Any buddy B will have an order O+1 parent P which
526  * satisfies the following equation:
527  *     P = B & ~(1 << O)
528  *
529  * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
530  */
531 static inline unsigned long
532 __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
533 {
534 	return page_pfn ^ (1 << order);
535 }
536 
537 /*
538  * Find the buddy of @page and validate it.
539  * @page: The input page
540  * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
541  *       function is used in the performance-critical __free_one_page().
542  * @order: The order of the page
543  * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
544  *             page_to_pfn().
545  *
546  * The found buddy can be non-PageBuddy, out of @page's zone, or of a
547  * different order than @page. Validation is necessary before using it.
548  *
549  * Return: the found buddy page or NULL if not found.
550  */
551 static inline struct page *find_buddy_page_pfn(struct page *page,
552 			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
553 {
554 	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
555 	struct page *buddy;
556 
557 	buddy = page + (__buddy_pfn - pfn);
558 	if (buddy_pfn)
559 		*buddy_pfn = __buddy_pfn;
560 
561 	if (page_is_buddy(page, buddy, order))
562 		return buddy;
563 	return NULL;
564 }
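
/*
 * Usage sketch (illustrative, not the exact merge-loop code): the freeing
 * path looks up and validates the buddy before merging,
 *
 *	buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *	if (!buddy)
 *		break;
 *
 * with a NULL return meaning there is no valid buddy at this order.
 */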
565 
566 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
567 				unsigned long end_pfn, struct zone *zone);
568 
569 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
570 				unsigned long end_pfn, struct zone *zone)
571 {
572 	if (zone->contiguous)
573 		return pfn_to_page(start_pfn);
574 
575 	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
576 }
577 
578 void set_zone_contiguous(struct zone *zone);
579 
580 static inline void clear_zone_contiguous(struct zone *zone)
581 {
582 	zone->contiguous = false;
583 }
584 
585 extern int __isolate_free_page(struct page *page, unsigned int order);
586 extern void __putback_isolated_page(struct page *page, unsigned int order,
587 				    int mt);
588 extern void memblock_free_pages(struct page *page, unsigned long pfn,
589 					unsigned int order);
590 extern void __free_pages_core(struct page *page, unsigned int order);
591 extern void kernel_init_pages(struct page *page, int numpages);
592 
593 /*
594  * This will have no effect, other than possibly generating a warning, if the
595  * caller passes in a non-large folio.
596  */
597 static inline void folio_set_order(struct folio *folio, unsigned int order)
598 {
599 	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
600 		return;
601 
602 	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
603 #ifdef CONFIG_64BIT
604 	folio->_folio_nr_pages = 1U << order;
605 #endif
606 }
607 
608 void folio_undo_large_rmappable(struct folio *folio);
609 
610 static inline struct folio *page_rmappable_folio(struct page *page)
611 {
612 	struct folio *folio = (struct folio *)page;
613 
614 	if (folio && folio_test_large(folio))
615 		folio_set_large_rmappable(folio);
616 	return folio;
617 }
618 
619 static inline void prep_compound_head(struct page *page, unsigned int order)
620 {
621 	struct folio *folio = (struct folio *)page;
622 
623 	folio_set_order(folio, order);
624 	atomic_set(&folio->_large_mapcount, -1);
625 	atomic_set(&folio->_entire_mapcount, -1);
626 	atomic_set(&folio->_nr_pages_mapped, 0);
627 	atomic_set(&folio->_pincount, 0);
628 	if (order > 1)
629 		INIT_LIST_HEAD(&folio->_deferred_list);
630 }
631 
632 static inline void prep_compound_tail(struct page *head, int tail_idx)
633 {
634 	struct page *p = head + tail_idx;
635 
636 	p->mapping = TAIL_MAPPING;
637 	set_compound_head(p, head);
638 	set_page_private(p, 0);
639 }
640 
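/*
 * Sketch of how the two helpers above fit together (illustrative; see
 * prep_compound_page() for the real thing):
 *
 *	__SetPageHead(page);
 *	for (i = 1; i < (1 << order); i++)
 *		prep_compound_tail(page, i);
 *	prep_compound_head(page, order);
 */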
641 extern void prep_compound_page(struct page *page, unsigned int order);
642 
643 extern void post_alloc_hook(struct page *page, unsigned int order,
644 					gfp_t gfp_flags);
645 extern bool free_pages_prepare(struct page *page, unsigned int order);
646 
647 extern int user_min_free_kbytes;
648 
649 void free_unref_page(struct page *page, unsigned int order);
650 void free_unref_folios(struct folio_batch *fbatch);
651 
652 extern void zone_pcp_reset(struct zone *zone);
653 extern void zone_pcp_disable(struct zone *zone);
654 extern void zone_pcp_enable(struct zone *zone);
655 extern void zone_pcp_init(struct zone *zone);
656 
657 extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
658 			  phys_addr_t min_addr,
659 			  int nid, bool exact_nid);
660 
661 void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
662 		unsigned long, enum meminit_context, struct vmem_altmap *, int);
663 
664 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
665 
666 /*
667  * in mm/compaction.c
668  */
669 /*
670  * compact_control is used to track pages being migrated and the free pages
671  * they are being migrated to during memory compaction. The free_pfn starts
672  * at the end of a zone and migrate_pfn begins at the start. Movable pages
673  * are moved to the end of a zone during a compaction run and the run
674  * completes when free_pfn <= migrate_pfn
675  */
676 struct compact_control {
677 	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
678 	struct list_head migratepages;	/* List of pages being migrated */
679 	unsigned int nr_freepages;	/* Number of isolated free pages */
680 	unsigned int nr_migratepages;	/* Number of pages to migrate */
681 	unsigned long free_pfn;		/* isolate_freepages search base */
682 	/*
683 	 * Acts as an in/out parameter to page isolation for migration.
684 	 * isolate_migratepages uses it as a search base.
685 	 * isolate_migratepages_block will update the value to the next pfn
686 	 * after the last isolated one.
687 	 */
688 	unsigned long migrate_pfn;
689 	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
690 	struct zone *zone;
691 	unsigned long total_migrate_scanned;
692 	unsigned long total_free_scanned;
693 	unsigned short fast_search_fail;/* failures to use free list searches */
694 	short search_order;		/* order to start a fast search at */
695 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
696 	int order;			/* order a direct compactor needs */
697 	int migratetype;		/* migratetype of direct compactor */
698 	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
699 	const int highest_zoneidx;	/* zone index of a direct compactor */
700 	enum migrate_mode mode;		/* Async or sync migration mode */
701 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
702 	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
703 	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
704 	bool direct_compaction;		/* False from kcompactd or /proc/... */
705 	bool proactive_compaction;	/* kcompactd proactive compaction */
706 	bool whole_zone;		/* Whole zone should/has been scanned */
707 	bool contended;			/* Signal lock contention */
708 	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
709 					 * when there are potentially transient
710 					 * isolation or migration failures to
711 					 * ensure forward progress.
712 					 */
713 	bool alloc_contig;		/* alloc_contig_range allocation */
714 };
715 
716 /*
717  * Used in direct compaction when a page should be taken from the freelists
718  * immediately when one is created during the free path.
719  */
720 struct capture_control {
721 	struct compact_control *cc;
722 	struct page *page;
723 };
724 
725 unsigned long
726 isolate_freepages_range(struct compact_control *cc,
727 			unsigned long start_pfn, unsigned long end_pfn);
728 int
729 isolate_migratepages_range(struct compact_control *cc,
730 			   unsigned long low_pfn, unsigned long end_pfn);
731 
732 int __alloc_contig_migrate_range(struct compact_control *cc,
733 					unsigned long start, unsigned long end,
734 					int migratetype);
735 
736 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
737 void init_cma_reserved_pageblock(struct page *page);
738 
739 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
740 
741 int find_suitable_fallback(struct free_area *area, unsigned int order,
742 			int migratetype, bool only_stealable, bool *can_steal);
743 
744 static inline bool free_area_empty(struct free_area *area, int migratetype)
745 {
746 	return list_empty(&area->free_list[migratetype]);
747 }
748 
749 /*
750  * These three helpers classify VMAs for virtual memory accounting.
751  */
752 
753 /*
754  * Executable code area - executable, not writable, not stack
755  */
756 static inline bool is_exec_mapping(vm_flags_t flags)
757 {
758 	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
759 }
760 
761 /*
762  * Stack area (including shadow stacks)
763  *
764  * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
765  * do_mmap() forbids all other combinations.
766  */
767 static inline bool is_stack_mapping(vm_flags_t flags)
768 {
769 	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
770 }
771 
772 /*
773  * Data area - private, writable, not stack
774  */
775 static inline bool is_data_mapping(vm_flags_t flags)
776 {
777 	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
778 }
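
/*
 * Illustrative examples (evaluating the helpers above on made-up flag
 * combinations):
 *
 *	is_exec_mapping(VM_READ | VM_EXEC)			-> true
 *	is_data_mapping(VM_READ | VM_WRITE)			-> true
 *	is_stack_mapping(VM_STACK | VM_READ | VM_WRITE)		-> true
 */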
779 
780 /* mm/util.c */
781 struct anon_vma *folio_anon_vma(struct folio *folio);
782 
783 #ifdef CONFIG_MMU
784 void unmap_mapping_folio(struct folio *folio);
785 extern long populate_vma_page_range(struct vm_area_struct *vma,
786 		unsigned long start, unsigned long end, int *locked);
787 extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
788 		unsigned long end, bool write, int *locked);
789 extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
790 			       unsigned long bytes);
791 
792 /*
793  * NOTE: This function can't tell whether the folio is "fully mapped" in the
794  * range.
795  * "fully mapped" means all the pages of the folio are associated with the
796  * page table of the range, while this function just checks whether the
797  * folio range is within the range [start, end). The caller needs to check
798  * the page table if it cares about the page table association.
799  *
800  * Typical usage (like mlock or madvise) is:
801  * Caller knows at least 1 page of folio is associated with page table of VMA
802  * and the range [start, end) intersects the VMA range. Caller wants
803  * to know whether the folio is fully associated with the range. It calls
804  * this function to check whether the folio is in the range first. Then checks
805  * the page table to know whether the folio is fully mapped to the range.
806  */
807 static inline bool
808 folio_within_range(struct folio *folio, struct vm_area_struct *vma,
809 		unsigned long start, unsigned long end)
810 {
811 	pgoff_t pgoff, addr;
812 	unsigned long vma_pglen = vma_pages(vma);
813 
814 	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
815 	if (start > end)
816 		return false;
817 
818 	if (start < vma->vm_start)
819 		start = vma->vm_start;
820 
821 	if (end > vma->vm_end)
822 		end = vma->vm_end;
823 
824 	pgoff = folio_pgoff(folio);
825 
826 	/* if folio start address is not in vma range */
827 	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
828 		return false;
829 
830 	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
831 
832 	return !(addr < start || end - addr < folio_size(folio));
833 }
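
/*
 * Usage sketch (illustrative): an mlock/madvise style caller does the cheap
 * range check first and only then walks the page table:
 *
 *	if (!folio_within_range(folio, vma, start, end))
 *		return;
 *	... check the PTEs to confirm the folio is fully mapped ...
 */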
834 
835 static inline bool
836 folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
837 {
838 	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
839 }
840 
841 /*
842  * mlock_vma_folio() and munlock_vma_folio():
843  * should be called with vma's mmap_lock held for read or write,
844  * under page table lock for the pte/pmd being added or removed.
845  *
846  * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
847  * the end of folio_remove_rmap_*(); but new anon folios are managed by
848  * folio_add_lru_vma() calling mlock_new_folio().
849  */
850 void mlock_folio(struct folio *folio);
851 static inline void mlock_vma_folio(struct folio *folio,
852 				struct vm_area_struct *vma)
853 {
854 	/*
855 	 * The VM_SPECIAL check here serves two purposes.
856 	 * 1) VM_IO check prevents migration from double-counting during mlock.
857 	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
858 	 *    is never left set on a VM_SPECIAL vma, there is an interval while
859 	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
860 	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
861 	 */
862 	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
863 		mlock_folio(folio);
864 }
865 
866 void munlock_folio(struct folio *folio);
867 static inline void munlock_vma_folio(struct folio *folio,
868 					struct vm_area_struct *vma)
869 {
870 	/*
871 	 * Always munlock when this function is called. Ideally, we should
872 	 * only munlock if some page of the folio was unmapped from the VMA,
873 	 * leaving the folio no longer fully mapped to the VMA.
874 	 *
875 	 * But it's not easy to confirm that's the situation. So we
876 	 * always munlock the folio and page reclaim will correct it
877 	 * if it's wrong.
878 	 */
879 	if (unlikely(vma->vm_flags & VM_LOCKED))
880 		munlock_folio(folio);
881 }
882 
883 void mlock_new_folio(struct folio *folio);
884 bool need_mlock_drain(int cpu);
885 void mlock_drain_local(void);
886 void mlock_drain_remote(int cpu);
887 
888 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
889 
890 /**
891  * vma_address - Find the virtual address a page range is mapped at
892  * @vma: The vma which maps this object.
893  * @pgoff: The page offset within its object.
894  * @nr_pages: The number of pages to consider.
895  *
896  * If any page in this range is mapped by this VMA, return the first address
897  * where any of these pages appear.  Otherwise, return -EFAULT.
898  */
899 static inline unsigned long vma_address(struct vm_area_struct *vma,
900 		pgoff_t pgoff, unsigned long nr_pages)
901 {
902 	unsigned long address;
903 
904 	if (pgoff >= vma->vm_pgoff) {
905 		address = vma->vm_start +
906 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
907 		/* Check for address beyond vma (or wrapped through 0?) */
908 		if (address < vma->vm_start || address >= vma->vm_end)
909 			address = -EFAULT;
910 	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
911 		/* Test above avoids possibility of wrap to 0 on 32-bit */
912 		address = vma->vm_start;
913 	} else {
914 		address = -EFAULT;
915 	}
916 	return address;
917 }
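
/*
 * Worked example (illustrative): for a VMA covering [0x10000, 0x14000)
 * with vm_pgoff 4 and 4KiB pages, a single page at pgoff 6 is mapped at
 * 0x10000 + ((6 - 4) << 12) = 0x12000; pgoff 9 would fall beyond vm_end
 * and yield -EFAULT.
 */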
918 
919 /*
920  * Then at what user virtual address will none of the range be found in vma?
921  * Assumes that vma_address() already returned a good starting address.
922  */
923 static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
924 {
925 	struct vm_area_struct *vma = pvmw->vma;
926 	pgoff_t pgoff;
927 	unsigned long address;
928 
929 	/* Common case, plus ->pgoff is invalid for KSM */
930 	if (pvmw->nr_pages == 1)
931 		return pvmw->address + PAGE_SIZE;
932 
933 	pgoff = pvmw->pgoff + pvmw->nr_pages;
934 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
935 	/* Check for address beyond vma (or wrapped through 0?) */
936 	if (address < vma->vm_start || address > vma->vm_end)
937 		address = vma->vm_end;
938 	return address;
939 }
940 
941 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
942 						    struct file *fpin)
943 {
944 	int flags = vmf->flags;
945 
946 	if (fpin)
947 		return fpin;
948 
949 	/*
950 	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
951 	 * anything, so we only pin the file and drop the mmap_lock if
952 	 * FAULT_FLAG_ALLOW_RETRY is set and this is the first attempt.
953 	 */
954 	if (fault_flag_allow_retry_first(flags) &&
955 	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
956 		fpin = get_file(vmf->vma->vm_file);
957 		release_fault_lock(vmf);
958 	}
959 	return fpin;
960 }
961 #else /* !CONFIG_MMU */
962 static inline void unmap_mapping_folio(struct folio *folio) { }
963 static inline void mlock_new_folio(struct folio *folio) { }
964 static inline bool need_mlock_drain(int cpu) { return false; }
965 static inline void mlock_drain_local(void) { }
966 static inline void mlock_drain_remote(int cpu) { }
967 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
968 {
969 }
970 #endif /* !CONFIG_MMU */
971 
972 /* Memory initialisation debug and verification */
973 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
974 DECLARE_STATIC_KEY_TRUE(deferred_pages);
975 
976 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
977 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
978 
979 enum mminit_level {
980 	MMINIT_WARNING,
981 	MMINIT_VERIFY,
982 	MMINIT_TRACE
983 };
984 
985 #ifdef CONFIG_DEBUG_MEMORY_INIT
986 
987 extern int mminit_loglevel;
988 
989 #define mminit_dprintk(level, prefix, fmt, arg...) \
990 do { \
991 	if (level < mminit_loglevel) { \
992 		if (level <= MMINIT_WARNING) \
993 			pr_warn("mminit::" prefix " " fmt, ##arg);	\
994 		else \
995 			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
996 	} \
997 } while (0)
998 
999 extern void mminit_verify_pageflags_layout(void);
1000 extern void mminit_verify_zonelist(void);
1001 #else
1002 
1003 static inline void mminit_dprintk(enum mminit_level level,
1004 				const char *prefix, const char *fmt, ...)
1005 {
1006 }
1007 
1008 static inline void mminit_verify_pageflags_layout(void)
1009 {
1010 }
1011 
1012 static inline void mminit_verify_zonelist(void)
1013 {
1014 }
1015 #endif /* CONFIG_DEBUG_MEMORY_INIT */
1016 
1017 #define NODE_RECLAIM_NOSCAN	-2
1018 #define NODE_RECLAIM_FULL	-1
1019 #define NODE_RECLAIM_SOME	0
1020 #define NODE_RECLAIM_SUCCESS	1
1021 
1022 #ifdef CONFIG_NUMA
1023 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
1024 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
1025 #else
1026 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
1027 				unsigned int order)
1028 {
1029 	return NODE_RECLAIM_NOSCAN;
1030 }
1031 static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
1032 {
1033 	return NUMA_NO_NODE;
1034 }
1035 #endif
1036 
1037 /*
1038  * mm/memory-failure.c
1039  */
1040 void shake_folio(struct folio *folio);
1041 extern int hwpoison_filter(struct page *p);
1042 
1043 extern u32 hwpoison_filter_dev_major;
1044 extern u32 hwpoison_filter_dev_minor;
1045 extern u64 hwpoison_filter_flags_mask;
1046 extern u64 hwpoison_filter_flags_value;
1047 extern u64 hwpoison_filter_memcg;
1048 extern u32 hwpoison_filter_enable;
1049 
1050 extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
1051         unsigned long, unsigned long,
1052         unsigned long, unsigned long);
1053 
1054 extern void set_pageblock_order(void);
1055 unsigned long reclaim_pages(struct list_head *folio_list);
1056 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1057 					    struct list_head *folio_list);
1058 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1059 #define ALLOC_WMARK_MIN		WMARK_MIN
1060 #define ALLOC_WMARK_LOW		WMARK_LOW
1061 #define ALLOC_WMARK_HIGH	WMARK_HIGH
1062 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1063 
1064 /* Mask to get the watermark bits */
1065 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1066 
1067 /*
1068  * Only MMU archs have async oom victim reclaim - aka the oom_reaper - so
1069  * we cannot assume that reduced access to memory reserves is sufficient
1070  * for !MMU.
1071  */
1072 #ifdef CONFIG_MMU
1073 #define ALLOC_OOM		0x08
1074 #else
1075 #define ALLOC_OOM		ALLOC_NO_WATERMARKS
1076 #endif
1077 
1078 #define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
1079 				       * to 25% of the min watermark or
1080 				       * 62.5% if __GFP_HIGH is set.
1081 				       */
1082 #define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
1083 				       * of the min watermark.
1084 				       */
1085 #define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
1086 #define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
1087 #ifdef CONFIG_ZONE_DMA32
1088 #define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
1089 #else
1090 #define ALLOC_NOFRAGMENT	  0x0
1091 #endif
1092 #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
1093 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
1094 
1095 /* Flags that allow allocations below the min watermark. */
1096 #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
1097 
1098 enum ttu_flags;
1099 struct tlbflush_unmap_batch;
1100 
1101 
1102 /*
1103  * only for MM internal work items which do not depend on
1104  * any allocations or locks which might depend on allocations
1105  */
1106 extern struct workqueue_struct *mm_percpu_wq;
1107 
1108 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1109 void try_to_unmap_flush(void);
1110 void try_to_unmap_flush_dirty(void);
1111 void flush_tlb_batched_pending(struct mm_struct *mm);
1112 #else
1113 static inline void try_to_unmap_flush(void)
1114 {
1115 }
1116 static inline void try_to_unmap_flush_dirty(void)
1117 {
1118 }
1119 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
1120 {
1121 }
1122 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
1123 
1124 extern const struct trace_print_flags pageflag_names[];
1125 extern const struct trace_print_flags pagetype_names[];
1126 extern const struct trace_print_flags vmaflag_names[];
1127 extern const struct trace_print_flags gfpflag_names[];
1128 
1129 static inline bool is_migrate_highatomic(enum migratetype migratetype)
1130 {
1131 	return migratetype == MIGRATE_HIGHATOMIC;
1132 }
1133 
1134 void setup_zone_pageset(struct zone *zone);
1135 
1136 struct migration_target_control {
1137 	int nid;		/* preferred node id */
1138 	nodemask_t *nmask;
1139 	gfp_t gfp_mask;
1140 	enum migrate_reason reason;
1141 };
1142 
1143 /*
1144  * mm/filemap.c
1145  */
1146 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
1147 			      struct folio *folio, loff_t fpos, size_t size);
1148 
1149 /*
1150  * mm/vmalloc.c
1151  */
1152 #ifdef CONFIG_MMU
1153 void __init vmalloc_init(void);
1154 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1155                 pgprot_t prot, struct page **pages, unsigned int page_shift);
1156 #else
1157 static inline void vmalloc_init(void)
1158 {
1159 }
1160 
1161 static inline
1162 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1163                 pgprot_t prot, struct page **pages, unsigned int page_shift)
1164 {
1165 	return -EINVAL;
1166 }
1167 #endif
1168 
1169 int __must_check __vmap_pages_range_noflush(unsigned long addr,
1170 			       unsigned long end, pgprot_t prot,
1171 			       struct page **pages, unsigned int page_shift);
1172 
1173 void vunmap_range_noflush(unsigned long start, unsigned long end);
1174 
1175 void __vunmap_range_noflush(unsigned long start, unsigned long end);
1176 
1177 int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
1178 		      unsigned long addr, int page_nid, int *flags);
1179 
1180 void free_zone_device_folio(struct folio *folio);
1181 int migrate_device_coherent_page(struct page *page);
1182 
1183 /*
1184  * mm/gup.c
1185  */
1186 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
1187 int __must_check try_grab_page(struct page *page, unsigned int flags);
1188 
1189 /*
1190  * mm/huge_memory.c
1191  */
1192 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1193 	       pud_t *pud, bool write);
1194 void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1195 	       pmd_t *pmd, bool write);
1196 
1197 /*
1198  * mm/mmap.c
1199  */
1200 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1201 					struct vm_area_struct *vma,
1202 					unsigned long delta);
1203 
1204 enum {
1205 	/* mark page accessed */
1206 	FOLL_TOUCH = 1 << 16,
1207 	/* a retry, previous pass started an IO */
1208 	FOLL_TRIED = 1 << 17,
1209 	/* we are working on non-current tsk/mm */
1210 	FOLL_REMOTE = 1 << 18,
1211 	/* pages must be released via unpin_user_page */
1212 	FOLL_PIN = 1 << 19,
1213 	/* gup_fast: prevent fall-back to slow gup */
1214 	FOLL_FAST_ONLY = 1 << 20,
1215 	/* allow unlocking the mmap lock */
1216 	FOLL_UNLOCKABLE = 1 << 21,
1217 	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
1218 	FOLL_MADV_POPULATE = 1 << 22,
1219 };
1220 
1221 #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
1222 			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
1223 			    FOLL_MADV_POPULATE)
1224 
1225 /*
1226  * Indicates whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE for
1227  * pages that are write-protected in the page table, such that the GUP pin
1228  * will remain consistent with the pages mapped into the page tables of the
1229  * MM.
1230  *
1231  * Temporary unmapping of PageAnonExclusive() pages or clearing of
1232  * PageAnonExclusive() has to protect against concurrent GUP:
1233  * * Ordinary GUP: Using the PT lock
1234  * * GUP-fast and fork(): mm->write_protect_seq
1235  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1236  *    folio_try_share_anon_rmap_*()
1237  *
1238  * Must be called with the (sub)page that's actually referenced via the
1239  * page table entry, which might not necessarily be the head page for a
1240  * PTE-mapped THP.
1241  *
1242  * If the vma is NULL, we're coming from the GUP-fast path and might have
1243  * to fallback to the slow path just to lookup the vma.
1244  */
1245 static inline bool gup_must_unshare(struct vm_area_struct *vma,
1246 				    unsigned int flags, struct page *page)
1247 {
1248 	/*
1249 	 * FOLL_WRITE is implicitly handled correctly as the page table entry
1250 	 * has to be writable -- and if it references (part of) an anonymous
1251 	 * folio, that part is required to be marked exclusive.
1252 	 */
1253 	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
1254 		return false;
1255 	/*
1256 	 * Note: PageAnon(page) is stable until the page is actually getting
1257 	 * freed.
1258 	 */
1259 	if (!PageAnon(page)) {
1260 		/*
1261 		 * We only care about R/O long-term pinning: R/O short-term
1262 		 * pinning does not have the semantics to observe successive
1263 		 * changes through the process page tables.
1264 		 */
1265 		if (!(flags & FOLL_LONGTERM))
1266 			return false;
1267 
1268 		/* We really need the vma ... */
1269 		if (!vma)
1270 			return true;
1271 
1272 		/*
1273 		 * ... because we only care about writable private ("COW")
1274 		 * mappings where we have to break COW early.
1275 		 */
1276 		return is_cow_mapping(vma->vm_flags);
1277 	}
1278 
1279 	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
1280 	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
1281 		smp_rmb();
1282 
1283 	/*
1284 	 * Note that PageKsm() pages cannot be exclusive, and consequently,
1285 	 * cannot get pinned.
1286 	 */
1287 	return !PageAnonExclusive(page);
1288 }
1289 
1290 extern bool mirrored_kernelcore;
1291 extern bool memblock_has_mirror(void);
1292 
1293 static __always_inline void vma_set_range(struct vm_area_struct *vma,
1294 					  unsigned long start, unsigned long end,
1295 					  pgoff_t pgoff)
1296 {
1297 	vma->vm_start = start;
1298 	vma->vm_end = end;
1299 	vma->vm_pgoff = pgoff;
1300 }
1301 
1302 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1303 {
1304 	/*
1305 	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
1306 	 * enablements, because when without soft-dirty being compiled in,
1307 	 * VM_SOFTDIRTY is defined as 0x0, then !(vm_flags & VM_SOFTDIRTY)
1308 	 * will be constantly true.
1309 	 */
1310 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
1311 		return false;
1312 
1313 	/*
1314 	 * Soft-dirty is kind of special: its tracking is enabled when the
1315 	 * VM_SOFTDIRTY vma flag is not set.
1316 	 */
1317 	return !(vma->vm_flags & VM_SOFTDIRTY);
1318 }
1319 
1320 static inline void vma_iter_config(struct vma_iterator *vmi,
1321 		unsigned long index, unsigned long last)
1322 {
1323 	__mas_set_range(&vmi->mas, index, last - 1);
1324 }
1325 
1326 static inline void vma_iter_reset(struct vma_iterator *vmi)
1327 {
1328 	mas_reset(&vmi->mas);
1329 }
1330 
1331 static inline
1332 struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
1333 {
1334 	return mas_prev_range(&vmi->mas, min);
1335 }
1336 
1337 static inline
1338 struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
1339 {
1340 	return mas_next_range(&vmi->mas, max);
1341 }
1342 
1343 static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
1344 				       unsigned long max, unsigned long size)
1345 {
1346 	return mas_empty_area(&vmi->mas, min, max - 1, size);
1347 }
1348 
1349 static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
1350 					unsigned long max, unsigned long size)
1351 {
1352 	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
1353 }
1354 
1355 /*
1356  * VMA Iterator functions shared between nommu and mmap
1357  */
1358 static inline int vma_iter_prealloc(struct vma_iterator *vmi,
1359 		struct vm_area_struct *vma)
1360 {
1361 	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
1362 }
1363 
1364 static inline void vma_iter_clear(struct vma_iterator *vmi)
1365 {
1366 	mas_store_prealloc(&vmi->mas, NULL);
1367 }
1368 
1369 static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
1370 {
1371 	return mas_walk(&vmi->mas);
1372 }
1373 
1374 /* Store a VMA with preallocated memory */
1375 static inline void vma_iter_store(struct vma_iterator *vmi,
1376 				  struct vm_area_struct *vma)
1377 {
1378 
1379 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1380 	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
1381 			vmi->mas.index > vma->vm_start)) {
1382 		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
1383 			vmi->mas.index, vma->vm_start, vma->vm_start,
1384 			vma->vm_end, vmi->mas.index, vmi->mas.last);
1385 	}
1386 	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
1387 			vmi->mas.last <  vma->vm_start)) {
1388 		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
1389 		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
1390 		       vmi->mas.index, vmi->mas.last);
1391 	}
1392 #endif
1393 
1394 	if (vmi->mas.status != ma_start &&
1395 	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1396 		vma_iter_invalidate(vmi);
1397 
1398 	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
1399 	mas_store_prealloc(&vmi->mas, vma);
1400 }
1401 
1402 static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
1403 			struct vm_area_struct *vma, gfp_t gfp)
1404 {
1405 	if (vmi->mas.status != ma_start &&
1406 	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1407 		vma_iter_invalidate(vmi);
1408 
1409 	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
1410 	mas_store_gfp(&vmi->mas, vma, gfp);
1411 	if (unlikely(mas_is_err(&vmi->mas)))
1412 		return -ENOMEM;
1413 
1414 	return 0;
1415 }
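
/*
 * Usage sketch (illustrative): a typical insertion configures the range,
 * preallocates maple tree nodes, and then stores the VMA:
 *
 *	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store(&vmi, vma);
 */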
1416 
1417 /*
1418  * VMA lock generalization
1419  */
1420 struct vma_prepare {
1421 	struct vm_area_struct *vma;
1422 	struct vm_area_struct *adj_next;
1423 	struct file *file;
1424 	struct address_space *mapping;
1425 	struct anon_vma *anon_vma;
1426 	struct vm_area_struct *insert;
1427 	struct vm_area_struct *remove;
1428 	struct vm_area_struct *remove2;
1429 };
1430 
1431 void __meminit __init_single_page(struct page *page, unsigned long pfn,
1432 				unsigned long zone, int nid);
1433 
1434 /* shrinker related functions */
1435 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
1436 			  int priority);
1437 
1438 #ifdef CONFIG_64BIT
1439 /* VM is sealed, in vm_flags */
1440 #define VM_SEALED	_BITUL(63)
1441 #endif
1442 
1443 #ifdef CONFIG_64BIT
1444 static inline int can_do_mseal(unsigned long flags)
1445 {
1446 	if (flags)
1447 		return -EINVAL;
1448 
1449 	return 0;
1450 }
1451 
1452 bool can_modify_mm(struct mm_struct *mm, unsigned long start,
1453 		unsigned long end);
1454 bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
1455 		unsigned long end, int behavior);
1456 #else
1457 static inline int can_do_mseal(unsigned long flags)
1458 {
1459 	return -EPERM;
1460 }
1461 
1462 static inline bool can_modify_mm(struct mm_struct *mm, unsigned long start,
1463 		unsigned long end)
1464 {
1465 	return true;
1466 }
1467 
1468 static inline bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
1469 		unsigned long end, int behavior)
1470 {
1471 	return true;
1472 }
1473 #endif
1474 
1475 #ifdef CONFIG_SHRINKER_DEBUG
1476 static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
1477 			struct shrinker *shrinker, const char *fmt, va_list ap)
1478 {
1479 	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
1480 
1481 	return shrinker->name ? 0 : -ENOMEM;
1482 }
1483 
1484 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1485 {
1486 	kfree_const(shrinker->name);
1487 	shrinker->name = NULL;
1488 }
1489 
1490 extern int shrinker_debugfs_add(struct shrinker *shrinker);
1491 extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1492 					      int *debugfs_id);
1493 extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1494 				    int debugfs_id);
1495 #else /* CONFIG_SHRINKER_DEBUG */
1496 static inline int shrinker_debugfs_add(struct shrinker *shrinker)
1497 {
1498 	return 0;
1499 }
1500 static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
1501 					      const char *fmt, va_list ap)
1502 {
1503 	return 0;
1504 }
1505 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1506 {
1507 }
1508 static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1509 						     int *debugfs_id)
1510 {
1511 	*debugfs_id = -1;
1512 	return NULL;
1513 }
1514 static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1515 					   int debugfs_id)
1516 {
1517 }
1518 #endif /* CONFIG_SHRINKER_DEBUG */
1519 
1520 /* Only track the nodes of mappings with shadow entries */
1521 void workingset_update_node(struct xa_node *node);
1522 extern struct list_lru shadow_nodes;
1523 
1524 #endif	/* __MM_INTERNAL_H */
1525