xref: /linux/mm/internal.h (revision dcb8cbb58a218c99aab0dbf3f76cf06a04d44f37)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* internal.h: mm/ internal definitions
3  *
4  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 #ifndef __MM_INTERNAL_H
8 #define __MM_INTERNAL_H
9 
10 #include <linux/fs.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/rmap.h>
14 #include <linux/tracepoint-defs.h>
15 
16 struct folio_batch;
17 
18 /*
19  * The set of flags that only affect watermark checking and reclaim
20  * behaviour. This is used by the MM to obey the caller constraints
21  * about IO, FS and watermark checking while ignoring placement
22  * hints such as HIGHMEM usage.
23  */
24 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
25 			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
26 			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
27 			__GFP_NOLOCKDEP)
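/*
 * Illustrative (hypothetical) usage of the mask above: a nested allocation
 * typically keeps only the caller's reclaim constraints and supplies its
 * own placement hints, e.g.
 *
 *	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_HIGHMEM;
 *
 * Only the IO/FS/watermark behaviour of gfp_mask survives the mask.
 */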
28 
29 /* The GFP flags allowed during early boot */
30 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
31 
32 /* Control allocation cpuset and node placement constraints */
33 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
34 
35 /* Do not use these with a slab allocator */
36 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
37 
38 /*
39  * Unlike WARN_ON_ONCE(), no warning will be issued
40  * when __GFP_NOWARN is specified.
41  */
42 #define WARN_ON_ONCE_GFP(cond, gfp)	({				\
43 	static bool __section(".data.once") __warned;			\
44 	int __ret_warn_once = !!(cond);					\
45 									\
46 	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
47 		__warned = true;					\
48 		WARN_ON(1);						\
49 	}								\
50 	unlikely(__ret_warn_once);					\
51 })
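/*
 * Illustrative (hypothetical) call site: the macro evaluates to the
 * condition, so it can gate an error path while warning at most once,
 * and never when the caller passed __GFP_NOWARN:
 *
 *	if (WARN_ON_ONCE_GFP(order > some_limit, gfp))
 *		return NULL;
 *
 * "some_limit" is a placeholder, not a real symbol.
 */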
52 
53 void page_writeback_init(void);
54 
55 /*
56  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
57  * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit
58  * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
59  * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
60  */
61 #define COMPOUND_MAPPED		0x800000
62 #define FOLIO_PAGES_MAPPED	(COMPOUND_MAPPED - 1)
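/*
 * Worked arithmetic behind the comment above, assuming 4kB pages: a 16GB
 * folio is 2^34 bytes, i.e. 2^34 / 2^12 = 2^22 = 0x400000 pages, so bit 22
 * is the highest bit a sum of per-page mapcounts can set.  COMPOUND_MAPPED
 * (0x800000) is bit 23, safely above that, and FOLIO_PAGES_MAPPED
 * (0x7fffff) masks out everything from bit 23 upwards.
 */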
63 
64 /*
65  * How many individual pages have an elevated _mapcount.  Excludes
66  * the folio's entire_mapcount.
67  */
68 static inline int folio_nr_pages_mapped(struct folio *folio)
69 {
70 	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
71 }
72 
73 static inline void *folio_raw_mapping(struct folio *folio)
74 {
75 	unsigned long mapping = (unsigned long)folio->mapping;
76 
77 	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
78 }
79 
80 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
81 						int nr_throttled);
82 static inline void acct_reclaim_writeback(struct folio *folio)
83 {
84 	pg_data_t *pgdat = folio_pgdat(folio);
85 	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
86 
87 	if (nr_throttled)
88 		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
89 }
90 
91 static inline void wake_throttle_isolated(pg_data_t *pgdat)
92 {
93 	wait_queue_head_t *wqh;
94 
95 	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
96 	if (waitqueue_active(wqh))
97 		wake_up(wqh);
98 }
99 
100 vm_fault_t do_swap_page(struct vm_fault *vmf);
101 void folio_rotate_reclaimable(struct folio *folio);
102 bool __folio_end_writeback(struct folio *folio);
103 void deactivate_file_folio(struct folio *folio);
104 void folio_activate(struct folio *folio);
105 
106 void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
107 		   struct vm_area_struct *start_vma, unsigned long floor,
108 		   unsigned long ceiling, bool mm_wr_locked);
109 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
110 
111 struct zap_details;
112 void unmap_page_range(struct mmu_gather *tlb,
113 			     struct vm_area_struct *vma,
114 			     unsigned long addr, unsigned long end,
115 			     struct zap_details *details);
116 
117 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
118 		unsigned int order);
119 void force_page_cache_ra(struct readahead_control *, unsigned long nr);
120 static inline void force_page_cache_readahead(struct address_space *mapping,
121 		struct file *file, pgoff_t index, unsigned long nr_to_read)
122 {
123 	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
124 	force_page_cache_ra(&ractl, nr_to_read);
125 }
126 
127 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
128 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
129 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
130 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
131 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
132 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
133 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
134 		loff_t end);
135 long invalidate_inode_page(struct page *page);
136 unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
137 		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec);
138 
139 /**
140  * folio_evictable - Test whether a folio is evictable.
141  * @folio: The folio to test.
142  *
143  * Test whether @folio is evictable -- i.e., should be placed on
144  * active/inactive lists vs unevictable list.
145  *
146  * Reasons folio might not be evictable:
147  * 1. folio's mapping marked unevictable
148  * 2. One of the pages in the folio is part of an mlocked VMA
149  */
150 static inline bool folio_evictable(struct folio *folio)
151 {
152 	bool ret;
153 
154 	/* Prevent address_space of inode and swap cache from being freed */
155 	rcu_read_lock();
156 	ret = !mapping_unevictable(folio_mapping(folio)) &&
157 			!folio_test_mlocked(folio);
158 	rcu_read_unlock();
159 	return ret;
160 }
161 
162 /*
163  * Turn a non-refcounted page (->_refcount == 0) into a refcounted page
164  * with a count of one.
165  */
166 static inline void set_page_refcounted(struct page *page)
167 {
168 	VM_BUG_ON_PAGE(PageTail(page), page);
169 	VM_BUG_ON_PAGE(page_ref_count(page), page);
170 	set_page_count(page, 1);
171 }
172 
173 extern unsigned long highest_memmap_pfn;
174 
175 /*
176  * Maximum number of reclaim retries without progress before the OOM
177  * killer is considered the only way forward.
178  */
179 #define MAX_RECLAIM_RETRIES 16
180 
181 /*
182  * in mm/early_ioremap.c
183  */
184 pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
185 					unsigned long size, pgprot_t prot);
186 
187 /*
188  * in mm/vmscan.c:
189  */
190 bool isolate_lru_page(struct page *page);
191 bool folio_isolate_lru(struct folio *folio);
192 void putback_lru_page(struct page *page);
193 void folio_putback_lru(struct folio *folio);
194 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
195 
196 /*
197  * in mm/rmap.c:
198  */
199 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
200 
201 /*
202  * in mm/page_alloc.c
203  */
204 #define K(x) ((x) << (PAGE_SHIFT-10))
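/*
 * Example, assuming PAGE_SHIFT == 12 (4kB pages): K(x) == x * 4,
 * so K(256) reports 256 pages as 1024 kB.
 */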
205 
206 extern char * const zone_names[MAX_NR_ZONES];
207 
208 /* perform sanity checks on struct pages being allocated or freed */
209 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
210 
211 extern int min_free_kbytes;
212 
213 void setup_per_zone_wmarks(void);
214 void calculate_min_free_kbytes(void);
215 int __meminit init_per_zone_wmark_min(void);
216 void page_alloc_sysctl_init(void);
217 
218 /*
219  * Structure for holding the mostly immutable allocation parameters passed
220  * between functions involved in allocations, including the alloc_pages*
221  * family of functions.
222  *
223  * nodemask, migratetype and highest_zoneidx are initialized only once in
224  * __alloc_pages() and then never change.
225  *
226  * zonelist, preferred_zone and highest_zoneidx are set first in
227  * __alloc_pages() for the fast path, and might be later changed
228  * in __alloc_pages_slowpath(). All other functions pass the whole structure
229  * by a const pointer.
230  */
231 struct alloc_context {
232 	struct zonelist *zonelist;
233 	nodemask_t *nodemask;
234 	struct zoneref *preferred_zoneref;
235 	int migratetype;
236 
237 	/*
238 	 * highest_zoneidx represents the highest usable zone index for
239 	 * the allocation request. Due to the nature of the zones,
240 	 * memory on zones lower than highest_zoneidx will be
241 	 * protected by lowmem_reserve[highest_zoneidx].
242 	 *
243 	 * highest_zoneidx is also used by reclaim/compaction to limit
244 	 * the target zone, since zones higher than this index cannot
245 	 * be used for this allocation request.
246 	 */
247 	enum zone_type highest_zoneidx;
248 	bool spread_dirty_pages;
249 };
250 
251 /*
252  * This function returns the order of a free page in the buddy system. In
253  * general, page_zone(page)->lock must be held by the caller to prevent the
254  * page from being allocated in parallel and returning garbage as the order.
255  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
256  * page cannot be allocated or merged in parallel. Alternatively, it must
257  * handle invalid values gracefully, and use buddy_order_unsafe() below.
258  */
259 static inline unsigned int buddy_order(struct page *page)
260 {
261 	/* PageBuddy() must be checked by the caller */
262 	return page_private(page);
263 }
264 
265 /*
266  * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
267  * PageBuddy() should be checked first by the caller to minimize race window,
268  * and invalid values must be handled gracefully.
269  *
270  * READ_ONCE is used so that if the caller assigns the result into a local
271  * variable and e.g. tests it for valid range before using, the compiler cannot
272  * decide to remove the variable and inline the page_private(page) multiple
273  * times, potentially observing different values in the tests and the actual
274  * use of the result.
275  */
276 #define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
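/*
 * Illustrative (hypothetical) pattern for the unsafe variant: read the racy
 * order once into a local, validate it, and only then use it, so the compiler
 * cannot re-read page_private() between the check and the use:
 *
 *	unsigned int order = buddy_order_unsafe(page);
 *
 *	if (order < some_upper_bound)
 *		use(order);
 *
 * "some_upper_bound" and "use()" are placeholders, not real symbols.
 */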
277 
278 /*
279  * This function checks whether a page is free && is the buddy we are
280  * looking for. We can coalesce a page and its buddy if
281  * (a) the buddy is not in a hole (check before calling!) &&
282  * (b) the buddy is in the buddy system &&
283  * (c) a page and its buddy have the same order &&
284  * (d) a page and its buddy are in the same zone.
285  *
286  * For recording whether a page is in the buddy system, we set PageBuddy.
287  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
288  *
289  * For recording page's order, we use page_private(page).
290  */
291 static inline bool page_is_buddy(struct page *page, struct page *buddy,
292 				 unsigned int order)
293 {
294 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
295 		return false;
296 
297 	if (buddy_order(buddy) != order)
298 		return false;
299 
300 	/*
301 	 * zone check is done late to avoid uselessly calculating
302 	 * zone/node ids for pages that could never merge.
303 	 */
304 	if (page_zone_id(page) != page_zone_id(buddy))
305 		return false;
306 
307 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
308 
309 	return true;
310 }
311 
312 /*
313  * Locate the struct page for both the matching buddy in our
314  * pair (buddy1) and the combined order O+1 page they form (page).
315  *
316  * 1) Any buddy B1 will have an order O twin B2 which satisfies
317  * the following equation:
318  *     B2 = B1 ^ (1 << O)
319  * For example, if the starting buddy (B1) is #8, its order
320  * 1 buddy is #10:
321  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
322  *
323  * 2) Any buddy B will have an order O+1 parent P which
324  * satisfies the following equation:
325  *     P = B & ~(1 << O)
326  *
327  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
328  */
329 static inline unsigned long
330 __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
331 {
332 	return page_pfn ^ (1 << order);
333 }
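/*
 * Equation 2) above can be expressed the same way.  This is a hypothetical
 * helper shown only for illustration; it is not part of the original header
 * and nothing in mm/ calls it:
 */
static inline unsigned long
__find_buddy_parent_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn & ~(1UL << order);
}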
334 
335 /*
336  * Find the buddy of @page and validate it.
337  * @page: The input page
338  * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
339  *       function is used in the performance-critical __free_one_page().
340  * @order: The order of the page
341  * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
342  *             page_to_pfn().
343  *
344  * The found buddy can be a non-PageBuddy page, outside @page's zone, or of a
345  * different order than @page. Validation is necessary before using it.
346  *
347  * Return: the found buddy page or NULL if not found.
348  */
349 static inline struct page *find_buddy_page_pfn(struct page *page,
350 			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
351 {
352 	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
353 	struct page *buddy;
354 
355 	buddy = page + (__buddy_pfn - pfn);
356 	if (buddy_pfn)
357 		*buddy_pfn = __buddy_pfn;
358 
359 	if (page_is_buddy(page, buddy, order))
360 		return buddy;
361 	return NULL;
362 }
363 
364 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
365 				unsigned long end_pfn, struct zone *zone);
366 
367 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
368 				unsigned long end_pfn, struct zone *zone)
369 {
370 	if (zone->contiguous)
371 		return pfn_to_page(start_pfn);
372 
373 	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
374 }
375 
376 void set_zone_contiguous(struct zone *zone);
377 
378 static inline void clear_zone_contiguous(struct zone *zone)
379 {
380 	zone->contiguous = false;
381 }
382 
383 extern int __isolate_free_page(struct page *page, unsigned int order);
384 extern void __putback_isolated_page(struct page *page, unsigned int order,
385 				    int mt);
386 extern void memblock_free_pages(struct page *page, unsigned long pfn,
387 					unsigned int order);
388 extern void __free_pages_core(struct page *page, unsigned int order);
389 
390 static inline void prep_compound_head(struct page *page, unsigned int order)
391 {
392 	struct folio *folio = (struct folio *)page;
393 
394 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
395 	set_compound_order(page, order);
396 	atomic_set(&folio->_entire_mapcount, -1);
397 	atomic_set(&folio->_nr_pages_mapped, 0);
398 	atomic_set(&folio->_pincount, 0);
399 }
400 
401 static inline void prep_compound_tail(struct page *head, int tail_idx)
402 {
403 	struct page *p = head + tail_idx;
404 
405 	p->mapping = TAIL_MAPPING;
406 	set_compound_head(p, head);
407 	set_page_private(p, 0);
408 }
409 
410 extern void prep_compound_page(struct page *page, unsigned int order);
411 
412 extern void post_alloc_hook(struct page *page, unsigned int order,
413 					gfp_t gfp_flags);
414 extern int user_min_free_kbytes;
415 
416 extern void free_unref_page(struct page *page, unsigned int order);
417 extern void free_unref_page_list(struct list_head *list);
418 
419 extern void zone_pcp_reset(struct zone *zone);
420 extern void zone_pcp_disable(struct zone *zone);
421 extern void zone_pcp_enable(struct zone *zone);
422 extern void zone_pcp_init(struct zone *zone);
423 
424 extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
425 			  phys_addr_t min_addr,
426 			  int nid, bool exact_nid);
427 
428 void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
429 		unsigned long, enum meminit_context, struct vmem_altmap *, int);
430 
431 
432 int split_free_page(struct page *free_page,
433 			unsigned int order, unsigned long split_pfn_offset);
434 
435 /*
436  * This will have no effect, other than possibly generating a warning, if the
437  * caller passes in a non-large folio.
438  */
439 static inline void folio_set_order(struct folio *folio, unsigned int order)
440 {
441 	if (WARN_ON_ONCE(!folio_test_large(folio)))
442 		return;
443 
444 	folio->_folio_order = order;
445 #ifdef CONFIG_64BIT
446 	/*
447 	 * When hugetlb dissolves a folio, we need to clear the tail
448 	 * page, rather than setting nr_pages to 1.
449 	 */
450 	folio->_folio_nr_pages = order ? 1U << order : 0;
451 #endif
452 }
453 
454 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
455 
456 /*
457  * in mm/compaction.c
458  */
459 /*
460  * compact_control is used to track pages being migrated and the free pages
461  * they are being migrated to during memory compaction. The free_pfn starts
462  * at the end of a zone and migrate_pfn begins at the start. Movable pages
463  * are moved to the end of a zone during a compaction run and the run
464  * completes when free_pfn <= migrate_pfn.
465  */
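/*
 * Worked example of the convergence described above: in a zone spanning
 * pfns [0x10000, 0x20000), migrate_pfn starts at the low end (0x10000) and
 * scans upwards while free_pfn starts near the high end (0x20000) and scans
 * downwards; the run finishes once free_pfn <= migrate_pfn, i.e. once the
 * two scanners have met.
 */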
466 struct compact_control {
467 	struct list_head freepages;	/* List of free pages to migrate to */
468 	struct list_head migratepages;	/* List of pages being migrated */
469 	unsigned int nr_freepages;	/* Number of isolated free pages */
470 	unsigned int nr_migratepages;	/* Number of pages to migrate */
471 	unsigned long free_pfn;		/* isolate_freepages search base */
472 	/*
473 	 * Acts as an in/out parameter to page isolation for migration.
474 	 * isolate_migratepages uses it as a search base.
475 	 * isolate_migratepages_block will update the value to the next pfn
476 	 * after the last isolated one.
477 	 */
478 	unsigned long migrate_pfn;
479 	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
480 	struct zone *zone;
481 	unsigned long total_migrate_scanned;
482 	unsigned long total_free_scanned;
483 	unsigned short fast_search_fail;/* failures to use free list searches */
484 	short search_order;		/* order to start a fast search at */
485 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
486 	int order;			/* order a direct compactor needs */
487 	int migratetype;		/* migratetype of direct compactor */
488 	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
489 	const int highest_zoneidx;	/* zone index of a direct compactor */
490 	enum migrate_mode mode;		/* Async or sync migration mode */
491 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
492 	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
493 	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
494 	bool direct_compaction;		/* False from kcompactd or /proc/... */
495 	bool proactive_compaction;	/* kcompactd proactive compaction */
496 	bool whole_zone;		/* Whole zone should/has been scanned */
497 	bool contended;			/* Signal lock contention */
498 	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
499 					 * when there are potentially transient
500 					 * isolation or migration failures to
501 					 * ensure forward progress.
502 					 */
503 	bool alloc_contig;		/* alloc_contig_range allocation */
504 };
505 
506 /*
507  * Used in direct compaction when a page should be taken from the freelists
508  * immediately when one is created during the free path.
509  */
510 struct capture_control {
511 	struct compact_control *cc;
512 	struct page *page;
513 };
514 
515 unsigned long
516 isolate_freepages_range(struct compact_control *cc,
517 			unsigned long start_pfn, unsigned long end_pfn);
518 int
519 isolate_migratepages_range(struct compact_control *cc,
520 			   unsigned long low_pfn, unsigned long end_pfn);
521 
522 int __alloc_contig_migrate_range(struct compact_control *cc,
523 					unsigned long start, unsigned long end);
524 
525 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
526 void init_cma_reserved_pageblock(struct page *page);
527 
528 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
529 
530 int find_suitable_fallback(struct free_area *area, unsigned int order,
531 			int migratetype, bool only_stealable, bool *can_steal);
532 
533 static inline bool free_area_empty(struct free_area *area, int migratetype)
534 {
535 	return list_empty(&area->free_list[migratetype]);
536 }
537 
538 /*
539  * These three helpers classify VMAs for virtual memory accounting.
540  */
541 
542 /*
543  * Executable code area - executable, not writable, not stack
544  */
545 static inline bool is_exec_mapping(vm_flags_t flags)
546 {
547 	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
548 }
549 
550 /*
551  * Stack area - automatically grows in one direction
552  *
553  * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
554  * do_mmap() forbids all other combinations.
555  */
556 static inline bool is_stack_mapping(vm_flags_t flags)
557 {
558 	return (flags & VM_STACK) == VM_STACK;
559 }
560 
561 /*
562  * Data area - private, writable, not stack
563  */
564 static inline bool is_data_mapping(vm_flags_t flags)
565 {
566 	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
567 }
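/*
 * Example: a PROT_READ|PROT_WRITE MAP_PRIVATE mapping carries VM_WRITE but
 * neither VM_SHARED nor VM_STACK, so is_data_mapping() is true for it; a
 * PROT_READ|PROT_EXEC file mapping satisfies is_exec_mapping() instead, and
 * a stack VMA (VM_STACK set) satisfies is_stack_mapping().
 */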
568 
569 /* mm/util.c */
570 struct anon_vma *folio_anon_vma(struct folio *folio);
571 
572 #ifdef CONFIG_MMU
573 void unmap_mapping_folio(struct folio *folio);
574 extern long populate_vma_page_range(struct vm_area_struct *vma,
575 		unsigned long start, unsigned long end, int *locked);
576 extern long faultin_vma_page_range(struct vm_area_struct *vma,
577 				   unsigned long start, unsigned long end,
578 				   bool write, int *locked);
579 extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
580 			       unsigned long bytes);
581 /*
582  * mlock_vma_folio() and munlock_vma_folio():
583  * should be called with vma's mmap_lock held for read or write,
584  * under page table lock for the pte/pmd being added or removed.
585  *
586  * mlock is usually called at the end of page_add_*_rmap(), munlock at
587  * the end of page_remove_rmap(); but new anon folios are managed by
588  * folio_add_lru_vma() calling mlock_new_folio().
589  *
590  * @compound is used to include pmd mappings of THPs, but filter out
591  * pte mappings of THPs, which cannot be consistently counted: a pte
592  * mapping of the THP head cannot be distinguished by the page alone.
593  */
594 void mlock_folio(struct folio *folio);
595 static inline void mlock_vma_folio(struct folio *folio,
596 			struct vm_area_struct *vma, bool compound)
597 {
598 	/*
599 	 * The VM_SPECIAL check here serves two purposes.
600 	 * 1) VM_IO check prevents migration from double-counting during mlock.
601 	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
602 	 *    is never left set on a VM_SPECIAL vma, there is an interval while
603 	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
604 	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
605 	 */
606 	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
607 	    (compound || !folio_test_large(folio)))
608 		mlock_folio(folio);
609 }
610 
611 void munlock_folio(struct folio *folio);
612 static inline void munlock_vma_folio(struct folio *folio,
613 			struct vm_area_struct *vma, bool compound)
614 {
615 	if (unlikely(vma->vm_flags & VM_LOCKED) &&
616 	    (compound || !folio_test_large(folio)))
617 		munlock_folio(folio);
618 }
619 
620 void mlock_new_folio(struct folio *folio);
621 bool need_mlock_drain(int cpu);
622 void mlock_drain_local(void);
623 void mlock_drain_remote(int cpu);
624 
625 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
626 
627 /*
628  * Return the start of user virtual address at the specific offset within
629  * a vma.
630  */
631 static inline unsigned long
632 vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
633 		  struct vm_area_struct *vma)
634 {
635 	unsigned long address;
636 
637 	if (pgoff >= vma->vm_pgoff) {
638 		address = vma->vm_start +
639 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
640 		/* Check for address beyond vma (or wrapped through 0?) */
641 		if (address < vma->vm_start || address >= vma->vm_end)
642 			address = -EFAULT;
643 	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
644 		/* Test above avoids possibility of wrap to 0 on 32-bit */
645 		address = vma->vm_start;
646 	} else {
647 		address = -EFAULT;
648 	}
649 	return address;
650 }
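/*
 * Worked example, assuming 4kB pages: with vma->vm_start == 0x100000,
 * vma->vm_end == 0x200000 and vma->vm_pgoff == 0x10, a range starting at
 * pgoff 0x12 maps to 0x100000 + ((0x12 - 0x10) << 12) == 0x102000, which
 * lies inside the vma, so that address is returned.
 */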
651 
652 /*
653  * Return the start of user virtual address of a page within a vma.
654  * Returns -EFAULT if all of the page is outside the range of vma.
655  * If page is a compound head, the entire compound page is considered.
656  */
657 static inline unsigned long
658 vma_address(struct page *page, struct vm_area_struct *vma)
659 {
660 	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
661 	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
662 }
663 
664 /*
665  * Then at what user virtual address will none of the range be found in vma?
666  * Assumes that vma_address() already returned a good starting address.
667  */
668 static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
669 {
670 	struct vm_area_struct *vma = pvmw->vma;
671 	pgoff_t pgoff;
672 	unsigned long address;
673 
674 	/* Common case, plus ->pgoff is invalid for KSM */
675 	if (pvmw->nr_pages == 1)
676 		return pvmw->address + PAGE_SIZE;
677 
678 	pgoff = pvmw->pgoff + pvmw->nr_pages;
679 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
680 	/* Check for address beyond vma (or wrapped through 0?) */
681 	if (address < vma->vm_start || address > vma->vm_end)
682 		address = vma->vm_end;
683 	return address;
684 }
685 
686 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
687 						    struct file *fpin)
688 {
689 	int flags = vmf->flags;
690 
691 	if (fpin)
692 		return fpin;
693 
694 	/*
695 	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
696  * anything, so we only pin the file and drop the mmap_lock if
697  * FAULT_FLAG_ALLOW_RETRY is set, NOWAIT is not, and this is the first attempt.
698 	 */
699 	if (fault_flag_allow_retry_first(flags) &&
700 	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
701 		fpin = get_file(vmf->vma->vm_file);
702 		mmap_read_unlock(vmf->vma->vm_mm);
703 	}
704 	return fpin;
705 }
706 #else /* !CONFIG_MMU */
707 static inline void unmap_mapping_folio(struct folio *folio) { }
708 static inline void mlock_new_folio(struct folio *folio) { }
709 static inline bool need_mlock_drain(int cpu) { return false; }
710 static inline void mlock_drain_local(void) { }
711 static inline void mlock_drain_remote(int cpu) { }
712 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
713 {
714 }
715 #endif /* !CONFIG_MMU */
716 
717 /* Memory initialisation debug and verification */
718 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
719 DECLARE_STATIC_KEY_TRUE(deferred_pages);
720 
721 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
722 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
723 
724 enum mminit_level {
725 	MMINIT_WARNING,
726 	MMINIT_VERIFY,
727 	MMINIT_TRACE
728 };
729 
730 #ifdef CONFIG_DEBUG_MEMORY_INIT
731 
732 extern int mminit_loglevel;
733 
734 #define mminit_dprintk(level, prefix, fmt, arg...) \
735 do { \
736 	if (level < mminit_loglevel) { \
737 		if (level <= MMINIT_WARNING) \
738 			pr_warn("mminit::" prefix " " fmt, ##arg);	\
739 		else \
740 			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
741 	} \
742 } while (0)
743 
744 extern void mminit_verify_pageflags_layout(void);
745 extern void mminit_verify_zonelist(void);
746 #else
747 
748 static inline void mminit_dprintk(enum mminit_level level,
749 				const char *prefix, const char *fmt, ...)
750 {
751 }
752 
753 static inline void mminit_verify_pageflags_layout(void)
754 {
755 }
756 
757 static inline void mminit_verify_zonelist(void)
758 {
759 }
760 #endif /* CONFIG_DEBUG_MEMORY_INIT */
761 
762 #define NODE_RECLAIM_NOSCAN	-2
763 #define NODE_RECLAIM_FULL	-1
764 #define NODE_RECLAIM_SOME	0
765 #define NODE_RECLAIM_SUCCESS	1
766 
767 #ifdef CONFIG_NUMA
768 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
769 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
770 #else
771 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
772 				unsigned int order)
773 {
774 	return NODE_RECLAIM_NOSCAN;
775 }
776 static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
777 {
778 	return NUMA_NO_NODE;
779 }
780 #endif
781 
782 /*
783  * mm/memory-failure.c
784  */
785 extern int hwpoison_filter(struct page *p);
786 
787 extern u32 hwpoison_filter_dev_major;
788 extern u32 hwpoison_filter_dev_minor;
789 extern u64 hwpoison_filter_flags_mask;
790 extern u64 hwpoison_filter_flags_value;
791 extern u64 hwpoison_filter_memcg;
792 extern u32 hwpoison_filter_enable;
793 
794 extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
795         unsigned long, unsigned long,
796         unsigned long, unsigned long);
797 
798 extern void set_pageblock_order(void);
799 unsigned long reclaim_pages(struct list_head *folio_list);
800 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
801 					    struct list_head *folio_list);
802 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
803 #define ALLOC_WMARK_MIN		WMARK_MIN
804 #define ALLOC_WMARK_LOW		WMARK_LOW
805 #define ALLOC_WMARK_HIGH	WMARK_HIGH
806 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
807 
808 /* Mask to get the watermark bits */
809 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
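/*
 * Illustrative (hypothetical) sketch of how the index is consumed: the low
 * bits select WMARK_MIN/WMARK_LOW/WMARK_HIGH, e.g. something like
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 *
 * where wmark_pages() comes from <linux/mmzone.h>.
 */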
810 
811 /*
812  * Only MMU archs have async oom victim reclaim - aka oom_reaper - so we
813  * cannot assume that reduced access to memory reserves is sufficient for
814  * !MMU.
815  */
816 #ifdef CONFIG_MMU
817 #define ALLOC_OOM		0x08
818 #else
819 #define ALLOC_OOM		ALLOC_NO_WATERMARKS
820 #endif
821 
822 #define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
823 				       * to 25% of the min watermark or
824 				       * 62.5% if __GFP_HIGH is set.
825 				       */
826 #define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
827 				       * of the min watermark.
828 				       */
829 #define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
830 #define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
831 #ifdef CONFIG_ZONE_DMA32
832 #define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
833 #else
834 #define ALLOC_NOFRAGMENT	  0x0
835 #endif
836 #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
837 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
838 
839 /* Flags that allow allocations below the min watermark. */
840 #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
841 
842 enum ttu_flags;
843 struct tlbflush_unmap_batch;
844 
845 
846 /*
847  * only for MM internal work items which do not depend on
848  * any allocations or locks which might depend on allocations
849  */
850 extern struct workqueue_struct *mm_percpu_wq;
851 
852 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
853 void try_to_unmap_flush(void);
854 void try_to_unmap_flush_dirty(void);
855 void flush_tlb_batched_pending(struct mm_struct *mm);
856 #else
857 static inline void try_to_unmap_flush(void)
858 {
859 }
860 static inline void try_to_unmap_flush_dirty(void)
861 {
862 }
863 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
864 {
865 }
866 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
867 
868 extern const struct trace_print_flags pageflag_names[];
869 extern const struct trace_print_flags pagetype_names[];
870 extern const struct trace_print_flags vmaflag_names[];
871 extern const struct trace_print_flags gfpflag_names[];
872 
873 static inline bool is_migrate_highatomic(enum migratetype migratetype)
874 {
875 	return migratetype == MIGRATE_HIGHATOMIC;
876 }
877 
878 static inline bool is_migrate_highatomic_page(struct page *page)
879 {
880 	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
881 }
882 
883 void setup_zone_pageset(struct zone *zone);
884 
885 struct migration_target_control {
886 	int nid;		/* preferred node id */
887 	nodemask_t *nmask;
888 	gfp_t gfp_mask;
889 };
890 
891 /*
892  * mm/filemap.c
893  */
894 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
895 			      struct folio *folio, loff_t fpos, size_t size);
896 
897 /*
898  * mm/vmalloc.c
899  */
900 #ifdef CONFIG_MMU
901 void __init vmalloc_init(void);
902 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
903                 pgprot_t prot, struct page **pages, unsigned int page_shift);
904 #else
905 static inline void vmalloc_init(void)
906 {
907 }
908 
909 static inline
910 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
911                 pgprot_t prot, struct page **pages, unsigned int page_shift)
912 {
913 	return -EINVAL;
914 }
915 #endif
916 
917 int __must_check __vmap_pages_range_noflush(unsigned long addr,
918 			       unsigned long end, pgprot_t prot,
919 			       struct page **pages, unsigned int page_shift);
920 
921 void vunmap_range_noflush(unsigned long start, unsigned long end);
922 
923 void __vunmap_range_noflush(unsigned long start, unsigned long end);
924 
925 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
926 		      unsigned long addr, int page_nid, int *flags);
927 
928 void free_zone_device_page(struct page *page);
929 int migrate_device_coherent_page(struct page *page);
930 
931 /*
932  * mm/gup.c
933  */
934 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
935 int __must_check try_grab_page(struct page *page, unsigned int flags);
936 
937 enum {
938 	/* mark page accessed */
939 	FOLL_TOUCH = 1 << 16,
940 	/* a retry, previous pass started an IO */
941 	FOLL_TRIED = 1 << 17,
942 	/* we are working on non-current tsk/mm */
943 	FOLL_REMOTE = 1 << 18,
944 	/* pages must be released via unpin_user_page */
945 	FOLL_PIN = 1 << 19,
946 	/* gup_fast: prevent fall-back to slow gup */
947 	FOLL_FAST_ONLY = 1 << 20,
948 	/* allow unlocking the mmap lock */
949 	FOLL_UNLOCKABLE = 1 << 21,
950 };
951 
952 /*
953  * Indicates whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE for
954  * pages that are write-protected in the page table, such that the GUP pin
955  * will remain consistent with the pages mapped into the page tables
956  * of the MM.
957  *
958  * Temporary unmapping of PageAnonExclusive() pages or clearing of
959  * PageAnonExclusive() has to protect against concurrent GUP:
960  * * Ordinary GUP: Using the PT lock
961  * * GUP-fast and fork(): mm->write_protect_seq
962  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
963  *    page_try_share_anon_rmap()
964  *
965  * Must be called with the (sub)page that's actually referenced via the
966  * page table entry, which might not necessarily be the head page for a
967  * PTE-mapped THP.
968  *
969  * If the vma is NULL, we're coming from the GUP-fast path and might have
970  * to fallback to the slow path just to lookup the vma.
971  */
972 static inline bool gup_must_unshare(struct vm_area_struct *vma,
973 				    unsigned int flags, struct page *page)
974 {
975 	/*
976 	 * FOLL_WRITE is implicitly handled correctly as the page table entry
977 	 * has to be writable -- and if it references (part of) an anonymous
978 	 * folio, that part is required to be marked exclusive.
979 	 */
980 	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
981 		return false;
982 	/*
983 	 * Note: PageAnon(page) is stable until the page is actually getting
984 	 * freed.
985 	 */
986 	if (!PageAnon(page)) {
987 		/*
988 		 * We only care about R/O long-term pinning: R/O short-term
989 		 * pinning does not have the semantics to observe successive
990 		 * changes through the process page tables.
991 		 */
992 		if (!(flags & FOLL_LONGTERM))
993 			return false;
994 
995 		/* We really need the vma ... */
996 		if (!vma)
997 			return true;
998 
999 		/*
1000 		 * ... because we only care about writable private ("COW")
1001 		 * mappings where we have to break COW early.
1002 		 */
1003 		return is_cow_mapping(vma->vm_flags);
1004 	}
1005 
1006 	/* Paired with a memory barrier in page_try_share_anon_rmap(). */
1007 	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
1008 		smp_rmb();
1009 
1010 	/*
1011 	 * Note that PageKsm() pages cannot be exclusive, and consequently,
1012 	 * cannot get pinned.
1013 	 */
1014 	return !PageAnonExclusive(page);
1015 }
1016 
1017 extern bool mirrored_kernelcore;
1018 
1019 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1020 {
1021 	/*
1022 	 * NOTE: we must check the CONFIG_MEM_SOFT_DIRTY enablement before
1023 	 * testing VM_SOFTDIRTY, because when soft-dirty is not compiled in,
1024 	 * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY)
1025 	 * would always be true.
1026 	 */
1027 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
1028 		return false;
1029 
1030 	/*
1031 	 * Soft-dirty is kind of special: its tracking is enabled when the
1032 	 * VM_SOFTDIRTY vma flag is not set.
1033 	 */
1034 	return !(vma->vm_flags & VM_SOFTDIRTY);
1035 }
1036 
1037 /*
1038  * VMA Iterator functions shared between nommu and mmap
1039  */
1040 static inline int vma_iter_prealloc(struct vma_iterator *vmi)
1041 {
1042 	return mas_preallocate(&vmi->mas, GFP_KERNEL);
1043 }
1044 
1045 static inline void vma_iter_clear(struct vma_iterator *vmi,
1046 				  unsigned long start, unsigned long end)
1047 {
1048 	mas_set_range(&vmi->mas, start, end - 1);
1049 	mas_store_prealloc(&vmi->mas, NULL);
1050 }
1051 
1052 static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
1053 {
1054 	return mas_walk(&vmi->mas);
1055 }
1056 
1057 /* Store a VMA with preallocated memory */
1058 static inline void vma_iter_store(struct vma_iterator *vmi,
1059 				  struct vm_area_struct *vma)
1060 {
1061 
1062 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1063 	if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
1064 			vmi->mas.index > vma->vm_start)) {
1065 		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
1066 			vmi->mas.index, vma->vm_start, vma->vm_start,
1067 			vma->vm_end, vmi->mas.index, vmi->mas.last);
1068 	}
1069 	if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
1070 			vmi->mas.last <  vma->vm_start)) {
1071 		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
1072 		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
1073 		       vmi->mas.index, vmi->mas.last);
1074 	}
1075 #endif
1076 
1077 	if (vmi->mas.node != MAS_START &&
1078 	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1079 		vma_iter_invalidate(vmi);
1080 
1081 	vmi->mas.index = vma->vm_start;
1082 	vmi->mas.last = vma->vm_end - 1;
1083 	mas_store_prealloc(&vmi->mas, vma);
1084 }
1085 
1086 static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
1087 			struct vm_area_struct *vma, gfp_t gfp)
1088 {
1089 	if (vmi->mas.node != MAS_START &&
1090 	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1091 		vma_iter_invalidate(vmi);
1092 
1093 	vmi->mas.index = vma->vm_start;
1094 	vmi->mas.last = vma->vm_end - 1;
1095 	mas_store_gfp(&vmi->mas, vma, gfp);
1096 	if (unlikely(mas_is_err(&vmi->mas)))
1097 		return -ENOMEM;
1098 
1099 	return 0;
1100 }
1101 
1102 /*
1103  * VMA lock generalization
1104  */
1105 struct vma_prepare {
1106 	struct vm_area_struct *vma;
1107 	struct vm_area_struct *adj_next;
1108 	struct file *file;
1109 	struct address_space *mapping;
1110 	struct anon_vma *anon_vma;
1111 	struct vm_area_struct *insert;
1112 	struct vm_area_struct *remove;
1113 	struct vm_area_struct *remove2;
1114 };
1115 #endif	/* __MM_INTERNAL_H */
1116