/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
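
/*
 * These masks are applied with plain bitwise ops; e.g. a sketch of how a
 * caller keeps only the reclaim-relevant bits of a request (illustrative,
 * exact call sites vary):
 *
 *	reclaim_gfp = gfp_mask & GFP_RECLAIM_MASK;
 */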

/*
 * Unlike WARN_ON_ONCE(), no warning is issued when the caller
 * specifies __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
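
/*
 * Usage sketch (illustrative; resembles the page allocator's sanity
 * checks, exact call sites may differ):
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 *
 * The warning fires at most once, and never when the caller passed
 * __GFP_NOWARN.
 */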

void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define ENTIRELY_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)

/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount.  Excludes
 * the folio's entire_mapcount.
 *
 * Don't use this function outside of debugging code.
 */
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

/*
 * Retrieve the first entry of a folio based on a provided entry within the
 * folio. We cannot rely on folio->swap as there is no guarantee that it has
 * been initialized. Used for calling arch_swap_restore().
 */
static inline swp_entry_t folio_swap(swp_entry_t entry,
		const struct folio *folio)
{
	swp_entry_t swap = {
		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
	};

	return swap;
}

static inline void *folio_raw_mapping(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

#ifdef CONFIG_MMU

/* Flags for folio_pte_batch(). */
typedef int __bitwise fpb_t;

/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))

/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))

static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
	if (flags & FPB_IGNORE_DIRTY)
		pte = pte_mkclean(pte);
	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
		pte = pte_clear_soft_dirty(pte);
	return pte_wrprotect(pte_mkold(pte));
}

/**
 * folio_pte_batch - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @addr: The user virtual address the first page is mapped at.
 * @start_ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @max_nr: The maximum number of table entries to consider.
 * @flags: Flags to modify the PTE batch semantics.
 * @any_writable: Optional pointer to indicate whether any entry except the
 *		  first one is writable.
 * @any_young: Optional pointer to indicate whether any entry except the
 *		  first one is young.
 * @any_dirty: Optional pointer to indicate whether any entry except the
 *		  first one is dirty.
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
 * pages of the same large folio.
 *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
 * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
 * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
 *
 * start_ptep must map any page of the folio. max_nr must be at least one and
 * must be limited by the caller so scanning cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
		bool *any_writable, bool *any_young, bool *any_dirty)
{
	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t expected_pte, *ptep;
	bool writable, young, dirty;
	int nr;

	if (any_writable)
		*any_writable = false;
	if (any_young)
		*any_young = false;
	if (any_dirty)
		*any_dirty = false;

	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);

	nr = pte_batch_hint(start_ptep, pte);
	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
	ptep = start_ptep + nr;

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);
		if (any_writable)
			writable = !!pte_write(pte);
		if (any_young)
			young = !!pte_young(pte);
		if (any_dirty)
			dirty = !!pte_dirty(pte);
		pte = __pte_batch_clear_ignored(pte, flags);

		if (!pte_same(pte, expected_pte))
			break;

		/*
		 * Stop immediately once we have reached the end of the folio.
		 * In corner cases the next PFN might fall into a different
		 * folio.
		 */
		if (pte_pfn(pte) >= folio_end_pfn)
			break;

		if (any_writable)
			*any_writable |= writable;
		if (any_young)
			*any_young |= young;
		if (any_dirty)
			*any_dirty |= dirty;

		nr = pte_batch_hint(ptep, pte);
		expected_pte = pte_advance_pfn(expected_pte, nr);
		ptep += nr;
	}

	return min(ptep - start_ptep, max_nr);
}
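
/*
 * Illustrative sketch of a caller (assumed, not part of this header): with
 * the PTL held and max_nr clamped to stay within the current page table,
 * batch all PTEs mapping one large folio, ignoring per-PTE dirty and
 * soft-dirty state:
 *
 *	nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
 *			     FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY,
 *			     NULL, NULL, NULL);
 *
 * Pages [0, nr) of the folio can then be processed in one go.
 */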

/**
 * pte_move_swp_offset - Move the swap entry offset field of a swap pte
 *	 forward or backward by delta
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 * @delta: The direction and the offset we are moving; forward if delta
 *	 is positive; backward if delta is negative
 *
 * Moves the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
{
	swp_entry_t entry = pte_to_swp_entry(pte);
	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
						   (swp_offset(entry) + delta)));

	if (pte_swp_soft_dirty(pte))
		new = pte_swp_mksoft_dirty(new);
	if (pte_swp_exclusive(pte))
		new = pte_swp_mkexclusive(new);
	if (pte_swp_uffd_wp(pte))
		new = pte_swp_mkuffd_wp(new);

	return new;
}
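
/*
 * Worked example: for a swap pte of type T at offset 5,
 * pte_move_swp_offset(pte, -2) returns a pte for (T, offset 3), with any
 * soft-dirty, exclusive and uffd-wp swp pte bits carried over unchanged.
 */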
/**
 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 *
 * Increments the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_next_swp_offset(pte_t pte)
{
	return pte_move_swp_offset(pte, 1);
}

/**
 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
 * @start_ptep: Page table pointer for the first entry.
 * @max_nr: The maximum number of table entries to consider.
 * @pte: Page table entry for the first entry.
 *
 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
 * containing swap entries all with consecutive offsets and targeting the same
 * swap type, all with matching swp pte bits.
 *
 * max_nr must be at least one and must be limited by the caller so scanning
 * cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
	pte_t expected_pte = pte_next_swp_offset(pte);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t *ptep = start_ptep + 1;

	VM_WARN_ON(max_nr < 1);
	VM_WARN_ON(!is_swap_pte(pte));
	VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte)));

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);

		if (!pte_same(pte, expected_pte))
			break;

		expected_pte = pte_next_swp_offset(expected_pte);
		ptep++;
	}

	return ptep - start_ptep;
}
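
/*
 * Illustrative sketch of a caller (assumed, not part of this header):
 *
 *	entry = pte_to_swp_entry(ptent);
 *	if (!non_swap_entry(entry))
 *		nr = swap_pte_batch(pte, max_nr, ptent);
 *
 * On return, [pte, pte + nr) hold consecutive offsets of the same swap
 * type and can be zapped or freed as one unit.
 */
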
#endif /* CONFIG_MMU */

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
{
	vm_fault_t ret = __vmf_anon_prepare(vmf);

	if (unlikely(ret & VM_FAULT_RETRY))
		vma_end_read(vmf->vma);
	return ret;
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
#define K(x) ((x) << (PAGE_SHIFT-10))
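
/* e.g. with 4kB pages (PAGE_SHIFT == 12), K(x) reports x pages as x * 4 kB */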

extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index of
	 * the allocation request. Due to the nature of the zones,
	 * memory in zones lower than highest_zoneidx is protected
	 * by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since zones higher than this index cannot
	 * be used for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
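
/*
 * Typical lockless pattern (illustrative sketch): copy the order once,
 * range-check it, and only then use it. An out-of-range value means the
 * page raced with allocation or merging:
 *
 *	unsigned int order = buddy_order_unsafe(page);
 *
 *	if (order >= NR_PAGE_ORDERS)
 *		return false;
 */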

/*
 * This function checks whether a page is free and is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
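
/*
 * Worked example: for pfn 12 at order 2, __find_buddy_pfn() returns
 * 12 ^ (1 << 2) = 8, and the combined order-3 parent starts at
 * 12 & ~(1 << 2) = 8.
 */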

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The found buddy can be non-PageBuddy, out of @page's zone, or of an order
 * different from @page's. Validation is necessary before using it.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

void set_zone_contiguous(struct zone *zone);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order,
		enum meminit_context context);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = 1U << order;
#endif
}

void __folio_undo_large_rmappable(struct folio *folio);
static inline void folio_undo_large_rmappable(struct folio *folio)
{
	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
		return;

	/*
	 * At this point, there is no one trying to add the folio to
	 * deferred_list. If folio is not in deferred_list, it's safe
	 * to check without acquiring the split_queue_lock.
	 */
	if (data_race(list_empty(&folio->_deferred_list)))
		return;

	__folio_undo_large_rmappable(folio);
}

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (folio && folio_test_large(folio))
		folio_set_large_rmappable(folio);
	return folio;
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_large_mapcount, -1);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
	if (order > 1)
		INIT_LIST_HEAD(&folio->_deferred_list);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}

extern void prep_compound_page(struct page *page, unsigned int order);

extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);

extern int user_min_free_kbytes;

void free_unref_page(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end,
					int migratetype);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

/* mm/util.c */
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			       unsigned long bytes);

/*
 * NOTE: This function can't tell whether the folio is "fully mapped" in the
 * range.
 * "Fully mapped" means all the pages of the folio are associated with the
 * page tables of the range, whereas this function only checks whether the
 * folio range lies within [start, end). Callers that care about the page
 * table association need to check the page tables themselves.
 *
 * Typical usage (like mlock or madvise) is:
 * The caller knows at least one page of the folio is associated with the
 * page tables of the VMA, and that the range [start, end) intersects the
 * VMA range. To find out whether the folio is fully associated with the
 * range, it first calls this function to check that the folio is within
 * the range, and then checks the page tables to see whether the folio is
 * fully mapped to the range.
 */
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgoff_t pgoff, addr;
	unsigned long vma_pglen = vma_pages(vma);

	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
	if (start > end)
		return false;

	if (start < vma->vm_start)
		start = vma->vm_start;

	if (end > vma->vm_end)
		end = vma->vm_end;

	pgoff = folio_pgoff(folio);

	/* if folio start address is not in vma range */
	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
		return false;

	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	return !(addr < start || end - addr < folio_size(folio));
}
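
/*
 * Sketch of the two-step mlock/madvise pattern described above
 * (illustrative, not part of this header):
 *
 *	if (!folio_within_range(folio, vma, start, end))
 *		return;
 *
 * Only after this range check does the caller walk the page table to
 * confirm that every page of the folio is actually mapped.
 */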

static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}

/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 * the end of folio_remove_rmap_*(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
				struct vm_area_struct *vma)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
					struct vm_area_struct *vma)
{
	/*
	 * Always munlock when called. Ideally, we would only munlock
	 * when a page of the folio is unmapped from the VMA, leaving the
	 * folio no longer fully mapped to the VMA.
	 *
	 * But it's not easy to confirm that's the situation. So we
	 * always munlock the folio and let page reclaim correct it
	 * if that was wrong.
	 */
	if (unlikely(vma->vm_flags & VM_LOCKED))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/**
 * vma_address - Find the virtual address a page range is mapped at
 * @vma: The vma which maps this object.
 * @pgoff: The page offset within its object.
 * @nr_pages: The number of pages to consider.
 *
 * If any page in this range is mapped by this VMA, return the first address
 * where any of these pages appear.  Otherwise, return -EFAULT.
 */
static inline unsigned long vma_address(struct vm_area_struct *vma,
		pgoff_t pgoff, unsigned long nr_pages)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
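
/*
 * Worked example: with vm_start == 0x1000, vm_pgoff == 4 and 4kB pages,
 * a single page at pgoff 6 yields 0x1000 + ((6 - 4) << 12) == 0x3000,
 * provided that address still lies below vm_end.
 */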

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if
	 * FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_RETRY_NOWAIT is not,
	 * i.e. on the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
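
/*
 * Callers follow the filemap fault pattern (illustrative sketch; details
 * vary per call site):
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	... start IO that may sleep ...
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;
 *	}
 */
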
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
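
/*
 * Example (illustrative sketch): the message is emitted only while the
 * message's level is below mminit_loglevel, e.g.
 *
 *	mminit_dprintk(MMINIT_VERIFY, "zonelist", "node %d ok\n", nid);
 */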

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
void SetPageHWPoisonTakenOff(struct page *page);
void ClearPageHWPoisonTakenOff(struct page *page);
bool take_page_off_buddy(struct page *page);
bool put_page_back_buddy(struct page *page);
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
		     struct vm_area_struct *vma, struct list_head *to_kill,
		     unsigned long ksm_addr);
unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
struct folio *alloc_migrate_folio(struct folio *src, unsigned long private);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
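
/*
 * The watermark bits index zone->_watermark directly; e.g. a sketch of
 * how the allocator derives the mark to test against (illustrative):
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 */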

/*
 * Only MMU archs have async oom victim reclaim (the oom_reaper), so we
 * cannot assume that reduced access to memory reserves is sufficient
 * on !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
				       * to 25% of the min watermark or
				       * 62.5% if __GFP_HIGH is set.
				       */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
				       * of the min watermark.
				       */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags pagetype_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
	enum migrate_reason reason;
};

/*
 * mm/filemap.c
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size);

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline void vmalloc_init(void)
{
}

static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

int __must_check __vmap_pages_range_noflush(unsigned long addr,
			       unsigned long end, pgprot_t prot,
			       struct page **pages, unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_page(struct page *page);

/*
 * mm/gup.c
 */
int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags);

/*
 * mm/huge_memory.c
 */
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write);
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write);

/*
 * mm/mmap.c
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};

#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
			    FOLL_MADV_POPULATE)

/*
 * Indicates for which pages that are write-protected in the page table,
 * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
 * GUP pin will remain consistent with the pages mapped into the page tables
 * of the MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *    folio_try_share_anon_rmap_*()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * PTE-mapped THP.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fallback to the slow path just to lookup the vma.
 */
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_rmb();

	/*
	 * Note that PageKsm() pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}

extern bool mirrored_kernelcore;
extern bool memblock_has_mirror(void);

static __always_inline void vma_set_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end,
					  pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	/*
	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
	 * enablements, because when soft-dirty is not compiled in,
	 * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY)
	 * would be constantly true.
	 */
	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return false;

	/*
	 * Soft-dirty is kind of special: its tracking is enabled when the
	 * vma flag is not set.
	 */
	return !(vma->vm_flags & VM_SOFTDIRTY);
}

static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
{
	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
}

static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
{
	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
}

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last <  vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
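
/*
 * Illustrative sketch of a caller (assumed, not part of this header):
 * storing a VMA over its range with preallocated maple tree nodes:
 *
 *	VMA_ITERATOR(vmi, mm, vma->vm_start);
 *
 *	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store(&vmi, vma);
 */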

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid);

/* shrinker related functions */
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
			  int priority);

#ifdef CONFIG_64BIT
static inline int can_do_mseal(unsigned long flags)
{
	if (flags)
		return -EINVAL;

	return 0;
}

bool can_modify_mm(struct mm_struct *mm, unsigned long start,
		unsigned long end);
bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
		unsigned long end, int behavior);
#else
static inline int can_do_mseal(unsigned long flags)
{
	return -EPERM;
}

static inline bool can_modify_mm(struct mm_struct *mm, unsigned long start,
		unsigned long end)
{
	return true;
}

static inline bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
		unsigned long end, int behavior)
{
	return true;
}
#endif

#ifdef CONFIG_SHRINKER_DEBUG
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
			struct shrinker *shrinker, const char *fmt, va_list ap)
{
	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);

	return shrinker->name ? 0 : -ENOMEM;
}

static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
	kfree_const(shrinker->name);
	shrinker->name = NULL;
}

extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
					      int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
				    int debugfs_id);
#else /* CONFIG_SHRINKER_DEBUG */
static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
	return 0;
}
static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
					      const char *fmt, va_list ap)
{
	return 0;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
}
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
						     int *debugfs_id)
{
	*debugfs_id = -1;
	return NULL;
}
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
					   int debugfs_id)
{
}
#endif /* CONFIG_SHRINKER_DEBUG */

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *);
void unlink_file_vma_batch_add(struct unlink_vma_file_batch *, struct vm_area_struct *);
void unlink_file_vma_batch_final(struct unlink_vma_file_batch *);

#endif	/* __MM_INTERNAL_H */