xref: /linux/mm/internal.h (revision b85d45947951d23cb22d90caecf4c1eb81342c96)
1 /* internal.h: mm/ internal definitions
2  *
3  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 #ifndef __MM_INTERNAL_H
12 #define __MM_INTERNAL_H
13 
14 #include <linux/fs.h>
15 #include <linux/mm.h>
16 
17 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
18 		unsigned long floor, unsigned long ceiling);
19 
20 static inline void set_page_count(struct page *page, int v)
21 {
22 	atomic_set(&page->_count, v);
23 }
24 
25 extern int __do_page_cache_readahead(struct address_space *mapping,
26 		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
27 		unsigned long lookahead_size);
28 
29 /*
30  * Submit IO for the read-ahead request in file_ra_state.
31  */
32 static inline unsigned long ra_submit(struct file_ra_state *ra,
33 		struct address_space *mapping, struct file *filp)
34 {
35 	return __do_page_cache_readahead(mapping, filp,
36 					ra->start, ra->size, ra->async_size);
37 }
38 
39 /*
40  * Turn a non-refcounted page (->_count == 0) into a refcounted page
41  * with a count of one.
42  */
43 static inline void set_page_refcounted(struct page *page)
44 {
45 	VM_BUG_ON_PAGE(PageTail(page), page);
46 	VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
47 	set_page_count(page, 1);
48 }
49 
50 static inline void __get_page_tail_foll(struct page *page,
51 					bool get_page_head)
52 {
53 	/*
54 	 * If we're getting a tail page, only the head page's _count needs
55 	 * to be elevated; we elevate the head page->_count and the tail
56 	 * page->_mapcount.
57 	 *
58 	 * We elevate page_tail->_mapcount for tail pages to force
59 	 * page_tail->_count to be zero at all times to avoid getting
60 	 * false positives from get_page_unless_zero() with
61 	 * speculative page access (like in
62 	 * page_cache_get_speculative()) on tail pages.
63 	 */
64 	VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page);
65 	if (get_page_head)
66 		atomic_inc(&page->first_page->_count);
67 	get_huge_page_tail(page);
68 }
69 
70 /*
71  * This is meant to be called as the FOLL_GET operation of
72  * follow_page() and it must be called with the proper PT lock held
73  * while the pte (or pmd_trans_huge) is still mapping the page.
74  */
75 static inline void get_page_foll(struct page *page)
76 {
77 	if (unlikely(PageTail(page)))
78 		/*
79 		 * This is safe only because
80 		 * __split_huge_page_refcount() can't run under
81 		 * get_page_foll() because we hold the proper PT lock.
82 		 */
83 		__get_page_tail_foll(page, true);
84 	else {
85 		/*
86 		 * Getting a normal page or the head of a compound page
87 		 * requires an already elevated page->_count.
88 		 */
89 		VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
90 		atomic_inc(&page->_count);
91 	}
92 }
93 
94 extern unsigned long highest_memmap_pfn;
95 
96 /*
97  * in mm/vmscan.c:
98  */
99 extern int isolate_lru_page(struct page *page);
100 extern void putback_lru_page(struct page *page);
101 extern bool zone_reclaimable(struct zone *zone);
102 
103 /*
104  * in mm/rmap.c:
105  */
106 extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
107 
108 /*
109  * in mm/page_alloc.c
110  */
111 
112 /*
113  * Structure for holding the mostly immutable allocation parameters passed
114  * between functions involved in allocations, including the alloc_pages*
115  * family of functions.
116  *
117  * nodemask, migratetype and high_zoneidx are initialized only once in
118  * __alloc_pages_nodemask() and then never change.
119  *
120  * zonelist, preferred_zone and classzone_idx are set first in
121  * __alloc_pages_nodemask() for the fast path, and might be later changed
122  * in __alloc_pages_slowpath(). All other functions pass the whole structure
123  * via a const pointer.
124  */
125 struct alloc_context {
126 	struct zonelist *zonelist;
127 	nodemask_t *nodemask;
128 	struct zone *preferred_zone;
129 	int classzone_idx;
130 	int migratetype;
131 	enum zone_type high_zoneidx;
132 };
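
/*
 * A minimal sketch (hypothetical helper, not part of the allocator): how
 * the once-only fields described above could be derived from the caller's
 * gfp mask and nodemask.  zonelist, preferred_zone and classzone_idx are
 * deliberately left unset here, since they are filled in for the fast
 * path and possibly updated again in the slowpath.
 */
static inline void example_fill_alloc_context(struct alloc_context *ac,
					      gfp_t gfp_mask,
					      nodemask_t *nodemask)
{
	ac->high_zoneidx = gfp_zone(gfp_mask);
	ac->nodemask = nodemask;
	ac->migratetype = gfpflags_to_migratetype(gfp_mask);
}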
133 
134 /*
135  * Locate the struct page for both the matching buddy in our
136  * pair (buddy1) and the combined order O+1 page they form (page).
137  *
138  * 1) Any buddy B1 will have an order O twin B2 which satisfies
139  * the following equation:
140  *     B2 = B1 ^ (1 << O)
141  * For example, if the starting buddy (buddy1) is #8, its order-1
142  * buddy is #10:
143  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
144  *
145  * 2) Any buddy B will have an order O+1 parent P which
146  * satisfies the following equation:
147  *     P = B & ~(1 << O)
148  *
149  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
150  */
151 static inline unsigned long
152 __find_buddy_index(unsigned long page_idx, unsigned int order)
153 {
154 	return page_idx ^ (1 << order);
155 }
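
/*
 * A minimal sketch (hypothetical helper, not used elsewhere): the second
 * relation from the comment above, P = B & ~(1 << O), expressed the same
 * way as __find_buddy_index().  With page_idx == 8 or 10 and order == 1
 * it yields 8, the index of the combined order O+1 page.
 */
static inline unsigned long
example_find_combined_index(unsigned long page_idx, unsigned int order)
{
	return page_idx & ~(1UL << order);
}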
156 
157 extern int __isolate_free_page(struct page *page, unsigned int order);
158 extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
159 					unsigned int order);
160 extern void prep_compound_page(struct page *page, unsigned long order);
161 #ifdef CONFIG_MEMORY_FAILURE
162 extern bool is_free_buddy_page(struct page *page);
163 #endif
164 extern int user_min_free_kbytes;
165 
166 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
167 
168 /*
169  * in mm/compaction.c
170  */
171 /*
172  * compact_control is used to track pages being migrated and the free pages
173  * they are being migrated to during memory compaction. The free_pfn starts
174  * at the end of a zone and migrate_pfn begins at the start. Movable pages
175  * are moved to the end of a zone during a compaction run and the run
176  * completes when free_pfn <= migrate_pfn.
177  */
178 struct compact_control {
179 	struct list_head freepages;	/* List of free pages to migrate to */
180 	struct list_head migratepages;	/* List of pages being migrated */
181 	unsigned long nr_freepages;	/* Number of isolated free pages */
182 	unsigned long nr_migratepages;	/* Number of pages to migrate */
183 	unsigned long free_pfn;		/* isolate_freepages search base */
184 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
185 	unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
186 	enum migrate_mode mode;		/* Async or sync migration mode */
187 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
188 	int order;			/* order a direct compactor needs */
189 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
190 	const int alloc_flags;		/* alloc flags of a direct compactor */
191 	const int classzone_idx;	/* zone index of a direct compactor */
192 	struct zone *zone;
193 	int contended;			/* Signal need_resched() or lock
194 					 * contention detected during
195 					 * compaction
196 					 */
197 };
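
/*
 * A minimal sketch (hypothetical helper, not used by the compaction
 * code): the completion rule described above.  The free scanner starts
 * at the end of the zone, the migration scanner at the start, and a
 * compaction run is over once they have met or crossed.
 */
static inline bool example_compact_scanners_met(struct compact_control *cc)
{
	return cc->free_pfn <= cc->migrate_pfn;
}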
198 
199 unsigned long
200 isolate_freepages_range(struct compact_control *cc,
201 			unsigned long start_pfn, unsigned long end_pfn);
202 unsigned long
203 isolate_migratepages_range(struct compact_control *cc,
204 			   unsigned long low_pfn, unsigned long end_pfn);
205 int find_suitable_fallback(struct free_area *area, unsigned int order,
206 			int migratetype, bool only_stealable, bool *can_steal);
207 
208 #endif
209 
210 /*
211  * This function returns the order of a free page in the buddy system. In
212  * general, page_zone(page)->lock must be held by the caller to prevent the
213  * page from being allocated in parallel, which would make the returned order garbage.
214  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
215  * page cannot be allocated or merged in parallel. Alternatively, it must
216  * handle invalid values gracefully, and use page_order_unsafe() below.
217  */
218 static inline unsigned long page_order(struct page *page)
219 {
220 	/* PageBuddy() must be checked by the caller */
221 	return page_private(page);
222 }
223 
224 /*
225  * Like page_order(), but for callers who cannot afford to hold the zone lock.
226  * PageBuddy() should be checked first by the caller to minimize the race window,
227  * and invalid values must be handled gracefully.
228  *
229  * READ_ONCE is used so that if the caller assigns the result into a local
230  * variable and e.g. tests it for valid range before using, the compiler cannot
231  * decide to remove the variable and inline the page_private(page) multiple
232  * times, potentially observing different values in the tests and the actual
233  * use of the result.
234  */
235 #define page_order_unsafe(page)		READ_ONCE(page_private(page))
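
/*
 * A minimal sketch (hypothetical helper) of the usage pattern the comment
 * above describes: read the order exactly once into a local variable and
 * range check it before use, so a racing allocation can at worst make the
 * caller skip the page, never act on two different snapshots of
 * page_private().
 */
static inline unsigned long example_page_order_or_zero(struct page *page)
{
	unsigned long order;

	if (!PageBuddy(page))
		return 0;
	order = page_order_unsafe(page);
	if (order >= MAX_ORDER)		/* racy read may yield garbage */
		return 0;
	return order;
}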
236 
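/*
 * A COW mapping is a private mapping that may become writable:
 * VM_MAYWRITE is set but VM_SHARED is not.
 */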
237 static inline bool is_cow_mapping(vm_flags_t flags)
238 {
239 	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
240 }
241 
242 /* mm/util.c */
243 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
244 		struct vm_area_struct *prev, struct rb_node *rb_parent);
245 
246 #ifdef CONFIG_MMU
247 extern long populate_vma_page_range(struct vm_area_struct *vma,
248 		unsigned long start, unsigned long end, int *nonblocking);
249 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
250 			unsigned long start, unsigned long end);
251 static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
252 {
253 	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
254 }
255 
256 /*
257  * must be called with the vma's mmap_sem held for read or write, and the page locked.
258  */
259 extern void mlock_vma_page(struct page *page);
260 extern unsigned int munlock_vma_page(struct page *page);
261 
262 /*
263  * Clear the page's PageMlocked().  This can be useful in a situation where
264  * we want to unconditionally remove a page from the pagecache -- e.g.,
265  * on truncation or freeing.
266  *
267  * It is legal to call this function for any page, mlocked or not.
268  * If called for a page that is still mapped by mlocked vmas, all we do
269  * is revert to lazy LRU behaviour -- semantics are not broken.
270  */
271 extern void clear_page_mlock(struct page *page);
272 
273 /*
274  * mlock_migrate_page - called only from migrate_page_copy() to
275  * migrate the Mlocked page flag and update the statistics.
276  */
277 static inline void mlock_migrate_page(struct page *newpage, struct page *page)
278 {
279 	if (TestClearPageMlocked(page)) {
280 		unsigned long flags;
281 		int nr_pages = hpage_nr_pages(page);
282 
283 		local_irq_save(flags);
284 		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
285 		SetPageMlocked(newpage);
286 		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
287 		local_irq_restore(flags);
288 	}
289 }
290 
291 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
292 
293 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
294 extern unsigned long vma_address(struct page *page,
295 				 struct vm_area_struct *vma);
296 #endif
297 #else /* !CONFIG_MMU */
298 static inline void clear_page_mlock(struct page *page) { }
299 static inline void mlock_vma_page(struct page *page) { }
300 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
301 
302 #endif /* !CONFIG_MMU */
303 
304 /*
305  * Return the mem_map entry representing the 'offset' subpage within
306  * the maximally aligned gigantic page 'base'.  Handle any discontiguity
307  * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
308  */
309 static inline struct page *mem_map_offset(struct page *base, int offset)
310 {
311 	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
312 		return nth_page(base, offset);
313 	return base + offset;
314 }
315 
316 /*
317  * Iterator over all subpages within the maximally aligned gigantic
318  * page 'base'.  Handle any discontiguity in the mem_map.
319  */
320 static inline struct page *mem_map_next(struct page *iter,
321 						struct page *base, int offset)
322 {
323 	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
324 		unsigned long pfn = page_to_pfn(base) + offset;
325 		if (!pfn_valid(pfn))
326 			return NULL;
327 		return pfn_to_page(pfn);
328 	}
329 	return iter + 1;
330 }
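
/*
 * A minimal sketch (hypothetical loop, modelled on the gigantic page
 * copy/clear helpers): visiting every subpage of a maximally aligned
 * gigantic page with the two helpers above, so that any mem_map
 * discontiguity at MAX_ORDER_NR_PAGES boundaries is handled.
 */
static inline void example_walk_gigantic_page(struct page *base, int nr_pages)
{
	struct page *p = base;
	int i;

	for (i = 0; i < nr_pages; i++, p = mem_map_next(p, base, i)) {
		/* operate on subpage 'p' here */
	}
}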
331 
332 /*
333  * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
334  * so all functions starting at paging_init should be marked __init
335  * in those cases. SPARSEMEM, however, allows for memory hotplug,
336  * and alloc_bootmem_node is not used.
337  */
338 #ifdef CONFIG_SPARSEMEM
339 #define __paginginit __meminit
340 #else
341 #define __paginginit __init
342 #endif
343 
344 /* Memory initialisation debug and verification */
345 enum mminit_level {
346 	MMINIT_WARNING,
347 	MMINIT_VERIFY,
348 	MMINIT_TRACE
349 };
350 
351 #ifdef CONFIG_DEBUG_MEMORY_INIT
352 
353 extern int mminit_loglevel;
354 
355 #define mminit_dprintk(level, prefix, fmt, arg...) \
356 do { \
357 	if (level < mminit_loglevel) { \
358 		if (level <= MMINIT_WARNING) \
359 			printk(KERN_WARNING "mminit::" prefix " " fmt, ##arg); \
360 		else \
361 			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
362 	} \
363 } while (0)
364 
365 extern void mminit_verify_pageflags_layout(void);
366 extern void mminit_verify_zonelist(void);
367 #else
368 
369 static inline void mminit_dprintk(enum mminit_level level,
370 				const char *prefix, const char *fmt, ...)
371 {
372 }
373 
374 static inline void mminit_verify_pageflags_layout(void)
375 {
376 }
377 
378 static inline void mminit_verify_zonelist(void)
379 {
380 }
381 #endif /* CONFIG_DEBUG_MEMORY_INIT */
382 
383 /* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
384 #if defined(CONFIG_SPARSEMEM)
385 extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
386 				unsigned long *end_pfn);
387 #else
388 static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
389 				unsigned long *end_pfn)
390 {
391 }
392 #endif /* CONFIG_SPARSEMEM */
393 
394 #define ZONE_RECLAIM_NOSCAN	-2
395 #define ZONE_RECLAIM_FULL	-1
396 #define ZONE_RECLAIM_SOME	0
397 #define ZONE_RECLAIM_SUCCESS	1
398 
399 extern int hwpoison_filter(struct page *p);
400 
401 extern u32 hwpoison_filter_dev_major;
402 extern u32 hwpoison_filter_dev_minor;
403 extern u64 hwpoison_filter_flags_mask;
404 extern u64 hwpoison_filter_flags_value;
405 extern u64 hwpoison_filter_memcg;
406 extern u32 hwpoison_filter_enable;
407 
408 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
409         unsigned long, unsigned long,
410         unsigned long, unsigned long);
411 
412 extern void set_pageblock_order(void);
413 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
414 					    struct list_head *page_list);
415 /* The ALLOC_WMARK bits are used as an index into zone->watermark */
416 #define ALLOC_WMARK_MIN		WMARK_MIN
417 #define ALLOC_WMARK_LOW		WMARK_LOW
418 #define ALLOC_WMARK_HIGH	WMARK_HIGH
419 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
420 
421 /* Mask to get the watermark bits */
422 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
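
/*
 * A minimal sketch (hypothetical helper): WMARK_MIN, WMARK_LOW and
 * WMARK_HIGH are 0, 1 and 2, and ALLOC_NO_WATERMARKS is 0x04, so the mask
 * above is 0x03 and the watermark to check against can be taken straight
 * from the alloc flags.
 */
static inline unsigned long example_wmark_from_alloc_flags(struct zone *zone,
							   int alloc_flags)
{
	return zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
}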
423 
424 #define ALLOC_HARDER		0x10 /* try to alloc harder */
425 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
426 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
427 #define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
428 #define ALLOC_FAIR		0x100 /* fair zone allocation */
429 
430 enum ttu_flags;
431 struct tlbflush_unmap_batch;
432 
433 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
434 void try_to_unmap_flush(void);
435 void try_to_unmap_flush_dirty(void);
436 #else
437 static inline void try_to_unmap_flush(void)
438 {
439 }
440 static inline void try_to_unmap_flush_dirty(void)
441 {
442 }
443 
444 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
445 #endif	/* __MM_INTERNAL_H */
446