/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that
 *   are not marked PG_reserved (as they might be in use by somebody else
 *   who does not respect the caching strategy).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * In general, PG_reserved does not hinder dumping or swapping and is no
 * longer required for remap_pfn_range(); ioremap might still require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). Private allocations can
 * also use it for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as its backing storage.  These
 * are usually PageAnon or shmem pages, but note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it is first entered
 * into the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing it
 * is not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the pageflags directly.  Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
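
/*
 * Illustrative sketch (not part of this header): accessors such as
 * page_zonenum() in <linux/mm.h> read the fields area by shifting and
 * masking, conceptually:
 *
 *	zone = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *
 * with the shift/mask constants generated from the layout above.
 */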
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_writeback,		/* Page is under writeback */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_head,		/* Must be in bit 6 */
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_active,
	PG_workingset,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_owner_2,		/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
	PG_arch_2,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
	PG_arch_3,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/* Anonymous memory (and shmem) */
	PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
	/* Some filesystems */
	PG_checked = PG_owner_priv_1,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_owner_2,

	/*
	 * Set if all buffer heads in the folio are mapped.
	 * Filesystems which do not use BHs can use it for their own purpose.
	 */
	PG_mappedtodisk = PG_owner_2,

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif

	/*
	 * Flags only valid for compound pages.  Stored in first tail page's
	 * flags word.  Cannot use the first 8 flags or any flag marked as
	 * PF_ANY.
	 */

	/* At least one page in this folio has the hwpoison flag set */
	PG_has_hwpoisoned = PG_active,
	PG_large_rmappable = PG_workingset, /* anon or file-backed */
	PG_partially_mapped = PG_reclaim, /* was identified to be partially mapped */
};
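
/*
 * Usage sketch (illustrative only): test and modify flags through the
 * generated helpers below rather than by open-coding bit operations on
 * page->flags, e.g.
 *
 *	if (folio_test_dirty(folio))
 *		folio_clear_dirty(folio);
 *
 * rather than test_bit(PG_dirty, ...).  The helpers apply the per-flag
 * compound-page policies defined later in this file.
 */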

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct iff the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake
	 * head struct page. The alignment check aims to avoid accessing the
	 * fields (e.g. compound_head) of @page[1], which avoids touching a
	 * (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of @page[1] with PG_head
		 * because the @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(const struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static __always_inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))
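
/*
 * Encoding sketch (derived from the helpers above, illustrative only):
 * a tail page stores a pointer to its head page in compound_head with
 * bit 0 set as a tag, so
 *
 *	tail->compound_head == (unsigned long)head + 1
 *
 * and compound_head() recovers the head by clearing that bit; for a
 * non-tail page the bit is clear and the page itself is returned.
 * See set_compound_head() later in this file for the store side.
 */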

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio.  This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p.  If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))
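
/*
 * Usage sketch (illustrative, not part of this header): a caller that
 * holds no reference should re-check the page after gaining one, since
 * the lookup may race with a folio split.  Assuming folio_try_get()
 * and folio_put() from <linux/mm.h>:
 *
 *	struct folio *folio = page_folio(page);
 *
 *	if (!folio_try_get(folio))
 *		goto retry;
 *	if (unlikely(page_folio(page) != folio)) {
 *		folio_put(folio);
 *		goto retry;
 *	}
 */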

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio.  This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)

static __always_inline int PageTail(const struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(const struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static const unsigned long *const_folio_flags(const struct folio *folio,
		unsigned n)
{
	const struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK:
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

#define FOLIO_HEAD_PAGE		0
#define FOLIO_SECOND_PAGE	1

/*
 * Macros to create function definitions for page flags
 */
#define FOLIO_TEST_FLAG(name, page)					\
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }

#define FOLIO_SET_FLAG(name, page)					\
static __always_inline void folio_set_##name(struct folio *folio)	\
{ set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void folio_clear_##name(struct folio *folio)	\
{ clear_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_SET_FLAG(name, page)					\
static __always_inline void __folio_set_##name(struct folio *folio)	\
{ __set_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void __folio_clear_##name(struct folio *folio)	\
{ __clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_SET_FLAG(name, page)					\
static __always_inline bool folio_test_set_##name(struct folio *folio)	\
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_CLEAR_FLAG(name, page)				\
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_FLAG(name, page)						\
FOLIO_TEST_FLAG(name, page)						\
FOLIO_SET_FLAG(name, page)						\
FOLIO_CLEAR_FLAG(name, page)

#define TESTPAGEFLAG(uname, lname, policy)				\
FOLIO_TEST_FLAG(lname, FOLIO_##policy)					\
static __always_inline int Page##uname(const struct page *page)	\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)					\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
__FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void __SetPage##uname(struct page *page)		\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)
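
/*
 * Expansion sketch (illustrative only): PAGEFLAG(Dirty, dirty, PF_HEAD)
 * generates folio_test_dirty(), folio_set_dirty() and folio_clear_dirty()
 * operating on the head page's flags word, plus page wrappers such as
 *
 *	static __always_inline int PageDirty(const struct page *page)
 *	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *
 * where PF_HEAD() resolves @page to its compound head after the poison
 * check.
 */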

#define FOLIO_TEST_FLAG_FALSE(name)					\
static inline bool folio_test_##name(const struct folio *folio)	\
{ return false; }
#define FOLIO_SET_FLAG_NOOP(name)					\
static inline void folio_set_##name(struct folio *folio) { }
#define FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void folio_clear_##name(struct folio *folio) { }
#define __FOLIO_SET_FLAG_NOOP(name)					\
static inline void __folio_set_##name(struct folio *folio) { }
#define __FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void __folio_clear_##name(struct folio *folio) { }
#define FOLIO_TEST_SET_FLAG_FALSE(name)					\
static inline bool folio_test_set_##name(struct folio *folio)		\
{ return false; }
#define FOLIO_TEST_CLEAR_FLAG_FALSE(name)				\
static inline bool folio_test_clear_##name(struct folio *folio)		\
{ return false; }

#define FOLIO_FLAG_FALSE(name)						\
FOLIO_TEST_FLAG_FALSE(name)						\
FOLIO_SET_FLAG_NOOP(name)						\
FOLIO_CLEAR_FLAG_NOOP(name)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
FOLIO_TEST_FLAG_FALSE(lname)						\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
FOLIO_SET_FLAG_NOOP(lname)						\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
__FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname, lname)					\
FOLIO_TEST_SET_FLAG_FALSE(lname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
FOLIO_TEST_CLEAR_FLAG_FALSE(lname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
	__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
FOLIO_FLAG(active, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE)
	__FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
FOLIO_FLAG(private_2, FOLIO_HEAD_PAGE)

/* owner_2 can be set on tail pages for anon memory */
FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, const_folio_flags(folio, 0));
}

FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
#else
FOLIO_FLAG_FALSE(swapcache)
#endif

FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)

#ifdef CONFIG_MMU
FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
	FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE)
#else
FOLIO_FLAG_FALSE(mlocked)
	__FOLIO_CLEAR_FLAG_NOOP(mlocked)
	FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked)
	FOLIO_TEST_SET_FLAG_FALSE(mlocked)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#ifdef CONFIG_PAGE_IDLE_FLAG
#ifdef CONFIG_64BIT
FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif
/* See page_idle.h for !64BIT workaround */
#else /* !CONFIG_PAGE_IDLE_FLAG */
FOLIO_FLAG_FALSE(young)
FOLIO_TEST_CLEAR_FLAG_FALSE(young)
FOLIO_FLAG_FALSE(idle)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous folio mapped into a user virtual memory area,
 * folio->mapping points to its anon_vma, not to a struct address_space;
 * the PAGE_MAPPING_ANON bit is set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then folio->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages; in that case folio->mapping points to a struct movable_operations.
 *
 * Please note that, confusingly, "folio_mapping" refers to the inode
 * address_space which maps the folio from disk; whereas "folio_mapped"
 * refers to the user virtual address space into which the folio is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal states, folio->mapping does not exist as such, nor do these
 * flags below.  So in order to avoid testing non-existent bits, please
 * make sure that folio_test_slab(folio) actually evaluates to false
 * before calling the following functions (e.g., folio_test_anon).
 * See mm/slab.h.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
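
/*
 * Decode sketch (derived from the definitions above, illustrative only):
 * the low two bits of folio->mapping select the interpretation of the
 * remaining pointer bits:
 *
 *	0x0 - struct address_space (pagecache)
 *	0x1 - anon_vma (PAGE_MAPPING_ANON)
 *	0x2 - struct movable_operations (PAGE_MAPPING_MOVABLE alone)
 *	0x3 - KSM private structure (PAGE_MAPPING_KSM)
 */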

/*
 * Unlike the flags above, this one is used only in fsdax mode.  It
 * indicates that the page->mapping is now shared due to a reflink
 * operation.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)

static __always_inline bool folio_mapping_flags(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool PageMappingFlags(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnonNotKsm(const struct page *page)
{
	unsigned long flags = (unsigned long)page_folio(page)->mapping;

	return (flags & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON;
}

static __always_inline bool PageAnon(const struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline bool __PageMovable(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
FOLIO_TEST_FLAG_FALSE(ksm)
#endif

u64 stable_page_flags(const struct page *page);

/**
 * folio_xor_flags_has_waiters - Change some folio flags.
 * @folio: The folio.
 * @mask: Bits set in this word will be changed.
 *
 * This must only be used for flags which are changed with the folio
 * lock held.  For example, it is unsafe to use for PG_dirty as that
 * can be set without the folio lock held.  It can also only be used
 * on flags which are in the range 0-6 as some of the implementations
 * only affect those bits.
 *
 * Return: Whether there are tasks waiting on the folio.
 */
static inline bool folio_xor_flags_has_waiters(struct folio *folio,
		unsigned long mask)
{
	return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
}
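
/*
 * Usage sketch (illustrative only): an unlock path can clear PG_locked
 * and learn in the same atomic operation whether anybody needs waking:
 *
 *	if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
 *		folio_wake_bit(folio, PG_locked);
 *
 * folio_wake_bit() is assumed from mm/filemap.c, where the real caller,
 * folio_unlock(), lives.
 */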

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage.  Anonymous
 * and CoW folios are always uptodate.  If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(const struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline bool PageUptodate(const struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
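
/*
 * Barrier pairing sketch (illustrative only): a writer fills the folio
 * and then publishes it, while a reader observes the bit before
 * touching the data, e.g.
 *
 *	writer				reader
 *	------				------
 *	memcpy(folio_address(folio),	if (folio_test_uptodate(folio))
 *	       src, len);			use(folio_address(folio));
 *	folio_mark_uptodate(folio);
 *
 * The smp_wmb()/smp_rmb() pair above orders the data stores before the
 * PG_uptodate store, and the PG_uptodate load before the data loads.
 * folio_address() is assumed from <linux/mm.h>; use() is hypothetical.
 */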

void __folio_start_writeback(struct folio *folio, bool keep_write);
void set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)	\
	__folio_start_writeback(folio, true)

static __always_inline bool folio_test_head(const struct folio *folio)
{
	return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(const struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(const struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
FOLIO_FLAG_FALSE(partially_mapped)
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(const struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(const struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(const struct page *page)
{
	return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by the hwpoison handler and cleared by THP split or
 * free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * For pages that do not use mapcount, page_type may be used.
 * The low 24 bits of pagetype may be used for your own purposes, as long
 * as you are careful to not affect the top 8 bits.  The low bits of
 * pagetype will be overwritten when you clear the page_type from the page.
 */
enum pagetype {
	/* 0x00-0x7f are positive numbers, ie mapcount */
	/* Reserve 0x80-0xef for mapcount overflow. */
	PGTY_buddy	= 0xf0,
	PGTY_offline	= 0xf1,
	PGTY_table	= 0xf2,
	PGTY_guard	= 0xf3,
	PGTY_hugetlb	= 0xf4,
	PGTY_slab	= 0xf5,
	PGTY_zsmalloc	= 0xf6,
	PGTY_unaccepted	= 0xf7,

	PGTY_mapcount_underflow = 0xff
};

static inline bool page_type_has_type(int page_type)
{
	return page_type < (PGTY_mapcount_underflow << 24);
}

/* This takes a mapcount which is one more than page->_mapcount */
static inline bool page_mapcount_is_type(unsigned int mapcount)
{
	return page_type_has_type(mapcount - 1);
}

static inline bool page_has_type(const struct page *page)
{
	return page_mapcount_is_type(data_race(page->page_type));
}
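
/*
 * Encoding sketch (derived from the enum above, illustrative only):
 * the type lives in the top byte of page->page_type, e.g. a page-table
 * page has
 *
 *	page->page_type == (unsigned int)PGTY_table << 24	(0xf2000000)
 *
 * Interpreted as a signed int this compares below
 * PGTY_mapcount_underflow << 24, so page_type_has_type() returns true,
 * while a genuine mapcount (a small non-negative value) does not.
 */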

#define FOLIO_TYPE_OPS(lname, fname)					\
static __always_inline bool folio_test_##fname(const struct folio *folio) \
{									\
	return data_race(folio->page.page_type >> 24) == PGTY_##lname;	\
}									\
static __always_inline void __folio_set_##fname(struct folio *folio)	\
{									\
	if (folio_test_##fname(folio))					\
		return;							\
	VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX,	\
			folio);						\
	folio->page.page_type = (unsigned int)PGTY_##lname << 24;	\
}									\
static __always_inline void __folio_clear_##fname(struct folio *folio)	\
{									\
	if (folio->page.page_type == UINT_MAX)				\
		return;							\
	VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);		\
	folio->page.page_type = UINT_MAX;				\
}

#define PAGE_TYPE_OPS(uname, lname, fname)				\
FOLIO_TYPE_OPS(lname, fname)						\
static __always_inline int Page##uname(const struct page *page)	\
{									\
	return data_race(page->page_type >> 24) == PGTY_##lname;	\
}									\
static __always_inline void __SetPage##uname(struct page *page)		\
{									\
	if (Page##uname(page))						\
		return;							\
	VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page);	\
	page->page_type = (unsigned int)PGTY_##lname << 24;		\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	if (page->page_type == UINT_MAX)				\
		return;							\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type = UINT_MAX;					\
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or not
 * onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * When a memory block gets onlined, all pages are initialized with a
 * refcount of 1 and PageOffline(). generic_online_page() will
 * take care of clearing PageOffline().
 *
 * If a driver wants to allow offlining unmovable PageOffline() pages without
 * putting them back to the buddy, it can do so via the memory notifier by
 * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
 * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
 * pages (now with a reference count of zero) are treated like free (unmanaged)
 * pages, allowing the containing memory block to get offlined. A driver that
 * relies on this feature is aware that re-onlining the memory block will
 * require not giving them to the buddy via generic_online_page().  A sketch
 * of this notifier protocol follows below.
 *
 * Memory offlining code will not adjust the managed page count for any
 * PageOffline() pages, treating them like they were never exposed to the
 * buddy using generic_online_page().
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);
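
/*
 * Notifier sketch for the offlining protocol described above
 * (illustrative only; a hypothetical balloon-style driver):
 *
 *	static int balloon_memory_notify(struct notifier_block *nb,
 *					 unsigned long action, void *arg)
 *	{
 *		struct memory_notify *mn = arg;
 *
 *		switch (action) {
 *		case MEM_GOING_OFFLINE:
 *			// drop our reference on each inflated page in
 *			// [mn->start_pfn, mn->start_pfn + mn->nr_pages)
 *			break;
 *		case MEM_CANCEL_OFFLINE:
 *			// offlining failed: take the references back
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 * balloon_memory_notify() and its page tracking are assumptions; only
 * the notifier events and struct memory_notify come from
 * <linux/memory.h>.
 */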

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard, guard)

FOLIO_TYPE_OPS(slab, slab)

/**
 * PageSlab - Determine if the page belongs to the slab allocator
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for slab pages, false for any other kind of page.
 */
static inline bool PageSlab(const struct page *page)
{
	return folio_test_slab(page_folio(page));
}

#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif

PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)

/*
 * Mark pages that have to be accepted before they are touched for the
 * first time.
 *
 * Serialized with zone lock.
 */
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)

/**
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for hugetlbfs pages, false for anon pages or pages
 * belonging to other filesystems.
 */
static inline bool PageHuge(const struct page *page)
{
	return folio_test_hugetlb(page_folio(page));
}

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(const struct page *page)
{
	const struct folio *folio;

	if (PageHWPoison(page))
		return true;
	folio = page_folio(page);
	return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}

bool is_free_buddy_page(const struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(const struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	/*
	 * HugeTLB stores this information on the head page; THP keeps it per
	 * page
	 */
	if (PageHuge(page))
		page = compound_head(page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_active	|				\
	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

/*
 * Flags stored in the second page of a compound page.  They may overlap
 * the CHECK_AT_FREE flags above, so need to be cleared.
 */
#define PAGE_FLAGS_SECOND						\
	(0xffUL /* order */		| 1UL << PG_has_hwpoisoned |	\
	 1UL << PG_large_rmappable	| 1UL << PG_partially_mapped)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * folio_has_private - Determine if folio has private stuff
 * @folio: The folio to be checked
 *
 * Determine if a folio has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int folio_has_private(const struct folio *folio)
{
	return !!(folio->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */