xref: /linux/include/linux/page-flags.h (revision f73a058be5d70dd81a43f16b2bbff4b1576a7af8)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that
 *   are not marked PG_reserved (as they might be in use by somebody else
 *   who does not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved in general does not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as backing storage.  These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the pageflags directly.  Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_writeback,		/* Page is under writeback */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_head,		/* Must be in bit 6 */
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_active,
	PG_workingset,
	PG_error,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	PG_arch_2,
	PG_arch_3,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif

	/*
	 * Flags only valid for compound pages.  Stored in first tail page's
	 * flags word.  Cannot use the first 8 flags or any flag marked as
	 * PF_ANY.
	 */

	/* At least one page in this folio has the hwpoison flag set */
	PG_has_hwpoisoned = PG_error,
	PG_large_rmappable = PG_workingset, /* anon or file-backed */
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)
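
/*
 * Illustrative sketch (not compiled): how the low/high split described above
 * is typically consumed.  PAGEFLAGS_MASK isolates the flag bits; the field
 * bits are recovered by shift-and-mask helpers such as page_zonenum() in
 * linux/mm.h.  The example_* names are hypothetical; ZONES_PGSHIFT and
 * ZONES_MASK are assumed from the page-flags layout headers.
 */
#if 0
static inline unsigned long example_flag_bits(const struct page *page)
{
	/* Only the low NR_PAGEFLAGS bits hold PG_* flags. */
	return page->flags & PAGEFLAGS_MASK;
}

static inline enum zone_type example_zonenum(const struct page *page)
{
	/* Field area: the zone id packed into the high bits of page->flags. */
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
#endif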

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct iff the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
	 * struct page. The alignment check aims to avoid accessing the fields
	 * (e.g. compound_head) of the @page[1]. It can avoid touching a
	 * (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of the @page[1] with PG_head
		 * because the @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(const struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))
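
/*
 * Usage sketch (illustrative only): compound_head() is safe on any page;
 * for a tail page it returns the head, otherwise the page itself, with the
 * fake-head case above resolved transparently.  example_head_pfn() is a
 * hypothetical name for illustration.
 */
#if 0
static inline unsigned long example_head_pfn(struct page *page)
{
	/* Flags tracked per compound page live on the head page. */
	struct page *head = compound_head(page);

	return page_to_pfn(head);
}
#endif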

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio.  This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p.  If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))
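
/*
 * Sketch of the contract documented above (illustrative, not compiled):
 * without a prior reference, page_folio() may race with a folio split, so
 * take a reference and then re-check that the page still belongs to the
 * folio.  example_get_folio() is a hypothetical name; folio_try_get() and
 * folio_put() are assumed from linux/mm.h.
 */
#if 0
static inline struct folio *example_get_folio(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (!folio_try_get(folio))
		return NULL;
	if (unlikely(page_folio(page) != folio)) {
		/* Raced with a split; drop the reference and give up. */
		folio_put(folio);
		return NULL;
	}
	return folio;
}
#endif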

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio.  This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)

static __always_inline int PageTail(const struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(const struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static const unsigned long *const_folio_flags(const struct folio *folio,
		unsigned n)
{
	const struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK:
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

#define FOLIO_HEAD_PAGE		0
#define FOLIO_SECOND_PAGE	1

/*
 * Macros to create function definitions for page flags
 */
#define FOLIO_TEST_FLAG(name, page)					\
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }

#define FOLIO_SET_FLAG(name, page)					\
static __always_inline void folio_set_##name(struct folio *folio)	\
{ set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void folio_clear_##name(struct folio *folio)	\
{ clear_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_SET_FLAG(name, page)					\
static __always_inline void __folio_set_##name(struct folio *folio)	\
{ __set_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void __folio_clear_##name(struct folio *folio)	\
{ __clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_SET_FLAG(name, page)					\
static __always_inline bool folio_test_set_##name(struct folio *folio)	\
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_CLEAR_FLAG(name, page)				\
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_FLAG(name, page)						\
FOLIO_TEST_FLAG(name, page)						\
FOLIO_SET_FLAG(name, page)						\
FOLIO_CLEAR_FLAG(name, page)

#define TESTPAGEFLAG(uname, lname, policy)				\
FOLIO_TEST_FLAG(lname, FOLIO_##policy)					\
static __always_inline int Page##uname(const struct page *page)	\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)					\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
__FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)
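
/*
 * Expansion sketch (illustrative only): PAGEFLAG(Dirty, dirty, PF_HEAD)
 * generates roughly the functions below.  The PF_HEAD policy redirects the
 * page variants to the head page, while the folio variants operate on the
 * folio's own flags word (FOLIO_PF_HEAD is 0, i.e. the head page).
 */
#if 0
static __always_inline bool folio_test_dirty(const struct folio *folio)
{ return test_bit(PG_dirty, const_folio_flags(folio, 0)); }
static __always_inline int PageDirty(const struct page *page)
{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
static __always_inline void SetPageDirty(struct page *page)
{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
#endif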

#define FOLIO_TEST_FLAG_FALSE(name)					\
static inline bool folio_test_##name(const struct folio *folio)	\
{ return false; }
#define FOLIO_SET_FLAG_NOOP(name)					\
static inline void folio_set_##name(struct folio *folio) { }
#define FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void folio_clear_##name(struct folio *folio) { }
#define __FOLIO_SET_FLAG_NOOP(name)					\
static inline void __folio_set_##name(struct folio *folio) { }
#define __FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void __folio_clear_##name(struct folio *folio) { }
#define FOLIO_TEST_SET_FLAG_FALSE(name)					\
static inline bool folio_test_set_##name(struct folio *folio)		\
{ return false; }
#define FOLIO_TEST_CLEAR_FLAG_FALSE(name)				\
static inline bool folio_test_clear_##name(struct folio *folio)	\
{ return false; }

#define FOLIO_FLAG_FALSE(name)						\
FOLIO_TEST_FLAG_FALSE(name)						\
FOLIO_SET_FLAG_NOOP(name)						\
FOLIO_CLEAR_FLAG_NOOP(name)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
FOLIO_TEST_FLAG_FALSE(lname)						\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
FOLIO_SET_FLAG_NOOP(lname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
__FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)					\
FOLIO_TEST_SET_FLAG_FALSE(lname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
FOLIO_TEST_CLEAR_FLAG_FALSE(lname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
	__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set operations exist for PG_writeback.  The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, const_folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(const struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#ifdef CONFIG_PAGE_IDLE_FLAG
#ifdef CONFIG_64BIT
FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif
/* See page_idle.h for !64BIT workaround */
#else /* !CONFIG_PAGE_IDLE_FLAG */
FOLIO_FLAG_FALSE(young)
FOLIO_TEST_CLEAR_FLAG_FALSE(young)
FOLIO_FLAG_FALSE(idle)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, and then page->mapping points to a struct movable_operations.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal states, the page->mapping does not exist as such, nor do these
 * flags below.  So in order to avoid testing non-existent bits, please
 * make sure that PageSlab(page) actually evaluates to false before calling
 * the following functions (e.g., PageAnon).  See mm/slab.h.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
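
/*
 * Decoding sketch (illustrative only): the low two bits of page->mapping
 * select the interpretation; masking them off recovers the real pointer.
 * This mirrors what folio_anon_vma() in mm/util.c does; example_anon_vma()
 * is a hypothetical name for illustration.
 */
#if 0
static inline struct anon_vma *example_anon_vma(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;	/* file-backed, KSM or movable page */
	return (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
}
#endif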

/*
 * Unlike the flags above, this flag is used only in fsdax mode.  It
 * indicates that this page->mapping is now shared through a reflink.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)

static __always_inline bool folio_mapping_flags(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool PageMappingFlags(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(const struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline bool __PageMovable(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(const struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(const struct page *page);

/**
 * folio_xor_flags_has_waiters - Change some folio flags.
 * @folio: The folio.
 * @mask: Bits set in this word will be changed.
 *
 * This must only be used for flags which are changed with the folio
 * lock held.  For example, it is unsafe to use for PG_dirty as that
 * can be set without the folio lock held.  It can also only be used
 * on flags which are in the range 0-6 as some of the implementations
 * only affect those bits.
 *
 * Return: Whether there are tasks waiting on the folio.
 */
static inline bool folio_xor_flags_has_waiters(struct folio *folio,
		unsigned long mask)
{
	return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
}
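
/*
 * Usage sketch (illustrative only): roughly how mm/filemap.c's folio_unlock()
 * drops PG_locked and detects waiters in a single atomic operation, relying
 * on PG_locked and PG_waiters sharing the low byte of the flags word.
 */
#if 0
void folio_unlock(struct folio *folio)
{
	/* Bit 7 is PG_waiters; x86 can test the byte's sign bit cheaply. */
	BUILD_BUG_ON(PG_waiters != 7);
	BUILD_BUG_ON(PG_locked > 7);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
		folio_wake_bit(folio, PG_locked);
}
#endif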

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage.  Anonymous
 * and CoW folios are always uptodate.  If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(const struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline bool PageUptodate(const struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}
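
/*
 * Pairing sketch (illustrative only): the smp_wmb() above pairs with the
 * smp_rmb() in folio_test_uptodate().  A filler stores the data first, then
 * marks the folio uptodate; a reader observing the bit is then guaranteed to
 * see the stored data.  The example_* names are hypothetical; folio_address()
 * and folio_size() are assumed from linux/mm.h.
 */
#if 0
/* Writer side, e.g. a read-I/O completion handler: */
static void example_fill_folio(struct folio *folio, const void *src, size_t len)
{
	memcpy(folio_address(folio), src, len);	/* data stores first */
	folio_mark_uptodate(folio);		/* smp_wmb(), then set_bit() */
}

/* Reader side: */
static ssize_t example_read_folio(struct folio *folio, void *dst, size_t len)
{
	if (!folio_test_uptodate(folio))	/* test_bit(), then smp_rmb() */
		return -EIO;
	memcpy(dst, folio_address(folio), len);	/* loads ordered after flag */
	return len;
}
#endif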

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

void __folio_start_writeback(struct folio *folio, bool keep_write);
void set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)		\
	__folio_start_writeback(folio, true)

static __always_inline bool folio_test_head(const struct folio *folio)
{
	return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(const struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(const struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() returns true only for hugetlbfs pages, not for normal or
 * transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and hugetlbfs
 * pages, but not normal pages. PageTransHuge() can be called only in the
 * core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(const struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(const struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(const struct page *page)
{
	return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by the hwpoison handler.  Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * For pages that are never mapped to userspace,
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of _mapcount won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of _mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400
#define PG_hugetlb	0x00000800
#define PG_slab		0x00001000

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
#define folio_test_type(folio, flag)					\
	((folio->page.page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_type_has_type(unsigned int page_type)
{
	return (int)page_type < PAGE_MAPCOUNT_RESERVE;
}

static inline int page_has_type(const struct page *page)
{
	return page_type_has_type(page->page_type);
}

#define FOLIO_TYPE_OPS(lname, fname)					\
static __always_inline bool folio_test_##fname(const struct folio *folio)\
{									\
	return folio_test_type(folio, PG_##lname);			\
}									\
static __always_inline void __folio_set_##fname(struct folio *folio)	\
{									\
	VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio);		\
	folio->page.page_type &= ~PG_##lname;				\
}									\
static __always_inline void __folio_clear_##fname(struct folio *folio)	\
{									\
	VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);		\
	folio->page.page_type |= PG_##lname;				\
}

#define PAGE_TYPE_OPS(uname, lname, fname)				\
FOLIO_TYPE_OPS(lname, fname)						\
static __always_inline int Page##uname(const struct page *page)	\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}
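
/*
 * Worked example (illustrative only): page_type starts at -1 (0xffffffff).
 * __SetPageTable() clears PG_table, giving 0xfffffdff; PageType() then
 * checks that the PAGE_TYPE_BASE bits are intact and PG_table is clear:
 *
 *	(0xfffffdff & (0xf0000000 | 0x00000200)) == 0xf0000000
 *
 * A _mapcount underflow or overflow would also disturb the high bits, so it
 * cannot be mistaken for a page type value.
 */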

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining unmovable PageOffline() pages without
 * putting them back to the buddy, it can do so via the memory notifier by
 * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
 * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
 * pages (now with a reference count of zero) are treated like free pages,
 * allowing the containing memory block to get offlined. A driver that
 * relies on this feature is aware that re-onlining the memory block will
 * require re-setting the pages PageOffline() and not giving them to the
 * buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard, guard)

FOLIO_TYPE_OPS(slab, slab)

/**
 * PageSlab - Determine if the page belongs to the slab allocator
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for slab pages, false for any other kind of page.
 */
static inline bool PageSlab(const struct page *page)
{
	return folio_test_slab(page_folio(page));
}

#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif

/**
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for hugetlbfs pages, false for anon pages or pages
 * belonging to other filesystems.
 */
static inline bool PageHuge(const struct page *page)
{
	return folio_test_hugetlb(page_folio(page));
}

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(const struct page *page)
{
	const struct folio *folio;

	if (PageHWPoison(page))
		return true;
	folio = page_folio(page);
	return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}

bool is_free_buddy_page(const struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(const struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	/*
	 * HugeTLB stores this information on the head page; THP keeps it per
	 * page.
	 */
	if (PageHuge(page))
		page = compound_head(page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_active	|				\
	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)
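
/*
 * Usage sketch (illustrative only): the page allocator's free path checks
 * these bits roughly like this; see free_page_is_bad()/bad_page() in
 * mm/page_alloc.c for the real thing.
 */
#if 0
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
		bad_page(page, "PAGE_FLAGS_CHECK_AT_FREE flag(s) set");
#endif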

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

/*
 * Flags stored in the second page of a compound page.  They may overlap
 * the CHECK_AT_FREE flags above, so need to be cleared.
 */
#define PAGE_FLAGS_SECOND						\
	(0xffUL /* order */		| 1UL << PG_has_hwpoisoned |	\
	 1UL << PG_large_rmappable)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)

/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(const struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(const struct folio *folio)
{
	return page_has_private(&folio->page);
}
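
/*
 * Usage sketch (illustrative only): reclaim and truncation use this test to
 * decide whether the filesystem must be asked to release the folio first,
 * e.g. via filemap_release_folio().  The pattern below is an approximation
 * of what mm/vmscan.c does; the label name is hypothetical.
 */
#if 0
	if (folio_has_private(folio) &&
	    !filemap_release_folio(folio, gfp_mask))
		goto keep_locked;	/* fs still needs its private data */
#endif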

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */