1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_MM_TYPES_H
3 #define _LINUX_MM_TYPES_H
4
5 #include <linux/mm_types_task.h>
6
7 #include <linux/auxvec.h>
8 #include <linux/kref.h>
9 #include <linux/list.h>
10 #include <linux/spinlock.h>
11 #include <linux/rbtree.h>
12 #include <linux/maple_tree.h>
13 #include <linux/rwsem.h>
14 #include <linux/completion.h>
15 #include <linux/cpumask.h>
16 #include <linux/uprobes.h>
17 #include <linux/rcupdate.h>
18 #include <linux/page-flags-layout.h>
19 #include <linux/workqueue.h>
20 #include <linux/seqlock.h>
21 #include <linux/percpu_counter.h>
22 #include <linux/types.h>
23 #include <linux/bitmap.h>
24
25 #include <asm/mmu.h>
26
27 #ifndef AT_VECTOR_SIZE_ARCH
28 #define AT_VECTOR_SIZE_ARCH 0
29 #endif
30 #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
31
32
33 struct address_space;
34 struct futex_private_hash;
35 struct mem_cgroup;
36
37 typedef struct {
38 unsigned long f;
39 } memdesc_flags_t;
40
41 /*
42 * Each physical page in the system has a struct page associated with
43 * it to keep track of whatever it is we are using the page for at the
44 * moment. Note that we have no way to track which tasks are using
45 * a page, though if it is a pagecache page, rmap structures can tell us
46 * who is mapping it.
47 *
48 * If you allocate the page using alloc_pages(), you can use some of the
49 * space in struct page for your own purposes. The five words in the main
50 * union are available, except for bit 0 of the first word which must be
51 * kept clear. Many users use this word to store a pointer to an object
52 * which is guaranteed to be aligned. If you use the same storage as
53 * page->mapping, you must restore it to NULL before freeing the page.
54 *
55 * The mapcount field must not be used for own purposes.
56 *
57 * If you want to use the refcount field, it must be used in such a way
58 * that other CPUs temporarily incrementing and then decrementing the
59 * refcount does not cause problems. On receiving the page from
60 * alloc_pages(), the refcount will be positive.
61 *
62 * If you allocate pages of order > 0, you can use some of the fields
63 * in each subpage, but you may need to restore some of their values
64 * afterwards.
65 *
66 * SLUB uses cmpxchg_double() to atomically update its freelist and counters.
67 * That requires that freelist & counters in struct slab be adjacent and
68 * double-word aligned. Because struct slab currently just reinterprets the
69 * bits of struct page, we align all struct pages to double-word boundaries,
70 * and ensure that 'freelist' is aligned within struct slab.
71 */
72 #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
73 #define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
74 #else
75 #define _struct_page_alignment __aligned(sizeof(unsigned long))
76 #endif
77
78 struct page {
79 memdesc_flags_t flags; /* Atomic flags, some possibly
80 * updated asynchronously */
81 /*
82 * Five words (20/40 bytes) are available in this union.
83 * WARNING: bit 0 of the first word is used for PageTail(). That
84 * means the other users of this union MUST NOT use the bit to
85 * avoid collision and false-positive PageTail().
86 */
87 union {
88 struct { /* Page cache and anonymous pages */
89 /**
90 * @lru: Pageout list, eg. active_list protected by
91 * lruvec->lru_lock. Sometimes used as a generic list
92 * by the page owner.
93 */
94 union {
95 struct list_head lru;
96
97 /* Or, free page */
98 struct list_head buddy_list;
99 struct list_head pcp_list;
100 struct llist_node pcp_llist;
101 };
102 struct address_space *mapping;
103 union {
104 pgoff_t __folio_index; /* Our offset within mapping. */
105 unsigned long share; /* share count for fsdax */
106 };
107 /**
108 * @private: Mapping-private opaque data.
109 * Usually used for buffer_heads if PagePrivate.
110 * Used for swp_entry_t if swapcache flag set.
111 * Indicates order in the buddy system if PageBuddy
112 * or on pcp_llist.
113 */
114 unsigned long private;
115 };
116 struct { /* page_pool used by netstack */
117 /**
118 * @pp_magic: magic value to avoid recycling non
119 * page_pool allocated pages.
120 */
121 unsigned long pp_magic;
122 struct page_pool *pp;
123 unsigned long _pp_mapping_pad;
124 unsigned long dma_addr;
125 atomic_long_t pp_ref_count;
126 };
127 struct { /* Tail pages of compound page */
128 unsigned long compound_head; /* Bit zero is set */
129 };
130 struct { /* ZONE_DEVICE pages */
131 /*
132 * The first word is used for compound_head or folio
133 * pgmap
134 */
135 void *_unused_pgmap_compound_head;
136 void *zone_device_data;
137 /*
138 * ZONE_DEVICE private pages are counted as being
139 * mapped so the next 3 words hold the mapping, index,
140 * and private fields from the source anonymous or
141 * page cache page while the page is migrated to device
142 * private memory.
143 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
144 * use the mapping, index, and private fields when
145 * pmem backed DAX files are mapped.
146 */
147 };
148
149 /** @rcu_head: You can use this to free a page by RCU. */
150 struct rcu_head rcu_head;
151 };
152
153 union { /* This union is 4 bytes in size. */
154 /*
155 * For head pages of typed folios, the value stored here
156 * allows for determining what this page is used for. The
157 * tail pages of typed folios will not store a type
158 * (page_type == _mapcount == -1).
159 *
160 * See page-flags.h for a list of page types which are currently
161 * stored here.
162 *
163 * Owners of typed folios may reuse the lower 16 bit of the
164 * head page page_type field after setting the page type,
165 * but must reset these 16 bit to -1 before clearing the
166 * page type.
167 */
168 unsigned int page_type;
169
170 /*
171 * For pages that are part of non-typed folios for which mappings
172 * are tracked via the RMAP, encodes the number of times this page
173 * is directly referenced by a page table.
174 *
175 * Note that the mapcount is always initialized to -1, so that
176 * transitions both from it and to it can be tracked, using
177 * atomic_inc_and_test() and atomic_add_negative(-1).
178 */
179 atomic_t _mapcount;
180 };
181
182 /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
183 atomic_t _refcount;
184
185 #ifdef CONFIG_MEMCG
186 unsigned long memcg_data;
187 #elif defined(CONFIG_SLAB_OBJ_EXT)
188 unsigned long _unused_slab_obj_exts;
189 #endif
190
191 /*
192 * On machines where all RAM is mapped into kernel address space,
193 * we can simply calculate the virtual address. On machines with
194 * highmem some memory is mapped into kernel virtual memory
195 * dynamically, so we need a place to store that address.
196 * Note that this field could be 16 bits on x86 ... ;)
197 *
198 * Architectures with slow multiplication can define
199 * WANT_PAGE_VIRTUAL in asm/page.h
200 */
201 #if defined(WANT_PAGE_VIRTUAL)
202 void *virtual; /* Kernel virtual address (NULL if
203 not kmapped, ie. highmem) */
204 #endif /* WANT_PAGE_VIRTUAL */
205
206 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
207 int _last_cpupid;
208 #endif
209
210 #ifdef CONFIG_KMSAN
211 /*
212 * KMSAN metadata for this page:
213 * - shadow page: every bit indicates whether the corresponding
214 * bit of the original page is initialized (0) or not (1);
215 * - origin page: every 4 bytes contain an id of the stack trace
216 * where the uninitialized value was created.
217 */
218 struct page *kmsan_shadow;
219 struct page *kmsan_origin;
220 #endif
221 } _struct_page_alignment;
222
223 /*
224 * struct encoded_page - a nonexistent type marking this pointer
225 *
226 * An 'encoded_page' pointer is a pointer to a regular 'struct page', but
227 * with the low bits of the pointer indicating extra context-dependent
228 * information. Only used in mmu_gather handling, and this acts as a type
229 * system check on that use.
230 *
231 * We only really have two guaranteed bits in general, although you could
232 * play with 'struct page' alignment (see CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
233 * for more.
234 *
235 * Use the supplied helper functions to endcode/decode the pointer and bits.
236 */
237 struct encoded_page;
238
239 #define ENCODED_PAGE_BITS 3ul
240
241 /* Perform rmap removal after we have flushed the TLB. */
242 #define ENCODED_PAGE_BIT_DELAY_RMAP 1ul
243
244 /*
245 * The next item in an encoded_page array is the "nr_pages" argument, specifying
246 * the number of consecutive pages starting from this page, that all belong to
247 * the same folio. For example, "nr_pages" corresponds to the number of folio
248 * references that must be dropped. If this bit is not set, "nr_pages" is
249 * implicitly 1.
250 */
251 #define ENCODED_PAGE_BIT_NR_PAGES_NEXT 2ul
252
encode_page(struct page * page,unsigned long flags)253 static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags)
254 {
255 BUILD_BUG_ON(flags > ENCODED_PAGE_BITS);
256 return (struct encoded_page *)(flags | (unsigned long)page);
257 }
258
encoded_page_flags(struct encoded_page * page)259 static inline unsigned long encoded_page_flags(struct encoded_page *page)
260 {
261 return ENCODED_PAGE_BITS & (unsigned long)page;
262 }
263
encoded_page_ptr(struct encoded_page * page)264 static inline struct page *encoded_page_ptr(struct encoded_page *page)
265 {
266 return (struct page *)(~ENCODED_PAGE_BITS & (unsigned long)page);
267 }
268
encode_nr_pages(unsigned long nr)269 static __always_inline struct encoded_page *encode_nr_pages(unsigned long nr)
270 {
271 VM_WARN_ON_ONCE((nr << 2) >> 2 != nr);
272 return (struct encoded_page *)(nr << 2);
273 }
274
encoded_nr_pages(struct encoded_page * page)275 static __always_inline unsigned long encoded_nr_pages(struct encoded_page *page)
276 {
277 return ((unsigned long)page) >> 2;
278 }
279
280 /*
281 * A swap entry has to fit into a "unsigned long", as the entry is hidden
282 * in the "index" field of the swapper address space.
283 */
284 typedef struct {
285 unsigned long val;
286 } swp_entry_t;
287
288 #if defined(CONFIG_MEMCG) || defined(CONFIG_SLAB_OBJ_EXT)
289 /* We have some extra room after the refcount in tail pages. */
290 #define NR_PAGES_IN_LARGE_FOLIO
291 #endif
292
293 /*
294 * On 32bit, we can cut the required metadata in half, because:
295 * (a) PID_MAX_LIMIT implicitly limits the number of MMs we could ever have,
296 * so we can limit MM IDs to 15 bit (32767).
297 * (b) We don't expect folios where even a single complete PTE mapping by
298 * one MM would exceed 15 bits (order-15).
299 */
300 #ifdef CONFIG_64BIT
301 typedef int mm_id_mapcount_t;
302 #define MM_ID_MAPCOUNT_MAX INT_MAX
303 typedef unsigned int mm_id_t;
304 #else /* !CONFIG_64BIT */
305 typedef short mm_id_mapcount_t;
306 #define MM_ID_MAPCOUNT_MAX SHRT_MAX
307 typedef unsigned short mm_id_t;
308 #endif /* CONFIG_64BIT */
309
310 /* We implicitly use the dummy ID for init-mm etc. where we never rmap pages. */
311 #define MM_ID_DUMMY 0
312 #define MM_ID_MIN (MM_ID_DUMMY + 1)
313
314 /*
315 * We leave the highest bit of each MM id unused, so we can store a flag
316 * in the highest bit of each folio->_mm_id[].
317 */
318 #define MM_ID_BITS ((sizeof(mm_id_t) * BITS_PER_BYTE) - 1)
319 #define MM_ID_MASK ((1U << MM_ID_BITS) - 1)
320 #define MM_ID_MAX MM_ID_MASK
321
322 /*
323 * In order to use bit_spin_lock(), which requires an unsigned long, we
324 * operate on folio->_mm_ids when working on flags.
325 */
326 #define FOLIO_MM_IDS_LOCK_BITNUM MM_ID_BITS
327 #define FOLIO_MM_IDS_LOCK_BIT BIT(FOLIO_MM_IDS_LOCK_BITNUM)
328 #define FOLIO_MM_IDS_SHARED_BITNUM (2 * MM_ID_BITS + 1)
329 #define FOLIO_MM_IDS_SHARED_BIT BIT(FOLIO_MM_IDS_SHARED_BITNUM)
330
331 /**
332 * struct folio - Represents a contiguous set of bytes.
333 * @flags: Identical to the page flags.
334 * @lru: Least Recently Used list; tracks how recently this folio was used.
335 * @mlock_count: Number of times this folio has been pinned by mlock().
336 * @mapping: The file this page belongs to, or refers to the anon_vma for
337 * anonymous memory.
338 * @index: Offset within the file, in units of pages. For anonymous memory,
339 * this is the index from the beginning of the mmap.
340 * @share: number of DAX mappings that reference this folio. See
341 * dax_associate_entry.
342 * @private: Filesystem per-folio data (see folio_attach_private()).
343 * @swap: Used for swp_entry_t if folio_test_swapcache().
344 * @_mapcount: Do not access this member directly. Use folio_mapcount() to
345 * find out how many times this folio is mapped by userspace.
346 * @_refcount: Do not access this member directly. Use folio_ref_count()
347 * to find how many references there are to this folio.
348 * @memcg_data: Memory Control Group data.
349 * @pgmap: Metadata for ZONE_DEVICE mappings
350 * @virtual: Virtual address in the kernel direct map.
351 * @_last_cpupid: IDs of last CPU and last process that accessed the folio.
352 * @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
353 * @_large_mapcount: Do not use directly, call folio_mapcount().
354 * @_nr_pages_mapped: Do not use outside of rmap and debug code.
355 * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
356 * @_nr_pages: Do not use directly, call folio_nr_pages().
357 * @_mm_id: Do not use outside of rmap code.
358 * @_mm_ids: Do not use outside of rmap code.
359 * @_mm_id_mapcount: Do not use outside of rmap code.
360 * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
361 * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
362 * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
363 * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
364 * @_deferred_list: Folios to be split under memory pressure.
365 * @_unused_slab_obj_exts: Placeholder to match obj_exts in struct slab.
366 *
367 * A folio is a physically, virtually and logically contiguous set
368 * of bytes. It is a power-of-two in size, and it is aligned to that
369 * same power-of-two. It is at least as large as %PAGE_SIZE. If it is
370 * in the page cache, it is at a file offset which is a multiple of that
371 * power-of-two. It may be mapped into userspace at an address which is
372 * at an arbitrary page offset, but its kernel virtual address is aligned
373 * to its size.
374 */
375 struct folio {
376 /* private: don't document the anon union */
377 union {
378 struct {
379 /* public: */
380 memdesc_flags_t flags;
381 union {
382 struct list_head lru;
383 /* private: avoid cluttering the output */
384 /* For the Unevictable "LRU list" slot */
385 struct {
386 /* Avoid compound_head */
387 void *__filler;
388 /* public: */
389 unsigned int mlock_count;
390 /* private: */
391 };
392 /* public: */
393 struct dev_pagemap *pgmap;
394 };
395 struct address_space *mapping;
396 union {
397 pgoff_t index;
398 unsigned long share;
399 };
400 union {
401 void *private;
402 swp_entry_t swap;
403 };
404 atomic_t _mapcount;
405 atomic_t _refcount;
406 #ifdef CONFIG_MEMCG
407 unsigned long memcg_data;
408 #elif defined(CONFIG_SLAB_OBJ_EXT)
409 unsigned long _unused_slab_obj_exts;
410 #endif
411 #if defined(WANT_PAGE_VIRTUAL)
412 void *virtual;
413 #endif
414 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
415 int _last_cpupid;
416 #endif
417 /* private: the union with struct page is transitional */
418 };
419 struct page page;
420 };
421 union {
422 struct {
423 unsigned long _flags_1;
424 unsigned long _head_1;
425 union {
426 struct {
427 /* public: */
428 atomic_t _large_mapcount;
429 atomic_t _nr_pages_mapped;
430 #ifdef CONFIG_64BIT
431 atomic_t _entire_mapcount;
432 atomic_t _pincount;
433 #endif /* CONFIG_64BIT */
434 mm_id_mapcount_t _mm_id_mapcount[2];
435 union {
436 mm_id_t _mm_id[2];
437 unsigned long _mm_ids;
438 };
439 /* private: the union with struct page is transitional */
440 };
441 unsigned long _usable_1[4];
442 };
443 atomic_t _mapcount_1;
444 atomic_t _refcount_1;
445 /* public: */
446 #ifdef NR_PAGES_IN_LARGE_FOLIO
447 unsigned int _nr_pages;
448 #endif /* NR_PAGES_IN_LARGE_FOLIO */
449 /* private: the union with struct page is transitional */
450 };
451 struct page __page_1;
452 };
453 union {
454 struct {
455 unsigned long _flags_2;
456 unsigned long _head_2;
457 /* public: */
458 struct list_head _deferred_list;
459 #ifndef CONFIG_64BIT
460 atomic_t _entire_mapcount;
461 atomic_t _pincount;
462 #endif /* !CONFIG_64BIT */
463 /* private: the union with struct page is transitional */
464 };
465 struct page __page_2;
466 };
467 union {
468 struct {
469 unsigned long _flags_3;
470 unsigned long _head_3;
471 /* public: */
472 void *_hugetlb_subpool;
473 void *_hugetlb_cgroup;
474 void *_hugetlb_cgroup_rsvd;
475 void *_hugetlb_hwpoison;
476 /* private: the union with struct page is transitional */
477 };
478 struct page __page_3;
479 };
480 };
481
482 #define FOLIO_MATCH(pg, fl) \
483 static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
484 FOLIO_MATCH(flags, flags);
485 FOLIO_MATCH(lru, lru);
486 FOLIO_MATCH(mapping, mapping);
487 FOLIO_MATCH(compound_head, lru);
488 FOLIO_MATCH(__folio_index, index);
489 FOLIO_MATCH(private, private);
490 FOLIO_MATCH(_mapcount, _mapcount);
491 FOLIO_MATCH(_refcount, _refcount);
492 #ifdef CONFIG_MEMCG
493 FOLIO_MATCH(memcg_data, memcg_data);
494 #endif
495 #if defined(WANT_PAGE_VIRTUAL)
496 FOLIO_MATCH(virtual, virtual);
497 #endif
498 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
499 FOLIO_MATCH(_last_cpupid, _last_cpupid);
500 #endif
501 #undef FOLIO_MATCH
502 #define FOLIO_MATCH(pg, fl) \
503 static_assert(offsetof(struct folio, fl) == \
504 offsetof(struct page, pg) + sizeof(struct page))
505 FOLIO_MATCH(flags, _flags_1);
506 FOLIO_MATCH(compound_head, _head_1);
507 FOLIO_MATCH(_mapcount, _mapcount_1);
508 FOLIO_MATCH(_refcount, _refcount_1);
509 #undef FOLIO_MATCH
510 #define FOLIO_MATCH(pg, fl) \
511 static_assert(offsetof(struct folio, fl) == \
512 offsetof(struct page, pg) + 2 * sizeof(struct page))
513 FOLIO_MATCH(flags, _flags_2);
514 FOLIO_MATCH(compound_head, _head_2);
515 #undef FOLIO_MATCH
516 #define FOLIO_MATCH(pg, fl) \
517 static_assert(offsetof(struct folio, fl) == \
518 offsetof(struct page, pg) + 3 * sizeof(struct page))
519 FOLIO_MATCH(flags, _flags_3);
520 FOLIO_MATCH(compound_head, _head_3);
521 #undef FOLIO_MATCH
522
523 /**
524 * struct ptdesc - Memory descriptor for page tables.
525 * @pt_flags: enum pt_flags plus zone/node/section.
526 * @pt_rcu_head: For freeing page table pages.
527 * @pt_list: List of used page tables. Used for s390 gmap shadow pages
528 * (which are not linked into the user page tables) and x86
529 * pgds.
530 * @_pt_pad_1: Padding that aliases with page's compound head.
531 * @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs.
532 * @__page_mapping: Aliases with page->mapping. Unused for page tables.
533 * @pt_index: Used for s390 gmap.
534 * @pt_mm: Used for x86 pgds.
535 * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
536 * @pt_share_count: Used for HugeTLB PMD page table share count.
537 * @_pt_pad_2: Padding to ensure proper alignment.
538 * @ptl: Lock for the page table.
539 * @__page_type: Same as page->page_type. Unused for page tables.
540 * @__page_refcount: Same as page refcount.
541 * @pt_memcg_data: Memcg data. Tracked for page tables here.
542 *
543 * This struct overlays struct page for now. Do not modify without a good
544 * understanding of the issues.
545 */
546 struct ptdesc {
547 memdesc_flags_t pt_flags;
548
549 union {
550 struct rcu_head pt_rcu_head;
551 struct list_head pt_list;
552 struct {
553 unsigned long _pt_pad_1;
554 pgtable_t pmd_huge_pte;
555 };
556 };
557 unsigned long __page_mapping;
558
559 union {
560 pgoff_t pt_index;
561 struct mm_struct *pt_mm;
562 atomic_t pt_frag_refcount;
563 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
564 atomic_t pt_share_count;
565 #endif
566 };
567
568 union {
569 unsigned long _pt_pad_2;
570 #if ALLOC_SPLIT_PTLOCKS
571 spinlock_t *ptl;
572 #else
573 spinlock_t ptl;
574 #endif
575 };
576 unsigned int __page_type;
577 atomic_t __page_refcount;
578 #ifdef CONFIG_MEMCG
579 unsigned long pt_memcg_data;
580 #endif
581 };
582
583 #define TABLE_MATCH(pg, pt) \
584 static_assert(offsetof(struct page, pg) == offsetof(struct ptdesc, pt))
585 TABLE_MATCH(flags, pt_flags);
586 TABLE_MATCH(compound_head, pt_list);
587 TABLE_MATCH(compound_head, _pt_pad_1);
588 TABLE_MATCH(mapping, __page_mapping);
589 TABLE_MATCH(__folio_index, pt_index);
590 TABLE_MATCH(rcu_head, pt_rcu_head);
591 TABLE_MATCH(page_type, __page_type);
592 TABLE_MATCH(_refcount, __page_refcount);
593 #ifdef CONFIG_MEMCG
594 TABLE_MATCH(memcg_data, pt_memcg_data);
595 #endif
596 #undef TABLE_MATCH
597 static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
598
599 #define ptdesc_page(pt) (_Generic((pt), \
600 const struct ptdesc *: (const struct page *)(pt), \
601 struct ptdesc *: (struct page *)(pt)))
602
603 #define ptdesc_folio(pt) (_Generic((pt), \
604 const struct ptdesc *: (const struct folio *)(pt), \
605 struct ptdesc *: (struct folio *)(pt)))
606
607 #define page_ptdesc(p) (_Generic((p), \
608 const struct page *: (const struct ptdesc *)(p), \
609 struct page *: (struct ptdesc *)(p)))
610
611 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
ptdesc_pmd_pts_init(struct ptdesc * ptdesc)612 static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
613 {
614 atomic_set(&ptdesc->pt_share_count, 0);
615 }
616
ptdesc_pmd_pts_inc(struct ptdesc * ptdesc)617 static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc)
618 {
619 atomic_inc(&ptdesc->pt_share_count);
620 }
621
ptdesc_pmd_pts_dec(struct ptdesc * ptdesc)622 static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
623 {
624 atomic_dec(&ptdesc->pt_share_count);
625 }
626
ptdesc_pmd_pts_count(const struct ptdesc * ptdesc)627 static inline int ptdesc_pmd_pts_count(const struct ptdesc *ptdesc)
628 {
629 return atomic_read(&ptdesc->pt_share_count);
630 }
631
ptdesc_pmd_is_shared(struct ptdesc * ptdesc)632 static inline bool ptdesc_pmd_is_shared(struct ptdesc *ptdesc)
633 {
634 return !!ptdesc_pmd_pts_count(ptdesc);
635 }
636 #else
ptdesc_pmd_pts_init(struct ptdesc * ptdesc)637 static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
638 {
639 }
640 #endif
641
642 /*
643 * Used for sizing the vmemmap region on some architectures
644 */
645 #define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
646
647 /*
648 * page_private can be used on tail pages. However, PagePrivate is only
649 * checked by the VM on the head page. So page_private on the tail pages
650 * should be used for data that's ancillary to the head page (eg attaching
651 * buffer heads to tail pages after attaching buffer heads to the head page)
652 */
653 #define page_private(page) ((page)->private)
654
set_page_private(struct page * page,unsigned long private)655 static inline void set_page_private(struct page *page, unsigned long private)
656 {
657 page->private = private;
658 }
659
folio_get_private(const struct folio * folio)660 static inline void *folio_get_private(const struct folio *folio)
661 {
662 return folio->private;
663 }
664
665 typedef unsigned long vm_flags_t;
666
667 /*
668 * freeptr_t represents a SLUB freelist pointer, which might be encoded
669 * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
670 */
671 typedef struct { unsigned long v; } freeptr_t;
672
673 /*
674 * A region containing a mapping of a non-memory backed file under NOMMU
675 * conditions. These are held in a global tree and are pinned by the VMAs that
676 * map parts of them.
677 */
678 struct vm_region {
679 struct rb_node vm_rb; /* link in global region tree */
680 vm_flags_t vm_flags; /* VMA vm_flags */
681 unsigned long vm_start; /* start address of region */
682 unsigned long vm_end; /* region initialised to here */
683 unsigned long vm_top; /* region allocated to here */
684 unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
685 struct file *vm_file; /* the backing file or NULL */
686
687 int vm_usage; /* region usage count (access under nommu_region_sem) */
688 bool vm_icache_flushed : 1; /* true if the icache has been flushed for
689 * this region */
690 };
691
692 #ifdef CONFIG_USERFAULTFD
693 #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
694 struct vm_userfaultfd_ctx {
695 struct userfaultfd_ctx *ctx;
696 };
697 #else /* CONFIG_USERFAULTFD */
698 #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
699 struct vm_userfaultfd_ctx {};
700 #endif /* CONFIG_USERFAULTFD */
701
702 struct anon_vma_name {
703 struct kref kref;
704 /* The name needs to be at the end because it is dynamically sized. */
705 char name[];
706 };
707
708 #ifdef CONFIG_ANON_VMA_NAME
709 /*
710 * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
711 * either keep holding the lock while using the returned pointer or it should
712 * raise anon_vma_name refcount before releasing the lock.
713 */
714 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
715 struct anon_vma_name *anon_vma_name_alloc(const char *name);
716 void anon_vma_name_free(struct kref *kref);
717 #else /* CONFIG_ANON_VMA_NAME */
anon_vma_name(struct vm_area_struct * vma)718 static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
719 {
720 return NULL;
721 }
722
anon_vma_name_alloc(const char * name)723 static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
724 {
725 return NULL;
726 }
727 #endif
728
729 #define VMA_LOCK_OFFSET 0x40000000
730 #define VMA_REF_LIMIT (VMA_LOCK_OFFSET - 1)
731
732 struct vma_numab_state {
733 /*
734 * Initialised as time in 'jiffies' after which VMA
735 * should be scanned. Delays first scan of new VMA by at
736 * least sysctl_numa_balancing_scan_delay:
737 */
738 unsigned long next_scan;
739
740 /*
741 * Time in jiffies when pids_active[] is reset to
742 * detect phase change behaviour:
743 */
744 unsigned long pids_active_reset;
745
746 /*
747 * Approximate tracking of PIDs that trapped a NUMA hinting
748 * fault. May produce false positives due to hash collisions.
749 *
750 * [0] Previous PID tracking
751 * [1] Current PID tracking
752 *
753 * Window moves after next_pid_reset has expired approximately
754 * every VMA_PID_RESET_PERIOD jiffies:
755 */
756 unsigned long pids_active[2];
757
758 /* MM scan sequence ID when scan first started after VMA creation */
759 int start_scan_seq;
760
761 /*
762 * MM scan sequence ID when the VMA was last completely scanned.
763 * A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq
764 */
765 int prev_scan_seq;
766 };
767
768 #ifdef __HAVE_PFNMAP_TRACKING
769 struct pfnmap_track_ctx {
770 struct kref kref;
771 unsigned long pfn;
772 unsigned long size; /* in bytes */
773 };
774 #endif
775
776 /*
777 * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
778 * manipulate mutable fields which will cause those fields to be updated in the
779 * resultant VMA.
780 *
781 * Helper functions are not required for manipulating any field.
782 */
783 struct vm_area_desc {
784 /* Immutable state. */
785 const struct mm_struct *const mm;
786 struct file *const file; /* May vary from vm_file in stacked callers. */
787 unsigned long start;
788 unsigned long end;
789
790 /* Mutable fields. Populated with initial state. */
791 pgoff_t pgoff;
792 struct file *vm_file;
793 vm_flags_t vm_flags;
794 pgprot_t page_prot;
795
796 /* Write-only fields. */
797 const struct vm_operations_struct *vm_ops;
798 void *private_data;
799 };
800
801 /*
802 * This struct describes a virtual memory area. There is one of these
803 * per VM-area/task. A VM area is any part of the process virtual memory
804 * space that has a special rule for the page-fault handlers (ie a shared
805 * library, the executable area etc).
806 *
807 * Only explicitly marked struct members may be accessed by RCU readers before
808 * getting a stable reference.
809 *
810 * WARNING: when adding new members, please update vm_area_init_from() to copy
811 * them during vm_area_struct content duplication.
812 */
813 struct vm_area_struct {
814 /* The first cache line has the info for VMA tree walking. */
815
816 union {
817 struct {
818 /* VMA covers [vm_start; vm_end) addresses within mm */
819 unsigned long vm_start;
820 unsigned long vm_end;
821 };
822 freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
823 };
824
825 /*
826 * The address space we belong to.
827 * Unstable RCU readers are allowed to read this.
828 */
829 struct mm_struct *vm_mm;
830 pgprot_t vm_page_prot; /* Access permissions of this VMA. */
831
832 /*
833 * Flags, see mm.h.
834 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
835 */
836 union {
837 const vm_flags_t vm_flags;
838 vm_flags_t __private __vm_flags;
839 };
840
841 #ifdef CONFIG_PER_VMA_LOCK
842 /*
843 * Can only be written (using WRITE_ONCE()) while holding both:
844 * - mmap_lock (in write mode)
845 * - vm_refcnt bit at VMA_LOCK_OFFSET is set
846 * Can be read reliably while holding one of:
847 * - mmap_lock (in read or write mode)
848 * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
849 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
850 * while holding nothing (except RCU to keep the VMA struct allocated).
851 *
852 * This sequence counter is explicitly allowed to overflow; sequence
853 * counter reuse can only lead to occasional unnecessary use of the
854 * slowpath.
855 */
856 unsigned int vm_lock_seq;
857 #endif
858 /*
859 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
860 * list, after a COW of one of the file pages. A MAP_SHARED vma
861 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
862 * or brk vma (with NULL file) can only be in an anon_vma list.
863 */
864 struct list_head anon_vma_chain; /* Serialized by mmap_lock &
865 * page_table_lock */
866 struct anon_vma *anon_vma; /* Serialized by page_table_lock */
867
868 /* Function pointers to deal with this struct. */
869 const struct vm_operations_struct *vm_ops;
870
871 /* Information about our backing store: */
872 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
873 units */
874 struct file * vm_file; /* File we map to (can be NULL). */
875 void * vm_private_data; /* was vm_pte (shared mem) */
876
877 #ifdef CONFIG_SWAP
878 atomic_long_t swap_readahead_info;
879 #endif
880 #ifndef CONFIG_MMU
881 struct vm_region *vm_region; /* NOMMU mapping region */
882 #endif
883 #ifdef CONFIG_NUMA
884 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
885 #endif
886 #ifdef CONFIG_NUMA_BALANCING
887 struct vma_numab_state *numab_state; /* NUMA Balancing state */
888 #endif
889 #ifdef CONFIG_PER_VMA_LOCK
890 /* Unstable RCU readers are allowed to read this. */
891 refcount_t vm_refcnt ____cacheline_aligned_in_smp;
892 #ifdef CONFIG_DEBUG_LOCK_ALLOC
893 struct lockdep_map vmlock_dep_map;
894 #endif
895 #endif
896 /*
897 * For areas with an address space and backing store,
898 * linkage into the address_space->i_mmap interval tree.
899 *
900 */
901 struct {
902 struct rb_node rb;
903 unsigned long rb_subtree_last;
904 } shared;
905 #ifdef CONFIG_ANON_VMA_NAME
906 /*
907 * For private and shared anonymous mappings, a pointer to a null
908 * terminated string containing the name given to the vma, or NULL if
909 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
910 */
911 struct anon_vma_name *anon_name;
912 #endif
913 struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
914 #ifdef __HAVE_PFNMAP_TRACKING
915 struct pfnmap_track_ctx *pfnmap_track_ctx;
916 #endif
917 } __randomize_layout;
918
919 #ifdef CONFIG_NUMA
920 #define vma_policy(vma) ((vma)->vm_policy)
921 #else
922 #define vma_policy(vma) NULL
923 #endif
924
925 #ifdef CONFIG_SCHED_MM_CID
926 struct mm_cid {
927 u64 time;
928 int cid;
929 int recent_cid;
930 };
931 #endif
932
933 /*
934 * Opaque type representing current mm_struct flag state. Must be accessed via
935 * mm_flags_xxx() helper functions.
936 */
937 #define NUM_MM_FLAG_BITS (64)
938 typedef struct {
939 DECLARE_BITMAP(__mm_flags, NUM_MM_FLAG_BITS);
940 } __private mm_flags_t;
941
942 struct kioctx_table;
943 struct iommu_mm_data;
944 struct mm_struct {
945 struct {
946 /*
947 * Fields which are often written to are placed in a separate
948 * cache line.
949 */
950 struct {
951 /**
952 * @mm_count: The number of references to &struct
953 * mm_struct (@mm_users count as 1).
954 *
955 * Use mmgrab()/mmdrop() to modify. When this drops to
956 * 0, the &struct mm_struct is freed.
957 */
958 atomic_t mm_count;
959 } ____cacheline_aligned_in_smp;
960
961 struct maple_tree mm_mt;
962
963 unsigned long mmap_base; /* base of mmap area */
964 unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
965 #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
966 /* Base addresses for compatible mmap() */
967 unsigned long mmap_compat_base;
968 unsigned long mmap_compat_legacy_base;
969 #endif
970 unsigned long task_size; /* size of task vm space */
971 pgd_t * pgd;
972
973 #ifdef CONFIG_MEMBARRIER
974 /**
975 * @membarrier_state: Flags controlling membarrier behavior.
976 *
977 * This field is close to @pgd to hopefully fit in the same
978 * cache-line, which needs to be touched by switch_mm().
979 */
980 atomic_t membarrier_state;
981 #endif
982
983 /**
984 * @mm_users: The number of users including userspace.
985 *
986 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
987 * drops to 0 (i.e. when the task exits and there are no other
988 * temporary reference holders), we also release a reference on
989 * @mm_count (which may then free the &struct mm_struct if
990 * @mm_count also drops to 0).
991 */
992 atomic_t mm_users;
993
994 #ifdef CONFIG_SCHED_MM_CID
995 /**
996 * @pcpu_cid: Per-cpu current cid.
997 *
998 * Keep track of the currently allocated mm_cid for each cpu.
999 * The per-cpu mm_cid values are serialized by their respective
1000 * runqueue locks.
1001 */
1002 struct mm_cid __percpu *pcpu_cid;
1003 /*
1004 * @mm_cid_next_scan: Next mm_cid scan (in jiffies).
1005 *
1006 * When the next mm_cid scan is due (in jiffies).
1007 */
1008 unsigned long mm_cid_next_scan;
1009 /**
1010 * @nr_cpus_allowed: Number of CPUs allowed for mm.
1011 *
1012 * Number of CPUs allowed in the union of all mm's
1013 * threads allowed CPUs.
1014 */
1015 unsigned int nr_cpus_allowed;
1016 /**
1017 * @max_nr_cid: Maximum number of allowed concurrency
1018 * IDs allocated.
1019 *
1020 * Track the highest number of allowed concurrency IDs
1021 * allocated for the mm.
1022 */
1023 atomic_t max_nr_cid;
1024 /**
1025 * @cpus_allowed_lock: Lock protecting mm cpus_allowed.
1026 *
1027 * Provide mutual exclusion for mm cpus_allowed and
1028 * mm nr_cpus_allowed updates.
1029 */
1030 raw_spinlock_t cpus_allowed_lock;
1031 #endif
1032 #ifdef CONFIG_MMU
1033 atomic_long_t pgtables_bytes; /* size of all page tables */
1034 #endif
1035 int map_count; /* number of VMAs */
1036
1037 spinlock_t page_table_lock; /* Protects page tables and some
1038 * counters
1039 */
1040 /*
1041 * Typically the current mmap_lock's offset is 56 bytes from
1042 * the last cacheline boundary, which is very optimal, as
1043 * its two hot fields 'count' and 'owner' sit in 2 different
1044 * cachelines, and when mmap_lock is highly contended, both
1045 * of the 2 fields will be accessed frequently, current layout
1046 * will help to reduce cache bouncing.
1047 *
1048 * So please be careful with adding new fields before
1049 * mmap_lock, which can easily push the 2 fields into one
1050 * cacheline.
1051 */
1052 struct rw_semaphore mmap_lock;
1053
1054 struct list_head mmlist; /* List of maybe swapped mm's. These
1055 * are globally strung together off
1056 * init_mm.mmlist, and are protected
1057 * by mmlist_lock
1058 */
1059 #ifdef CONFIG_PER_VMA_LOCK
1060 struct rcuwait vma_writer_wait;
1061 /*
1062 * This field has lock-like semantics, meaning it is sometimes
1063 * accessed with ACQUIRE/RELEASE semantics.
1064 * Roughly speaking, incrementing the sequence number is
1065 * equivalent to releasing locks on VMAs; reading the sequence
1066 * number can be part of taking a read lock on a VMA.
1067 * Incremented every time mmap_lock is write-locked/unlocked.
1068 * Initialized to 0, therefore odd values indicate mmap_lock
1069 * is write-locked and even values that it's released.
1070 *
1071 * Can be modified under write mmap_lock using RELEASE
1072 * semantics.
1073 * Can be read with no other protection when holding write
1074 * mmap_lock.
1075 * Can be read with ACQUIRE semantics if not holding write
1076 * mmap_lock.
1077 */
1078 seqcount_t mm_lock_seq;
1079 #endif
1080 #ifdef CONFIG_FUTEX_PRIVATE_HASH
1081 struct mutex futex_hash_lock;
1082 struct futex_private_hash __rcu *futex_phash;
1083 struct futex_private_hash *futex_phash_new;
1084 /* futex-ref */
1085 unsigned long futex_batches;
1086 struct rcu_head futex_rcu;
1087 atomic_long_t futex_atomic;
1088 unsigned int __percpu *futex_ref;
1089 #endif
1090
1091 unsigned long hiwater_rss; /* High-watermark of RSS usage */
1092 unsigned long hiwater_vm; /* High-water virtual memory usage */
1093
1094 unsigned long total_vm; /* Total pages mapped */
1095 unsigned long locked_vm; /* Pages that have PG_mlocked set */
1096 atomic64_t pinned_vm; /* Refcount permanently increased */
1097 unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
1098 unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
1099 unsigned long stack_vm; /* VM_STACK */
1100 vm_flags_t def_flags;
1101
1102 /**
1103 * @write_protect_seq: Locked when any thread is write
1104 * protecting pages mapped by this mm to enforce a later COW,
1105 * for instance during page table copying for fork().
1106 */
1107 seqcount_t write_protect_seq;
1108
1109 spinlock_t arg_lock; /* protect the below fields */
1110
1111 unsigned long start_code, end_code, start_data, end_data;
1112 unsigned long start_brk, brk, start_stack;
1113 unsigned long arg_start, arg_end, env_start, env_end;
1114
1115 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
1116
1117 #ifdef CONFIG_ARCH_HAS_ELF_CORE_EFLAGS
1118 /* the ABI-related flags from the ELF header. Used for core dump */
1119 unsigned long saved_e_flags;
1120 #endif
1121
1122 struct percpu_counter rss_stat[NR_MM_COUNTERS];
1123
1124 struct linux_binfmt *binfmt;
1125
1126 /* Architecture-specific MM context */
1127 mm_context_t context;
1128
1129 mm_flags_t flags; /* Must use mm_flags_* hlpers to access */
1130
1131 #ifdef CONFIG_AIO
1132 spinlock_t ioctx_lock;
1133 struct kioctx_table __rcu *ioctx_table;
1134 #endif
1135 #ifdef CONFIG_MEMCG
1136 /*
1137 * "owner" points to a task that is regarded as the canonical
1138 * user/owner of this mm. All of the following must be true in
1139 * order for it to be changed:
1140 *
1141 * current == mm->owner
1142 * current->mm != mm
1143 * new_owner->mm == mm
1144 * new_owner->alloc_lock is held
1145 */
1146 struct task_struct __rcu *owner;
1147 #endif
1148 struct user_namespace *user_ns;
1149
1150 /* store ref to file /proc/<pid>/exe symlink points to */
1151 struct file __rcu *exe_file;
1152 #ifdef CONFIG_MMU_NOTIFIER
1153 struct mmu_notifier_subscriptions *notifier_subscriptions;
1154 #endif
1155 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
1156 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
1157 #endif
1158 #ifdef CONFIG_NUMA_BALANCING
1159 /*
1160 * numa_next_scan is the next time that PTEs will be remapped
1161 * PROT_NONE to trigger NUMA hinting faults; such faults gather
1162 * statistics and migrate pages to new nodes if necessary.
1163 */
1164 unsigned long numa_next_scan;
1165
1166 /* Restart point for scanning and remapping PTEs. */
1167 unsigned long numa_scan_offset;
1168
1169 /* numa_scan_seq prevents two threads remapping PTEs. */
1170 int numa_scan_seq;
1171 #endif
1172 /*
1173 * An operation with batched TLB flushing is going on. Anything
1174 * that can move process memory needs to flush the TLB when
1175 * moving a PROT_NONE mapped page.
1176 */
1177 atomic_t tlb_flush_pending;
1178 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1179 /* See flush_tlb_batched_pending() */
1180 atomic_t tlb_flush_batched;
1181 #endif
1182 struct uprobes_state uprobes_state;
1183 #ifdef CONFIG_PREEMPT_RT
1184 struct rcu_head delayed_drop;
1185 #endif
1186 #ifdef CONFIG_HUGETLB_PAGE
1187 atomic_long_t hugetlb_usage;
1188 #endif
1189 struct work_struct async_put_work;
1190
1191 #ifdef CONFIG_IOMMU_MM_DATA
1192 struct iommu_mm_data *iommu_mm;
1193 #endif
1194 #ifdef CONFIG_KSM
1195 /*
1196 * Represent how many pages of this process are involved in KSM
1197 * merging (not including ksm_zero_pages).
1198 */
1199 unsigned long ksm_merging_pages;
1200 /*
1201 * Represent how many pages are checked for ksm merging
1202 * including merged and not merged.
1203 */
1204 unsigned long ksm_rmap_items;
1205 /*
1206 * Represent how many empty pages are merged with kernel zero
1207 * pages when enabling KSM use_zero_pages.
1208 */
1209 atomic_long_t ksm_zero_pages;
1210 #endif /* CONFIG_KSM */
1211 #ifdef CONFIG_LRU_GEN_WALKS_MMU
1212 struct {
1213 /* this mm_struct is on lru_gen_mm_list */
1214 struct list_head list;
1215 /*
1216 * Set when switching to this mm_struct, as a hint of
1217 * whether it has been used since the last time per-node
1218 * page table walkers cleared the corresponding bits.
1219 */
1220 unsigned long bitmap;
1221 #ifdef CONFIG_MEMCG
1222 /* points to the memcg of "owner" above */
1223 struct mem_cgroup *memcg;
1224 #endif
1225 } lru_gen;
1226 #endif /* CONFIG_LRU_GEN_WALKS_MMU */
1227 #ifdef CONFIG_MM_ID
1228 mm_id_t mm_id;
1229 #endif /* CONFIG_MM_ID */
1230 } __randomize_layout;
1231
1232 /*
1233 * The mm_cpumask needs to be at the end of mm_struct, because it
1234 * is dynamically sized based on nr_cpu_ids.
1235 */
1236 unsigned long cpu_bitmap[];
1237 };
1238
1239 /* Set the first system word of mm flags, non-atomically. */
__mm_flags_set_word(struct mm_struct * mm,unsigned long value)1240 static inline void __mm_flags_set_word(struct mm_struct *mm, unsigned long value)
1241 {
1242 unsigned long *bitmap = ACCESS_PRIVATE(&mm->flags, __mm_flags);
1243
1244 bitmap_copy(bitmap, &value, BITS_PER_LONG);
1245 }
1246
1247 /* Obtain a read-only view of the bitmap. */
__mm_flags_get_bitmap(const struct mm_struct * mm)1248 static inline const unsigned long *__mm_flags_get_bitmap(const struct mm_struct *mm)
1249 {
1250 return (const unsigned long *)ACCESS_PRIVATE(&mm->flags, __mm_flags);
1251 }
1252
1253 /* Read the first system word of mm flags, non-atomically. */
__mm_flags_get_word(const struct mm_struct * mm)1254 static inline unsigned long __mm_flags_get_word(const struct mm_struct *mm)
1255 {
1256 const unsigned long *bitmap = __mm_flags_get_bitmap(mm);
1257
1258 return bitmap_read(bitmap, 0, BITS_PER_LONG);
1259 }
1260
1261 /*
1262 * Update the first system word of mm flags ONLY, applying the specified mask to
1263 * it, then setting all flags specified by bits.
1264 */
__mm_flags_set_mask_bits_word(struct mm_struct * mm,unsigned long mask,unsigned long bits)1265 static inline void __mm_flags_set_mask_bits_word(struct mm_struct *mm,
1266 unsigned long mask, unsigned long bits)
1267 {
1268 unsigned long *bitmap = ACCESS_PRIVATE(&mm->flags, __mm_flags);
1269
1270 set_mask_bits(bitmap, mask, bits);
1271 }
1272
1273 #define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
1274 MT_FLAGS_USE_RCU)
1275 extern struct mm_struct init_mm;
1276
1277 /* Pointer magic because the dynamic array size confuses some compilers. */
mm_init_cpumask(struct mm_struct * mm)1278 static inline void mm_init_cpumask(struct mm_struct *mm)
1279 {
1280 unsigned long cpu_bitmap = (unsigned long)mm;
1281
1282 cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
1283 cpumask_clear((struct cpumask *)cpu_bitmap);
1284 }
1285
1286 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
mm_cpumask(struct mm_struct * mm)1287 static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
1288 {
1289 return (struct cpumask *)&mm->cpu_bitmap;
1290 }
1291
1292 #ifdef CONFIG_LRU_GEN
1293
1294 struct lru_gen_mm_list {
1295 /* mm_struct list for page table walkers */
1296 struct list_head fifo;
1297 /* protects the list above */
1298 spinlock_t lock;
1299 };
1300
1301 #endif /* CONFIG_LRU_GEN */
1302
1303 #ifdef CONFIG_LRU_GEN_WALKS_MMU
1304
1305 void lru_gen_add_mm(struct mm_struct *mm);
1306 void lru_gen_del_mm(struct mm_struct *mm);
1307 void lru_gen_migrate_mm(struct mm_struct *mm);
1308
lru_gen_init_mm(struct mm_struct * mm)1309 static inline void lru_gen_init_mm(struct mm_struct *mm)
1310 {
1311 INIT_LIST_HEAD(&mm->lru_gen.list);
1312 mm->lru_gen.bitmap = 0;
1313 #ifdef CONFIG_MEMCG
1314 mm->lru_gen.memcg = NULL;
1315 #endif
1316 }
1317
lru_gen_use_mm(struct mm_struct * mm)1318 static inline void lru_gen_use_mm(struct mm_struct *mm)
1319 {
1320 /*
1321 * When the bitmap is set, page reclaim knows this mm_struct has been
1322 * used since the last time it cleared the bitmap. So it might be worth
1323 * walking the page tables of this mm_struct to clear the accessed bit.
1324 */
1325 WRITE_ONCE(mm->lru_gen.bitmap, -1);
1326 }
1327
1328 #else /* !CONFIG_LRU_GEN_WALKS_MMU */
1329
lru_gen_add_mm(struct mm_struct * mm)1330 static inline void lru_gen_add_mm(struct mm_struct *mm)
1331 {
1332 }
1333
lru_gen_del_mm(struct mm_struct * mm)1334 static inline void lru_gen_del_mm(struct mm_struct *mm)
1335 {
1336 }
1337
lru_gen_migrate_mm(struct mm_struct * mm)1338 static inline void lru_gen_migrate_mm(struct mm_struct *mm)
1339 {
1340 }
1341
lru_gen_init_mm(struct mm_struct * mm)1342 static inline void lru_gen_init_mm(struct mm_struct *mm)
1343 {
1344 }
1345
lru_gen_use_mm(struct mm_struct * mm)1346 static inline void lru_gen_use_mm(struct mm_struct *mm)
1347 {
1348 }
1349
1350 #endif /* CONFIG_LRU_GEN_WALKS_MMU */
1351
1352 struct vma_iterator {
1353 struct ma_state mas;
1354 };
1355
1356 #define VMA_ITERATOR(name, __mm, __addr) \
1357 struct vma_iterator name = { \
1358 .mas = { \
1359 .tree = &(__mm)->mm_mt, \
1360 .index = __addr, \
1361 .node = NULL, \
1362 .status = ma_start, \
1363 }, \
1364 }
1365
vma_iter_init(struct vma_iterator * vmi,struct mm_struct * mm,unsigned long addr)1366 static inline void vma_iter_init(struct vma_iterator *vmi,
1367 struct mm_struct *mm, unsigned long addr)
1368 {
1369 mas_init(&vmi->mas, &mm->mm_mt, addr);
1370 }
1371
1372 #ifdef CONFIG_SCHED_MM_CID
1373
1374 enum mm_cid_state {
1375 MM_CID_UNSET = -1U, /* Unset state has lazy_put flag set. */
1376 MM_CID_LAZY_PUT = (1U << 31),
1377 };
1378
mm_cid_is_unset(int cid)1379 static inline bool mm_cid_is_unset(int cid)
1380 {
1381 return cid == MM_CID_UNSET;
1382 }
1383
mm_cid_is_lazy_put(int cid)1384 static inline bool mm_cid_is_lazy_put(int cid)
1385 {
1386 return !mm_cid_is_unset(cid) && (cid & MM_CID_LAZY_PUT);
1387 }
1388
mm_cid_is_valid(int cid)1389 static inline bool mm_cid_is_valid(int cid)
1390 {
1391 return !(cid & MM_CID_LAZY_PUT);
1392 }
1393
mm_cid_set_lazy_put(int cid)1394 static inline int mm_cid_set_lazy_put(int cid)
1395 {
1396 return cid | MM_CID_LAZY_PUT;
1397 }
1398
mm_cid_clear_lazy_put(int cid)1399 static inline int mm_cid_clear_lazy_put(int cid)
1400 {
1401 return cid & ~MM_CID_LAZY_PUT;
1402 }
1403
1404 /*
1405 * mm_cpus_allowed: Union of all mm's threads allowed CPUs.
1406 */
mm_cpus_allowed(struct mm_struct * mm)1407 static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm)
1408 {
1409 unsigned long bitmap = (unsigned long)mm;
1410
1411 bitmap += offsetof(struct mm_struct, cpu_bitmap);
1412 /* Skip cpu_bitmap */
1413 bitmap += cpumask_size();
1414 return (struct cpumask *)bitmap;
1415 }
1416
1417 /* Accessor for struct mm_struct's cidmask. */
mm_cidmask(struct mm_struct * mm)1418 static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
1419 {
1420 unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm);
1421
1422 /* Skip mm_cpus_allowed */
1423 cid_bitmap += cpumask_size();
1424 return (struct cpumask *)cid_bitmap;
1425 }
1426
mm_init_cid(struct mm_struct * mm,struct task_struct * p)1427 static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
1428 {
1429 int i;
1430
1431 for_each_possible_cpu(i) {
1432 struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
1433
1434 pcpu_cid->cid = MM_CID_UNSET;
1435 pcpu_cid->recent_cid = MM_CID_UNSET;
1436 pcpu_cid->time = 0;
1437 }
1438 mm->nr_cpus_allowed = p->nr_cpus_allowed;
1439 atomic_set(&mm->max_nr_cid, 0);
1440 raw_spin_lock_init(&mm->cpus_allowed_lock);
1441 cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
1442 cpumask_clear(mm_cidmask(mm));
1443 }
1444
mm_alloc_cid_noprof(struct mm_struct * mm,struct task_struct * p)1445 static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
1446 {
1447 mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid);
1448 if (!mm->pcpu_cid)
1449 return -ENOMEM;
1450 mm_init_cid(mm, p);
1451 return 0;
1452 }
1453 #define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
1454
mm_destroy_cid(struct mm_struct * mm)1455 static inline void mm_destroy_cid(struct mm_struct *mm)
1456 {
1457 free_percpu(mm->pcpu_cid);
1458 mm->pcpu_cid = NULL;
1459 }
1460
mm_cid_size(void)1461 static inline unsigned int mm_cid_size(void)
1462 {
1463 return 2 * cpumask_size(); /* mm_cpus_allowed(), mm_cidmask(). */
1464 }
1465
mm_set_cpus_allowed(struct mm_struct * mm,const struct cpumask * cpumask)1466 static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask)
1467 {
1468 struct cpumask *mm_allowed = mm_cpus_allowed(mm);
1469
1470 if (!mm)
1471 return;
1472 /* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */
1473 raw_spin_lock(&mm->cpus_allowed_lock);
1474 cpumask_or(mm_allowed, mm_allowed, cpumask);
1475 WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed));
1476 raw_spin_unlock(&mm->cpus_allowed_lock);
1477 }
1478 #else /* CONFIG_SCHED_MM_CID */
mm_init_cid(struct mm_struct * mm,struct task_struct * p)1479 static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
mm_alloc_cid(struct mm_struct * mm,struct task_struct * p)1480 static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; }
mm_destroy_cid(struct mm_struct * mm)1481 static inline void mm_destroy_cid(struct mm_struct *mm) { }
1482
mm_cid_size(void)1483 static inline unsigned int mm_cid_size(void)
1484 {
1485 return 0;
1486 }
mm_set_cpus_allowed(struct mm_struct * mm,const struct cpumask * cpumask)1487 static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) { }
1488 #endif /* CONFIG_SCHED_MM_CID */
1489
1490 struct mmu_gather;
1491 extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
1492 extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
1493 extern void tlb_finish_mmu(struct mmu_gather *tlb);
1494
1495 struct vm_fault;
1496
1497 /**
1498 * typedef vm_fault_t - Return type for page fault handlers.
1499 *
1500 * Page fault handlers return a bitmask of %VM_FAULT values.
1501 */
1502 typedef __bitwise unsigned int vm_fault_t;
1503
1504 /**
1505 * enum vm_fault_reason - Page fault handlers return a bitmask of
1506 * these values to tell the core VM what happened when handling the
1507 * fault. Used to decide whether a process gets delivered SIGBUS or
1508 * just gets major/minor fault counters bumped up.
1509 *
1510 * @VM_FAULT_OOM: Out Of Memory
1511 * @VM_FAULT_SIGBUS: Bad access
1512 * @VM_FAULT_MAJOR: Page read from storage
1513 * @VM_FAULT_HWPOISON: Hit poisoned small page
1514 * @VM_FAULT_HWPOISON_LARGE: Hit poisoned large page. Index encoded
1515 * in upper bits
1516 * @VM_FAULT_SIGSEGV: segmentation fault
1517 * @VM_FAULT_NOPAGE: ->fault installed the pte, not return page
1518 * @VM_FAULT_LOCKED: ->fault locked the returned page
1519 * @VM_FAULT_RETRY: ->fault blocked, must retry
1520 * @VM_FAULT_FALLBACK: huge page fault failed, fall back to small
1521 * @VM_FAULT_DONE_COW: ->fault has fully handled COW
1522 * @VM_FAULT_NEEDDSYNC: ->fault did not modify page tables and needs
1523 * fsync() to complete (for synchronous page faults
1524 * in DAX)
1525 * @VM_FAULT_COMPLETED: ->fault completed, meanwhile mmap lock released
1526 * @VM_FAULT_HINDEX_MASK: mask HINDEX value
1527 *
1528 */
1529 enum vm_fault_reason {
1530 VM_FAULT_OOM = (__force vm_fault_t)0x000001,
1531 VM_FAULT_SIGBUS = (__force vm_fault_t)0x000002,
1532 VM_FAULT_MAJOR = (__force vm_fault_t)0x000004,
1533 VM_FAULT_HWPOISON = (__force vm_fault_t)0x000010,
1534 VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
1535 VM_FAULT_SIGSEGV = (__force vm_fault_t)0x000040,
1536 VM_FAULT_NOPAGE = (__force vm_fault_t)0x000100,
1537 VM_FAULT_LOCKED = (__force vm_fault_t)0x000200,
1538 VM_FAULT_RETRY = (__force vm_fault_t)0x000400,
1539 VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800,
1540 VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000,
1541 VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000,
1542 VM_FAULT_COMPLETED = (__force vm_fault_t)0x004000,
1543 VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,
1544 };
1545
1546 /* Encode hstate index for a hwpoisoned large page */
1547 #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
1548 #define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
1549
1550 #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | \
1551 VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \
1552 VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)
1553
1554 #define VM_FAULT_RESULT_TRACE \
1555 { VM_FAULT_OOM, "OOM" }, \
1556 { VM_FAULT_SIGBUS, "SIGBUS" }, \
1557 { VM_FAULT_MAJOR, "MAJOR" }, \
1558 { VM_FAULT_HWPOISON, "HWPOISON" }, \
1559 { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \
1560 { VM_FAULT_SIGSEGV, "SIGSEGV" }, \
1561 { VM_FAULT_NOPAGE, "NOPAGE" }, \
1562 { VM_FAULT_LOCKED, "LOCKED" }, \
1563 { VM_FAULT_RETRY, "RETRY" }, \
1564 { VM_FAULT_FALLBACK, "FALLBACK" }, \
1565 { VM_FAULT_DONE_COW, "DONE_COW" }, \
1566 { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }, \
1567 { VM_FAULT_COMPLETED, "COMPLETED" }
1568
1569 struct vm_special_mapping {
1570 const char *name; /* The name, e.g. "[vdso]". */
1571
1572 /*
1573 * If .fault is not provided, this points to a
1574 * NULL-terminated array of pages that back the special mapping.
1575 *
1576 * This must not be NULL unless .fault is provided.
1577 */
1578 struct page **pages;
1579
1580 /*
1581 * If non-NULL, then this is called to resolve page faults
1582 * on the special mapping. If used, .pages is not checked.
1583 */
1584 vm_fault_t (*fault)(const struct vm_special_mapping *sm,
1585 struct vm_area_struct *vma,
1586 struct vm_fault *vmf);
1587
1588 int (*mremap)(const struct vm_special_mapping *sm,
1589 struct vm_area_struct *new_vma);
1590
1591 void (*close)(const struct vm_special_mapping *sm,
1592 struct vm_area_struct *vma);
1593 };
1594
1595 enum tlb_flush_reason {
1596 TLB_FLUSH_ON_TASK_SWITCH,
1597 TLB_REMOTE_SHOOTDOWN,
1598 TLB_LOCAL_SHOOTDOWN,
1599 TLB_LOCAL_MM_SHOOTDOWN,
1600 TLB_REMOTE_SEND_IPI,
1601 TLB_REMOTE_WRONG_CPU,
1602 NR_TLB_FLUSH_REASONS,
1603 };
1604
1605 /**
1606 * enum fault_flag - Fault flag definitions.
1607 * @FAULT_FLAG_WRITE: Fault was a write fault.
1608 * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
1609 * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
1610 * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
1611 * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
1612 * @FAULT_FLAG_TRIED: The fault has been tried once.
1613 * @FAULT_FLAG_USER: The fault originated in userspace.
1614 * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
1615 * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
1616 * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
1617 * @FAULT_FLAG_UNSHARE: The fault is an unsharing request to break COW in a
1618 * COW mapping, making sure that an exclusive anon page is
1619 * mapped after the fault.
1620 * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
1621 * We should only access orig_pte if this flag set.
1622 * @FAULT_FLAG_VMA_LOCK: The fault is handled under VMA lock.
1623 *
1624 * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
1625 * whether we would allow page faults to retry by specifying these two
1626 * fault flags correctly. Currently there can be three legal combinations:
1627 *
1628 * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
1629 * this is the first try
1630 *
1631 * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
1632 * we've already tried at least once
1633 *
1634 * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
1635 *
1636 * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
1637 * be used. Note that page faults can be allowed to retry for multiple times,
1638 * in which case we'll have an initial fault with flags (a) then later on
1639 * continuous faults with flags (b). We should always try to detect pending
1640 * signals before a retry to make sure the continuous page faults can still be
1641 * interrupted if necessary.
1642 *
1643 * The combination FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE is illegal.
1644 * FAULT_FLAG_UNSHARE is ignored and treated like an ordinary read fault when
1645 * applied to mappings that are not COW mappings.
1646 */
1647 enum fault_flag {
1648 FAULT_FLAG_WRITE = 1 << 0,
1649 FAULT_FLAG_MKWRITE = 1 << 1,
1650 FAULT_FLAG_ALLOW_RETRY = 1 << 2,
1651 FAULT_FLAG_RETRY_NOWAIT = 1 << 3,
1652 FAULT_FLAG_KILLABLE = 1 << 4,
1653 FAULT_FLAG_TRIED = 1 << 5,
1654 FAULT_FLAG_USER = 1 << 6,
1655 FAULT_FLAG_REMOTE = 1 << 7,
1656 FAULT_FLAG_INSTRUCTION = 1 << 8,
1657 FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
1658 FAULT_FLAG_UNSHARE = 1 << 10,
1659 FAULT_FLAG_ORIG_PTE_VALID = 1 << 11,
1660 FAULT_FLAG_VMA_LOCK = 1 << 12,
1661 };
1662
1663 typedef unsigned int __bitwise zap_flags_t;
1664
1665 /* Flags for clear_young_dirty_ptes(). */
1666 typedef int __bitwise cydp_t;
1667
1668 /* Clear the access bit */
1669 #define CYDP_CLEAR_YOUNG ((__force cydp_t)BIT(0))
1670
1671 /* Clear the dirty bit */
1672 #define CYDP_CLEAR_DIRTY ((__force cydp_t)BIT(1))
1673
1674 /*
1675 * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
1676 * other. Here is what they mean, and how to use them:
1677 *
1678 *
1679 * FIXME: For pages which are part of a filesystem, mappings are subject to the
1680 * lifetime enforced by the filesystem and we need guarantees that longterm
1681 * users like RDMA and V4L2 only establish mappings which coordinate usage with
1682 * the filesystem. Ideas for this coordination include revoking the longterm
1683 * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was
1684 * added after the problem with filesystems was found FS DAX VMAs are
1685 * specifically failed. Filesystem pages are still subject to bugs and use of
1686 * FOLL_LONGTERM should be avoided on those pages.
1687 *
1688 * In the CMA case: long term pins in a CMA region would unnecessarily fragment
1689 * that region. And so, CMA attempts to migrate the page before pinning, when
1690 * FOLL_LONGTERM is specified.
1691 *
1692 * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
1693 * but an additional pin counting system) will be invoked. This is intended for
1694 * anything that gets a page reference and then touches page data (for example,
1695 * Direct IO). This lets the filesystem know that some non-file-system entity is
1696 * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
1697 * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
1698 * a call to unpin_user_page().
1699 *
1700 * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
1701 * and separate refcounting mechanisms, however, and that means that each has
1702 * its own acquire and release mechanisms:
1703 *
1704 * FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
1705 *
1706 * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release.
1707 *
1708 * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
1709 * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
1710 * calls applied to them, and that's perfectly OK. This is a constraint on the
1711 * callers, not on the pages.)
1712 *
1713 * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
1714 * directly by the caller. That's in order to help avoid mismatches when
1715 * releasing pages: get_user_pages*() pages must be released via put_page(),
1716 * while pin_user_pages*() pages must be released via unpin_user_page().
1717 *
1718 * Please see Documentation/core-api/pin_user_pages.rst for more information.
1719 */
1720
1721 enum {
1722 /* check pte is writable */
1723 FOLL_WRITE = 1 << 0,
1724 /* do get_page on page */
1725 FOLL_GET = 1 << 1,
1726 /* give error on hole if it would be zero */
1727 FOLL_DUMP = 1 << 2,
1728 /* get_user_pages read/write w/o permission */
1729 FOLL_FORCE = 1 << 3,
1730 /*
1731 * if a disk transfer is needed, start the IO and return without waiting
1732 * upon it
1733 */
1734 FOLL_NOWAIT = 1 << 4,
1735 /* do not fault in pages */
1736 FOLL_NOFAULT = 1 << 5,
1737 /* check page is hwpoisoned */
1738 FOLL_HWPOISON = 1 << 6,
1739 /* don't do file mappings */
1740 FOLL_ANON = 1 << 7,
1741 /*
1742 * FOLL_LONGTERM indicates that the page will be held for an indefinite
1743 * time period _often_ under userspace control. This is in contrast to
1744 * iov_iter_get_pages(), whose usages are transient.
1745 */
1746 FOLL_LONGTERM = 1 << 8,
1747 /* split huge pmd before returning */
1748 FOLL_SPLIT_PMD = 1 << 9,
1749 /* allow returning PCI P2PDMA pages */
1750 FOLL_PCI_P2PDMA = 1 << 10,
1751 /* allow interrupts from generic signals */
1752 FOLL_INTERRUPTIBLE = 1 << 11,
1753 /*
1754 * Always honor (trigger) NUMA hinting faults.
1755 *
1756 * FOLL_WRITE implicitly honors NUMA hinting faults because a
1757 * PROT_NONE-mapped page is not writable (exceptions with FOLL_FORCE
1758 * apply). get_user_pages_fast_only() always implicitly honors NUMA
1759 * hinting faults.
1760 */
1761 FOLL_HONOR_NUMA_FAULT = 1 << 12,
1762
1763 /* See also internal only FOLL flags in mm/internal.h */
1764 };
1765
1766 /* mm flags */
1767
1768 /*
1769 * The first two bits represent core dump modes for set-user-ID,
1770 * the modes are SUID_DUMP_* defined in linux/sched/coredump.h
1771 */
1772 #define MMF_DUMPABLE_BITS 2
1773 #define MMF_DUMPABLE_MASK (BIT(MMF_DUMPABLE_BITS) - 1)
1774 /* coredump filter bits */
1775 #define MMF_DUMP_ANON_PRIVATE 2
1776 #define MMF_DUMP_ANON_SHARED 3
1777 #define MMF_DUMP_MAPPED_PRIVATE 4
1778 #define MMF_DUMP_MAPPED_SHARED 5
1779 #define MMF_DUMP_ELF_HEADERS 6
1780 #define MMF_DUMP_HUGETLB_PRIVATE 7
1781 #define MMF_DUMP_HUGETLB_SHARED 8
1782 #define MMF_DUMP_DAX_PRIVATE 9
1783 #define MMF_DUMP_DAX_SHARED 10
1784
1785 #define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
1786 #define MMF_DUMP_FILTER_BITS 9
1787 #define MMF_DUMP_FILTER_MASK \
1788 ((BIT(MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
1789 #define MMF_DUMP_FILTER_DEFAULT \
1790 (BIT(MMF_DUMP_ANON_PRIVATE) | BIT(MMF_DUMP_ANON_SHARED) | \
1791 BIT(MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
1792
1793 #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
1794 # define MMF_DUMP_MASK_DEFAULT_ELF BIT(MMF_DUMP_ELF_HEADERS)
1795 #else
1796 # define MMF_DUMP_MASK_DEFAULT_ELF 0
1797 #endif
1798 /* leave room for more dump flags */
1799 #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
1800 #define MMF_VM_HUGEPAGE 17 /* set when mm is available for khugepaged */
1801
1802 #define MMF_HUGE_ZERO_FOLIO 18 /* mm has ever used the global huge zero folio */
1803
1804 #define MMF_HAS_UPROBES 19 /* has uprobes */
1805 #define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
1806 #define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */
1807 #define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
1808 #define MMF_DISABLE_THP_EXCEPT_ADVISED 23 /* no THP except when advised (e.g., VM_HUGEPAGE) */
1809 #define MMF_DISABLE_THP_COMPLETELY 24 /* no THP for all VMAs */
1810 #define MMF_DISABLE_THP_MASK (BIT(MMF_DISABLE_THP_COMPLETELY) | \
1811 BIT(MMF_DISABLE_THP_EXCEPT_ADVISED))
1812 #define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
1813 #define MMF_MULTIPROCESS 26 /* mm is shared between processes */
1814 /*
1815 * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
1816 * replaced in the future by mm.pinned_vm when it becomes stable, or grow into
1817 * a counter on its own. We're aggresive on this bit for now: even if the
1818 * pinned pages were unpinned later on, we'll still keep this bit set for the
1819 * lifecycle of this mm, just for simplicity.
1820 */
1821 #define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */
1822
1823 #define MMF_HAS_MDWE 28
1824 #define MMF_HAS_MDWE_MASK BIT(MMF_HAS_MDWE)
1825
1826 #define MMF_HAS_MDWE_NO_INHERIT 29
1827
1828 #define MMF_VM_MERGE_ANY 30
1829 #define MMF_VM_MERGE_ANY_MASK BIT(MMF_VM_MERGE_ANY)
1830
1831 #define MMF_TOPDOWN 31 /* mm searches top down by default */
1832 #define MMF_TOPDOWN_MASK BIT(MMF_TOPDOWN)
1833
1834 #define MMF_INIT_LEGACY_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
1835 MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\
1836 MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK)
1837
1838 /* Legacy flags must fit within 32 bits. */
1839 static_assert((u64)MMF_INIT_LEGACY_MASK <= (u64)UINT_MAX);
1840
1841 /*
1842 * Initialise legacy flags according to masks, propagating selected flags on
1843 * fork. Further flag manipulation can be performed by the caller.
1844 */
mmf_init_legacy_flags(unsigned long flags)1845 static inline unsigned long mmf_init_legacy_flags(unsigned long flags)
1846 {
1847 if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
1848 flags &= ~((1UL << MMF_HAS_MDWE) |
1849 (1UL << MMF_HAS_MDWE_NO_INHERIT));
1850 return flags & MMF_INIT_LEGACY_MASK;
1851 }
1852
1853 #endif /* _LINUX_MM_TYPES_H */
1854