/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/mm_types.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem-backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio, or any other
 * RAM- or swap-backed folio.
 */
static inline int folio_is_file_lru(const struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}
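
/*
 * Illustrative sketch (added commentary, not from the original header): the
 * return value of folio_is_file_lru() can be used directly as a "type"
 * index into per-type arrays, e.g.:
 *
 *	int type = folio_is_file_lru(folio);	(0 = anon, 1 = file)
 *	nr[type] += folio_nr_pages(folio);	(hypothetical accounting array)
 */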

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec->lru_lock);
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
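
/*
 * Usage note (added commentary): nr_pages is signed, so additions and
 * removals share one helper, as the lruvec_*_folio() wrappers below do:
 *
 *	update_lru_size(lruvec, lru, folio_zonenum(folio),
 *			folio_nr_pages(folio));		(adding a folio)
 *	update_lru_size(lruvec, lru, folio_zonenum(folio),
 *			-folio_nr_pages(folio));	(removing a folio)
 */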

/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(const struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}
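
/*
 * Added commentary: assuming the usual enum lru_list layout, the arithmetic
 * above decodes as:
 *
 *	anon, inactive	-> LRU_INACTIVE_ANON
 *	anon, active	-> LRU_INACTIVE_ANON + LRU_ACTIVE == LRU_ACTIVE_ANON
 *	file, inactive	-> LRU_INACTIVE_FILE
 *	file, active	-> LRU_INACTIVE_FILE + LRU_ACTIVE == LRU_ACTIVE_FILE
 */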

#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}
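
/*
 * Added commentary: generation numbers are sequence counters taken modulo
 * the ring size, so they wrap. E.g. with MAX_NR_GENS == 4, seq 0..5 maps
 * to gen 0, 1, 2, 3, 0, 1.
 */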

static inline int lru_tier_from_refs(int refs, bool workingset)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment on MAX_NR_TIERS */
	return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
}
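
/*
 * Added commentary: order_base_2() is ceil(log2()), so tiers grow
 * logarithmically with the access count: refs 0..1 -> tier 0, refs 2 ->
 * tier 1, refs 3..4 -> tier 2, and so on, with workingset folios pinned
 * to the top tier.
 */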

static inline int folio_lru_refs(const struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags.f);

	if (!(flags & BIT(PG_referenced)))
		return 0;
	/*
	 * Return the total number of accesses including PG_referenced. Also see
	 * the comment on LRU_REFS_FLAGS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
}

static inline int folio_lru_gen(const struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags.f);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

static inline bool lru_gen_is_active(const struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}
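
/*
 * Added commentary: the generation is stored in folio->flags offset by one,
 * so a zero field decodes to -1, meaning "not on a multi-gen LRU list".
 * The two youngest generations (max_seq and max_seq - 1) count as active.
 */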

static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
				       int old_gen, int new_gen)
{
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	int delta = folio_nr_pages(folio);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
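
/*
 * Added commentary: old_gen == -1 means the folio is being added and
 * new_gen == -1 means it is being deleted; any other combination is a move
 * between generations. The classic active/inactive counters are kept in
 * sync by treating the two youngest generations as "active".
 */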

static inline unsigned long lru_gen_folio_seq(const struct lruvec *lruvec,
					      const struct folio *folio,
					      bool reclaiming)
{
	int gen;
	int type = folio_is_file_lru(folio);
	const struct lru_gen_folio *lrugen = &lruvec->lrugen;

	/*
	 * +-----------------------------------+-----------------------------------+
	 * | Accessed through page tables and  | Accessed through file descriptors |
	 * | promoted by folio_update_gen()    | and protected by folio_inc_gen()  |
	 * +-----------------------------------+-----------------------------------+
	 * | PG_active (set while isolated)    |                                   |
	 * +-----------------+-----------------+-----------------+-----------------+
	 * |  PG_workingset  |  PG_referenced  |  PG_workingset  |  LRU_REFS_FLAGS |
	 * +-----------------------------------+-----------------------------------+
	 * |<---------- MIN_NR_GENS ---------->|                                   |
	 * |<---------------------------- MAX_NR_GENS ---------------------------->|
	 */
	if (folio_test_active(folio))
		gen = MIN_NR_GENS - folio_test_workingset(folio);
	else if (reclaiming)
		gen = MAX_NR_GENS;
	else if ((!folio_is_file_lru(folio) && !folio_test_swapcache(folio)) ||
		 (folio_test_reclaim(folio) &&
		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
		gen = MIN_NR_GENS;
	else
		gen = MAX_NR_GENS - folio_test_workingset(folio);

	return max(READ_ONCE(lrugen->max_seq) - gen + 1, READ_ONCE(lrugen->min_seq[type]));
}
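
/*
 * Added commentary: a larger "gen" offset here places the folio into an
 * older (colder) generation; the result is clamped to min_seq so a folio
 * is never filed under a generation that has already been evicted.
 */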

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

	if (folio_test_unevictable(folio) || !lrugen->enabled)
		return false;

	seq = lru_gen_folio_seq(lruvec, folio, reclaiming);
	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&folio->flags.f, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
	if (reclaiming)
		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
	else
		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

	return true;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long flags;
	int gen = folio_lru_gen(folio);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
	VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

	/* for folio_migrate_flags() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&folio->flags.f, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, folio, gen, -1);
	list_del(&folio->lru);

	return true;
}
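
/*
 * Added commentary: both helpers return false when multi-gen LRU does not
 * apply (disabled, unevictable folio, or folio not on a gen list), in which
 * case the lruvec_*_folio() wrappers below fall back to the classic
 * active/inactive lists.
 */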

static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{
	unsigned long refs = READ_ONCE(old->flags.f) & LRU_REFS_MASK;

	set_mask_bits(&new->flags.f, LRU_REFS_MASK, refs);
}
#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{
}
#endif /* CONFIG_LRU_GEN */

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, true))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_del_folio(lruvec, folio, false))
		return;

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}
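
/*
 * Illustrative sketch (added commentary, not from the original header):
 * moving a folio between LRU lists pairs the helpers above under
 * lruvec->lru_lock. A hypothetical caller might look like:
 *
 *	spin_lock_irq(&lruvec->lru_lock);
 *	lruvec_del_folio(lruvec, folio);
 *	folio_set_active(folio);
 *	lruvec_add_folio(lruvec, folio);
 *	spin_unlock_irq(&lruvec->lru_lock);
 */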

#ifdef CONFIG_ANON_VMA_NAME
/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}
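
/*
 * Added commentary: once a kref saturates at REFCOUNT_MAX it stays pinned
 * forever, so rather than sharing a nearly saturated name, a fresh copy is
 * allocated and its refcount starts over.
 */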

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
		!strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif  /* CONFIG_ANON_VMA_NAME */

void pfnmap_track_ctx_release(struct kref *ref);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(const struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(const struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
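
/*
 * Illustrative sketch (added commentary, not from the original header): a
 * typical consumer checks for a pending flush while holding the PTL before
 * trusting a clean or read-only PTE, along the lines of:
 *
 *	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (mm_tlb_flush_pending(mm))
 *		take_the_conservative_path();	(stale TLB entries possible)
 *	...
 *	pte_unmap_unlock(ptep, ptl);
 */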

#ifdef CONFIG_MMU
/*
 * Computes the pte marker to copy from the given source entry into dst_vma.
 * If no marker should be copied, returns 0.
 * The caller should insert a new pte created with make_pte_marker().
 */
static inline pte_marker copy_pte_marker(
		swp_entry_t entry, struct vm_area_struct *dst_vma)
{
	pte_marker srcm = pte_marker_get(entry);
	/* Always copy error entries. */
	pte_marker dstm = srcm & (PTE_MARKER_POISONED | PTE_MARKER_GUARD);

	/* Only copy PTE markers if UFFD register matches. */
	if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
		dstm |= PTE_MARKER_UFFD_WP;

	return dstm;
}
#endif
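
/*
 * Added commentary: on copy paths (e.g. fork), error markers (poison,
 * guard) always propagate to the destination, while uffd-wp markers only
 * propagate when the destination VMA is still registered for uffd-wp.
 */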

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte.  NOTE!  This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable.  Meanwhile
 * a none pte also means we are not demoting the pte, so a tlb flush is not
 * needed.  E.g., when the pte was cleared, the caller should have taken care
 * of the tlb flush.
 *
 * Must be called with the pgtable lock held so that no thread will see the
 * none pte, and if they see it, they'll fault and serialize at the pgtable
 * lock.
 *
 * Returns true if an uffd-wp pte was installed, false otherwise.
 */
static inline bool
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(ptep_get(pte)));

	/*
	 * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
	 * thing, because when zapping either it means it's dropping the
	 * page, or in TTU where the present pte will be quickly replaced
	 * with a swap pte.  There's no way of leaking the bit.
	 */
	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return false;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte.  Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte)) {
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
		return true;
	}
#endif
	return false;
}

static inline bool vma_has_recency(const struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
		return false;

	if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
		return false;

	return true;
}
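
/*
 * Added commentary: recency is denied for VMAs marked by MADV_SEQUENTIAL /
 * MADV_RANDOM and for files opened with FMODE_NOREUSE, so reclaim does not
 * treat accesses through such VMAs as a signal to keep the pages around.
 */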

#endif