/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
#include <linux/sched/coredump.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is a preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

static inline void smp_mb__after_mmgrab(void)
{
	smp_mb__after_atomic();
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The full memory barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

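/*
 * Usage sketch (hypothetical caller, not part of this header): mmgrab()
 * keeps the mm_struct itself alive, but not the address space; take a
 * temporary mm_users reference before touching the latter, as the
 * mmgrab() kerneldoc above requires.
 *
 *	struct mm_struct *mm = current->mm;
 *
 *	mmgrab(mm);
 *	...				(mm may outlive the owning task here)
 *	if (mmget_not_zero(mm)) {
 *		...			(address space is safe to use)
 *		mmput(mm);
 *	}
 *	mmdrop(mm);
 */
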
#ifdef CONFIG_PREEMPT_RT
/*
 * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
 * by far the least expensive way to do that.
 */
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

/*
 * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
 * kernels via RCU.
 */
static inline void mmdrop_sched(struct mm_struct *mm)
{
	/* Provides a full memory barrier. See mmdrop() */
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
	mmdrop(mm);
}
#endif

/* Helpers for lazy TLB mm refcounting */
static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
		mmgrab(mm);
}

static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) {
		mmdrop(mm);
	} else {
		/*
		 * mmdrop_lazy_tlb must provide a full memory barrier, see the
		 * membarrier comment in finish_task_switch() which relies on
		 * this.
		 */
		smp_mb();
	}
}

static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
		mmdrop_sched(mm);
	else
		smp_mb(); /* see mmdrop_lazy_tlb() above */
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH)
/*
 * Same as above, but performs the slow path from async context. Can be
 * called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

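/*
 * Usage sketch (hypothetical caller, not part of this header): accessing
 * another task's address space via get_task_mm()/mmput().
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...			(mm_users reference held here)
 *		mmput(mm);
 *	}
 */
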
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr, len, flags)	(TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif

extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  const struct rlimit *rlim_stack);

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		       unsigned long len, unsigned long pgoff,
		       unsigned long flags, vm_flags_t vm_flags);
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags, vm_flags_t);

unsigned long mm_get_unmapped_area(struct file *filp, unsigned long addr,
				   unsigned long len, unsigned long pgoff,
				   unsigned long flags);

unsigned long mm_get_unmapped_area_vmflags(struct file *filp,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags,
					   vm_flags_t vm_flags);

unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags, vm_flags_t vm_flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags, vm_flags_t vm_flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 const struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_PIN  implies !__GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so always make sure it takes precedence.
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}

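/*
 * Illustration (hypothetical, not part of this header): inside a
 * memalloc_nofs_save() scope, PF_MEMALLOC_NOFS is set on current->flags,
 * so current_gfp_context() strips __GFP_FS and a GFP_KERNEL request is
 * effectively treated as GFP_NOFS by the allocator:
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	gfp_t effective = current_gfp_context(GFP_KERNEL);
 *	...				(effective == GFP_KERNEL & ~__GFP_FS)
 *	memalloc_nofs_restore(nofs_flags);
 */
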
#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/*
 * Any memory-allocation retry loop should use memalloc_retry_wait(), and
 * pass the flags for the most constrained allocation attempt that might
 * have failed. This provides useful documentation of where loops are, and
 * a central place to fine-tune the waiting as the MM implementation
 * changes.
 */
static inline void memalloc_retry_wait(gfp_t gfp_flags)
{
	/*
	 * We use io_schedule_timeout() because waiting for memory typically
	 * includes waiting for dirty pages to be written out, which
	 * requires IO.
	 */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	gfp_flags = current_gfp_context(gfp_flags);
	if (gfpflags_allow_blocking(gfp_flags) &&
	    !(gfp_flags & __GFP_NORETRY))
		/* Probably waited already, no need for much more */
		io_schedule_timeout(1);
	else
		/* Probably didn't wait, and has now released a lock,
		 * so now is a good time to wait
		 */
		io_schedule_timeout(HZ/50);
}

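/*
 * Usage sketch (hypothetical caller, not part of this header): a typical
 * allocation retry loop built around memalloc_retry_wait().
 *
 *	struct page *page;
 *
 *	do {
 *		page = alloc_page(GFP_NOFS);
 *		if (page)
 *			break;
 *		memalloc_retry_wait(GFP_NOFS);
 *	} while (1);
 */
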
/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in
 * functions that might allocate, but often don't. Compiles to nothing
 * without CONFIG_LOCKDEP. Includes a conditional might_sleep() if
 * @gfp_mask allows blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	if (current->flags & PF_MEMALLOC)
		return;

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}

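/*
 * Usage sketch (hypothetical helper, not part of this header): annotating
 * a function that allocates only on its slow path, so lockdep exercises
 * the reclaim dependency on every call. struct foo, its cached_buf member
 * and FOO_BUF_SIZE are made-up names.
 *
 *	static void *get_buf(struct foo *f, gfp_t gfp)
 *	{
 *		might_alloc(gfp);
 *		if (f->cached_buf)
 *			return f->cached_buf;
 *		return kmalloc(FOO_BUF_SIZE, gfp);
 *	}
 */
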
/**
 * memalloc_flags_save - Add a PF_* flag to current->flags, save old value
 *
 * This allows PF_* flags to be conveniently added, irrespective of the
 * current value, and then the old value restored with
 * memalloc_flags_restore().
 */
static inline unsigned memalloc_flags_save(unsigned flags)
{
	unsigned oldflags = ~current->flags & flags;
	current->flags |= flags;
	return oldflags;
}

static inline void memalloc_flags_restore(unsigned flags)
{
	current->flags &= ~flags;
}

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore() to end the scope with flags
 * returned by this function.
 *
 * Context: This function is safe to be used from any context.
 * Return: The saved flags to be passed to memalloc_noio_restore.
 */
static inline unsigned int memalloc_noio_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC_NOIO);
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by the memalloc_noio_save()
 * function. Always make sure that the given @flags is the return value
 * from the pairing memalloc_noio_save() call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}

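/*
 * Usage sketch (hypothetical caller, not part of this header): bracketing
 * an IO-sensitive section so that nested GFP_KERNEL allocations behave as
 * GFP_NOIO.
 *
 *	unsigned int noio_flags;
 *
 *	noio_flags = memalloc_noio_save();
 *	buf = kmalloc(size, GFP_KERNEL);
 *	memalloc_noio_restore(noio_flags);
 */
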
/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore() to end the scope with flags
 * returned by this function.
 *
 * Context: This function is safe to be used from any context.
 * Return: The saved flags to be passed to memalloc_nofs_restore.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC_NOFS);
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by the memalloc_nofs_save()
 * function. Always make sure that the given @flags is the return value
 * from the pairing memalloc_nofs_save() call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}

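/*
 * Usage sketch (hypothetical filesystem path, not part of this header):
 * enter the scope before taking locks that reclaim may also need, so any
 * nested allocation cannot recurse back into the filesystem.
 *
 *	unsigned int nofs_flags;
 *
 *	nofs_flags = memalloc_nofs_save();
 *	...				(hold fs locks / open a transaction)
 *	page = alloc_page(GFP_KERNEL);
 *	...
 *	memalloc_nofs_restore(nofs_flags);
 */
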
/**
 * memalloc_noreclaim_save - Marks implicit __GFP_MEMALLOC scope.
 *
 * This function marks the beginning of the __GFP_MEMALLOC allocation scope.
 * All further allocations will implicitly add the __GFP_MEMALLOC flag, which
 * prevents entering reclaim and allows access to all memory reserves. This
 * should only be used when the caller guarantees the allocation will allow
 * more memory to be freed very shortly, i.e. it needs to allocate some
 * memory in the process of freeing memory, and cannot reclaim due to
 * potential recursion.
 *
 * Users of this scope have to be extremely careful to not deplete the
 * reserves completely and implement a throttling mechanism which controls
 * the consumption of the reserve based on the amount of freed memory. Usage
 * of a pre-allocated pool (e.g. mempool) should always be considered before
 * using this scope.
 *
 * Individual allocations under the scope can opt out using __GFP_NOMEMALLOC.
 *
 * Context: This function should not be used in an interrupt context, as
 *          interrupt context does not give PF_MEMALLOC access to reserves.
 *          See __gfp_pfmemalloc_flags().
 * Return: The saved flags to be passed to memalloc_noreclaim_restore.
 */
static inline unsigned int memalloc_noreclaim_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC);
}

/**
 * memalloc_noreclaim_restore - Ends the implicit __GFP_MEMALLOC scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit __GFP_MEMALLOC scope started by the
 * memalloc_noreclaim_save() function. Always make sure that the given
 * @flags is the return value from the pairing memalloc_noreclaim_save()
 * call.
 */
static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}

/**
 * memalloc_pin_save - Marks implicit ~__GFP_MOVABLE scope.
 *
 * This function marks the beginning of the ~__GFP_MOVABLE allocation scope.
 * All further allocations will implicitly remove the __GFP_MOVABLE flag,
 * which will constrain the allocations to zones that allow long term
 * pinning, i.e. not ZONE_MOVABLE zones.
 *
 * Return: The saved flags to be passed to memalloc_pin_restore.
 */
static inline unsigned int memalloc_pin_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC_PIN);
}

/**
 * memalloc_pin_restore - Ends the implicit ~__GFP_MOVABLE scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit ~__GFP_MOVABLE scope started by the memalloc_pin_save()
 * function. Always make sure that the given @flags is the return value from
 * the pairing memalloc_pin_save() call.
 */
static inline void memalloc_pin_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All
 * the __GFP_ACCOUNT allocations until the end of the scope will be charged
 * to the given memcg.
 *
 * Please make sure that the caller has a reference to the passed memcg
 * structure, so its lifetime is guaranteed to exceed the scope between the
 * two set_active_memcg() calls.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (!in_task()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif

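/*
 * Usage sketch (hypothetical caller, not part of this header): charging a
 * __GFP_ACCOUNT allocation to a remote memcg; the previous active memcg is
 * restored afterwards so that nesting keeps working.
 *
 *	struct mem_cgroup *old;
 *
 *	old = set_active_memcg(memcg);
 *	obj = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
 *	set_active_memcg(old);
 */
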
#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	/*
	 * The atomic_read() below prevents CSE. The following should
	 * help the compiler generate more efficient code on architectures
	 * where sync_core_before_usermode() is a no-op.
	 */
	if (!IS_ENABLED(CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE))
		return;
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */