// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/pagewalk.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/secretmem.h>

#include "internal.h"

struct mlock_fbatch {
	local_lock_t lock;
	struct folio_batch fbatch;
};

static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);
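
/*
 * Illustrative sketch (not part of this file): code that is about to pin or
 * lock user memory typically gates on can_do_mlock() first and only then
 * checks the actual RLIMIT_MEMLOCK numbers, roughly:
 *
 *	if (!can_do_mlock())
 *		return -EPERM;
 *	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 *	if (npages > lock_limit && !capable(CAP_IPC_LOCK))
 *		return -ENOMEM;
 *
 * (npages is a placeholder for the caller's own page count; the precise
 * accounting differs per caller - see do_mlock() below for the mlock()
 * syscall's version.)
 */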

/*
 * Mlocked folios are marked with the PG_mlocked flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked folio [folio_test_mlocked(folio)] is unevictable.  As such, it
 * will be ostensibly placed on the LRU "unevictable" list (actually no such
 * list exists), rather than the [in]active lists. PG_unevictable is set to
 * indicate the unevictable state.
 */

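/*
 * __mlock_folio() is called from mlock_folio_batch() below.  The folio is
 * temporarily taken off the LRU (folio_test_clear_lru) while PG_unevictable
 * and mlock_count are updated, and put back before returning.  @lruvec is
 * whatever lruvec the previous batch entry left locked (or NULL); the
 * folio_lruvec_relock_irq() call switches locks only if this folio belongs
 * to a different lruvec, and the still-locked lruvec is handed back to the
 * caller for the next entry.
 */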
static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
{
	/* There is nothing more we can do while it's off LRU */
	if (!folio_test_clear_lru(folio))
		return lruvec;

	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	if (unlikely(folio_evictable(folio))) {
		/*
		 * This is a little surprising, but quite possible: PG_mlocked
		 * must have got cleared already by another CPU.  Could this
		 * folio be unevictable?  I'm not sure, but move it now if so.
		 */
		if (folio_test_unevictable(folio)) {
			lruvec_del_folio(lruvec, folio);
			folio_clear_unevictable(folio);
			lruvec_add_folio(lruvec, folio);

			__count_vm_events(UNEVICTABLE_PGRESCUED,
					  folio_nr_pages(folio));
		}
		goto out;
	}

	if (folio_test_unevictable(folio)) {
		if (folio_test_mlocked(folio))
			folio->mlock_count++;
		goto out;
	}

	lruvec_del_folio(lruvec, folio);
	folio_clear_active(folio);
	folio_set_unevictable(folio);
	folio->mlock_count = !!folio_test_mlocked(folio);
	lruvec_add_folio(lruvec, folio);
	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
out:
	folio_set_lru(folio);
	return lruvec;
}

static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)
{
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	/* As above, this is a little surprising, but possible */
	if (unlikely(folio_evictable(folio)))
		goto out;

	folio_set_unevictable(folio);
	folio->mlock_count = !!folio_test_mlocked(folio);
	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
out:
	lruvec_add_folio(lruvec, folio);
	folio_set_lru(folio);
	return lruvec;
}

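/*
 * __munlock_folio() undoes one mlock.  While the folio is unevictable and
 * could be isolated from the LRU, mlock_count is maintained and only the
 * last munlock clears PG_mlocked and lets the folio become evictable again;
 * if the folio could not be isolated, PG_mlocked is cleared anyway and
 * reclaim is left to sort out any stranded unevictable folio (counted as
 * UNEVICTABLE_PGSTRANDED).
 */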
static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec)
{
	int nr_pages = folio_nr_pages(folio);
	bool isolated = false;

	if (!folio_test_clear_lru(folio))
		goto munlock;

	isolated = true;
	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	if (folio_test_unevictable(folio)) {
		/* Then mlock_count is maintained, but might undercount */
		if (folio->mlock_count)
			folio->mlock_count--;
		if (folio->mlock_count)
			goto out;
	}
	/* else assume that was the last mlock: reclaim will fix it if not */

munlock:
	if (folio_test_clear_mlocked(folio)) {
		__zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
		if (isolated || !folio_test_unevictable(folio))
			__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
		else
			__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}

	/* folio_evictable() has to be checked *after* clearing Mlocked */
	if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) {
		lruvec_del_folio(lruvec, folio);
		folio_clear_unevictable(folio);
		lruvec_add_folio(lruvec, folio);
		__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	}
out:
	if (isolated)
		folio_set_lru(folio);
	return lruvec;
}

/*
 * Flags held in the low bits of a struct folio pointer on the mlock_fbatch.
 */
#define LRU_FOLIO 0x1
#define NEW_FOLIO 0x2
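/*
 * struct folio pointers are at least word-aligned, so the two low bits of a
 * folio pointer on the batch are always zero and can carry the flags above;
 * mlock_folio_batch() masks them off again before touching the folio.
 */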
static inline struct folio *mlock_lru(struct folio *folio)
{
	return (struct folio *)((unsigned long)folio + LRU_FOLIO);
}

static inline struct folio *mlock_new(struct folio *folio)
{
	return (struct folio *)((unsigned long)folio + NEW_FOLIO);
}

/*
 * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can
 * make use of such folio pointer flags in future, but for now just keep it for
 * mlock.  We could use three separate folio batches instead, but one feels
 * better (munlocking a full folio batch does not need to drain mlocking folio
 * batches first).
 */
static void mlock_folio_batch(struct folio_batch *fbatch)
{
	struct lruvec *lruvec = NULL;
	unsigned long mlock;
	struct folio *folio;
	int i;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		folio = fbatch->folios[i];
		mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
		folio = (struct folio *)((unsigned long)folio - mlock);
		fbatch->folios[i] = folio;

		if (mlock & LRU_FOLIO)
			lruvec = __mlock_folio(folio, lruvec);
		else if (mlock & NEW_FOLIO)
			lruvec = __mlock_new_folio(folio, lruvec);
		else
			lruvec = __munlock_folio(folio, lruvec);
	}

	if (lruvec)
		unlock_page_lruvec_irq(lruvec);
	folios_put(fbatch->folios, folio_batch_count(fbatch));
	folio_batch_reinit(fbatch);
}

void mlock_drain_local(void)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	if (folio_batch_count(fbatch))
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

void mlock_drain_remote(int cpu)
{
	struct folio_batch *fbatch;

	WARN_ON_ONCE(cpu_online(cpu));
	fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
	if (folio_batch_count(fbatch))
		mlock_folio_batch(fbatch);
}

bool need_mlock_drain(int cpu)
{
	return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
}
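
/*
 * Note on the three helpers above: mlock_drain_local() drains the current
 * CPU's batch (it is typically reached via the lru_add_drain() paths), while
 * mlock_drain_remote() may only be used on a CPU that has already gone
 * offline - hence the WARN_ON_ONCE(cpu_online(cpu)) - since only then can
 * another CPU touch the batch without the local lock.  need_mlock_drain()
 * lets callers skip scheduling drain work for CPUs whose batch is empty.
 */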

/**
 * mlock_folio - mlock a folio already on (or temporarily off) LRU
 * @folio: folio to be mlocked.
 */
void mlock_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);

	if (!folio_test_set_mlocked(folio)) {
		int nr_pages = folio_nr_pages(folio);

		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}

	folio_get(folio);
	if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

/**
 * mlock_new_folio - mlock a newly allocated folio not yet on LRU
 * @folio: folio to be mlocked, either normal or a THP head.
 */
void mlock_new_folio(struct folio *folio)
{
	struct folio_batch *fbatch;
	int nr_pages = folio_nr_pages(folio);

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	folio_set_mlocked(folio);

	zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);

	folio_get(folio);
	if (!folio_batch_add(fbatch, mlock_new(folio)) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

/**
 * munlock_folio - munlock a folio
 * @folio: folio to be munlocked, either normal or a THP head.
 */
void munlock_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	/*
	 * folio_test_clear_mlocked(folio) must be left to __munlock_folio(),
	 * which will check whether the folio is multiply mlocked.
	 */
	folio_get(folio);
	if (!folio_batch_add(fbatch, folio) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}
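
/*
 * mlock_folio(), mlock_new_folio() and munlock_folio() above are normally
 * reached through the mlock_vma_folio()/munlock_vma_folio() wrappers in
 * mm/internal.h, which look at the vma's VM_LOCKED (and VM_SPECIAL) bits
 * before deciding whether the folio needs to be batched here at all.
 */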

static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)

{
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *start_pte, *pte;
	pte_t ptent;
	struct folio *folio;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (!pmd_present(*pmd))
			goto out;
		if (is_huge_zero_pmd(*pmd))
			goto out;
		folio = page_folio(pmd_page(*pmd));
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(folio);
		else
			munlock_folio(folio);
		goto out;
	}

	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!start_pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);
		if (!pte_present(ptent))
			continue;
		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;
		if (folio_test_large(folio))
			continue;
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(folio);
		else
			munlock_folio(folio);
	}
	pte_unmap(start_pte);
out:
	spin_unlock(ptl);
	cond_resched();
	return 0;
}
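
/*
 * Note for mlock_pte_range() above: a PMD-mapped THP is handled as a single
 * folio under pmd_trans_huge_lock(), but large folios mapped only by PTEs
 * are deliberately skipped in the PTE loop - a meaningful mlock_count
 * cannot be kept per PTE mapping, so such folios are left for reclaim to
 * evaluate instead.
 */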

/*
 * mlock_vma_pages_range() - mlock any pages already in the range,
 *                           or munlock all pages in the range.
 * @vma - vma containing range to be mlock()ed or munlock()ed
 * @start - start address in @vma of the range
 * @end - end of range in @vma
 * @newflags - the new set of flags for @vma.
 *
 * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
 * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
 */
static void mlock_vma_pages_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	static const struct mm_walk_ops mlock_walk_ops = {
		.pmd_entry = mlock_pte_range,
	};

	/*
	 * There is a slight chance that concurrent page migration,
	 * or page reclaim finding a page of this now-VM_LOCKED vma,
	 * will call mlock_vma_folio() and raise page's mlock_count:
	 * double counting, leaving the page unevictable indefinitely.
	 * Communicate this danger to mlock_vma_folio() with VM_IO,
	 * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
	 * mmap_lock is held in write mode here, so this weird
	 * combination should not be visible to other mmap_lock users;
	 * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
	 */
	if (newflags & VM_LOCKED)
		newflags |= VM_IO;
	vm_flags_reset_once(vma, newflags);

	lru_add_drain();
	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
	lru_add_drain();

	if (newflags & VM_IO) {
		newflags &= ~VM_IO;
		vm_flags_reset_once(vma, newflags);
	}
}
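
/*
 * Reader-side sketch of the VM_IO trick above (assuming the mlock_vma_folio()
 * wrapper in mm/internal.h): because VM_IO is one of the VM_SPECIAL bits, a
 * single masked test covers both "not VM_LOCKED" and "mlock/munlock currently
 * rewriting this range", roughly:
 *
 *	if ((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED)
 *		mlock_folio(folio);
 *
 * so rmap walkers and faults cannot raise mlock_count while the walk above
 * owns the range.
 */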

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       struct vm_area_struct **prev, unsigned long start,
	       unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	vm_flags_t oldflags = vma->vm_flags;

	if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma) || vma_is_secretmem(vma))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(vmi, mm, *prev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(vmi, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(vmi, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!(newflags & VM_LOCKED))
		nr_pages = -nr_pages;
	else if (oldflags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */

	if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
		/* No work to do, and mlocking twice would be wrong */
		vm_flags_reset(vma, newflags);
	} else {
		mlock_vma_pages_range(vma, start, end, newflags);
	}
out:
	*prev = vma;
	return ret;
}
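
/*
 * Worked example of the locked_vm arithmetic in mlock_fixup(): with 4KiB
 * pages, locking a previously unlocked 16KiB range adds 4 pages to
 * mm->locked_vm; locking a range that is already VM_LOCKED (e.g. mlock()
 * after mlockall()) adds 0; munlocking a locked range subtracts the full 4
 * pages, since VM_LOCKED itself does not nest at the vma level.
 */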

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	VMA_ITERATOR(vmi, current->mm, start);

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = vma_iter_load(&vmi);
	if (!vma)
		return -ENOMEM;

	prev = vma_prev(&vmi);
	if (start > vma->vm_start)
		prev = vma;

	nstart = start;
	tmp = vma->vm_start;
	for_each_vma_range(vmi, vma, end) {
		int error;
		vm_flags_t newflags;

		if (vma->vm_start != tmp)
			return -ENOMEM;

		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
		newflags |= flags;
		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);
		if (error)
			return error;
		tmp = vma_iter_end(&vmi);
		nstart = tmp;
	}

	if (tmp < end)
		return -ENOMEM;

	return 0;
}

/*
 * Go through the vma areas and sum the size of the mlocked vma pages.
 * Note that the deferred memory locking case (mlock2() with MLOCK_ONFAULT)
 * is also counted.
 * Return value: count of previously mlocked pages.
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;
	unsigned long end;
	VMA_ITERATOR(vmi, mm, start);

	/* Don't overflow past ULONG_MAX */
	if (unlikely(ULONG_MAX - len < start))
		end = ULONG_MAX;
	else
		end = start + len;

	for_each_vma_range(vmi, vma, end) {
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (end < vma->vm_end) {
				count += end - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
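
/*
 * The mapping above follows POSIX: mlock() must report ENOMEM when part of
 * the range is not mapped (which get_user_pages() reports as EFAULT), and
 * EAGAIN when some or all of the memory could not be locked (which the
 * populate path reports as ENOMEM).
 */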

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * The requested region may intersect with previously mlocked
		 * areas; that part is already accounted for in "mm->locked_vm"
		 * and must not be counted again towards the new mlock
		 * increment, so check and adjust the locked count if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
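
/*
 * Userspace view (illustrative, not part of this file): mlock2() with
 * MLOCK_ONFAULT sets VM_LOCKONFAULT, so pages are locked as they are
 * faulted in rather than populated up front as plain mlock() does:
 *
 *	#include <sys/mman.h>
 *
 *	if (mlock2(buf, len, MLOCK_ONFAULT))
 *		perror("mlock2");
 *
 * buf/len stand in for the caller's own region; glibc >= 2.27 provides the
 * mlock2() wrapper, older systems need syscall(__NR_mlock2, ...).
 */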

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	VMA_ITERATOR(vmi, current->mm, 0);
	struct vm_area_struct *vma, *prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= ~VM_LOCKED_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for_each_vma(vmi, vma) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
			    newflags);
		cond_resched();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}
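
/*
 * Userspace view (illustrative): latency-sensitive programs commonly call
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);
 *
 * early on, locking existing and future mappings.  Adding MCL_ONFAULT locks
 * pages only as they are faulted in instead of populating everything
 * immediately; MCL_ONFAULT on its own is rejected above with -EINVAL.
 */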

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct ucounts *ucounts)
{
	unsigned long lock_limit, locked;
	long memlock;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit != RLIM_INFINITY)
		lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);

	if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		goto out;
	}
	if (!get_ucounts(ucounts)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		allowed = 0;
		goto out;
	}
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct ucounts *ucounts)
{
	spin_lock(&shmlock_user_lock);
	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	spin_unlock(&shmlock_user_lock);
	put_ucounts(ucounts);
}