xref: /linux/mm/mlock.c (revision 1c0cec64a7cc545eb49f374a43e9f7190a14defa)
// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/secretmem.h>

#include "internal.h"

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);
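
/*
 * Illustrative userspace view of the policy above: mlock() is permitted
 * when RLIMIT_MEMLOCK is non-zero or when the caller has CAP_IPC_LOCK.
 * A minimal, untested sketch that inspects the rlimit half of that test
 * (the capability half is only checked in the kernel):
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit rlim;
 *
 *		if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0)
 *			printf("memlock limit: %llu bytes\n",
 *			       (unsigned long long)rlim.rlim_cur);
 *		return 0;
 *	}
 */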

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_lock for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */
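
/*
 * A simplified sketch (not the actual mm/rmap.c code) of the protocol the
 * last paragraph describes: before lazily mlocking a page found through
 * rmap, take the mmap_lock for read so VM_LOCKED cannot change underneath
 * us, then re-check the flag:
 *
 *	if (mmap_read_trylock(vma->vm_mm)) {
 *		if (vma->vm_flags & VM_LOCKED)
 *			mlock_vma_page(page);
 *		mmap_read_unlock(vma->vm_mm);
 *	}
 */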

/*
 *  LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	int nr_pages;

	if (!TestClearPageMlocked(page))
		return;

	nr_pages = thp_nr_pages(page);
	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
	/*
	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
	 * in __pagevec_lru_add_fn().
	 *
	 * See __pagevec_lru_add_fn for more explanation.
	 */
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race; the page has already moved to the
		 * evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	/* Serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	if (!TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);

		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked. This is a wrapper for page_mlock()
 * and putback_lru_page() with munlock accounting.
 */
static void __munlock_isolated_page(struct page *page)
{
	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		page_mlock(page);

	/* Did page_mlock() succeed or punt? */
	if (!PageMlocked(page))
		count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));

	putback_lru_page(page);
}

/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
	int nr_pages = thp_nr_pages(page);

	if (PageUnevictable(page))
		__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	else
		__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
}

/**
 * munlock_vma_page - munlock a vma page
 * @page: page to be unlocked, either a normal page or THP page head
 *
 * returns the size of the page as a page mask (0 for normal page,
 *         HPAGE_PMD_NR - 1 for THP head page)
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * page_mlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
unsigned int munlock_vma_page(struct page *page)
{
	int nr_pages;

	/* For page_mlock() and to serialize with page migration */
	BUG_ON(!PageLocked(page));
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (!TestClearPageMlocked(page)) {
		/* Potentially a PTE-mapped THP: do not skip the remaining PTEs */
		return 0;
	}

	nr_pages = thp_nr_pages(page);
	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);

	if (!isolate_lru_page(page))
		__munlock_isolated_page(page);
	else
		__munlock_isolation_failed(page);

	return nr_pages - 1;
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
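
/*
 * This gives the userspace-visible errors POSIX requires: an unmapped or
 * inaccessible range (-EFAULT from get_user_pages()) surfaces as ENOMEM,
 * while a GUP allocation failure (-ENOMEM) becomes EAGAIN.  An
 * illustrative, untested userspace probe of the first case (the address
 * is hypothetical and assumed to be unmapped):
 *
 *	#include <sys/mman.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		if (mlock((void *)0x10000000UL, 4096) == -1)
 *			printf("mlock: errno %d (ENOMEM expected)\n", errno);
 *		return 0;
 *	}
 */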

/*
 * Prepare page for fast batched LRU putback via __putback_lru_fast()
 *
 * The fast path is available only for evictable pages with single mapping.
 * Then we can bypass the per-cpu pvec and get better performance.
 * When mapcount > 1 we need page_mlock(), which can fail.
 * When !page_evictable(), we need the full redo logic of putback_lru_page()
 * to avoid leaving an evictable page on the unevictable list.
 *
 * On success, @page is added to @pvec, @pgrescued is incremented if the
 * page was previously unevictable, and @page is unlocked.
 */
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
		int *pgrescued)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}

/*
 * Putback multiple evictable pages to the LRU
 *
 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
 * the pages might have meanwhile become unevictable but that is OK.
 */
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
	/*
	 * __pagevec_lru_add() calls release_pages(), so we don't call
	 * put_page() explicitly.
	 */
	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}

/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split to two main phases. First phase clears the Mlocked flag
 * and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that the pagevec may be modified during the process.
 */
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked = -nr;
	struct pagevec pvec_putback;
	struct lruvec *lruvec = NULL;
	int pgrescued = 0;

	pagevec_init(&pvec_putback);

	/* Phase 1: page isolation */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			/*
			 * We already have pin from follow_page_mask()
			 * so we can spare the get_page() here.
			 */
			if (TestClearPageLRU(page)) {
				lruvec = relock_page_lruvec_irq(page, lruvec);
				del_page_from_lru_list(page, lruvec);
				continue;
			} else
				__munlock_isolation_failed(page);
		} else {
			delta_munlocked++;
		}

		/*
		 * We won't be munlocking this page in the next phase
		 * but we still need to release the follow_page_mask()
		 * pin. We cannot do it under lru_lock however. If it's
		 * the last pin, __page_cache_release() would deadlock.
		 */
		pagevec_add(&pvec_putback, pvec->pages[i]);
		pvec->pages[i] = NULL;
	}
	if (lruvec) {
		__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
		unlock_page_lruvec_irq(lruvec);
	} else if (delta_munlocked) {
		mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	}

	/* Now we can release pins of pages that we are not munlocking */
	pagevec_release(&pvec_putback);

	/* Phase 2: page munlock */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				/*
				 * Slow path. We don't want to lose the last
				 * pin before unlock_page()
				 */
				get_page(page); /* for putback_lru_page() */
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page); /* from follow_page_mask() */
			}
		}
	}

	/*
	 * Phase 3: page putback for pages that qualified for the fast path
	 * This will also call put_page() to return pin from follow_page_mask()
	 */
	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}

/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the pte's are present and vm_normal_page() succeeds. These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned. This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
			struct vm_area_struct *vma, struct zone *zone,
			unsigned long start, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * Initialize pte walk starting at the already pinned page where we
	 * are sure that there is a pte, as it was pinned under the same
	 * mmap_lock write op.
	 */
	pte = get_locked_pte(vma->vm_mm, start, &ptl);
	/* Make sure we do not cross the page table boundary */
	end = pgd_addr_end(start, end);
	end = p4d_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	/* The page next to the pinned page is the first we will try to get */
	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;
		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);
		/*
		 * Break if the page could not be obtained or the page's
		 * node+zone does not match.
		 */
		if (!page || page_zone(page) != zone)
			break;

		/*
		 * Do not use pagevec for PTE-mapped THP,
		 * munlock_vma_pages_range() will handle them.
		 */
		if (PageTransCompound(page))
			break;

		get_page(page);
		/*
		 * Increase the address that will be returned *before* the
		 * eventual break due to pvec becoming full by adding the page
		 */
		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 *  For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In the unmap path, pages might be scanned by reclaim
 * and re-mlocked by page_mlock()/try_to_unmap() before we unmap and
 * free them, which would result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

	while (start < end) {
		struct page *page;
		unsigned int page_mask = 0;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;

		pagevec_init(&pvec);
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);

		if (page && !IS_ERR(page)) {
			if (PageTransTail(page)) {
				VM_BUG_ON_PAGE(PageMlocked(page), page);
				put_page(page); /* follow_page_mask() */
			} else if (PageTransHuge(page)) {
				lock_page(page);
				/*
				 * Any THP page found by follow_page_mask() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to compute
				 * the page_mask here instead.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page); /* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec. The pin from follow_page_mask()
				 * prevents them from being collapsed into
				 * a THP.
				 */
				pagevec_add(&pvec, page);
				zone = page_zone(page);

				/*
				 * Try to fill the rest of pagevec using fast
				 * pte walk. This will also update start to
				 * the next page to process. Then munlock the
				 * pagevec.
				 */
				start = __munlock_pagevec_fill(&pvec, vma,
						zone, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}
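
/*
 * Userspace consequence of the above (illustrative): because VM_LOCKED is
 * cleared here on the unmap path, an munmap() of a locked region
 * implicitly unlocks it, so a munlock() before munmap() is not required:
 *
 *	mlock(addr, len);
 *	...
 *	munmap(addr, len);	// runs munlock_vma_pages_range() internally
 */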

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);
	vm_flags_t old_flags = vma->vm_flags;

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma) || vma_is_secretmem(vma))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	else if (old_flags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}
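
/*
 * Worked example of the merge/split logic above (addresses illustrative,
 * 4K pages): mlock()ing [0x3000, 0x6000) inside a vma spanning
 * [0x1000, 0x8000) splits at 0x3000 and again at 0x6000, leaving:
 *
 *	[0x1000, 0x3000)  old flags
 *	[0x3000, 0x6000)  old flags | VM_LOCKED
 *	[0x6000, 0x8000)  old flags
 *
 * and charges (0x6000 - 0x3000) >> PAGE_SHIFT = 3 pages to mm->locked_vm.
 */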

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * Go through the vma areas and sum the size of the mlocked vma pages.
 * Note that the deferred memory-locking case (mlock2() with MLOCK_ONFAULT)
 * is also counted.
 * Return value: the number of previously mlocked pages.
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;

	if (mm == NULL)
		mm = current->mm;

	vma = find_vma(mm, start);
	if (vma == NULL)
		return 0;

	for (; vma ; vma = vma->vm_next) {
		if (start >= vma->vm_end)
			continue;
		if (start + len <= vma->vm_start)
			break;
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (start + len < vma->vm_end) {
				count += start + len - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}
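
/*
 * Worked example of the accounting above (illustrative addresses, 4K
 * pages): with a VM_LOCKED vma spanning [0x10000, 0x20000) and a request
 * of start = 0x14000, len = 0x8000, the loop computes
 *
 *	count -= 0x14000 - 0x10000;		(head trim: 0x4000)
 *	count += 0x14000 + 0x8000 - 0x10000;	(0xc000, then break)
 *
 * giving 0x8000 >> PAGE_SHIFT = 8 pages, exactly the overlap between the
 * request and the locked vma.
 */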

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * It is possible that the requested region intersects with
		 * previously mlocked areas; that part is already accounted
		 * in mm->locked_vm and should not be counted again toward
		 * the new mlock request, so check and adjust the locked
		 * count if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}
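
/*
 * Illustrative, untested userspace use of the path above, distinguishing
 * the errors do_mlock() can produce: EPERM from can_do_mlock(), ENOMEM
 * when over RLIMIT_MEMLOCK or for an invalid range, EAGAIN when
 * population fails:
 *
 *	#include <sys/mman.h>
 *	#include <errno.h>
 *
 *	int lock_buffer(void *buf, size_t len)
 *	{
 *		if (mlock(buf, len) == 0)
 *			return 0;
 *		if (errno == EAGAIN)
 *			return 1;	// transient: retrying may help
 *		return -1;		// EPERM/ENOMEM: give up
 *	}
 */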

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
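
/*
 * Illustrative use of the MLOCK_ONFAULT path above (the mlock2() wrapper
 * is in glibc >= 2.27; older systems need syscall(__NR_mlock2, ...)):
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	// Lock pages only as they are faulted in, so a large, sparsely
 *	// touched mapping need not be populated up front.
 *	if (mlock2(addr, len, MLOCK_ONFAULT) == -1)
 *		perror("mlock2");
 */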

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall()
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched();
	}
out:
	return 0;
}
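
/*
 * The non-stacking behaviour described above, seen from userspace
 * (illustrative):
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);	// lock now and in future
 *	...
 *	mlockall(MCL_CURRENT);			// replaces, not adds to, the
 *						// first call: MCL_FUTURE is
 *						// dropped from mm->def_flags
 */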

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}

/*
 * Objects with a lifetime different from that of processes (SHM_LOCK and
 * SHM_HUGETLB shm segments) get accounted against the user's ucounts
 * instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct ucounts *ucounts)
{
	unsigned long lock_limit, locked;
	long memlock;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);

	if (!allowed && (memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		goto out;
	}
	if (!get_ucounts(ucounts)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		goto out;
	}
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
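
/*
 * The accounting above is driven from the SysV shm code; an illustrative
 * userspace sequence that exercises it (error handling omitted):
 *
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	// charges ucounts via user_shm_lock()
 *	shmctl(id, SHM_UNLOCK, NULL);	// undone via user_shm_unlock()
 */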

void user_shm_unlock(size_t size, struct ucounts *ucounts)
{
	spin_lock(&shmlock_user_lock);
	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	spin_unlock(&shmlock_user_lock);
	put_ucounts(ucounts);
}
856