xref: /linux/mm/madvise.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;
out:
	return error;
}
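
/*
 * Illustrative userspace sketch (an assumption for exposition, compiled
 * out -- not part of this kernel file): advising only the middle page of
 * a three-page anonymous mapping forces madvise_behavior() above to
 * split the vma, which shows up as extra lines in /proc/self/maps.
 */
#if 0
#define _DEFAULT_SOURCE
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return EXIT_FAILURE;
	/* Only the middle page gets VM_DONTCOPY, so the vma splits in three. */
	if (madvise(buf + page, page, MADV_DONTFORK))
		return EXIT_FAILURE;
	return EXIT_SUCCESS;
}
#endif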

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

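	/*
	 * Map and lock the page table only long enough to read each
	 * pte: read_swap_cache_async() below can sleep, so the lock
	 * must not be held across it.
	 */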
	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
								vma, index);
		if (page)
			put_page(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
								NULL, 0);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
	if (!file) {
		*prev = vma;
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		*prev = vma;
		force_shm_swapin_readahead(vma, start, end,
					file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
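	/* Convert the byte range into units of pages within the file. */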
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
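
/*
 * Illustrative userspace sketch (an assumption for exposition, compiled
 * out): MADV_WILLNEED only schedules readahead, as madvise_willneed()
 * above shows, so the call returns quickly and later faults on the
 * range are more likely to hit the page cache.  map_and_prefetch() is
 * a hypothetical helper name.
 */
#if 0
#define _DEFAULT_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static void *map_and_prefetch(const char *path, size_t *lenp)
{
	int fd = open(path, O_RDONLY);
	struct stat st;
	void *p;

	if (fd < 0)
		return NULL;
	if (fstat(fd, &st) < 0) {
		close(fd);
		return NULL;
	}
	p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);		/* the mapping keeps the file referenced */
	if (p == MAP_FAILED)
		return NULL;
	madvise(p, st.st_size, MADV_WILLNEED);	/* asynchronous readahead */
	*lenp = st.st_size;
	return p;
}
#endif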

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page
		 * table entry: swapping the page back in would be more
		 * expensive than page allocation plus zeroing.
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If the pmd isn't transhuge but the page is a THP
		 * owned by only this process, split it and
		 * deactivate all of its pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
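			/*
			 * Replay this address: the huge page has been
			 * split into base pages, so handle it pte by pte.
			 */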
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we can't
			 * clear its PG_dirty bit.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the
			 * TLB in set_pte_at() or tlb_remove_tlb_entry(),
			 * so for portability, clear the pte first and
			 * then remap it as old and clean.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			if (PageActive(page))
				deactivate_page(page);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm = vma->vm_mm,
		.private = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
}

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	unsigned long start, end;
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	/* MADV_FREE only works on anonymous vmas at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	start = max(vma->vm_start, start_addr);
	if (start >= vma->vm_end)
		return -EINVAL;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(mm, start, end);
	madvise_free_page_range(&tlb, vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);

	return 0;
}

static long madvise_free(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	return madvise_free_single_vma(vma, start, end);
}
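
/*
 * Illustrative userspace sketch (an assumption for exposition, compiled
 * out): an allocator can return pages lazily with MADV_FREE.  Until the
 * kernel actually reclaims them, reusing the buffer costs nothing; once
 * it does, the pages read back as zeroes.  cache_release() and
 * cache_reuse() are hypothetical helper names.
 */
#if 0
#define _DEFAULT_SOURCE
#include <string.h>
#include <sys/mman.h>

static void cache_release(void *buf, size_t len)
{
	/* Contents become indeterminate: preserved or zero-filled. */
	madvise(buf, len, MADV_FREE);
}

static void cache_reuse(void *buf, size_t len)
{
	/* Writing to the range revalidates it and cancels the lazy free. */
	memset(buf, 0, len);
}
#endif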

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_sem has been dropped, prev is stale */

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end.  If start <
			 * vma->vm_start, a hole has materialized in
			 * the user address space within the virtual
			 * range passed to MADV_DONTNEED.
			 */
			return -ENOMEM;
		}
		if (!can_madv_dontneed_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end.  If the old
			 * vma was split while the mmap_sem was
			 * released, the concurrent operation does not
			 * leave MADV_DONTNEED with an undefined
			 * result: there may be an adjacent next vma
			 * that we'll walk next.  userfaultfd_remove()
			 * will generate a repeated UFFD_EVENT_REMOVE
			 * for the end-vma->vm_end range, but the
			 * manager can handle such repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}
	zap_page_range(vma, start, end - start);
	return 0;
}
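
/*
 * Illustrative userspace sketch (an assumption for exposition, compiled
 * out): after MADV_DONTNEED on a private anonymous mapping the page
 * tables have been zapped, so the next read faults in fresh zero pages.
 */
#if 0
#define _DEFAULT_SOURCE
#include <assert.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	buf[0] = 'x';
	madvise(buf, page, MADV_DONTNEED);
	assert(buf[0] == 0);	/* the old contents are gone */
	return 0;
}
#endif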

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_sem was not released by userfaultfd_remove() */
		up_read(&current->mm->mmap_sem);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
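
/*
 * Illustrative userspace sketch (an assumption for exposition, compiled
 * out): as the vfs_fallocate() call above suggests, MADV_REMOVE on a
 * shared writable file mapping behaves like punching a hole in the file
 * itself, so it needs a filesystem that supports hole punching (e.g.
 * tmpfs).  discard_range() is a hypothetical helper name; off and len
 * must be page aligned.
 */
#if 0
#define _DEFAULT_SOURCE
#include <sys/mman.h>

static int discard_range(int fd, size_t filesize, size_t off, size_t len)
{
	char *p = mmap(NULL, filesize, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)
		return -1;
	/* Frees the pages and the backing blocks, like PUNCH_HOLE. */
	if (madvise(p + off, len, MADV_REMOVE)) {
		munmap(p, filesize);
		return -1;
	}
	return munmap(p, filesize);
}
#endif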

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	struct page *p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
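	/*
	 * The loop increment below runs after each iteration's body, so
	 * compound_head(p) acts on the page that get_user_pages_fast()
	 * looked up in the iteration that just finished.
	 */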
	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(p))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;

		if (PageHWPoison(p)) {
			put_page(p);
			continue;
		}
		if (bhv == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining page %#lx at %#lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for page %#lx at %#lx\n",
		       page_to_pfn(p), start);
		ret = memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
		if (ret)
			return ret;
	}
	return 0;
}
#endif
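
/*
 * Illustrative userspace sketch (an assumption for exposition, compiled
 * out): with CAP_SYS_ADMIN on a CONFIG_MEMORY_FAILURE kernel, a test
 * can inject a hardware memory failure on one of its own pages with
 * MADV_HWPOISON; a later access to that page is expected to raise
 * SIGBUS.
 */
#if 0
#define _DEFAULT_SOURCE
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	buf[0] = 'x';		/* make sure the page is populated */
	if (madvise(buf, page, MADV_HWPOISON))	/* needs CAP_SYS_ADMIN */
		return 1;
	return 0;
}
#endif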

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
		/*
		 * XXX: In this implementation, MADV_FREE works like
		 * MADV_DONTNEED on a swapless system or when swap is
		 * full.
		 */
		if (get_nr_swap_pages() > 0)
			return madvise_free(vma, prev, start, end);
		/* passthrough */
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check whether len was rounded up from a small negative value to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
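
/*
 * Illustrative userspace sketch (an assumption for exposition, compiled
 * out): the argument contract enforced above -- start must be page
 * aligned, len is rounded up to a page multiple, and len == 0 succeeds
 * trivially.  madvise_checked() is a hypothetical wrapper name.
 */
#if 0
#define _DEFAULT_SOURCE
#include <errno.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

static int madvise_checked(void *start, size_t len, int advice)
{
	long page = sysconf(_SC_PAGESIZE);

	/* The kernel rejects an unaligned start with -EINVAL; fail early. */
	if ((uintptr_t)start & (page - 1))
		return EINVAL;
	return madvise(start, len, advice) ? errno : 0;
}
#endif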