// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_context.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/leafops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"
#include "swap.h"

#define __MADV_SET_ANON_VMA_NAME (-1)

/*
 * Maximum number of attempts we make to install guard pages before we give up
 * and return -ERESTARTNOINTR to have userspace try again.
 */
#define MAX_MADVISE_GUARD_RETRIES 3

struct madvise_walk_private {
        struct mmu_gather *tlb;
        bool pageout;
};

enum madvise_lock_mode {
        MADVISE_NO_LOCK,
        MADVISE_MMAP_READ_LOCK,
        MADVISE_MMAP_WRITE_LOCK,
        MADVISE_VMA_READ_LOCK,
};

struct madvise_behavior_range {
        unsigned long start;
        unsigned long end;
};

struct madvise_behavior {
        struct mm_struct *mm;
        int behavior;
        struct mmu_gather *tlb;
        enum madvise_lock_mode lock_mode;
        struct anon_vma_name *anon_name;

        /*
         * The range over which the behaviour is currently being applied. If
         * traversing multiple VMAs, this is updated for each.
         */
        struct madvise_behavior_range range;
        /* The VMA and VMA preceding it (if applicable) currently targeted. */
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        bool lock_dropped;
};

#ifdef CONFIG_ANON_VMA_NAME
static int madvise_walk_vmas(struct madvise_behavior *madv_behavior);

struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
        struct anon_vma_name *anon_name;
        size_t count;

        /* Add 1 for NUL terminator at the end of the anon_name->name */
        count = strlen(name) + 1;
        anon_name = kmalloc_flex(*anon_name, name, count);
        if (anon_name) {
                kref_init(&anon_name->kref);
                memcpy(anon_name->name, name, count);
        }

        return anon_name;
}

void anon_vma_name_free(struct kref *kref)
{
        struct anon_vma_name *anon_name =
                        container_of(kref, struct anon_vma_name, kref);
        kfree(anon_name);
}

struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
        vma_assert_stabilised(vma);
        return vma->anon_name;
}

/* mmap_lock should be write-locked */
static int replace_anon_vma_name(struct vm_area_struct *vma,
                                 struct anon_vma_name *anon_name)
{
        struct anon_vma_name *orig_name = anon_vma_name(vma);

        if (!anon_name) {
                vma->anon_name = NULL;
                anon_vma_name_put(orig_name);
                return 0;
        }

        if (anon_vma_name_eq(orig_name, anon_name))
                return 0;

        vma->anon_name = anon_vma_name_reuse(anon_name);
        anon_vma_name_put(orig_name);

        return 0;
}
#else /* CONFIG_ANON_VMA_NAME */
static int replace_anon_vma_name(struct vm_area_struct *vma,
                                 struct anon_vma_name *anon_name)
{
        if (anon_name)
                return -EINVAL;

        return 0;
}
#endif /* CONFIG_ANON_VMA_NAME */
/*
 * Update the vm_flags or anon_name on region of a vma, splitting it or merging
 * it as necessary. Must be called with mmap_lock held for writing.
 */
static int madvise_update_vma(vm_flags_t new_flags,
                              struct madvise_behavior *madv_behavior)
{
        struct vm_area_struct *vma = madv_behavior->vma;
        vma_flags_t new_vma_flags = legacy_to_vma_flags(new_flags);
        struct madvise_behavior_range *range = &madv_behavior->range;
        struct anon_vma_name *anon_name = madv_behavior->anon_name;
        bool set_new_anon_name = madv_behavior->behavior == __MADV_SET_ANON_VMA_NAME;
        VMA_ITERATOR(vmi, madv_behavior->mm, range->start);

        if (vma_flags_same_mask(&vma->flags, new_vma_flags) &&
            (!set_new_anon_name ||
             anon_vma_name_eq(anon_vma_name(vma), anon_name)))
                return 0;

        if (set_new_anon_name)
                vma = vma_modify_name(&vmi, madv_behavior->prev, vma,
                                      range->start, range->end, anon_name);
        else
                vma = vma_modify_flags(&vmi, madv_behavior->prev, vma,
                                       range->start, range->end, &new_vma_flags);

        if (IS_ERR(vma))
                return PTR_ERR(vma);

        madv_behavior->vma = vma;

        /* vm_flags is protected by the mmap_lock held in write mode. */
        vma_start_write(vma);
        vma->flags = new_vma_flags;
        if (set_new_anon_name)
                return replace_anon_vma_name(vma, anon_name);

        return 0;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
                                 unsigned long end, struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->private;
        struct swap_iocb *splug = NULL;
        pte_t *ptep = NULL;
        spinlock_t *ptl;
        unsigned long addr;

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                pte_t pte;
                softleaf_t entry;
                struct folio *folio;

                if (!ptep++) {
                        ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
                        if (!ptep)
                                break;
                }

                pte = ptep_get(ptep);
                entry = softleaf_from_pte(pte);
                if (unlikely(!softleaf_is_swap(entry)))
                        continue;

                pte_unmap_unlock(ptep, ptl);
                ptep = NULL;

                folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
                                              vma, addr, &splug);
                if (folio)
                        folio_put(folio);
        }

        if (ptep)
                pte_unmap_unlock(ptep, ptl);
        swap_read_unplug(splug);
        cond_resched();

        return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
        .pmd_entry = swapin_walk_pmd_entry,
        .walk_lock = PGWALK_RDLOCK,
};

static void shmem_swapin_range(struct vm_area_struct *vma,
                               unsigned long start, unsigned long end,
                               struct address_space *mapping)
{
        XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
        pgoff_t end_index = linear_page_index(vma, end) - 1;
        struct folio *folio;
        struct swap_iocb *splug = NULL;

        rcu_read_lock();
        xas_for_each(&xas, folio, end_index) {
                unsigned long addr;
                swp_entry_t entry;

                if (!xa_is_value(folio))
                        continue;
                entry = radix_to_swp_entry(folio);
                /* There might be swapin error entries in shmem mapping. */
                if (!softleaf_is_swap(entry))
                        continue;

                addr = vma->vm_start +
                        ((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT);
                xas_pause(&xas);
                rcu_read_unlock();

                folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
                                              vma, addr, &splug);
                if (folio)
                        folio_put(folio);

                rcu_read_lock();
        }
        rcu_read_unlock();
        swap_read_unplug(splug);
}
#endif /* CONFIG_SWAP */

static void mark_mmap_lock_dropped(struct madvise_behavior *madv_behavior)
{
        VM_WARN_ON_ONCE(madv_behavior->lock_mode == MADVISE_VMA_READ_LOCK);
        madv_behavior->lock_dropped = true;
}

/*
 * Schedule all required I/O operations. Do not wait for completion.
 */
static long madvise_willneed(struct madvise_behavior *madv_behavior)
{
        struct vm_area_struct *vma = madv_behavior->vma;
        struct mm_struct *mm = madv_behavior->mm;
        struct file *file = vma->vm_file;
        unsigned long start = madv_behavior->range.start;
        unsigned long end = madv_behavior->range.end;
        loff_t offset;

#ifdef CONFIG_SWAP
        if (!file) {
                walk_page_range_vma(vma, start, end, &swapin_walk_ops, vma);
                lru_add_drain(); /* Push any new pages onto the LRU now */
                return 0;
        }

        if (shmem_mapping(file->f_mapping)) {
                shmem_swapin_range(vma, start, end, file->f_mapping);
                lru_add_drain(); /* Push any new pages onto the LRU now */
                return 0;
        }
#else
        if (!file)
                return -EBADF;
#endif

        if (IS_DAX(file_inode(file))) {
                /* no bad return value, but ignore advice */
                return 0;
        }

        /*
         * Filesystem's fadvise may need to take various locks. We need to
         * explicitly grab a reference because the vma (and hence the
         * vma's reference to the file) can go away as soon as we drop
         * mmap_lock.
         */
        mark_mmap_lock_dropped(madv_behavior);
        get_file(file);
        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
        mmap_read_unlock(mm);
        vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
        fput(file);
        mmap_read_lock(mm);
        return 0;
}

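/*
 * Illustrative userspace sketch (not kernel code, not compiled here): a
 * reader that expects to stream through a file-backed mapping can request
 * asynchronous readahead with MADV_WILLNEED before touching the pages. The
 * file name and length below are hypothetical.
 *
 *      int fd = open("data.bin", O_RDONLY);
 *      size_t len = 64 << 20;
 *      char *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *      if (p != MAP_FAILED)
 *              madvise(p, len, MADV_WILLNEED);  // I/O is scheduled, not awaited
 *      // ... later accesses are more likely to hit already-read pages ...
 */
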
static inline bool can_do_file_pageout(struct vm_area_struct *vma)
{
        if (!vma->vm_file)
                return false;
        /*
         * paging out pagecache only for non-anonymous mappings that correspond
         * to the files the calling process could (if it tried) open for
         * writing; otherwise we'd be including shared non-exclusive mappings,
         * which opens a side channel.
         */
        return inode_owner_or_capable(&nop_mnt_idmap,
                                      file_inode(vma->vm_file)) ||
               file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
                                          struct folio *folio, pte_t *ptep,
                                          pte_t *ptentp)
{
        int max_nr = (end - addr) / PAGE_SIZE;

        return folio_pte_batch_flags(folio, NULL, ptep, ptentp, max_nr,
                                     FPB_MERGE_YOUNG_DIRTY);
}

static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
                                             unsigned long addr, unsigned long end,
                                             struct mm_walk *walk)
{
        struct madvise_walk_private *private = walk->private;
        struct mmu_gather *tlb = private->tlb;
        bool pageout = private->pageout;
        struct mm_struct *mm = tlb->mm;
        struct vm_area_struct *vma = walk->vma;
        pte_t *start_pte, *pte, ptent;
        spinlock_t *ptl;
        struct folio *folio = NULL;
        LIST_HEAD(folio_list);
        bool pageout_anon_only_filter;
        unsigned int batch_count = 0;
        int nr;

        if (fatal_signal_pending(current))
                return -EINTR;

        pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) &&
                                        !can_do_file_pageout(vma);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (pmd_trans_huge(*pmd)) {
                pmd_t orig_pmd;
                unsigned long next = pmd_addr_end(addr, end);

                tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
                ptl = pmd_trans_huge_lock(pmd, vma);
                if (!ptl)
                        return 0;

                orig_pmd = *pmd;
                if (is_huge_zero_pmd(orig_pmd))
                        goto huge_unlock;

                if (unlikely(!pmd_present(orig_pmd))) {
                        VM_BUG_ON(thp_migration_supported() &&
                                        !pmd_is_migration_entry(orig_pmd));
                        goto huge_unlock;
                }

                folio = pmd_folio(orig_pmd);

                /* Do not interfere with other mappings of this folio */
                if (folio_maybe_mapped_shared(folio))
                        goto huge_unlock;

                if (pageout_anon_only_filter && !folio_test_anon(folio))
                        goto huge_unlock;

                if (next - addr != HPAGE_PMD_SIZE) {
                        int err;

                        folio_get(folio);
                        spin_unlock(ptl);
                        folio_lock(folio);
                        err = split_folio(folio);
                        folio_unlock(folio);
                        folio_put(folio);
                        if (!err)
                                goto regular_folio;
                        return 0;
                }

                if (!pageout && pmd_young(orig_pmd)) {
                        pmdp_invalidate(vma, addr, pmd);
                        orig_pmd = pmd_mkold(orig_pmd);

                        set_pmd_at(mm, addr, pmd, orig_pmd);
                        tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
                }

                folio_clear_referenced(folio);
                folio_test_clear_young(folio);
                if (folio_test_active(folio))
                        folio_set_workingset(folio);
                if (pageout) {
                        if (folio_isolate_lru(folio)) {
                                if (folio_test_unevictable(folio))
                                        folio_putback_lru(folio);
                                else
                                        list_add(&folio->lru, &folio_list);
                        }
                } else
                        folio_deactivate(folio);
huge_unlock:
                spin_unlock(ptl);
                if (pageout)
                        reclaim_pages(&folio_list);
                return 0;
        }

regular_folio:
#endif
        tlb_change_page_size(tlb, PAGE_SIZE);
restart:
        start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (!start_pte)
                return 0;
        flush_tlb_batched_pending(mm);
        lazy_mmu_mode_enable();
        for (; addr < end; pte += nr, addr += nr * PAGE_SIZE) {
                nr = 1;
                ptent = ptep_get(pte);

                if (++batch_count == SWAP_CLUSTER_MAX) {
                        batch_count = 0;
                        if (need_resched()) {
                                lazy_mmu_mode_disable();
                                pte_unmap_unlock(start_pte, ptl);
                                cond_resched();
                                goto restart;
                        }
                }

                if (pte_none(ptent))
                        continue;

                if (!pte_present(ptent))
                        continue;

                folio = vm_normal_folio(vma, addr, ptent);
                if (!folio || folio_is_zone_device(folio))
                        continue;

                /*
                 * If we encounter a large folio, only split it if it is not
                 * fully mapped within the range we are operating on. Otherwise
                 * leave it as is so that it can be swapped out whole. If we
                 * fail to split a folio, leave it in place and advance to the
                 * next pte in the range.
                 */
                if (folio_test_large(folio)) {
                        nr = madvise_folio_pte_batch(addr, end, folio, pte, &ptent);
                        if (nr < folio_nr_pages(folio)) {
                                int err;

                                if (folio_maybe_mapped_shared(folio))
                                        continue;
                                if (pageout_anon_only_filter && !folio_test_anon(folio))
                                        continue;
                                if (!folio_trylock(folio))
                                        continue;
                                folio_get(folio);
                                lazy_mmu_mode_disable();
                                pte_unmap_unlock(start_pte, ptl);
                                start_pte = NULL;
                                err = split_folio(folio);
                                folio_unlock(folio);
                                folio_put(folio);
                                start_pte = pte =
                                        pte_offset_map_lock(mm, pmd, addr, &ptl);
                                if (!start_pte)
                                        break;
                                flush_tlb_batched_pending(mm);
                                lazy_mmu_mode_enable();
                                if (!err)
                                        nr = 0;
                                continue;
                        }
                }

                /*
                 * Do not interfere with other mappings of this folio or with
                 * non-LRU folios. If we have a large folio at this point, we
                 * know it is fully mapped, so if its mapcount is the same as
                 * its number of pages, it must be exclusive.
                 */
                if (!folio_test_lru(folio) ||
                    folio_mapcount(folio) != folio_nr_pages(folio))
                        continue;

                if (pageout_anon_only_filter && !folio_test_anon(folio))
                        continue;

                if (!pageout && pte_young(ptent)) {
                        clear_young_dirty_ptes(vma, addr, pte, nr,
                                               CYDP_CLEAR_YOUNG);
                        tlb_remove_tlb_entries(tlb, pte, nr, addr);
                }

                /*
                 * We are deactivating a folio to accelerate its reclaim.
                 * The VM couldn't reclaim the folio unless we clear PG_young.
                 * As a side effect, this confuses idle-page tracking, which
                 * will miss recent referenced history.
                 */
                folio_clear_referenced(folio);
                folio_test_clear_young(folio);
                if (folio_test_active(folio))
                        folio_set_workingset(folio);
                if (pageout) {
                        if (folio_isolate_lru(folio)) {
                                if (folio_test_unevictable(folio))
                                        folio_putback_lru(folio);
                                else
                                        list_add(&folio->lru, &folio_list);
                        }
                } else
                        folio_deactivate(folio);
        }

        if (start_pte) {
                lazy_mmu_mode_disable();
                pte_unmap_unlock(start_pte, ptl);
        }
        if (pageout)
                reclaim_pages(&folio_list);
        cond_resched();

        return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
        .pmd_entry = madvise_cold_or_pageout_pte_range,
        .walk_lock = PGWALK_RDLOCK,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
                                    struct madvise_behavior *madv_behavior)

{
        struct vm_area_struct *vma = madv_behavior->vma;
        struct madvise_behavior_range *range = &madv_behavior->range;
        struct madvise_walk_private walk_private = {
                .pageout = false,
                .tlb = tlb,
        };

        tlb_start_vma(tlb, vma);
        walk_page_range_vma(vma, range->start, range->end, &cold_walk_ops,
                            &walk_private);
        tlb_end_vma(tlb, vma);
}

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
        return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
}

static long madvise_cold(struct madvise_behavior *madv_behavior)
{
        struct vm_area_struct *vma = madv_behavior->vma;
        struct mmu_gather tlb;

        if (!can_madv_lru_vma(vma))
                return -EINVAL;

        lru_add_drain();
        tlb_gather_mmu(&tlb, madv_behavior->mm);
        madvise_cold_page_range(&tlb, madv_behavior);
        tlb_finish_mmu(&tlb);

        return 0;
}

static void madvise_pageout_page_range(struct mmu_gather *tlb,
                                       struct vm_area_struct *vma,
                                       struct madvise_behavior_range *range)
{
        struct madvise_walk_private walk_private = {
                .pageout = true,
                .tlb = tlb,
        };

        tlb_start_vma(tlb, vma);
        walk_page_range_vma(vma, range->start, range->end, &cold_walk_ops,
                            &walk_private);
        tlb_end_vma(tlb, vma);
}

static long madvise_pageout(struct madvise_behavior *madv_behavior)
{
        struct mmu_gather tlb;
        struct vm_area_struct *vma = madv_behavior->vma;

        if (!can_madv_lru_vma(vma))
                return -EINVAL;

        /*
         * If the VMA belongs to a private file mapping, there can be private
         * dirty pages which can be paged out even if this process is neither
         * the owner of nor write-capable on the file. We further allow
         * private file mappings to page out dirty anon pages.
         */
        if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
                                       (vma->vm_flags & VM_MAYSHARE)))
                return 0;

        lru_add_drain();
        tlb_gather_mmu(&tlb, madv_behavior->mm);
        madvise_pageout_page_range(&tlb, vma, &madv_behavior->range);
        tlb_finish_mmu(&tlb);

        return 0;
}

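/*
 * Illustrative userspace sketch (not kernel code): a cache that knows a
 * region has gone cold can either deactivate it with MADV_COLD (making it
 * an early reclaim candidate) or reclaim it immediately with MADV_PAGEOUT.
 * Both are best-effort hints; buf, len and the pressure predicate below are
 * hypothetical.
 *
 *      // buf/len: a page-aligned region of a previously mmap()'d mapping
 *      if (under_light_pressure)
 *              madvise(buf, len, MADV_COLD);     // deactivate only
 *      else
 *              madvise(buf, len, MADV_PAGEOUT);  // reclaim right away
 */
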
static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, struct mm_walk *walk)

{
        const cydp_t cydp_flags = CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY;
        struct mmu_gather *tlb = walk->private;
        struct mm_struct *mm = tlb->mm;
        struct vm_area_struct *vma = walk->vma;
        spinlock_t *ptl;
        pte_t *start_pte, *pte, ptent;
        struct folio *folio;
        int nr_swap = 0;
        unsigned long next;
        int nr, max_nr;

        next = pmd_addr_end(addr, end);
        if (pmd_trans_huge(*pmd))
                if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
                        return 0;

        tlb_change_page_size(tlb, PAGE_SIZE);
        start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!start_pte)
                return 0;
        flush_tlb_batched_pending(mm);
        lazy_mmu_mode_enable();
        for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) {
                nr = 1;
                ptent = ptep_get(pte);

                if (pte_none(ptent))
                        continue;
                /*
                 * If the pte holds a swap entry, just clear the page table
                 * entry to prevent swap-in, which is more expensive than the
                 * (page allocation + zeroing) a fresh fault would do.
                 */
                if (!pte_present(ptent)) {
                        softleaf_t entry = softleaf_from_pte(ptent);

                        if (softleaf_is_swap(entry)) {
                                max_nr = (end - addr) / PAGE_SIZE;
                                nr = swap_pte_batch(pte, max_nr, ptent);
                                nr_swap -= nr;
                                swap_put_entries_direct(entry, nr);
                                clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm);
                        } else if (softleaf_is_hwpoison(entry) ||
                                   softleaf_is_poison_marker(entry)) {
                                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
                        }
                        continue;
                }

                folio = vm_normal_folio(vma, addr, ptent);
                if (!folio || folio_is_zone_device(folio))
                        continue;

                /*
                 * If we encounter a large folio, only split it if it is not
                 * fully mapped within the range we are operating on. Otherwise
                 * leave it as is so that it can be marked as lazyfree. If we
                 * fail to split a folio, leave it in place and advance to the
                 * next pte in the range.
                 */
                if (folio_test_large(folio)) {
                        nr = madvise_folio_pte_batch(addr, end, folio, pte, &ptent);
                        if (nr < folio_nr_pages(folio)) {
                                int err;

                                if (folio_maybe_mapped_shared(folio))
                                        continue;
                                if (!folio_trylock(folio))
                                        continue;
                                folio_get(folio);
                                lazy_mmu_mode_disable();
                                pte_unmap_unlock(start_pte, ptl);
                                start_pte = NULL;
                                err = split_folio(folio);
                                folio_unlock(folio);
                                folio_put(folio);
                                pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
                                start_pte = pte;
                                if (!start_pte)
                                        break;
                                flush_tlb_batched_pending(mm);
                                lazy_mmu_mode_enable();
                                if (!err)
                                        nr = 0;
                                continue;
                        }
                }

                if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
                        if (!folio_trylock(folio))
                                continue;
                        /*
                         * If we have a large folio at this point, we know it is
                         * fully mapped so if its mapcount is the same as its
                         * number of pages, it must be exclusive.
                         */
                        if (folio_mapcount(folio) != folio_nr_pages(folio)) {
                                folio_unlock(folio);
                                continue;
                        }

                        if (folio_test_swapcache(folio) &&
                            !folio_free_swap(folio)) {
                                folio_unlock(folio);
                                continue;
                        }

                        folio_clear_dirty(folio);
                        folio_unlock(folio);
                }

                if (pte_young(ptent) || pte_dirty(ptent)) {
                        clear_young_dirty_ptes(vma, addr, pte, nr, cydp_flags);
                        tlb_remove_tlb_entries(tlb, pte, nr, addr);
                }
                folio_mark_lazyfree(folio);
        }

        if (nr_swap)
                add_mm_counter(mm, MM_SWAPENTS, nr_swap);
        if (start_pte) {
                lazy_mmu_mode_disable();
                pte_unmap_unlock(start_pte, ptl);
        }
        cond_resched();

        return 0;
}

static inline enum page_walk_lock get_walk_lock(enum madvise_lock_mode mode)
{
        switch (mode) {
        case MADVISE_VMA_READ_LOCK:
                return PGWALK_VMA_RDLOCK_VERIFY;
        case MADVISE_MMAP_READ_LOCK:
                return PGWALK_RDLOCK;
        default:
                /* Other modes don't require fixing up the walk_lock */
                WARN_ON_ONCE(1);
                return PGWALK_RDLOCK;
        }
}

static int madvise_free_single_vma(struct madvise_behavior *madv_behavior)
{
        struct mm_struct *mm = madv_behavior->mm;
        struct vm_area_struct *vma = madv_behavior->vma;
        struct mmu_notifier_range range = {
                .start = madv_behavior->range.start,
                .end = madv_behavior->range.end,
        };
        struct mmu_gather *tlb = madv_behavior->tlb;
        struct mm_walk_ops walk_ops = {
                .pmd_entry = madvise_free_pte_range,
        };

        /* MADV_FREE works for only anon vma at the moment */
        if (!vma_is_anonymous(vma))
                return -EINVAL;

        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
                                range.start, range.end);

        lru_add_drain();
        update_hiwater_rss(mm);

        mmu_notifier_invalidate_range_start(&range);
        tlb_start_vma(tlb, vma);
        walk_ops.walk_lock = get_walk_lock(madv_behavior->lock_mode);
        walk_page_range_vma(vma, range.start, range.end,
                            &walk_ops, tlb);
        tlb_end_vma(tlb, vma);
        mmu_notifier_invalidate_range_end(&range);
        return 0;
}

/*
 * Application no longer needs these pages. If the pages are dirty,
 * it's OK to just throw them away. The app will be more careful about
 * data it wants to keep. Be sure to free swap resources too. The
 * zap_vma_range call sets things up for shrink_active_list to actually
 * free these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do. This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them. There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct madvise_behavior *madv_behavior)

{
        struct madvise_behavior_range *range = &madv_behavior->range;
        struct zap_details details = {
                .reclaim_pt = true,
        };

        zap_vma_range_batched(madv_behavior->tlb, madv_behavior->vma,
                              range->start, range->end - range->start, &details);
        return 0;
}

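/*
 * Illustrative userspace sketch (not kernel code): how an allocator might
 * return a free run of pages to the kernel. MADV_FREE marks the pages
 * lazy-free (discarded only under memory pressure, cheap to reuse), while
 * MADV_DONTNEED discards them immediately, as described above. The helper
 * below and its parameters are hypothetical.
 *
 *      static void release_run(void *p, size_t len)
 *      {
 *              // Prefer the lazy path; fall back on kernels without MADV_FREE.
 *              if (madvise(p, len, MADV_FREE) == -1 && errno == EINVAL)
 *                      madvise(p, len, MADV_DONTNEED);
 *      }
 *
 * After MADV_DONTNEED, the next touch of an anonymous page observes zeroes;
 * after MADV_FREE, it observes either the old data or zeroes.
 */
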
static
bool madvise_dontneed_free_valid_vma(struct madvise_behavior *madv_behavior)
{
        struct vm_area_struct *vma = madv_behavior->vma;
        int behavior = madv_behavior->behavior;
        struct madvise_behavior_range *range = &madv_behavior->range;

        if (!is_vm_hugetlb_page(vma)) {
                unsigned int forbidden = VM_PFNMAP;

                if (behavior != MADV_DONTNEED_LOCKED)
                        forbidden |= VM_LOCKED;

                return !(vma->vm_flags & forbidden);
        }

        if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
                return false;
        if (range->start & ~huge_page_mask(hstate_vma(vma)))
                return false;

        /*
         * Madvise callers expect the length to be rounded up to PAGE_SIZE
         * boundaries, and may be unaware that this VMA uses huge pages.
         * Avoid unexpected data loss by rounding down the number of
         * huge pages freed.
         */
        range->end = ALIGN_DOWN(range->end, huge_page_size(hstate_vma(vma)));

        return true;
}

static long madvise_dontneed_free(struct madvise_behavior *madv_behavior)
{
        struct mm_struct *mm = madv_behavior->mm;
        struct madvise_behavior_range *range = &madv_behavior->range;
        int behavior = madv_behavior->behavior;

        if (!madvise_dontneed_free_valid_vma(madv_behavior))
                return -EINVAL;

        if (range->start == range->end)
                return 0;

        if (!userfaultfd_remove(madv_behavior->vma, range->start, range->end)) {
                struct vm_area_struct *vma;

                mark_mmap_lock_dropped(madv_behavior);
                mmap_read_lock(mm);
                madv_behavior->vma = vma = vma_lookup(mm, range->start);
                if (!vma)
                        return -ENOMEM;
                /*
                 * Potential end adjustment for hugetlb vma is OK as
                 * the check below keeps end within vma.
                 */
                if (!madvise_dontneed_free_valid_vma(madv_behavior))
                        return -EINVAL;
                if (range->end > vma->vm_end) {
                        /*
                         * Don't fail if end > vma->vm_end. If the old
                         * vma was split while the mmap_lock was
                         * released, the concurrent operation may not
                         * cause madvise() to have an undefined
                         * result. There may be an adjacent next vma
                         * that we'll walk next. userfaultfd_remove()
                         * will generate an UFFD_EVENT_REMOVE
                         * repetition on the end-vma->vm_end range, but
                         * the manager can handle a repetition fine.
                         */
                        range->end = vma->vm_end;
                }
                /*
                 * If the memory region between start and end was
                 * originally backed by 4kB pages and then remapped to
                 * be backed by hugepages while mmap_lock was dropped,
                 * the adjustment for hugetlb vma above may have rounded
                 * end down to the start address.
                 */
                if (range->start == range->end)
                        return 0;
                VM_WARN_ON(range->start > range->end);
        }

        if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
                return madvise_dontneed_single_vma(madv_behavior);
        else if (behavior == MADV_FREE)
                return madvise_free_single_vma(madv_behavior);
        else
                return -EINVAL;
}

static long madvise_populate(struct madvise_behavior *madv_behavior)
{
        struct mm_struct *mm = madv_behavior->mm;
        const bool write = madv_behavior->behavior == MADV_POPULATE_WRITE;
        int locked = 1;
        unsigned long start = madv_behavior->range.start;
        unsigned long end = madv_behavior->range.end;
        long pages;

        while (start < end) {
                /* Populate (prefault) page tables readable/writable. */
                pages = faultin_page_range(mm, start, end, write, &locked);
                if (!locked) {
                        mmap_read_lock(mm);
                        locked = 1;
                }
                if (pages < 0) {
                        switch (pages) {
                        case -EINTR:
                                return -EINTR;
                        case -EINVAL: /* Incompatible mappings / permissions. */
                                return -EINVAL;
                        case -EHWPOISON:
                                return -EHWPOISON;
                        case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
                                return -EFAULT;
                        default:
                                pr_warn_once("%s: unhandled return value: %ld\n",
                                             __func__, pages);
                                fallthrough;
                        case -ENOMEM: /* No VMA or out of memory. */
                                return -ENOMEM;
                        }
                }
                start += pages * PAGE_SIZE;
        }
        return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct madvise_behavior *madv_behavior)
{
        loff_t offset;
        int error;
        struct file *f;
        struct mm_struct *mm = madv_behavior->mm;
        struct vm_area_struct *vma = madv_behavior->vma;
        unsigned long start = madv_behavior->range.start;
        unsigned long end = madv_behavior->range.end;

        mark_mmap_lock_dropped(madv_behavior);

        if (vma->vm_flags & VM_LOCKED)
                return -EINVAL;

        f = vma->vm_file;

        if (!f || !f->f_mapping || !f->f_mapping->host) {
                return -EINVAL;
        }

        if (!vma_is_shared_maywrite(vma))
                return -EACCES;

        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

        /*
         * Filesystem's fallocate may need to take i_rwsem. We need to
         * explicitly grab a reference because the vma (and hence the
         * vma's reference to the file) can go away as soon as we drop
         * mmap_lock.
         */
        get_file(f);
        if (userfaultfd_remove(vma, start, end)) {
                /* mmap_lock was not released by userfaultfd_remove() */
                mmap_read_unlock(mm);
        }
        error = vfs_fallocate(f,
                              FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                              offset, end - start);
        fput(f);
        mmap_read_lock(mm);
        return error;
}

static bool is_valid_guard_vma(struct vm_area_struct *vma, bool allow_locked)
{
        vm_flags_t disallowed = VM_SPECIAL | VM_HUGETLB;

        /*
         * A user could lock after setting a guard range but that's fine, as
         * they'd not be able to fault in. The issue arises when we try to zap
         * existing locked VMAs. We don't want to do that.
         */
        if (!allow_locked)
                disallowed |= VM_LOCKED;

        return !(vma->vm_flags & disallowed);
}

static bool is_guard_pte_marker(pte_t ptent)
{
        const softleaf_t entry = softleaf_from_pte(ptent);

        return softleaf_is_guard_marker(entry);
}

static int guard_install_pud_entry(pud_t *pud, unsigned long addr,
                                   unsigned long next, struct mm_walk *walk)
{
        pud_t pudval = pudp_get(pud);

        /* If huge return >0 so we abort the operation + zap. */
        return pud_trans_huge(pudval);
}

static int guard_install_pmd_entry(pmd_t *pmd, unsigned long addr,
                                   unsigned long next, struct mm_walk *walk)
{
        pmd_t pmdval = pmdp_get(pmd);

        /* If huge return >0 so we abort the operation + zap. */
        return pmd_trans_huge(pmdval);
}

static int guard_install_pte_entry(pte_t *pte, unsigned long addr,
                                   unsigned long next, struct mm_walk *walk)
{
        pte_t pteval = ptep_get(pte);
        unsigned long *nr_pages = (unsigned long *)walk->private;

        /* If there is already a guard page marker, we have nothing to do. */
        if (is_guard_pte_marker(pteval)) {
                (*nr_pages)++;

                return 0;
        }

        /* If populated return >0 so we abort the operation + zap. */
        return 1;
}

static int guard_install_set_pte(unsigned long addr, unsigned long next,
                                 pte_t *ptep, struct mm_walk *walk)
{
        unsigned long *nr_pages = (unsigned long *)walk->private;

        /* Simply install a PTE marker, this causes segfault on access. */
        *ptep = make_pte_marker(PTE_MARKER_GUARD);
        (*nr_pages)++;

        return 0;
}

static long madvise_guard_install(struct madvise_behavior *madv_behavior)
{
        struct vm_area_struct *vma = madv_behavior->vma;
        struct madvise_behavior_range *range = &madv_behavior->range;
        struct mm_walk_ops walk_ops = {
                .pud_entry = guard_install_pud_entry,
                .pmd_entry = guard_install_pmd_entry,
                .pte_entry = guard_install_pte_entry,
                .install_pte = guard_install_set_pte,
                .walk_lock = get_walk_lock(madv_behavior->lock_mode),
        };
        long err;
        int i;

        if (!is_valid_guard_vma(vma, /* allow_locked = */false))
                return -EINVAL;

        /*
         * Set atomically under read lock. All pertinent readers will need to
         * acquire an mmap/VMA write lock to read it. All remaining readers may
         * or may not see the flag set, but we don't care.
         */
        vma_set_atomic_flag(vma, VMA_MAYBE_GUARD_BIT);

        /*
         * If anonymous and we are establishing page tables the VMA ought to
         * have an anon_vma associated with it.
         *
         * We will hold an mmap read lock if this is necessary, this is checked
         * as part of the VMA lock logic.
         */
        if (vma_is_anonymous(vma)) {
                VM_WARN_ON_ONCE(!vma->anon_vma &&
                                madv_behavior->lock_mode != MADVISE_MMAP_READ_LOCK);

                err = anon_vma_prepare(vma);
                if (err)
                        return err;
        }

        /*
         * Optimistically try to install the guard marker pages first. If any
         * non-guard pages or THP huge pages are encountered, give up and zap
         * the range before trying again.
         *
         * We try a few times before giving up and releasing back to userland to
         * loop around, releasing locks in the process to avoid contention.
         *
         * This would only happen due to races with e.g. page faults or
         * khugepaged.
         *
         * In most cases we should simply install the guard markers immediately
         * with no zap or looping.
         */
        for (i = 0; i < MAX_MADVISE_GUARD_RETRIES; i++) {
                unsigned long nr_pages = 0;

                /* Returns < 0 on error, == 0 if success, > 0 if zap needed. */
                if (madv_behavior->lock_mode == MADVISE_VMA_READ_LOCK)
                        err = walk_page_range_vma_unsafe(madv_behavior->vma,
                                        range->start, range->end, &walk_ops,
                                        &nr_pages);
                else
                        err = walk_page_range_mm_unsafe(vma->vm_mm, range->start,
                                        range->end, &walk_ops, &nr_pages);
                if (err < 0)
                        return err;

                if (err == 0) {
                        unsigned long nr_expected_pages =
                                PHYS_PFN(range->end - range->start);

                        VM_WARN_ON(nr_pages != nr_expected_pages);
                        return 0;
                }

                /*
                 * OK, some of the range has non-guard pages mapped: zap
                 * them. This leaves existing guard pages in place.
                 */
                zap_vma_range(vma, range->start, range->end - range->start);
        }

        /*
         * We were unable to install the guard pages, return to userspace and
         * immediately retry, relieving lock contention.
         */
        return restart_syscall();
}

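/*
 * Illustrative userspace sketch (not kernel code): guard regions let an
 * arena allocator fence off a redzone without a separate PROT_NONE mapping
 * backed by its own VMA. Any access to the guarded pages raises SIGSEGV
 * until the guards are removed. Names and sizes below are hypothetical.
 *
 *      long page = sysconf(_SC_PAGESIZE);
 *      char *arena = mmap(NULL, 16 * page, PROT_READ | PROT_WRITE,
 *                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *      // Fence off the last page of the arena as a redzone.
 *      madvise(arena + 15 * page, page, MADV_GUARD_INSTALL);
 *      // ... touching arena[15 * page] would now fault ...
 *      madvise(arena + 15 * page, page, MADV_GUARD_REMOVE);
 */
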
static int guard_remove_pud_entry(pud_t *pud, unsigned long addr,
                                  unsigned long next, struct mm_walk *walk)
{
        pud_t pudval = pudp_get(pud);

        /* If huge, cannot have guard pages present, so no-op - skip. */
        if (pud_trans_huge(pudval))
                walk->action = ACTION_CONTINUE;

        return 0;
}

static int guard_remove_pmd_entry(pmd_t *pmd, unsigned long addr,
                                  unsigned long next, struct mm_walk *walk)
{
        pmd_t pmdval = pmdp_get(pmd);

        /* If huge, cannot have guard pages present, so no-op - skip. */
        if (pmd_trans_huge(pmdval))
                walk->action = ACTION_CONTINUE;

        return 0;
}

static int guard_remove_pte_entry(pte_t *pte, unsigned long addr,
                                  unsigned long next, struct mm_walk *walk)
{
        pte_t ptent = ptep_get(pte);

        if (is_guard_pte_marker(ptent)) {
                /* Simply clear the PTE marker. */
                pte_clear_not_present_full(walk->mm, addr, pte, false);
                update_mmu_cache(walk->vma, addr, pte);
        }

        return 0;
}

static long madvise_guard_remove(struct madvise_behavior *madv_behavior)
{
        struct vm_area_struct *vma = madv_behavior->vma;
        struct madvise_behavior_range *range = &madv_behavior->range;
        struct mm_walk_ops walk_ops = {
                .pud_entry = guard_remove_pud_entry,
                .pmd_entry = guard_remove_pmd_entry,
                .pte_entry = guard_remove_pte_entry,
                .walk_lock = get_walk_lock(madv_behavior->lock_mode),
        };

        /*
         * We're ok with removing guards in mlock()'d ranges, as this is a
         * non-destructive action.
         */
        if (!is_valid_guard_vma(vma, /* allow_locked = */true))
                return -EINVAL;

        return walk_page_range_vma(vma, range->start, range->end,
                                   &walk_ops, NULL);
}

#ifdef CONFIG_64BIT
/* Does the madvise operation result in discarding of mapped data? */
static bool is_discard(int behavior)
{
        switch (behavior) {
        case MADV_FREE:
        case MADV_DONTNEED:
        case MADV_DONTNEED_LOCKED:
        case MADV_REMOVE:
        case MADV_DONTFORK:
        case MADV_WIPEONFORK:
        case MADV_GUARD_INSTALL:
                return true;
        }

        return false;
}

/*
 * We are restricted from madvise()'ing mseal()'d VMAs only in very particular
 * circumstances - discarding of data from read-only anonymous SEALED mappings.
 *
 * This is because users cannot trivially discard data from these VMAs, and may
 * only do so via an appropriate madvise() call.
 */
static bool can_madvise_modify(struct madvise_behavior *madv_behavior)
{
        struct vm_area_struct *vma = madv_behavior->vma;

        /* If the VMA isn't sealed we're good. */
        if (!vma_is_sealed(vma))
                return true;

        /* For a sealed VMA, we only care about discard operations. */
        if (!is_discard(madv_behavior->behavior))
                return true;

        /*
         * We explicitly permit all file-backed mappings, whether MAP_SHARED or
         * MAP_PRIVATE.
         *
         * The latter causes some complications: one can mmap() a MAP_PRIVATE
         * mapping read/write, write to it, mprotect() it read-only, then
         * mseal() it - and a discard will be permitted.
         *
         * However, in order to avoid issues with potential use of madvise(...,
         * MADV_DONTNEED) on mseal()'d .text mappings we, for the time being,
         * permit this.
         */
        if (!vma_is_anonymous(vma))
                return true;

        /* If the user could write to the mapping anyway, then this is fine. */
        if ((vma->vm_flags & VM_WRITE) &&
            arch_vma_access_permitted(vma, /* write= */ true,
                                      /* execute= */ false, /* foreign= */ false))
                return true;

        /* Otherwise, we are not permitted to perform this operation. */
        return false;
}
#else
static bool can_madvise_modify(struct madvise_behavior *madv_behavior)
{
        return true;
}
#endif

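/*
 * Illustrative userspace sketch (not kernel code) of the restriction above:
 * discarding a read-only, sealed, anonymous mapping is refused with EPERM,
 * since the data could not be destroyed by any other means.
 *
 *      char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      p[0] = 1;                        // populate some data
 *      mprotect(p, len, PROT_READ);     // now read-only
 *      mseal(p, len, 0);                // seal the mapping
 *
 *      // On a 64-bit kernel this discard now fails with errno == EPERM.
 *      madvise(p, len, MADV_DONTNEED);
 */
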
/*
 * Apply an madvise behavior to a region of a vma. madvise_update_vma
 * will handle splitting a vm area into separate areas, each area with its own
 * behavior.
 */
static int madvise_vma_behavior(struct madvise_behavior *madv_behavior)
{
        int behavior = madv_behavior->behavior;
        struct vm_area_struct *vma = madv_behavior->vma;
        vm_flags_t new_flags = vma->vm_flags;
        struct madvise_behavior_range *range = &madv_behavior->range;
        int error;

        if (unlikely(!can_madvise_modify(madv_behavior)))
                return -EPERM;

        switch (behavior) {
        case MADV_REMOVE:
                return madvise_remove(madv_behavior);
        case MADV_WILLNEED:
                return madvise_willneed(madv_behavior);
        case MADV_COLD:
                return madvise_cold(madv_behavior);
        case MADV_PAGEOUT:
                return madvise_pageout(madv_behavior);
        case MADV_FREE:
        case MADV_DONTNEED:
        case MADV_DONTNEED_LOCKED:
                return madvise_dontneed_free(madv_behavior);
        case MADV_COLLAPSE:
                return madvise_collapse(vma, range->start, range->end,
                                        &madv_behavior->lock_dropped);
        case MADV_GUARD_INSTALL:
                return madvise_guard_install(madv_behavior);
        case MADV_GUARD_REMOVE:
                return madvise_guard_remove(madv_behavior);

        /* The below behaviours update VMAs via madvise_update_vma(). */

        case MADV_NORMAL:
                new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
                break;
        case MADV_SEQUENTIAL:
                new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
                break;
        case MADV_RANDOM:
                new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
                break;
        case MADV_DONTFORK:
                new_flags |= VM_DONTCOPY;
                break;
        case MADV_DOFORK:
                if (new_flags & VM_SPECIAL)
                        return -EINVAL;
                new_flags &= ~VM_DONTCOPY;
                break;
        case MADV_WIPEONFORK:
                /* MADV_WIPEONFORK is only supported on anonymous memory. */
                if (vma->vm_file || new_flags & VM_SHARED)
                        return -EINVAL;
                new_flags |= VM_WIPEONFORK;
                break;
        case MADV_KEEPONFORK:
                if (new_flags & VM_DROPPABLE)
                        return -EINVAL;
                new_flags &= ~VM_WIPEONFORK;
                break;
        case MADV_DONTDUMP:
                new_flags |= VM_DONTDUMP;
                break;
        case MADV_DODUMP:
                if ((!is_vm_hugetlb_page(vma) && (new_flags & VM_SPECIAL)) ||
                    (new_flags & VM_DROPPABLE))
                        return -EINVAL;
                new_flags &= ~VM_DONTDUMP;
                break;
        case MADV_MERGEABLE:
        case MADV_UNMERGEABLE:
                error = ksm_madvise(vma, range->start, range->end,
                                    behavior, &new_flags);
                if (error)
                        goto out;
                break;
        case MADV_HUGEPAGE:
        case MADV_NOHUGEPAGE:
                error = hugepage_madvise(vma, &new_flags, behavior);
                if (error)
                        goto out;
                break;
        case __MADV_SET_ANON_VMA_NAME:
                /* Only anonymous mappings can be named */
                if (vma->vm_file && !vma_is_anon_shmem(vma))
                        return -EBADF;
                break;
        }

        /* This is a write operation. */
        VM_WARN_ON_ONCE(madv_behavior->lock_mode != MADVISE_MMAP_WRITE_LOCK);

        error = madvise_update_vma(new_flags, madv_behavior);
out:
        /*
         * madvise() returns EAGAIN if kernel resources, such as
         * slab, are temporarily unavailable.
         */
        if (error == -ENOMEM)
                error = -EAGAIN;
        return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(struct madvise_behavior *madv_behavior)
{
        unsigned long size;
        unsigned long start = madv_behavior->range.start;
        unsigned long end = madv_behavior->range.end;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        for (; start < end; start += size) {
                unsigned long pfn;
                struct page *page;
                int ret;

                ret = get_user_pages_fast(start, 1, 0, &page);
                if (ret != 1)
                        return ret;
                pfn = page_to_pfn(page);

                /*
                 * When soft offlining hugepages, after migrating the page
                 * we dissolve it, therefore in the second loop "page" will
                 * no longer be a compound page.
                 */
                size = page_size(compound_head(page));

                if (madv_behavior->behavior == MADV_SOFT_OFFLINE) {
                        pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
                                pfn, start);
                        ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
                } else {
                        pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
                                pfn, start);
                        ret = memory_failure(pfn, MF_ACTION_REQUIRED | MF_COUNT_INCREASED | MF_SW_SIMULATED);
                        if (ret == -EOPNOTSUPP)
                                ret = 0;
                }

                if (ret)
                        return ret;
        }

        return 0;
}

static bool is_memory_failure(struct madvise_behavior *madv_behavior)
{
        switch (madv_behavior->behavior) {
        case MADV_HWPOISON:
        case MADV_SOFT_OFFLINE:
                return true;
        default:
                return false;
        }
}

#else

static int madvise_inject_error(struct madvise_behavior *madv_behavior)
{
        return 0;
}

static bool is_memory_failure(struct madvise_behavior *madv_behavior)
{
        return false;
}

#endif /* CONFIG_MEMORY_FAILURE */

static bool
madvise_behavior_valid(int behavior)
{
        switch (behavior) {
        case MADV_DOFORK:
        case MADV_DONTFORK:
        case MADV_NORMAL:
        case MADV_SEQUENTIAL:
        case MADV_RANDOM:
        case MADV_REMOVE:
        case MADV_WILLNEED:
        case MADV_DONTNEED:
        case MADV_DONTNEED_LOCKED:
        case MADV_FREE:
        case MADV_COLD:
        case MADV_PAGEOUT:
        case MADV_POPULATE_READ:
        case MADV_POPULATE_WRITE:
#ifdef CONFIG_KSM
        case MADV_MERGEABLE:
        case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        case MADV_HUGEPAGE:
        case MADV_NOHUGEPAGE:
        case MADV_COLLAPSE:
#endif
        case MADV_DONTDUMP:
        case MADV_DODUMP:
        case MADV_WIPEONFORK:
        case MADV_KEEPONFORK:
        case MADV_GUARD_INSTALL:
        case MADV_GUARD_REMOVE:
#ifdef CONFIG_MEMORY_FAILURE
        case MADV_SOFT_OFFLINE:
        case MADV_HWPOISON:
#endif
                return true;

        default:
                return false;
        }
}

/* Can we invoke process_madvise() on a remote mm for the specified behavior? */
static bool process_madvise_remote_valid(int behavior)
{
        switch (behavior) {
        case MADV_COLD:
        case MADV_PAGEOUT:
        case MADV_WILLNEED:
        case MADV_COLLAPSE:
                return true;
        default:
                return false;
        }
}

/* Does this operation invoke anon_vma_prepare()? */
static bool prepares_anon_vma(int behavior)
{
        switch (behavior) {
        case MADV_GUARD_INSTALL:
                return true;
        default:
                return false;
        }
}

/*
 * We have acquired a VMA read lock; now that we have a VMA to examine, is it
 * valid to madvise it under the VMA read lock alone?
 */
static bool is_vma_lock_sufficient(struct vm_area_struct *vma,
                                   struct madvise_behavior *madv_behavior)
{
        /* Must span only a single VMA. */
        if (madv_behavior->range.end > vma->vm_end)
                return false;
        /* Remote processes unsupported. */
        if (current->mm != vma->vm_mm)
                return false;
        /* Userfaultfd unsupported. */
        if (userfaultfd_armed(vma))
                return false;
        /*
         * anon_vma_prepare() explicitly requires an mmap lock for
         * serialisation, so we cannot use a VMA lock in this case.
         *
         * Note we might race with anon_vma being set; however, that only
         * makes this check overly paranoid, which is safe.
         */
        if (vma_is_anonymous(vma) &&
            prepares_anon_vma(madv_behavior->behavior) && !vma->anon_vma)
                return false;

        return true;
}

/*
 * Try to acquire a VMA read lock if possible.
 *
 * We only support this lock over a single VMA, which the input range must
 * span either partially or fully.
 *
 * This function always returns with an appropriate lock held. If a VMA read
 * lock could be acquired, we return true and set madv_behavior state
 * accordingly.
 *
 * If a VMA read lock could not be acquired, we return false and expect caller
 * to fallback to mmap lock behaviour.
 */
static bool try_vma_read_lock(struct madvise_behavior *madv_behavior)
{
        struct mm_struct *mm = madv_behavior->mm;
        struct vm_area_struct *vma;

        vma = lock_vma_under_rcu(mm, madv_behavior->range.start);
        if (!vma)
                goto take_mmap_read_lock;

        if (!is_vma_lock_sufficient(vma, madv_behavior)) {
                vma_end_read(vma);
                goto take_mmap_read_lock;
        }

        madv_behavior->vma = vma;
        return true;

take_mmap_read_lock:
        mmap_read_lock(mm);
        madv_behavior->lock_mode = MADVISE_MMAP_READ_LOCK;
        return false;
}

/*
 * Walk the vmas in range [start,end), and call the madvise_vma_behavior
 * function on each one. The function will get start and end parameters that
 * cover the overlap between the current vma and the original range. Any
 * unmapped regions in the original range will result in this function
 * returning -ENOMEM while still calling the madvise_vma_behavior function on
 * all of the existing vmas in the range. Must be called with the mmap_lock
 * held for reading or writing.
 */
1662 static
madvise_walk_vmas(struct madvise_behavior * madv_behavior)1663 int madvise_walk_vmas(struct madvise_behavior *madv_behavior)
1664 {
1665 struct mm_struct *mm = madv_behavior->mm;
1666 struct madvise_behavior_range *range = &madv_behavior->range;
1667 /* range is updated to span each VMA, so store end of entire range. */
1668 unsigned long last_end = range->end;
1669 int unmapped_error = 0;
1670 int error;
1671 struct vm_area_struct *prev, *vma;
1672
1673 /*
1674 * If VMA read lock is supported, apply madvise to a single VMA
1675 * tentatively, avoiding walking VMAs.
1676 */
1677 if (madv_behavior->lock_mode == MADVISE_VMA_READ_LOCK &&
1678 try_vma_read_lock(madv_behavior)) {
1679 error = madvise_vma_behavior(madv_behavior);
1680 vma_end_read(madv_behavior->vma);
1681 return error;
1682 }
1683
1684 vma = find_vma_prev(mm, range->start, &prev);
1685 if (vma && range->start > vma->vm_start)
1686 prev = vma;
1687
1688 for (;;) {
1689 /* Still start < end. */
1690 if (!vma)
1691 return -ENOMEM;
1692
1693 /* Here start < (last_end|vma->vm_end). */
1694 if (range->start < vma->vm_start) {
1695 /*
1696 * This indicates a gap between VMAs in the input
1697 * range. This does not cause the operation to abort,
1698 * rather we simply return -ENOMEM to indicate that this
1699 * has happened, but carry on.
1700 */
1701 unmapped_error = -ENOMEM;
1702 range->start = vma->vm_start;
1703 if (range->start >= last_end)
1704 break;
1705 }
1706
1707 /* Here vma->vm_start <= range->start < (last_end|vma->vm_end) */
1708 range->end = min(vma->vm_end, last_end);
1709
1710 /* Here vma->vm_start <= range->start < range->end <= (last_end|vma->vm_end). */
1711 madv_behavior->prev = prev;
1712 madv_behavior->vma = vma;
1713 error = madvise_vma_behavior(madv_behavior);
1714 if (error)
1715 return error;
1716 if (madv_behavior->lock_dropped) {
1717 /* We dropped the mmap lock, we can't ref the VMA. */
1718 prev = NULL;
1719 vma = NULL;
1720 madv_behavior->lock_dropped = false;
1721 } else {
1722 vma = madv_behavior->vma;
1723 prev = vma;
1724 }
1725
1726 if (vma && range->end < vma->vm_end)
1727 range->end = vma->vm_end;
1728 if (range->end >= last_end)
1729 break;
1730
1731 vma = find_vma(mm, vma ? vma->vm_end : range->end);
1732 range->start = range->end;
1733 }
1734
1735 return unmapped_error;
1736 }
1737
1738 /*
1739 * Any behaviour which results in changes to the vma->vm_flags needs to
1740 * take mmap_lock for writing. Others, which simply traverse vmas, need
1741 * to only take it for reading.
1742 */
get_lock_mode(struct madvise_behavior * madv_behavior)1743 static enum madvise_lock_mode get_lock_mode(struct madvise_behavior *madv_behavior)
1744 {
1745 if (is_memory_failure(madv_behavior))
1746 return MADVISE_NO_LOCK;
1747
1748 switch (madv_behavior->behavior) {
1749 case MADV_REMOVE:
1750 case MADV_WILLNEED:
1751 case MADV_COLD:
1752 case MADV_PAGEOUT:
1753 case MADV_POPULATE_READ:
1754 case MADV_POPULATE_WRITE:
1755 case MADV_COLLAPSE:
1756 return MADVISE_MMAP_READ_LOCK;
1757 case MADV_GUARD_INSTALL:
1758 case MADV_GUARD_REMOVE:
1759 case MADV_DONTNEED:
1760 case MADV_DONTNEED_LOCKED:
1761 case MADV_FREE:
1762 return MADVISE_VMA_READ_LOCK;
1763 default:
1764 return MADVISE_MMAP_WRITE_LOCK;
1765 }
1766 }
1767
madvise_lock(struct madvise_behavior * madv_behavior)1768 static int madvise_lock(struct madvise_behavior *madv_behavior)
1769 {
1770 struct mm_struct *mm = madv_behavior->mm;
1771 enum madvise_lock_mode lock_mode = get_lock_mode(madv_behavior);
1772
1773 switch (lock_mode) {
1774 case MADVISE_NO_LOCK:
1775 break;
1776 case MADVISE_MMAP_WRITE_LOCK:
1777 if (mmap_write_lock_killable(mm))
1778 return -EINTR;
1779 break;
1780 case MADVISE_MMAP_READ_LOCK:
1781 mmap_read_lock(mm);
1782 break;
1783 case MADVISE_VMA_READ_LOCK:
1784 /* We will acquire the lock per-VMA in madvise_walk_vmas(). */
1785 break;
1786 }
1787
1788 madv_behavior->lock_mode = lock_mode;
1789 return 0;
1790 }
1791
madvise_unlock(struct madvise_behavior * madv_behavior)1792 static void madvise_unlock(struct madvise_behavior *madv_behavior)
1793 {
1794 struct mm_struct *mm = madv_behavior->mm;
1795
1796 switch (madv_behavior->lock_mode) {
1797 case MADVISE_NO_LOCK:
1798 return;
1799 case MADVISE_MMAP_WRITE_LOCK:
1800 mmap_write_unlock(mm);
1801 break;
1802 case MADVISE_MMAP_READ_LOCK:
1803 mmap_read_unlock(mm);
1804 break;
1805 case MADVISE_VMA_READ_LOCK:
1806 /* We will drop the lock per-VMA in madvise_walk_vmas(). */
1807 break;
1808 }
1809
1810 madv_behavior->lock_mode = MADVISE_NO_LOCK;
1811 }
1812
static bool madvise_batch_tlb_flush(int behavior)
{
	switch (behavior) {
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_FREE:
		return true;
	default:
		return false;
	}
}

static void madvise_init_tlb(struct madvise_behavior *madv_behavior)
{
	if (madvise_batch_tlb_flush(madv_behavior->behavior))
		tlb_gather_mmu(madv_behavior->tlb, madv_behavior->mm);
}

static void madvise_finish_tlb(struct madvise_behavior *madv_behavior)
{
	if (madvise_batch_tlb_flush(madv_behavior->behavior))
		tlb_finish_mmu(madv_behavior->tlb);
}

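/*
 * Sanity-check an madvise request: the behaviour must be recognised, the
 * start address page-aligned, and the (page-rounded) range must not wrap.
 */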
static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior)
{
	size_t len;

	if (!madvise_behavior_valid(behavior))
		return false;

	if (!PAGE_ALIGNED(start))
		return false;
	len = PAGE_ALIGN(len_in);

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return false;

	if (start + len < start)
		return false;

	return true;
}

/*
 * madvise_should_skip() - Return whether the request is invalid or a no-op.
 * @start:	Start address of madvise-requested address range.
 * @len_in:	Length of madvise-requested address range.
 * @behavior:	Requested madvise behavior.
 * @err:	Pointer to store an error code from the check.
 *
 * If the specified behaviour is invalid or nothing would occur, we skip the
 * operation. This function returns true in those cases, otherwise false. In
 * the former case we store an error in @err.
 */
static bool madvise_should_skip(unsigned long start, size_t len_in,
		int behavior, int *err)
{
	if (!is_valid_madvise(start, len_in, behavior)) {
		*err = -EINVAL;
		return true;
	}
	if (start + PAGE_ALIGN(len_in) == start) {
		*err = 0;
		return true;
	}
	return false;
}

static bool is_madvise_populate(struct madvise_behavior *madv_behavior)
{
	switch (madv_behavior->behavior) {
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
		return true;
	default:
		return false;
	}
}

/*
 * untagged_addr_remote() assumes mmap_lock is already held. On
 * architectures like x86 and RISC-V, tagging is tricky because each
 * mm may have a different tagging mask. However, we might only hold
 * the per-VMA lock (currently only local processes are supported),
 * so untagged_addr is used to avoid the mmap_lock assertion for
 * local processes.
 */
static inline unsigned long get_untagged_addr(struct mm_struct *mm,
		unsigned long start)
{
	return current->mm == mm ? untagged_addr(start) :
				   untagged_addr_remote(mm, start);
}

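/*
 * Resolve the requested range and dispatch: memory failure injection is
 * handed the raw (still tagged) address, populate behaviours fault pages in,
 * and everything else is applied VMA-by-VMA. The block plug batches any I/O
 * (e.g. MADV_WILLNEED readahead) issued while the advice is applied.
 */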
static int madvise_do_behavior(unsigned long start, size_t len_in,
		struct madvise_behavior *madv_behavior)
{
	struct blk_plug plug;
	int error;
	struct madvise_behavior_range *range = &madv_behavior->range;

	if (is_memory_failure(madv_behavior)) {
		range->start = start;
		range->end = start + len_in;
		return madvise_inject_error(madv_behavior);
	}

	range->start = get_untagged_addr(madv_behavior->mm, start);
	range->end = range->start + PAGE_ALIGN(len_in);

	blk_start_plug(&plug);
	if (is_madvise_populate(madv_behavior))
		error = madvise_populate(madv_behavior);
	else
		error = madvise_walk_vmas(madv_behavior);
	blk_finish_plug(&plug);
	return error;
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future.  Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_COLLAPSE - synchronously coalesce pages into new THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *  MADV_POPULATE_READ - populate (prefault) page tables readable by
 *		triggering read faults if required.
 *  MADV_POPULATE_WRITE - populate (prefault) page tables writable by
 *		triggering write faults if required.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 *  -EPERM  - memory is sealed.
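 *
 * As an illustrative example of the interface (userspace usage, not part
 * of this file), an application that has finished with a page-aligned
 * anonymous scratch buffer might discard its contents so the kernel can
 * reclaim the backing pages:
 *
 *	madvise(buf, buf_len, MADV_DONTNEED);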
 */
int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
{
	int error;
	struct mmu_gather tlb;
	struct madvise_behavior madv_behavior = {
		.mm = mm,
		.behavior = behavior,
		.tlb = &tlb,
	};

	if (madvise_should_skip(start, len_in, behavior, &error))
		return error;
	error = madvise_lock(&madv_behavior);
	if (error)
		return error;
	madvise_init_tlb(&madv_behavior);
	error = madvise_do_behavior(start, len_in, &madv_behavior);
	madvise_finish_tlb(&madv_behavior);
	madvise_unlock(&madv_behavior);

	return error;
}

SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	return do_madvise(current->mm, start, len_in, behavior);
}

/* Perform an madvise operation over a vector of addresses and lengths. */
static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter,
		int behavior)
{
	ssize_t ret = 0;
	size_t total_len;
	struct mmu_gather tlb;
	struct madvise_behavior madv_behavior = {
		.mm = mm,
		.behavior = behavior,
		.tlb = &tlb,
	};

	total_len = iov_iter_count(iter);

	ret = madvise_lock(&madv_behavior);
	if (ret)
		return ret;
	madvise_init_tlb(&madv_behavior);

	while (iov_iter_count(iter)) {
		unsigned long start = (unsigned long)iter_iov_addr(iter);
		size_t len_in = iter_iov_len(iter);
		int error;

		if (madvise_should_skip(start, len_in, behavior, &error))
			ret = error;
		else
			ret = madvise_do_behavior(start, len_in, &madv_behavior);
		/*
		 * An madvise operation is attempting to restart the syscall,
		 * but we cannot proceed as it would not be correct to repeat
		 * the operation in aggregate, and would be surprising to the
		 * user.
		 *
		 * We drop and reacquire locks so it is safe to just loop and
		 * try again. We check for fatal signals in case we need to
		 * exit early anyway.
		 */
		if (ret == -ERESTARTNOINTR) {
			if (fatal_signal_pending(current)) {
				ret = -EINTR;
				break;
			}

			/* Drop and reacquire lock to unwind race. */
			madvise_finish_tlb(&madv_behavior);
			madvise_unlock(&madv_behavior);
			ret = madvise_lock(&madv_behavior);
			if (ret)
				goto out;
			madvise_init_tlb(&madv_behavior);
			continue;
		}
		if (ret < 0)
			break;
		iov_iter_advance(iter, iter_iov_len(iter));
	}
	madvise_finish_tlb(&madv_behavior);
	madvise_unlock(&madv_behavior);

out:
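	/*
	 * If any bytes were successfully advised, report how many; otherwise
	 * report the first error (or zero).
	 */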
	ret = (total_len - iov_iter_count(iter)) ? : ret;

	return ret;
}

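/*
 * The process_madvise(2) system call, which applies an madvise behaviour
 * to the address ranges described by @vec in the process referred to by
 * @pidfd. @flags must currently be zero. Advising a remote process
 * requires PTRACE_MODE_READ access and CAP_SYS_NICE, and only the
 * behaviours accepted by process_madvise_remote_valid() may be applied
 * remotely.
 *
 * A sketch of userspace usage (illustrative only; pid, addr and len are
 * assumed to be supplied by the caller):
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	struct iovec iov = { .iov_base = addr, .iov_len = len };
 *
 *	syscall(SYS_process_madvise, pidfd, &iov, 1, MADV_COLD, 0);
 */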
SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
		size_t, vlen, int, behavior, unsigned int, flags)
{
	ssize_t ret;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned int f_flags;

	if (flags != 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		goto out;

	task = pidfd_get_task(pidfd, &f_flags);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto free_iov;
	}

	/* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR(mm)) {
		ret = PTR_ERR(mm);
		goto release_task;
	}

	/*
	 * We need only perform this check if we are attempting to manipulate a
	 * remote process's address space.
	 */
	if (mm != current->mm && !process_madvise_remote_valid(behavior)) {
		ret = -EINVAL;
		goto release_mm;
	}

	/*
	 * Require CAP_SYS_NICE for influencing process performance. Note that
	 * only non-destructive hints are currently supported for remote
	 * processes.
	 */
	if (mm != current->mm && !capable(CAP_SYS_NICE)) {
		ret = -EPERM;
		goto release_mm;
	}

	ret = vector_madvise(mm, &iter, behavior);

release_mm:
	mmput(mm);
release_task:
	put_task_struct(task);
free_iov:
	kfree(iov);
out:
	return ret;
}

#ifdef CONFIG_ANON_VMA_NAME

#define ANON_VMA_NAME_MAX_LEN		80
#define ANON_VMA_NAME_INVALID_CHARS	"\\`$[]"

static inline bool is_valid_name_char(char ch)
{
	/* printable ascii characters, excluding ANON_VMA_NAME_INVALID_CHARS */
	return ch > 0x1f && ch < 0x7f &&
		!strchr(ANON_VMA_NAME_INVALID_CHARS, ch);
}

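/*
 * Attach @anon_name to every anonymous VMA in [start, start + len_in),
 * reusing the madvise VMA walker via the internal __MADV_SET_ANON_VMA_NAME
 * behaviour.
 */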
static int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
		unsigned long len_in, struct anon_vma_name *anon_name)
{
	unsigned long end;
	unsigned long len;
	int error;
	struct madvise_behavior madv_behavior = {
		.mm = mm,
		.behavior = __MADV_SET_ANON_VMA_NAME,
		.anon_name = anon_name,
	};

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	madv_behavior.range.start = start;
	madv_behavior.range.end = end;

	error = madvise_lock(&madv_behavior);
	if (error)
		return error;
	error = madvise_walk_vmas(&madv_behavior);
	madvise_unlock(&madv_behavior);

	return error;
}

int set_anon_vma_name(unsigned long addr, unsigned long size,
		const char __user *uname)
{
	struct anon_vma_name *anon_name = NULL;
	struct mm_struct *mm = current->mm;
	int error;

	if (uname) {
		char *name, *pch;

		name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
		if (IS_ERR(name))
			return PTR_ERR(name);

		for (pch = name; *pch != '\0'; pch++) {
			if (!is_valid_name_char(*pch)) {
				kfree(name);
				return -EINVAL;
			}
		}
		/* anon_vma has its own copy */
		anon_name = anon_vma_name_alloc(name);
		kfree(name);
		if (!anon_name)
			return -ENOMEM;
	}

	error = madvise_set_anon_name(mm, addr, size, anon_name);
	anon_vma_name_put(anon_name);

	return error;
}
#endif