vma.h (fc21959f74bc1138b28e90a02ec224ab8626111e) vma.h (cacded5e42b9609b07b22d80c10f0076d439f7d1)
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * vma.h
4 *
5 * Core VMA manipulation API implemented in vma.c.
6 */
7#ifndef __MM_VMA_H
8#define __MM_VMA_H

--- 38 unchanged lines hidden (view full) ---

47 unsigned long nr_pages; /* Number of pages being removed */
48 unsigned long locked_vm; /* Number of locked pages */
49 unsigned long nr_accounted; /* Number of VM_ACCOUNT pages */
50 unsigned long exec_vm;
51 unsigned long stack_vm;
52 unsigned long data_vm;
53};
54
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * vma.h
4 *
5 * Core VMA manipulation API implemented in vma.c.
6 */
7#ifndef __MM_VMA_H
8#define __MM_VMA_H

--- 38 unchanged lines hidden (view full) ---

47 unsigned long nr_pages; /* Number of pages being removed */
48 unsigned long locked_vm; /* Number of locked pages */
49 unsigned long nr_accounted; /* Number of VM_ACCOUNT pages */
50 unsigned long exec_vm;
51 unsigned long stack_vm;
52 unsigned long data_vm;
53};
54
/*
 * State of a VMA merge operation, carried in struct vma_merge_struct.
 * Initialised to VMA_MERGE_START by the VMG_STATE()/VMG_VMA_STATE() macros.
 */
enum vma_merge_state {
	VMA_MERGE_START,	/* No merge attempted yet (initial state). */
	VMA_MERGE_ERROR_NOMEM,	/* Merge failed: memory allocation error (see vmg_nomem()). */
	VMA_MERGE_NOMERGE,	/* Merge attempted but no merge was possible. */
	VMA_MERGE_SUCCESS,	/* Merge completed successfully. */
};
61
/* Represents a VMA merge operation. */
struct vma_merge_struct {
	struct mm_struct *mm;		/* mm the merge operates on. */
	struct vma_iterator *vmi;	/* Iterator positioned for the operation. */
	pgoff_t pgoff;			/* Page offset of @start (cf. vma_pgoff_offset()). */
	struct vm_area_struct *prev;	/* VMA preceding the range, if any. */
	struct vm_area_struct *next; /* Modified by vma_merge(). */
	struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */
	unsigned long start;		/* Start of the range being merged. */
	unsigned long end;		/* End of the range being merged. */
	unsigned long flags;		/* vm_flags for the range. */
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;	/* Set to VMA_MERGE_START by the init macros. */
};
80
/* Did the merge described by @vmg fail due to an out-of-memory condition? */
static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}
85
/*
 * Compute the page offset within the VMA's backing object that corresponds
 * to address @addr, i.e. vm_pgoff plus the page distance from vm_start.
 *
 * Assumes addr >= vma->vm_start.
 */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
79
/*
 * Declare and initialise a struct vma_merge_struct named @name describing a
 * prospective merge over [start_, end_) with the given flags and page offset.
 * The merge state starts at VMA_MERGE_START; all other fields are zeroed.
 */
#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
	}
89
/*
 * Declare and initialise a struct vma_merge_struct named @name describing a
 * merge over [start_, end_) of the existing VMA @vma_, copying the VMA's
 * attributes (flags, file, anon_vma, policy, uffd context, anon name) and
 * deriving pgoff from @start_ via vma_pgoff_offset(). State starts at
 * VMA_MERGE_START; .next is left NULL to be determined by the merge itself.
 */
#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.next = NULL,					\
		.vma = vma_,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
	}
107
108#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
109void validate_mm(struct mm_struct *mm);
110#else
111#define validate_mm(mm) do { } while (0)
112#endif
113

--- 190 unchanged lines hidden (view full) ---

304struct vm_area_struct
305*vma_modify_flags_uffd(struct vma_iterator *vmi,
306 struct vm_area_struct *prev,
307 struct vm_area_struct *vma,
308 unsigned long start, unsigned long end,
309 unsigned long new_flags,
310 struct vm_userfaultfd_ctx new_ctx);
311
121 }
122
123#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
124void validate_mm(struct mm_struct *mm);
125#else
126#define validate_mm(mm) do { } while (0)
127#endif
128

--- 190 unchanged lines hidden (view full) ---

319struct vm_area_struct
320*vma_modify_flags_uffd(struct vma_iterator *vmi,
321 struct vm_area_struct *prev,
322 struct vm_area_struct *vma,
323 unsigned long start, unsigned long end,
324 unsigned long new_flags,
325 struct vm_userfaultfd_ctx new_ctx);
326
312struct vm_area_struct
313*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
314 struct vm_area_struct *vma, unsigned long start,
315 unsigned long end, pgoff_t pgoff);
327struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);
316
317struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
318 struct vm_area_struct *vma,
319 unsigned long delta);
320
321void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);
322
323void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

--- 176 unchanged lines hidden (view full) ---

500}
501
/* Move the iterator to the range preceding the current one and return its VMA. */
static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}
507
328
329struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
330 struct vm_area_struct *vma,
331 unsigned long delta);
332
333void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);
334
335void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

--- 176 unchanged lines hidden (view full) ---

512}
513
/* Move the iterator to the range preceding the current one and return its VMA. */
static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}
519
/*
 * Retrieve the next VMA and rewind the iterator to end of the previous VMA, or
 * if no previous VMA, to index 0.
 *
 * Returns the next VMA (NULL if there is none). If @pprev is non-NULL, *pprev
 * is set to the previous VMA, or NULL if there is none.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
					    struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}
547
508#ifdef CONFIG_64BIT
509
/* Is this VMA sealed, i.e. does it have VM_SEALED set? (64-bit only.) */
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}
514
515/*

--- 28 unchanged lines hidden ---
548#ifdef CONFIG_64BIT
549
/* Is this VMA sealed, i.e. does it have VM_SEALED set? (64-bit only.) */
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}
554
555/*

--- 28 unchanged lines hidden ---