xref: /linux/mm/msync.c (revision d8327c784b51b57dac2c26cfad87dce0d68dfd98)
/*
 *	linux/mm/msync.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
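/*
 * Userspace-visible prototype, for reference (see msync(2)):
 *
 *	int msync(void *addr, size_t length, int flags);
 */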
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

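/*
 * Walk the ptes in [addr, end) under the page table lock, transferring
 * hardware dirty bits into the struct page (via set_page_dirty()) so
 * that writeback can find the pages.  The walk periodically drops the
 * lock and reschedules to keep latency down on large ranges.
 */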
static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;
	int progress = 0;

again:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;

		if (progress >= 64) {
			progress = 0;
			if (need_resched() || need_lockbreak(ptl))
				break;
		}
		progress++;
		if (!pte_present(*pte))
			continue;
		if (!pte_maybe_dirty(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		if (ptep_clear_flush_dirty(vma, addr, pte) ||
		    page_test_and_clear_dirty(page))
			set_page_dirty(page);
		progress += 3;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	if (addr != end)
		goto again;
}

static inline void msync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		msync_pte_range(vma, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void msync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		msync_pmd_range(vma, pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

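/*
 * Top of the four-level walk: split [addr, end) along pgd/pud/pmd
 * boundaries and hand each populated leaf range to msync_pte_range().
 * Hugetlb vmas are skipped entirely (see the comment below).
 */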
static void msync_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	/* For hugepages we can't go walking the page table normally,
	 * but that's ok, hugetlbfs is memory based, so we don't need
	 * to do anything more on an msync().
	 */
	if (vma->vm_flags & VM_HUGETLB)
		return;

	BUG_ON(addr >= end);
	pgd = pgd_offset(vma->vm_mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		msync_pud_range(vma, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
 * marks the relevant pages dirty.  The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
static int msync_interval(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end, int flags)
{
	int ret = 0;
	struct file *file = vma->vm_file;

	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED)) {
		msync_page_range(vma, addr, end);

		if (flags & MS_SYNC) {
			struct address_space *mapping = file->f_mapping;
			int err;

			ret = filemap_fdatawrite(mapping);
			if (file->f_op && file->f_op->fsync) {
				/*
				 * We don't take i_mutex here because mmap_sem
				 * is already held.
				 */
				err = file->f_op->fsync(file, file->f_dentry, 1);
				if (err && !ret)
					ret = err;
			}
			err = filemap_fdatawait(mapping);
			if (!ret)
				ret = err;
		}
	}
	return ret;
}
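
/*
 * Illustrative userspace sketch (not part of this file): the MS_ASYNC
 * pattern the comment above describes.  MS_ASYNC only marks the pages
 * dirty; the application decides when to write and wait, e.g. with
 * fsync(), or starts async writeout with posix_fadvise(POSIX_FADV_DONTNEED).
 * "map", "len" and "fd" are hypothetical names.
 *
 *	msync(map, len, MS_ASYNC);
 *	fsync(fd);
 */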
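
/*
 * The system call proper: validate the flags and the page-aligned range,
 * then walk the vma list, syncing each mapped piece via msync_interval().
 * Holes in the range are skipped but remembered, so the call still
 * returns -ENOMEM after syncing whatever was mapped.
 */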
asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct *vma;
	int unmapped_error, error = -EINVAL;

	if (flags & MS_SYNC)
		current->flags |= PF_SYNCWRITE;

	down_read(&current->mm->mmap_sem);
	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (start & ~PAGE_MASK)
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
	vma = find_vma(current->mm, start);
	unmapped_error = 0;
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags);
				if (error)
					goto out;
			}
			error = unmapped_error;
			goto out;
		}
		/* Here vma->vm_start <= start < vma->vm_end < end. */
		error = msync_interval(vma, start, vma->vm_end, flags);
		if (error)
			goto out;
		start = vma->vm_end;
		vma = vma->vm_next;
	}
out:
	up_read(&current->mm->mmap_sem);
	current->flags &= ~PF_SYNCWRITE;
	return error;
}
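
/*
 * Illustrative sketch (not part of this file): a hole in the range is
 * tolerated but reported.  Assuming "base" maps two pages with an
 * unmapped page between them, the call
 *
 *	msync(base, 3 * page_size, MS_SYNC);
 *
 * syncs both mapped pages yet still fails with errno == ENOMEM.
 */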