xref: /linux/mm/madvise.c (revision 13abf8130139c2ccd4962a7e5a8902be5e6cb5a7)
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags & ~VM_READHINTMASK;

	switch (behavior) {
	case MADV_SEQUENTIAL:
		new_flags |= VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags |= VM_RAND_READ;
		break;
	default:
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

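/*
 * Example (userspace, illustrative sketch): advising only part of a
 * mapping exercises the split_vma() path above; afterwards the process
 * has two vmas, and only the second carries VM_SEQ_READ.  The file
 * name is hypothetical, and the mapping must be file-backed because
 * madvise_vma() below rejects vmas without a vm_file.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	long page = sysconf(_SC_PAGESIZE);
 *	int fd = open("data.bin", O_RDONLY);
 *	char *p = mmap(NULL, 8 * page, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *	// hint sequential access for the second half only
 *	madvise(p + 4 * page, 4 * page, MADV_SEQUENTIAL);
 */
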
/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (file->f_mapping->a_ops->get_xip_page) {
		/*
		 * XIP mappings bypass the page cache, so there is no
		 * readahead to schedule; ignore the advice (no error).
		 */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping,
			file, start, max_sane_readahead(end - start));
	return 0;
}

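/*
 * Example (userspace, illustrative sketch): prefetching the first
 * megabyte of a mapped file.  The call only schedules readahead and
 * returns immediately; later faults in the range should be satisfied
 * from the page cache.  The file name and sizes are hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("index.db", O_RDONLY);
 *	char *p = mmap(NULL, 1 << 20, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *	madvise(p, 1 << 20, MADV_WILLNEED);	// schedule the I/O
 *	// ... overlap other work while the pages stream in ...
 */
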
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for refill_inactive to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * refill_inactive to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if ((vma->vm_flags & VM_LOCKED) || is_vm_hugetlb_page(vma))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}

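/*
 * Example (userspace, illustrative sketch): discarding a scratch
 * region once its contents have been committed elsewhere, as in the
 * database scenario described above; dirty pages are dropped rather
 * than written to swap.  "fd" and "len" are hypothetical.  Note that
 * madvise_vma() below returns -EBADF for vmas without a vm_file, so
 * in this revision the call only succeeds on file-backed mappings;
 * later kernels also accept anonymous memory, the common case for
 * MADV_DONTNEED.
 *
 *	#include <sys/mman.h>
 *
 *	char *scratch = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			     MAP_PRIVATE, fd, 0);
 *	// ... fill scratch, commit the results to backing store ...
 *	madvise(scratch, len, MADV_DONTNEED);	// discard, do not swap out
 */
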
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	struct file *filp = vma->vm_file;
	long error = -EBADF;

	if (!filp)
		goto out;

	switch (behavior) {
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
		error = madvise_behavior(vma, prev, start, end, behavior);
		break;

	case MADV_WILLNEED:
		error = madvise_willneed(vma, prev, start, end);
		break;

	case MADV_DONTNEED:
		error = madvise_dontneed(vma, prev, start, end);
		break;

	default:
		error = -EINVAL;
		break;
	}

out:
	return error;
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len overflows, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	size_t len;

	down_write(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

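	/*
	 * Worked example (illustrative, assuming 4KB pages, so
	 * PAGE_MASK == ~0xfff): len_in == 1 rounds up to len == 0x1000,
	 * while len_in == (size_t)-1 wraps around to len == 0, which
	 * the check above catches and rejects with -EINVAL.
	 */
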
	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * This differs from the way mlock etc. handle such ranges.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		vma = prev->vm_next;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
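
/*
 * Example (userspace, illustrative sketch): exercising the error
 * contract documented above.  "addr" and "length" are hypothetical;
 * addr must be page-aligned.  A range that spans an unmapped hole
 * still has the advice applied to its mapped parts, with -ENOMEM
 * reported at the end, matching the loop above.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	if (madvise(addr, length, MADV_RANDOM) == -1) {
 *		// EINVAL: misaligned start or bad behavior value
 *		// ENOMEM: the range touched unmapped addresses
 *		// EBADF:  a vma in the range was not file-backed
 *		fprintf(stderr, "madvise: %s\n", strerror(errno));
 *	}
 */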