/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags & ~VM_READHINTMASK;

	switch (behavior) {
	case MADV_SEQUENTIAL:
		new_flags |= VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags |= VM_RAND_READ;
		break;
	default:
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto success;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
success:
	return error;
}
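
/*
 * Illustrative userspace sketch, not part of this file: advising only the
 * middle page of a three-page anonymous mapping exercises both split_vma()
 * calls above, leaving three VMAs of which only the middle one carries
 * VM_RAND_READ.  The helper name and the 4096-byte page size are
 * assumptions made for the example.
 *
 *	#include <sys/mman.h>
 *
 *	void example(void)
 *	{
 *		size_t pg = 4096;
 *		char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p != MAP_FAILED)
 *			madvise(p + pg, pg, MADV_RANDOM);
 *	}
 */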

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	*prev = vma;
	/* Convert byte addresses within the vma into file page offsets. */
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping,
			file, start, max_sane_readahead(end - start));
	return 0;
}
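
/*
 * Illustrative userspace sketch, not part of this file: requesting
 * readahead over a file mapping before scanning it.  The madvise() call
 * queues the I/O and returns without waiting for it; an anonymous mapping
 * would fail here with EBADF.  The helper name is made up, and error
 * handling is omitted for brevity.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	void prefetch(const char *path, size_t len)
 *	{
 *		int fd = open(path, O_RDONLY);
 *		void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *		if (p != MAP_FAILED)
 *			madvise(p, len, MADV_WILLNEED);
 *	}
 */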

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for refill_inactive to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * refill_inactive to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if ((vma->vm_flags & VM_LOCKED) || is_vm_hugetlb_page(vma))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}
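
/*
 * Illustrative userspace sketch, not part of this file: the transactional
 * database pattern described in the comment above.  Once the buffer has
 * been committed to backing store, its anonymous pages are discarded
 * instead of being written to swap.  The helper name is made up, and
 * error handling is omitted for brevity.
 *
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	void commit_and_discard(int log_fd, void *buf, size_t len)
 *	{
 *		write(log_fd, buf, len);
 *		madvise(buf, len, MADV_DONTNEED);
 *	}
 */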

static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
			unsigned long start, unsigned long end, int behavior)
{
	long error = -EBADF;

	switch (behavior) {
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
		error = madvise_behavior(vma, prev, start, end, behavior);
		break;

	case MADV_WILLNEED:
		error = madvise_willneed(vma, prev, start, end);
		break;

	case MADV_DONTNEED:
		error = madvise_dontneed(vma, prev, start, end);
		break;

	default:
		error = -EINVAL;
		break;
	}

	return error;
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the
 *		application will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	size_t len;

	down_write(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	/* Round len up to a page boundary. */
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma && prev)
		vma = prev->vm_next;
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		vma = prev->vm_next;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
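
/*
 * Illustrative userspace sketch, not part of this file: the argument
 * checks above in action.  start must be page-aligned, while len is
 * rounded up to a page boundary.  The helper name and the 4096-byte page
 * size are assumptions made for the example.
 *
 *	#include <sys/mman.h>
 *
 *	void example(char *page_aligned)
 *	{
 *		madvise(page_aligned, 1, MADV_SEQUENTIAL);
 *		madvise(page_aligned + 1, 4096, MADV_NORMAL);
 *	}
 *
 * The first call succeeds with len rounded up to 4096; the second fails
 * with EINVAL because its start address is not page-aligned.
 */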