/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct * vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct * mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	int new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		new_flags &= ~VM_DONTCOPY;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct * vma,
			     struct vm_area_struct ** prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_page) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping,
			file, start, max_sane_readahead(end - start));
	return 0;
}
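
/*
 * Worked example for the offset arithmetic in madvise_willneed() above
 * (illustrative values, assuming 4KiB pages, i.e. PAGE_SHIFT == 12):
 *
 *	vma->vm_start = 0x10000000, vma->vm_pgoff = 16
 *	madvise((void *)0x10002000, 0x4000, MADV_WILLNEED)
 *
 *	start = ((0x10002000 - 0x10000000) >> 12) + 16 = 18
 *	end   = ((0x10006000 - 0x10000000) >> 12) + 16 = 22
 *
 * so readahead is requested for file pages [18, 22), i.e. 4 pages
 * starting at page 18 of the backing file.
 */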

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for refill_inactive to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * refill_inactive to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct * vma,
			     struct vm_area_struct ** prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct address_space *mapping;
	loff_t offset, endoff;

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	if (!vma->vm_file || !vma->vm_file->f_mapping
		|| !vma->vm_file->f_mapping->host) {
			return -EINVAL;
	}

	mapping = vma->vm_file->f_mapping;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	endoff = (loff_t)(end - vma->vm_start - 1)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	return vmtruncate_range(mapping->host, offset, endoff);
}
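
/*
 * Worked example for the offset arithmetic in madvise_remove() above
 * (illustrative values, assuming 4KiB pages):
 *
 *	vma->vm_start = 0x20000000, vma->vm_pgoff = 0
 *	madvise((void *)0x20001000, 0x2000, MADV_REMOVE)
 *
 *	offset = (0x20001000 - 0x20000000) + 0 = 0x1000
 *	endoff = (0x20003000 - 0x20000000 - 1) + 0 = 0x2fff
 *
 * so vmtruncate_range() is asked to drop file bytes 0x1000..0x2fff
 * inclusive - exactly the two pages covered by the advice.
 */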

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	long error;

	switch (behavior) {
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			break;
		}
		/* fall through */
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
		error = madvise_behavior(vma, prev, start, end, behavior);
		break;
	case MADV_REMOVE:
		error = madvise_remove(vma, start, end);
		break;

	case MADV_WILLNEED:
		error = madvise_willneed(vma, prev, start, end);
		break;

	case MADV_DONTNEED:
		error = madvise_dontneed(vma, prev, start, end);
		break;

	default:
		error = -EINVAL;
		break;
	}
	return error;
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct * vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	size_t len;

	down_write(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		vma = prev->vm_next;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
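
/*
 * Userspace usage sketch for madvise(2) (illustrative only - not part
 * of this file's build).  An application maps some scratch memory,
 * hints at the access pattern, and later tells the kernel it is
 * finished with part of it:
 *
 *	#include <sys/mman.h>
 *
 *	size_t sz = 16 * 4096;
 *	char *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (p == MAP_FAILED)
 *		return -1;
 *
 *	madvise(p, sz, MADV_RANDOM);	// hint only; safe to ignore errors
 *
 *	// ... use the memory ...
 *
 *	// done with the second half; let the kernel reclaim it
 *	madvise(p + sz / 2, sz / 2, MADV_DONTNEED);
 */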