/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write) {
			copied = copy_page_from_iter(page, offset, copy, iter);
			set_page_dirty_lock(page);
		} else {
			copied = copy_page_to_iter(page, offset, copy, iter);
		}
		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}
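/*
 * Worked example for the copy loop above (illustrative only, assuming
 * 4 KiB pages): with offset == 0x800 and len == 0x1000, the first
 * iteration copies PAGE_SIZE - offset == 0x800 bytes from the tail of
 * the first page, resets offset to 0, and the second iteration copies
 * the remaining 0x800 bytes from the head of the next page.
 */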
/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct pages area that can store at least
 *	nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);
	unsigned int flags = 0;

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

	if (vm_write)
		flags |= FOLL_WRITE;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pages = min(nr_pages, max_pages_per_loop);
		int locked = 1;
		size_t bytes;

		/*
		 * Get the pages we're interested in.  We must
		 * access remotely because task/mm might not be
		 * current/current->mm
		 */
		down_read(&mm->mmap_sem);
		pages = get_user_pages_remote(task, mm, pa, pages, flags,
					      process_pages, NULL, &locked);
		if (locked)
			up_read(&mm->mmap_sem);
		if (pages <= 0)
			return -EFAULT;

		bytes = pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pages;
		pa += pages * PAGE_SIZE;
		while (pages)
			put_page(process_pages[--pages]);
	}

	return rc;
}
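/*
 * Worked example for the nr_pages computation in
 * process_vm_rw_single_vec() (illustrative only, assuming 4 KiB pages):
 * addr == 0x1ffc with len == 16 ends at byte 0x200b, so
 * (0x200b / 0x1000) - (0x1ffc / 0x1000) + 1 == 2 - 1 + 1 == 2 pages
 * must be pinned even though len is far smaller than one page.
 */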
/* Maximum number of entries for process pages array
   which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code.  May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *)*nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	task = find_get_task_by_vpid(pid);
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/* If we have managed to copy any data at all then
	   we return the number of bytes copied.  Otherwise
	   we return the error code */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code.  May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc;
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iovecs;

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	kfree(iov_l);

	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
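/*
 * Userspace usage sketch (illustrative only, not part of the kernel code
 * in this file): reading a buffer out of another process with
 * process_vm_readv(2).  `pid` and `remote_addr` are assumed to name a
 * process the caller is allowed to ptrace (PTRACE_MODE_ATTACH_REALCREDS)
 * and a valid address within it.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/uio.h>
 *
 *	char buf[128];
 *	struct iovec local  = { .iov_base = buf,
 *				.iov_len  = sizeof(buf) };
 *	struct iovec remote = { .iov_base = (void *)remote_addr,
 *				.iov_len  = sizeof(buf) };
 *	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
 *
 * n may be less than sizeof(buf) if only part of the remote range is
 * readable, and flags must currently be 0 or the call fails with EINVAL.
 */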
#ifdef CONFIG_COMPAT

static ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc = -EFAULT;
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iovecs;
	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
					  UIO_FASTIOV, iovstack_r,
					  &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	kfree(iov_l);
	return rc;
}

COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif