xref: /linux/mm/process_vm_access.c (revision 2ba9268dd603d23e17643437b2246acb6844953b)
/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write) {
			copied = copy_page_from_iter(page, offset, copy, iter);
			set_page_dirty_lock(page);
		} else {
			copied = copy_page_to_iter(page, offset, copy, iter);
		}
		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}

/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
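
/*
 * For illustration, assuming a typical 64-bit configuration (4 KiB
 * pages, 8-byte pointers): PAGE_SIZE * 2 = 8192 bytes holds
 * 8192 / 8 = 1024 struct page pointers, so each pinning batch in
 * process_vm_rw_single_vec() covers at most 4 MiB of target memory.
 */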

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success or an error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
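	/*
	 * For example (illustrative numbers, 4 KiB pages): addr = 0x1ffc
	 * and len = 8 end at byte 0x2003, spanning two pages:
	 * 0x2003/0x1000 - 0x1ffc/0x1000 + 1 = 2 - 1 + 1 = 2.
	 */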

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pages = min(nr_pages, max_pages_per_loop);
		size_t bytes;

		/* Get the pages we're interested in */
		pages = get_user_pages_unlocked(task, mm, pa, pages,
						vm_write, 0, process_pages);
		if (pages <= 0)
			return -EFAULT;

		bytes = pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pages;
		pa += pages * PAGE_SIZE;
		while (pages)
			put_page(process_pages[--pages]);
	}

	return rc;
}

/*
 * Maximum number of entries for process pages array
 * which lives on stack.
 */
#define PVM_MAX_PP_ARRAY_COUNT 16

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/*
		 * For reliability don't try to kmalloc more than
		 * 2 pages worth
		 */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *)*nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/*
	 * If we have managed to copy any data at all then
	 * we return the number of bytes copied. Otherwise
	 * we return the error code.
	 */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}
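
/*
 * Example of the partial-copy semantics above: if an rvec entry asks
 * for 4096 bytes but the target page is unmapped,
 * process_vm_rw_single_vec() fails with -EFAULT; bytes already copied
 * for earlier entries are still accounted in total_len, so the caller
 * sees a short byte count rather than the error.
 */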

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	if (vm_write)
		rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	else
		rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	if (rc <= 0)
		goto free_iovecs;

	iov_iter_init(&iter, vm_write ? WRITE : READ, iov_l, liovcnt, rc);

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);

	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
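
/*
 * Illustrative userspace sketch (not part of this file): reading 128
 * bytes from another process through the glibc (>= 2.15, _GNU_SOURCE)
 * wrapper for the syscall above. "pid" and "remote_addr" are
 * placeholders the caller must supply; the caller needs
 * PTRACE_MODE_ATTACH permission for the target, and a short read is
 * possible if part of the remote range is unmapped.
 *
 *	#include <sys/uio.h>
 *
 *	char buf[128];
 *	struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct iovec remote = { .iov_base = (void *)remote_addr,
 *				.iov_len = sizeof(buf) };
 *	ssize_t nread = process_vm_readv(pid, &local, 1, &remote, 1, 0);
 *	if (nread < 0)
 *		perror("process_vm_readv");
 */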

#ifdef CONFIG_COMPAT

static ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc = -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (vm_write)
		rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	else
		rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	if (rc <= 0)
		goto free_iovecs;
	iov_iter_init(&iter, vm_write ? WRITE : READ, iov_l, liovcnt, rc);
	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
					  UIO_FASTIOV, iovstack_r,
					  &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);
	return rc;
}

COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif