/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(task_t *child, task_t *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_list));
	if (child->parent == new_parent)
		return;
	list_add(&child->ptrace_list, &child->parent->ptrace_children);
	REMOVE_LINKS(child);
	child->parent = new_parent;
	SET_LINKS(child);
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
void ptrace_untrace(task_t *child)
{
	spin_lock(&child->sighand->siglock);
	if (child->state == TASK_TRACED) {
		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
			child->state = TASK_STOPPED;
		} else {
			signal_wake_up(child, 1);
		}
	}
	if (child->signal->flags & SIGNAL_GROUP_EXIT) {
		sigaddset(&child->pending.signal, SIGKILL);
		signal_wake_up(child, 1);
	}
	spin_unlock(&child->sighand->siglock);
}
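
/*
 * Locking note: the plain spin_lock() above is sufficient because, per
 * the comment, callers must already have interrupts disabled -- e.g.
 * __ptrace_unlink() below is reached under write_lock_irq(&tasklist_lock)
 * taken in ptrace_detach().
 */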

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(task_t *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	if (!list_empty(&child->ptrace_list)) {
		list_del_init(&child->ptrace_list);
		REMOVE_LINKS(child);
		child->parent = child->real_parent;
		SET_LINKS(child);
	}

	ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that it can only
	 * be changed by us, so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current &&
	    (!(child->ptrace & PT_ATTACHED) || child->real_parent != current)
	    && child->signal != NULL) {
		ret = 0;
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED) {
			child->state = TASK_TRACED;
		} else if (child->state != TASK_TRACED && !kill) {
			ret = -ESRCH;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill) {
		wait_task_inactive(child);
	}

	/* All systems go.. */
	return ret;
}
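
/*
 * Note on the wait_task_inactive() call above: TASK_TRACED only means
 * the child has decided to stop; the tracer must also wait until the
 * child has actually been scheduled off the CPU before it is safe to
 * poke at its registers and memory.
 */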

static int may_attach(struct task_struct *task)
{
	if (!task->mm)
		return -EPERM;
	if (((current->uid != task->euid) ||
	     (current->uid != task->suid) ||
	     (current->uid != task->uid) ||
	     (current->gid != task->egid) ||
	     (current->gid != task->sgid) ||
	     (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
		return -EPERM;
	smp_rmb();
	if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace(current, task);
}

int ptrace_may_attach(struct task_struct *task)
{
	int err;
	task_lock(task);
	err = may_attach(task);
	task_unlock(task);
	return !err;
}
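
/*
 * Note the inverted return conventions: may_attach() returns 0 or a
 * -Exxx error code, while ptrace_may_attach() returns nonzero (true)
 * when attaching is permitted.  A caller would therefore look like
 * this (sketch):
 *
 *	if (!ptrace_may_attach(task))
 *		return -EPERM;
 */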

int ptrace_attach(struct task_struct *task)
{
	int retval;
	task_lock(task);
	retval = -EPERM;
	if (task->pid <= 1)
		goto bad;
	if (task->tgid == current->tgid)
		goto bad;
	/* the same process cannot be attached multiple times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = may_attach(task);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED | ((task->real_parent != current)
				      ? PT_ATTACHED : 0);
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;
	task_unlock(task);

	write_lock_irq(&tasklist_lock);
	__ptrace_link(task, current);
	write_unlock_irq(&tasklist_lock);

	force_sig_specific(SIGSTOP, task);
	return 0;

bad:
	task_unlock(task);
	return retval;
}
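
/*
 * Userspace view of the above (illustrative only, not kernel code):
 * after PTRACE_ATTACH the tracer must wait for the SIGSTOP queued by
 * force_sig_specific() before issuing further requests:
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);	-- tracee is now stopped
 */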

void __ptrace_detach(struct task_struct *child, unsigned int data)
{
	child->exit_code = data;
	/* .. re-parent .. */
	__ptrace_unlink(child);
	/* .. and wake it up. */
	if (child->exit_state != EXIT_ZOMBIE)
		wake_up_process(child);
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	if (child->ptrace)
		__ptrace_detach(child, data);
	write_unlock_irq(&tasklist_lock);

	return 0;
}
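
/*
 * Userspace counterpart (illustrative): detach and let the tracee run,
 * optionally delivering the signal passed in data, which
 * __ptrace_detach() stores in ->exit_code for the resumed child:
 *
 *	ptrace(PTRACE_DETACH, pid, NULL, (void *)SIGCONT);
 */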

/*
 * Access another process' address space.
 * Source/target buffer must be in kernel space.
 * Do not walk the page tables directly; use get_user_pages().
 */

int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;

		ret = get_user_pages(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}
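
/*
 * Worked example of the chunking above (illustrative, assuming
 * PAGE_SIZE == 4096): a read of len == 100 at addr == 0x1ffe spans a
 * page boundary.  The first iteration has offset == 0xffe and copies
 * only 2 bytes; the second starts at addr == 0x2000, offset == 0, and
 * copies the remaining 98.
 */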

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
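
/*
 * ptrace_readdata() bounces the data through a 128-byte on-stack
 * buffer so the tracee's pages are never mapped into the tracer
 * directly.  A hypothetical arch_ptrace() block-read request could
 * forward to it (sketch, request name invented for illustration):
 *
 *	case PTRACE_PEEKDATA_BLOCK:
 *		ret = ptrace_readdata(child, addr, (char __user *)data, 128);
 *		break;
 */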

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}
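
/*
 * Userspace example (illustrative): request syscall-stop marking and
 * fork tracing in one call.  Note that an unrecognized bit in data
 * makes the call fail with -EINVAL, but only after the recognized
 * bits have already been applied above:
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *	       (void *)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK));
 */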

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t __user *data)
{
	siginfo_t lastinfo;
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			lastinfo = *child->last_siginfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	if (!error)
		return copy_siginfo_to_user(data, &lastinfo);
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, siginfo_t __user *data)
{
	siginfo_t newinfo;
	int error = -ESRCH;

	if (copy_from_user(&newinfo, data, sizeof(siginfo_t)))
		return -EFAULT;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = newinfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}
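
/*
 * Usage sketch (illustrative): while the tracee is stopped at signal
 * delivery, ->last_siginfo is valid and a tracer can read and rewrite
 * it through the pair of helpers above:
 *
 *	siginfo_t si;
 *	ptrace(PTRACE_GETSIGINFO, pid, NULL, &si);
 *	... inspect or modify si ...
 *	ptrace(PTRACE_SETSIGINFO, pid, NULL, &si);
 */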

int ptrace_request(struct task_struct *child, long request,
		   long addr, long data)
{
	int ret = -EIO;

	switch (request) {
#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
		break;
	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, (siginfo_t __user *) data);
		break;
	case PTRACE_SETSIGINFO:
		ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
		break;
	default:
		break;
	}

	return ret;
}
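
/*
 * ptrace_request() covers the architecture-independent requests; an
 * arch_ptrace() implementation would typically end its own switch with
 * a fallback along these lines (sketch):
 *
 *	default:
 *		ret = ptrace_request(child, request, addr, data);
 *		break;
 */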

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
	int ret;

	/*
	 * Are we already being traced?
	 */
	if (current->ptrace & PT_PTRACED)
		return -EPERM;
	ret = security_ptrace(current->parent, current);
	if (ret)
		return -EPERM;
	/*
	 * Set the ptrace bit in the process ptrace flags.
	 */
	current->ptrace |= PT_PTRACED;
	return 0;
}
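
/*
 * Typical userspace pairing (illustrative): a debugger forks, the
 * child requests tracing and execs, and the parent picks up the
 * resulting trap with waitpid():
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execl("/bin/true", "true", NULL);
 *	}
 *	waitpid(pid, &status, 0);	-- stops at the post-exec trap
 */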

/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid:       process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It checks
 * permissions and then grabs a task struct for use of the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	/*
	 * Tracing init is not allowed.
	 */
	if (pid == 1)
		return ERR_PTR(-EPERM);

	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}
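
/*
 * Callers own the reference taken above: every successful call must be
 * balanced with put_task_struct(), as sys_ptrace() does below.
 */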

#ifndef __ARCH_SYS_PTRACE
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret < 0)
		goto out_put_task_struct;

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}
#endif /* __ARCH_SYS_PTRACE */