xref: /freebsd/sys/kern/sys_process.c (revision 09e8dea79366f1e5b3a73e8a271b26e4b6bf2e6a)
/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/user.h>

#include <machine/reg.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do {					\
	int error;							\
									\
	mtx_lock_spin(&sched_lock);					\
	if ((td->td_proc->p_sflag & PS_INMEM) == 0)			\
		error = EIO;						\
	else								\
		error = (action);					\
	mtx_unlock_spin(&sched_lock);					\
	return (error);							\
} while (0)
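
/*
 * For illustration, PROC_ACTION(fill_regs(td, regs)) expands to a block
 * that takes sched_lock, fails with EIO if the target is swapped out
 * (PS_INMEM clear), and otherwise returns the result of the
 * machine-dependent fill_regs() call.
 */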

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	vm_offset_t kva;
	int error, writing;

	GIANT_REQUIRED;

	/*
	 * if the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything.  The page table
	 * usage in that process can be messed up.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT))
		return (EFAULT);
	if (vm->vm_refcnt < 1)
		return (EFAULT);
	++vm->vm_refcnt;
	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
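	/*
	 * VM_PROT_OVERRIDE_WRITE lets the fault and lookup below succeed
	 * even where the mapping is not writable, so a debugger can modify
	 * otherwise read-only pages such as program text (typically by
	 * forcing a private copy-on-write copy), e.g. to plant breakpoints.
	 */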
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;

	kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE);

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
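	/*
	 * Each pass below faults one page in the target's map, looks the
	 * page up and wires it, maps it at kva in the kernel map, moves up
	 * to a page worth of data with uiomove(), and then unwires and
	 * releases the page again.
	 */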
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, and
		 * wired are not used.  One would think the vm code would be
		 * a *bit* nicer...  We use tmap because vm_map_lookup() can
		 * change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);

		if (error) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			break;
		}

		m = vm_page_lookup(object, pindex);

		/* Allow fallback to backing objects if we are reading */
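		/*
		 * The page may not be resident in the top-level object (for
		 * example, an untouched page of a copy-on-write mapping still
		 * lives in the object backing it), so for reads we walk the
		 * backing object chain.  Writes must hit the top-level object
		 * so they remain private to the target process.
		 */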

		while (m == NULL && !writing && object->backing_object) {

			pindex += OFF_TO_IDX(object->backing_object_offset);
			object = object->backing_object;

			m = vm_page_lookup(object, pindex);
		}

		if (m == NULL) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			vm_map_lookup_done(tmap, out_entry);

			break;
		}

		/*
		 * Wire the page into memory
		 */
		vm_page_wire(m);

		/*
		 * We're done with tmap now, but take a reference on the
		 * object first so that we won't lose it.
		 */
		vm_object_reference(object);
		vm_map_lookup_done(tmap, out_entry);

		pmap_qenter(kva, &m, 1);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove((caddr_t)(kva + page_offset), len, uio);

		pmap_qremove(kva, 1);

		/*
		 * release the page and the object
		 */
		vm_page_unwire(m, 1);
		vm_object_deallocate(object);

		object = NULL;

	} while (error == 0 && uio->uio_resid > 0);

	if (object)
		vm_object_deallocate(object);

	kmem_free(kernel_map, kva, PAGE_SIZE);
	vmspace_free(vm);
	return (error);
}

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif
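
/*
 * For reference, a debugger typically drives this interface roughly as
 * follows: ptrace(PT_ATTACH, pid, 0, 0), waitpid() for the resulting
 * SIGSTOP, then a series of PT_GETREGS/PT_READ_D/PT_WRITE_D/PT_STEP/
 * PT_CONTINUE requests, and finally ptrace(PT_DETACH, pid, (caddr_t)1, 0).
 */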

int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	struct iovec iov;
	struct uio uio;
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	struct proc *curp, *p, *pp;
	struct thread *td2;
	int error, write;
	int proctree_locked = 0;

	curp = td->td_proc;

	/*
	 * Do copyin() early, before any locks are taken: copyin() may fault
	 * and sleep, which is not allowed while holding the process or
	 * proctree locks.  Also lock the proctree before locking the
	 * process.
	 */
	switch (uap->req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
#ifdef PT_SETREGS
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		if (error)
			return (error);
		break;
#endif /* PT_SETREGS */
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		if (error)
			return (error);
		break;
#endif /* PT_SETFPREGS */
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		if (error)
			return (error);
		break;
#endif /* PT_SETDBREGS */
	default:
		break;
	}

	write = 0;
	if (uap->req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if ((p = pfind(uap->pid)) == NULL) {
			if (proctree_locked)
				sx_xunlock(&proctree_lock);
			return (ESRCH);
		}
	}
	if (p_cansee(td, p)) {
		error = ESRCH;
		goto fail;
	}

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	/*
	 * Permissions check
	 */
	switch (uap->req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
	case PT_GETREGS:
	case PT_SETREGS:
	case PT_GETFPREGS:
	case PT_SETFPREGS:
	case PT_GETDBREGS:
	case PT_SETDBREGS:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if (p->p_stat != SSTOP || (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		/* OK */
		break;

	default:
		error = EINVAL;
		goto fail;
	}

	td2 = FIRST_THREAD_IN_PROC(p);
#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
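	/*
	 * FIX_SSTEP is only defined on architectures where single stepping
	 * is simulated (e.g. with temporary breakpoints); it gives the
	 * machine-dependent code a chance to undo that state, as procfs
	 * does, before the request is handled.
	 */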
	FIX_SSTEP(td2);			/* XXXKSE */
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (uap->req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (0);

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		uap->data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* XXX uap->data is used even in the PT_STEP case. */
		if (uap->req != PT_STEP && (unsigned)uap->data > _SIG_MAXSIG) {
			error = EINVAL;
			goto fail;
		}

		_PHOLD(p);

		if (uap->req == PT_STEP) {
			error = ptrace_single_step(td2);
			if (error) {
				_PRELE(p);
				goto fail;
			}
		}

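		/*
		 * By ptrace(2) convention an addr of (caddr_t)1 means
		 * "resume where the process stopped"; any other value
		 * becomes the new program counter.
		 */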
		if (uap->addr != (caddr_t)1) {
			fill_kinfo_proc(p, &p->p_uarea->u_kproc);
			error = ptrace_set_pc(td2,
			    (u_long)(uintfptr_t)uap->addr);
			if (error) {
				_PRELE(p);
				goto fail;
			}
		}
		_PRELE(p);

		if (uap->req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		if (proctree_locked)
			sx_xunlock(&proctree_lock);
		/* deliver or queue signal */
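		/*
		 * If the target is already stopped, stash the signal number
		 * in p_xstat and make it runnable again; it will act on the
		 * signal as it resumes from the ptrace stop.  Otherwise post
		 * the signal the normal way.
		 */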
		if (p->p_stat == SSTOP) {
			p->p_xstat = uap->data;
			mtx_lock_spin(&sched_lock);
			setrunnable(td2);	/* XXXKSE */
			mtx_unlock_spin(&sched_lock);
		} else if (uap->data)
			psignal(p, uap->data);
		PROC_UNLOCK(p);

		return (0);

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
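		/*
		 * These requests transfer a single int between the tracing
		 * process and the target: writes take the value from
		 * uap->data, reads deposit the word in td->td_retval so it
		 * becomes ptrace()'s return value.
		 */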
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&uap->data :
		    (caddr_t)td->td_retval;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)uap->addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		return (error);

	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		if (error)
			return (error);
		iov.iov_base = r.piod.piod_addr;
		iov.iov_len = r.piod.piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)r.piod.piod_offs;
		uio.uio_resid = r.piod.piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
		switch (r.piod.piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			return (EINVAL);
		}
		error = proc_rwmem(p, &uio);
		r.piod.piod_len -= uio.uio_resid;
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		return (error);

	case PT_KILL:
		uap->data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		_PHOLD(p);
		error = proc_write_regs(td2, &r.reg);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETREGS:
		_PHOLD(p);
		error = proc_read_regs(td2, &r.reg);
		_PRELE(p);
		PROC_UNLOCK(p);
		if (error == 0)
			error = copyout(&r.reg, uap->addr, sizeof r.reg);
		return (error);

	case PT_SETFPREGS:
		_PHOLD(p);
		error = proc_write_fpregs(td2, &r.fpreg);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETFPREGS:
		_PHOLD(p);
		error = proc_read_fpregs(td2, &r.fpreg);
		_PRELE(p);
		PROC_UNLOCK(p);
		if (error == 0)
			error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		return (error);

	case PT_SETDBREGS:
		_PHOLD(p);
		error = proc_write_dbregs(td2, &r.dbreg);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETDBREGS:
		_PHOLD(p);
		error = proc_read_dbregs(td2, &r.dbreg);
		_PRELE(p);
		PROC_UNLOCK(p);
		if (error == 0)
			error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		return (error);

	default:
		KASSERT(0, ("unreachable code\n"));
		break;
	}

	KASSERT(0, ("unreachable code\n"));
	return (0);

fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED | MA_NOTRECURSED);
	p->p_step = 1;

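	/*
	 * The process lock is held across the loop (asserted above);
	 * msleep() drops p->p_mtx while this thread sleeps and reacquires
	 * it before returning, so whoever handles the event can examine
	 * p_stype and clear p_step under the lock.
	 */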
	do {
		p->p_xstat = val;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
729