/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>

#include <machine/reg.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/user.h>
#include <miscfs/procfs/procfs.h>

/*
 * The two helpers below are unused; the equivalent procfs code
 * (procfs_domem()) is used instead.  They are kept under "#if 0"
 * for reference only.
 */
#if 0
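/*
 * pread() copies one int out of the traced process's address space.
 * It looks up the VM object backing the page, maps that page a second
 * time into kernel_map, wires it, copies the word out, and then
 * removes the temporary mapping again.
 */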
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval) {
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	boolean_t	wired;
	vm_pindex_t	pindex;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	tmap = map;
	rv = vm_map_lookup (&tmap, pageno, VM_PROT_READ, &out_entry,
		&object, &pindex, &out_prot, &wired);

	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (kernel_map, object, IDX_TO_OFF(pindex),
		&kva, PAGE_SIZE, 0, VM_PROT_ALL, VM_PROT_ALL, 0);

	if (!rv) {
		vm_object_reference (object);

		rv = vm_map_pageable (kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		vm_map_remove (kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}

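/*
 * pwrite() stores one int into the traced process's address space using
 * the same double-mapping trick as pread().  If the target page is not
 * writable it is temporarily given VM_PROT_ALL and reset to read/execute
 * afterwards, and the page is faulted in for write before it is mapped
 * into kernel_map.
 */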
static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum) {
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	boolean_t	wired;
	vm_pindex_t	pindex;
	boolean_t	fix_prot = 0;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */

	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
		VM_PROT_WRITE) == FALSE) {
		/*
		 * If the page is not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_ALL, 0)) != KERN_SUCCESS)
		  return EFAULT;	/* closest errno we have */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, and wired
	 * aren't used.  One would think the vm code would be a *bit*
	 * nicer...  We use tmap because vm_map_lookup() can change the
	 * map argument.
	 */

	tmap = map;
	rv = vm_map_lookup (&tmap, pageno, VM_PROT_WRITE, &out_entry,
		&object, &pindex, &out_prot, &wired);
	if (rv != KERN_SUCCESS) {
		return EINVAL;
	}

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */

	vm_map_lookup_done (tmap, out_entry);

	/*
	 * Fault the page in...
	 */

	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (kernel_map, object, IDX_TO_OFF(pindex),
		&kva, PAGE_SIZE, 0,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	if (!rv) {
		vm_object_reference (object);

		rv = vm_map_pageable (kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
		  bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		vm_map_remove (kernel_map, kva, kva + PAGE_SIZE);
	}

	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
#endif

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

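/*
 * For illustration only (not part of this file's logic): a typical
 * userland sequence driving these requests might look like
 *
 *	ptrace(PT_ATTACH, pid, (caddr_t)0, 0);
 *	waitpid(pid, &status, 0);		(child stops with SIGSTOP)
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);	(addr of 1 = resume at old pc)
 *	ptrace(PT_DETACH, pid, (caddr_t)1, 0);
 *
 * i.e. every request handled below arrives through the ptrace(2)
 * syscall with the arguments described by struct ptrace_args above.
 */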
int
ptrace(curp, uap)
	struct proc *curp;
	struct ptrace_args *uap;
{
	struct proc *p;
	struct iovec iov;
	struct uio uio;
	int error = 0;
	int write;
	int s;

	write = 0;
	if (uap->req == PT_TRACE_ME)
		p = curp;
	else {
		if ((p = pfind(uap->pid)) == NULL)
			return ESRCH;
	}
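	/*
	 * If the caller is not allowed to see the target at all, fail as
	 * if the process did not exist rather than reveal its presence.
	 */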
	if (p_can(curp, p, P_CAN_SEE, NULL))
		return (ESRCH);

	/*
	 * Permissions check
	 */
	switch (uap->req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid)
			return EINVAL;

		/* Already traced */
		if (p->p_flag & P_TRACED)
			return EBUSY;

		if ((error = p_can(curp, p, P_CAN_DEBUG, NULL)))
			return error;

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_READ_U:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_WRITE_U:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
#endif
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0)
			return EPERM;

		/* not being traced by YOU */
		PROCTREE_LOCK(PT_SHARED);
		if (p->p_pptr != curp) {
			PROCTREE_LOCK(PT_RELEASE);
			return EBUSY;
		}
		PROCTREE_LOCK(PT_RELEASE);

		/* not currently stopped */
		mtx_lock_spin(&sched_lock);
		if (p->p_stat != SSTOP || (p->p_flag & P_WAITED) == 0) {
			mtx_unlock_spin(&sched_lock);
			return EBUSY;
		}
		mtx_unlock_spin(&sched_lock);

		/* OK */
		break;

	default:
		return EINVAL;
	}
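	/*
	 * From here on the request is known to be valid: for everything
	 * except PT_TRACE_ME and PT_ATTACH the target is a stopped,
	 * wait()ed-on child that we are currently tracing.
	 */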

#ifdef FIX_SSTEP
	/*
	 * Single-step fixup, a la procfs.
	 */
	FIX_SSTEP(p);
#endif

	/*
	 * Actually do the requests
	 */

	curp->p_retval[0] = 0;

	switch (uap->req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		PROCTREE_LOCK(PT_SHARED);
		p->p_oppid = p->p_pptr->p_pid;
		PROCTREE_LOCK(PT_RELEASE);
		return 0;

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		PROCTREE_LOCK(PT_EXCLUSIVE);
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != curp)
			proc_reparent(p, curp);
		PROCTREE_LOCK(PT_RELEASE);
		uap->data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

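	/*
	 * PT_STEP arms single-stepping before the target is resumed.
	 * For all three requests an addr other than (caddr_t)1 sets a new
	 * program counter; PT_DETACH additionally reparents the target
	 * back to its original parent and clears the trace state.  All of
	 * them, and PT_ATTACH/PT_KILL via the sendsig label, finish by
	 * either waking a stopped target with uap->data as the pending
	 * signal or psignal()ing a running one.
	 */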
	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		if ((uap->req != PT_STEP) && ((unsigned)uap->data >= NSIG))
			return EINVAL;

		PHOLD(p);

		if (uap->req == PT_STEP) {
			if ((error = ptrace_single_step (p))) {
				PRELE(p);
				return error;
			}
		}

		if (uap->addr != (caddr_t)1) {
			fill_kinfo_proc (p, &p->p_addr->u_kproc);
			if ((error = ptrace_set_pc (p,
			    (u_long)(uintfptr_t)uap->addr))) {
				PRELE(p);
				return error;
			}
		}
		PRELE(p);

		if (uap->req == PT_DETACH) {
			/* reset process parent */
			PROCTREE_LOCK(PT_EXCLUSIVE);
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				pp = pfind(p->p_oppid);
				proc_reparent(p, pp ? pp : initproc);
			}
			PROCTREE_LOCK(PT_RELEASE);

			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */

		}

	sendsig:
		/* deliver or queue signal */
		s = splhigh();
		mtx_lock_spin(&sched_lock);
		if (p->p_stat == SSTOP) {
			p->p_xstat = uap->data;
			setrunnable(p);
			mtx_unlock_spin(&sched_lock);
		} else {
			mtx_unlock_spin(&sched_lock);
			if (uap->data) {
				mtx_assert(&Giant, MA_OWNED);
				psignal(p, uap->data);
			}
		}
		splx(s);
		return 0;

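	/*
	 * PT_READ_I/D and PT_WRITE_I/D move a single int between the
	 * tracer and the target's address space.  The transfer is done by
	 * procfs_domem() on a UIO_SYSSPACE uio whose buffer is either
	 * uap->data (writes) or curp->p_retval (reads).
	 */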
	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&uap->data : (caddr_t)curp->p_retval;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)uap->addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e. the buffer is in the kernel (the uap) */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_procp = p;
		error = procfs_domem(curp, p, NULL, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX procfs_domem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX procfs_domem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		return (error);

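	/*
	 * PT_READ_U reads an int out of the target's user struct (u-area).
	 * The offset must be int-aligned and within the u-area, and the
	 * target must be resident in memory (PS_INMEM); otherwise EFAULT.
	 */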
	case PT_READ_U:
		if ((uintptr_t)uap->addr > UPAGES * PAGE_SIZE - sizeof(int)) {
			return EFAULT;
		}
		if ((uintptr_t)uap->addr & (sizeof(int) - 1)) {
			return EFAULT;
		}
		if (ptrace_read_u_check(p,(vm_offset_t) uap->addr,
					sizeof(int))) {
			return EFAULT;
		}
		error = 0;
		PHOLD(p);	/* user had damn well better be incore! */
		mtx_lock_spin(&sched_lock);
		if (p->p_sflag & PS_INMEM) {
			mtx_unlock_spin(&sched_lock);
			fill_kinfo_proc (p, &p->p_addr->u_kproc);
			curp->p_retval[0] = *(int *)
			    ((uintptr_t)p->p_addr + (uintptr_t)uap->addr);
		} else {
			mtx_unlock_spin(&sched_lock);
			curp->p_retval[0] = 0;
			error = EFAULT;
		}
		PRELE(p);
		return error;

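	/*
	 * PT_WRITE_U writes an int into the target's user struct; the
	 * actual store (and validation of the offset) is left to the
	 * machine-dependent ptrace_write_u().
	 */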
	case PT_WRITE_U:
		PHOLD(p);	/* user had damn well better be incore! */
		mtx_lock_spin(&sched_lock);
		if (p->p_sflag & PS_INMEM) {
			mtx_unlock_spin(&sched_lock);
			fill_kinfo_proc (p, &p->p_addr->u_kproc);
			error = ptrace_write_u(p, (vm_offset_t)uap->addr, uap->data);
		} else {
			mtx_unlock_spin(&sched_lock);
			error = EFAULT;
		}
		PRELE(p);
		return error;

	case PT_KILL:
		uap->data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

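	/*
	 * The register requests are only compiled in where the MD code
	 * provides them.  Each get/set pair shares one body: "write"
	 * selects the uio direction, and procfs_doregs()/procfs_dofpregs()/
	 * procfs_dodbregs() copy the whole register structure to or from
	 * the address supplied by the tracer.
	 */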
#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETREGS */
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 above */
#endif /* PT_GETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		if (!procfs_validregs(p))	/* no P_SYSTEM procs please */
			return EINVAL;
		else {
			iov.iov_base = uap->addr;
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_procp = curp;
			return (procfs_doregs(curp, p, NULL, &uio));
		}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETFPREGS */
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 above */
#endif /* PT_GETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		if (!procfs_validfpregs(p))	/* no P_SYSTEM procs please */
			return EINVAL;
		else {
			iov.iov_base = uap->addr;
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_procp = curp;
			return (procfs_dofpregs(curp, p, NULL, &uio));
		}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */

#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETDBREGS */
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		/* write = 0 above */
#endif /* PT_GETDBREGS */
#if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
		if (!procfs_validdbregs(p))	/* no P_SYSTEM procs please */
			return EINVAL;
		else {
			iov.iov_base = uap->addr;
			iov.iov_len = sizeof(struct dbreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct dbreg);
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_procp = curp;
			return (procfs_dodbregs(curp, p, NULL, &uio));
		}
#endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */

	default:
		break;
	}

	return 0;
}

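/*
 * trace_req() reports whether the process may be traced; it currently
 * always says yes.
 */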
int
trace_req(p)
	struct proc *p;
{
	return 1;
}

/*
 * stopevent()
 * Stop a process because of a procfs event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 *
 * Must be called with the proc struct mutex held.
 */

void
stopevent(p, event, val)
	struct proc *p;
	unsigned int event;
	unsigned int val;
{

	mtx_assert(&p->p_mtx, MA_OWNED | MA_NOTRECURSED);
	p->p_step = 1;

	do {
		p->p_xstat = val;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
590