xref: /freebsd/sys/kern/sys_process.c (revision 6990ffd8a95caaba6858ad44ff1b3157d1efba8f)
/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/user.h>

#include <machine/reg.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <fs/procfs/procfs.h>

/* Unused: the equivalent procfs code (procfs_domem()) is used instead. */
#if 0
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval) {
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	boolean_t	wired;
	vm_pindex_t	pindex;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);
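	/*
	 * Worked example (assuming 4 KB pages): for addr 0x1234,
	 * trunc_page() yields pageno 0x1000, so page_offset is
	 * 0x1234 - 0x1000 = 0x234; the word is later copied from
	 * kva + page_offset once the page is mapped below.
	 */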

	tmap = map;
	rv = vm_map_lookup (&tmap, pageno, VM_PROT_READ, &out_entry,
		&object, &pindex, &out_prot, &wired);

	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (kernel_map, object, IDX_TO_OFF(pindex),
		&kva, PAGE_SIZE, 0, VM_PROT_ALL, VM_PROT_ALL, 0);

	if (!rv) {
		vm_object_reference (object);

		rv = vm_map_pageable (kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		vm_map_remove (kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}

static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum) {
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	boolean_t	wired;
	vm_pindex_t	pindex;
	boolean_t	fix_prot = 0;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */

	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
		VM_PROT_WRITE) == FALSE) {
		/*
		 * If the page was not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_ALL, 0)) != KERN_SUCCESS)
		  return EFAULT;	/* I guess... */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, wired, and
	 * single_use aren't used.  One would think the vm code would be
	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
	 * change the map argument.
	 */

	tmap = map;
	rv = vm_map_lookup (&tmap, pageno, VM_PROT_WRITE, &out_entry,
		&object, &pindex, &out_prot, &wired);
	if (rv != KERN_SUCCESS) {
		return EINVAL;
	}

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */

	vm_map_lookup_done (tmap, out_entry);

	/*
	 * Fault the page in...
	 */

	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (kernel_map, object, IDX_TO_OFF(pindex),
		&kva, PAGE_SIZE, 0,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	if (!rv) {
		vm_object_reference (object);

		rv = vm_map_pageable (kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
		  bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		vm_map_remove (kernel_map, kva, kva + PAGE_SIZE);
	}

	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
#endif

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif
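
/*
 * Userland usage sketch (illustrative only, not part of this file): a
 * minimal tracer attaches, waits for the resulting stop via waitpid(2),
 * reads one word from the target, and detaches.  "pid" and "addr" are
 * hypothetical values supplied by the caller.
 *
 *	int status, word;
 *
 *	ptrace(PT_ATTACH, pid, (caddr_t)0, 0);
 *	waitpid(pid, &status, 0);
 *	word = ptrace(PT_READ_D, pid, (caddr_t)addr, 0);
 *	ptrace(PT_DETACH, pid, (caddr_t)1, 0);
 */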

int
ptrace(td, uap)
	struct thread *td;
	struct ptrace_args *uap;
{
	struct proc *curp = td->td_proc;
	struct proc *p;
	struct iovec iov;
	struct uio uio;
	int error = 0;
	int write;

	write = 0;
	if (uap->req == PT_TRACE_ME) {
		p = curp;
		PROC_LOCK(p);
	} else {
		if ((p = pfind(uap->pid)) == NULL)
			return ESRCH;
	}
	if (p_cansee(curp, p)) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}

	/*
	 * Permissions check
	 */
	switch (uap->req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid) {
			PROC_UNLOCK(p);
			return EINVAL;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			PROC_UNLOCK(p);
			return EBUSY;
		}

		if ((error = p_candebug(curp, p))) {
			PROC_UNLOCK(p);
			return error;
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
#endif
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			PROC_UNLOCK(p);
			return EPERM;
		}

		/* not being traced by YOU */
		if (p->p_pptr != curp) {
			PROC_UNLOCK(p);
			return EBUSY;
		}

		/* not currently stopped */
		mtx_lock_spin(&sched_lock);
		if (p->p_stat != SSTOP || (p->p_flag & P_WAITED) == 0) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return EBUSY;
		}
		mtx_unlock_spin(&sched_lock);

		/* OK */
		break;

	default:
		PROC_UNLOCK(p);
		return EINVAL;
	}

	PROC_UNLOCK(p);
#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(&p->p_thread);	/* XXXKSE */
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (uap->req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		sx_xlock(&proctree_lock);
		PROC_LOCK(p);
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return 0;
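		/*
		 * Userland sketch (illustrative only): PT_TRACE_ME is issued
		 * by the child itself, typically right before exec, so the
		 * parent sees the child stop once the new image starts;
		 * "path" and "argv" are the caller's own values:
		 *
		 *	if (fork() == 0) {
		 *		ptrace(PT_TRACE_ME, 0, (caddr_t)0, 0);
		 *		execv(path, argv);
		 *	}
		 */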

	case PT_ATTACH:
		/* security check done above */
		sx_xlock(&proctree_lock);
		PROC_LOCK(p);
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != curp)
			proc_reparent(p, curp);
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		uap->data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		if ((uap->req != PT_STEP) && ((unsigned)uap->data >= NSIG))
			return EINVAL;

		PHOLD(p);

		if (uap->req == PT_STEP) {
			if ((error = ptrace_single_step (td))) {
				PRELE(p);
				return error;
			}
		}

		if (uap->addr != (caddr_t)1) {
			fill_kinfo_proc (p, &p->p_uarea->u_kproc);
			if ((error = ptrace_set_pc (td,
			    (u_long)(uintfptr_t)uap->addr))) {
				PRELE(p);
				return error;
			}
		}
		PRELE(p);

		if (uap->req == PT_DETACH) {
			/* reset process parent */
			sx_xlock(&proctree_lock);
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				pp = pfind(p->p_oppid);
				if (pp != NULL)
					PROC_UNLOCK(pp);
				else
					pp = initproc;
				PROC_LOCK(p);
				proc_reparent(p, pp);
			} else
				PROC_LOCK(p);
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			PROC_UNLOCK(p);
			sx_xunlock(&proctree_lock);

			/* should we send SIGCHLD? */

		}

	sendsig:
		/* deliver or queue signal */
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		if (p->p_stat == SSTOP) {
			p->p_xstat = uap->data;
			setrunnable(&p->p_thread); /* XXXKSE */
			mtx_unlock_spin(&sched_lock);
		} else {
			mtx_unlock_spin(&sched_lock);
			if (uap->data)
				psignal(p, uap->data);

		}
		PROC_UNLOCK(p);
		return 0;
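		/*
		 * Userland sketch (illustrative only): PT_CONTINUE resumes a
		 * stopped target; addr == (caddr_t)1 means "continue from
		 * where it stopped" and data is the signal to deliver, or 0
		 * for none:
		 *
		 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);
		 */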

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&uap->data : (caddr_t)td->td_retval;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)uap->addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* buffer is in the kernel (uap->data / td_retval) */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = procfs_domem(curp, p, NULL, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX procfs_domem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX procfs_domem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		return (error);
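		/*
		 * Userland sketch (illustrative only): debuggers typically
		 * use these requests one int at a time, e.g. a read-modify-
		 * write to plant a breakpoint ("trap" standing in for a
		 * machine-dependent breakpoint opcode word):
		 *
		 *	int word = ptrace(PT_READ_I, pid, (caddr_t)addr, 0);
		 *	ptrace(PT_WRITE_I, pid, (caddr_t)addr, trap);
		 */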

	case PT_KILL:
		uap->data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETREGS */
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 above */
#endif /* PT_GETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		if (!procfs_validregs(td))	/* no P_SYSTEM procs please */
			return EINVAL;
		else {
			iov.iov_base = uap->addr;
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = td;
			return (procfs_doregs(curp, p, NULL, &uio));
		}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */
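		/*
		 * Userland sketch (illustrative only): register access uses
		 * the machine-dependent struct reg from <machine/reg.h>; the
		 * field that holds the program counter varies by platform:
		 *
		 *	struct reg r;
		 *
		 *	ptrace(PT_GETREGS, pid, (caddr_t)&r, 0);
		 *	... inspect or modify r ...
		 *	ptrace(PT_SETREGS, pid, (caddr_t)&r, 0);
		 */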

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETFPREGS */
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 above */
#endif /* PT_GETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		if (!procfs_validfpregs(td))	/* no P_SYSTEM procs please */
			return EINVAL;
		else {
			iov.iov_base = uap->addr;
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = td;
			return (procfs_dofpregs(curp, p, NULL, &uio));
		}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */

#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETDBREGS */
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		/* write = 0 above */
#endif /* PT_GETDBREGS */
#if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
		if (!procfs_validdbregs(td))	/* no P_SYSTEM procs please */
			return EINVAL;
		else {
			iov.iov_base = uap->addr;
			iov.iov_len = sizeof(struct dbreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct dbreg);
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = td;
			return (procfs_dodbregs(curp, p, NULL, &uio));
		}
#endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */

	default:
		break;
	}

	return 0;
}

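/*
 * Unconditionally grant trace requests; no per-process policy is applied
 * here.
 */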
int
trace_req(p)
	struct proc *p;
{
	return 1;
}

/*
 * stopevent()
 * Stop a process because of a procfs event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 *
 * Must be called with the proc struct mutex held.
 */

void
stopevent(p, event, val)
	struct proc *p;
	unsigned int event;
	unsigned int val;
{

	PROC_LOCK_ASSERT(p, MA_OWNED | MA_NOTRECURSED);
	p->p_step = 1;

	do {
		p->p_xstat = val;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
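
/*
 * For reference, a sketch of the procfs side (which lives elsewhere): the
 * PIOCCONT handler is expected to clear p->p_step and wake the sleeper,
 * roughly
 *
 *	p->p_step = 0;
 *	wakeup(&p->p_step);
 *
 * which lets the msleep() loop above terminate and the process resume.
 */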