xref: /freebsd/sys/kern/sys_process.c (revision 0de89efe5c443f213c7ea28773ef2dc6cf3af2ed)
/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$Id: sys_process.c,v 1.29 1997/04/27 21:26:29 alex Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>

#include <machine/reg.h>
#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <miscfs/procfs/procfs.h>

/* use the equivalent procfs code */
#if 0
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval) {
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	boolean_t	wired, single_use;
	vm_pindex_t	pindex;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	tmap = map;
	rv = vm_map_lookup (&tmap, pageno, VM_PROT_READ, &out_entry,
		&object, &pindex, &out_prot, &wired, &single_use);

	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (kernel_map, object, IDX_TO_OFF(pindex),
		&kva, PAGE_SIZE, 0, VM_PROT_ALL, VM_PROT_ALL, 0);

	if (!rv) {
		vm_object_reference (object);

		rv = vm_map_pageable (kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		vm_map_remove (kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}

static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum) {
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	boolean_t	wired, single_use;
	vm_pindex_t	pindex;
	boolean_t	fix_prot = 0;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */

	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
		VM_PROT_WRITE) == FALSE) {
		/*
		 * If the page was not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_ALL, 0)) != KERN_SUCCESS)
		  return EFAULT;	/* I guess... */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, wired, and
	 * single_use aren't used.  One would think the vm code would be
	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
	 * change the map argument.
	 */

	tmap = map;
	rv = vm_map_lookup (&tmap, pageno, VM_PROT_WRITE, &out_entry,
		&object, &pindex, &out_prot, &wired, &single_use);
	if (rv != KERN_SUCCESS) {
		return EINVAL;
	}

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */

	vm_map_lookup_done (tmap, out_entry);

	/*
	 * Fault the page in...
	 */

	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (kernel_map, object, IDX_TO_OFF(pindex),
		&kva, PAGE_SIZE, 0,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	if (!rv) {
		vm_object_reference (object);

		rv = vm_map_pageable (kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
		  bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		vm_map_remove (kernel_map, kva, kva + PAGE_SIZE);
	}

	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
#endif
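
/*
 * The live PT_READ_{I,D} and PT_WRITE_{I,D} paths do the same job through
 * procfs_domem() with a kernel-space uio; see the PT_READ_I / PT_WRITE_I
 * cases in ptrace() below.
 */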

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif
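
/*
 * For reference, a debugger drives this syscall from userland roughly as
 * follows (illustrative sequence only; for PT_STEP, PT_CONTINUE and
 * PT_DETACH, addr == (caddr_t)1 means "resume at the current pc"):
 *
 *	ptrace(PT_ATTACH, pid, 0, 0);		  stop and reparent the target
 *	waitpid(pid, &status, 0);		  wait for it to reach SSTOP
 *	word = ptrace(PT_READ_I, pid, addr, 0);	  read a word of text
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);  resume, delivering no signal
 *	ptrace(PT_DETACH, pid, (caddr_t)1, 0);	  restore the original parent
 */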

int
ptrace(curp, uap, retval)
	struct proc *curp;
	struct ptrace_args *uap;
	int *retval;
{
	struct proc *p;
	struct iovec iov;
	struct uio uio;
	int error = 0;
	int write;
	int s;

	if (uap->req == PT_TRACE_ME)
		p = curp;
	else {
		if ((p = pfind(uap->pid)) == NULL)
			return ESRCH;
	}

	/*
	 * Permissions check
	 */
	switch (uap->req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid)
			return EINVAL;

		/* Already traced */
		if (p->p_flag & P_TRACED)
			return EBUSY;

		/* not owned by you, has done setuid (unless you're root) */
		if ((p->p_cred->p_ruid != curp->p_cred->p_ruid) ||
		     (p->p_flag & P_SUGID)) {
			if (error = suser(curp->p_ucred, &curp->p_acflag))
				return error;
		}

		/* can't trace init when securelevel > 0 */
		if (securelevel > 0 && p->p_pid == 1)
			return EPERM;

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_READ_U:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_WRITE_U:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0)
			return EPERM;

		/* not being traced by YOU */
		if (p->p_pptr != curp)
			return EBUSY;

		/* not currently stopped */
		if (p->p_stat != SSTOP || (p->p_flag & P_WAITED) == 0)
			return EBUSY;

		/* OK */
		break;

	default:
		return EINVAL;
	}

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(p);
#endif

	/*
	 * Actually do the requests
	 */

	write = 0;
	*retval = 0;

	switch (uap->req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		return 0;

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != curp)
			proc_reparent(p, curp);
		uap->data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		if ((unsigned)uap->data >= NSIG)
			return EINVAL;

		PHOLD(p);

		if (uap->req == PT_STEP) {
			if ((error = ptrace_single_step (p))) {
				PRELE(p);
				return error;
			}
		}

		if (uap->addr != (caddr_t)1) {
			fill_eproc (p, &p->p_addr->u_kproc.kp_eproc);
			if ((error = ptrace_set_pc (p, (u_int)uap->addr))) {
				PRELE(p);
				return error;
			}
		}
		PRELE(p);

		if (uap->req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				pp = pfind(p->p_oppid);
				proc_reparent(p, pp ? pp : initproc);
			}

			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */

		}

	sendsig:
		/* deliver or queue signal */
		s = splhigh();
		if (p->p_stat == SSTOP) {
			p->p_xstat = uap->data;
			setrunnable(p);
		} else if (uap->data) {
			psignal(p, uap->data);
		}
		splx(s);
		return 0;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&uap->data : (caddr_t)retval;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(u_long)uap->addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* ie: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_procp = p;
		error = procfs_domem(curp, p, NULL, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX procfs_domem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX procfs_domem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		return (error);

	case PT_READ_U:
		if ((u_int)uap->addr > (UPAGES * PAGE_SIZE - sizeof(int))) {
			return EFAULT;
		}
		error = 0;
		PHOLD(p);	/* user had damn well better be incore! */
		if (p->p_flag & P_INMEM) {
			p->p_addr->u_kproc.kp_proc = *p;
			fill_eproc (p, &p->p_addr->u_kproc.kp_eproc);
			*retval = *(int*)((u_int)p->p_addr + (u_int)uap->addr);
		} else {
			*retval = 0;
			error = EFAULT;
		}
		PRELE(p);
		return error;

	case PT_WRITE_U:
		PHOLD(p);	/* user had damn well better be incore! */
		if (p->p_flag & P_INMEM) {
			p->p_addr->u_kproc.kp_proc = *p;
			fill_eproc (p, &p->p_addr->u_kproc.kp_eproc);
			error = ptrace_write_u(p, (vm_offset_t)uap->addr, uap->data);
		} else {
			error = EFAULT;
		}
		PRELE(p);
		return error;

	case PT_KILL:
		uap->data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETREGS */
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 above */
#endif /* PT_GETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		if (!procfs_validregs(p))	/* no P_SYSTEM procs please */
			return EINVAL;
		else {
			iov.iov_base = uap->addr;
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_procp = curp;
			return (procfs_doregs(curp, p, NULL, &uio));
		}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETFPREGS */
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 above */
#endif /* PT_GETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		if (!procfs_validfpregs(p))	/* no P_SYSTEM procs please */
			return EINVAL;
		else {
			iov.iov_base = uap->addr;
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_procp = curp;
			return (procfs_dofpregs(curp, p, NULL, &uio));
		}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */

	default:
		break;
	}

	return 0;
}

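/*
 * Always permit trace requests.
 */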
int
trace_req(p)
	struct proc *p;
{
	return 1;
}