/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD$
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/syscall.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/kthread.h>
#include <sys/unistd.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/vmmeter.h>
#include <sys/user.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

static int fast_vfork = 1;
SYSCTL_INT(_kern, OID_AUTO, fast_vfork, CTLFLAG_RW, &fast_vfork, 0,
    "flag to indicate whether we have a fast vfork()");

/*
 * These are the structures used to create a callout list of things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif

/* ARGSUSED */
int
fork(p, uap)
	struct proc *p;
	struct fork_args *uap;
{
	int error;
	struct proc *p2;

	error = fork1(p, RFFDG | RFPROC, &p2);
	if (error == 0) {
		p->p_retval[0] = p2->p_pid;
		p->p_retval[1] = 0;
	}
	return (error);
}

/* ARGSUSED */
int
vfork(p, uap)
	struct proc *p;
	struct vfork_args *uap;
{
	int error;
	struct proc *p2;

	error = fork1(p, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
	if (error == 0) {
		p->p_retval[0] = p2->p_pid;
		p->p_retval[1] = 0;
	}
	return (error);
}

int
rfork(p, uap)
	struct proc *p;
	struct rfork_args *uap;
{
	int error;
	struct proc *p2;

	/* Mask kernel-only flags out of the user-supplied flags. */
	error = fork1(p, uap->flags & ~RFKERNELONLY, &p2);
	if (error == 0) {
		p->p_retval[0] = p2 ? p2->p_pid : 0;
		p->p_retval[1] = 0;
	}
	return (error);
}
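
/*
 * Illustrative note (not in the original source): the three system calls
 * above differ only in the flag sets they hand to fork1(), so from
 * userland the same semantics can be requested through rfork(2) directly:
 *
 *	rfork(RFFDG | RFPROC)				== fork(2)
 *	rfork(RFFDG | RFPROC | RFPPWAIT | RFMEM)	== vfork(2)
 */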

int nprocs = 1;				/* process 0 */
static int nextpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &nextpid, 0,
    "Last used PID");

/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
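
/*
 * Example (illustrative, derived from the clamping logic above): an
 * administrator enables PID randomization with sysctl(8), and the handler
 * normalizes the modulus before storing it:
 *
 *	sysctl -w kern.randompid=50	stored as 100 (minimum useful value)
 *	sysctl -w kern.randompid=1	stored as 0 (randomization disabled)
 *	sysctl -w kern.randompid=-5	stored as PID_MAX - 100
 */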

int
fork1(p1, flags, procp)
	struct proc *p1;			/* parent proc */
	int flags;
	struct proc **procp;			/* child proc */
{
	struct proc *p2, *pptr;
	uid_t uid;
	struct proc *newproc;
	int trypid;
	int ok;
	static int pidchecked = 0;
	struct forklist *ep;

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from the parent).
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				newfd = fdcopy(p1);
				fdfree(p1);
				p1->p_fd = newfd;
			}
		}
		*procp = NULL;
		return (0);
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = p1->p_cred->p_ruid;
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
		tablefull("proc");
		return (EAGAIN);
	}
	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard limits on the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(p1->p_cred->p_uidinfo, 1,
	    (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count.
		 */
		nprocs--;
		return (EAGAIN);
	}

	/* Allocate new proc. */
	newproc = zalloc(proc_zone);

	/*
	 * Set up linkage for kernel-based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		newproc->p_peers = p1->p_peers;
		p1->p_peers = newproc;
		newproc->p_leader = p1->p_leader;
	} else {
		newproc->p_peers = NULL;
		newproc->p_leader = newproc;
	}

	newproc->p_vmspace = NULL;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from nextpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	ALLPROC_LOCK(AP_EXCLUSIVE);
	trypid = nextpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10)
			trypid = 10;
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= PID_MAX) {
		trypid = trypid % PID_MAX;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
			while (p2->p_pid == trypid ||
			    p2->p_pgrp->pg_id == trypid ||
			    p2->p_session->s_sid == trypid) {
				trypid++;
				if (trypid >= pidchecked)
					goto retry;
			}
			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp->pg_id > trypid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
			if (p2->p_session->s_sid > trypid &&
			    pidchecked > p2->p_session->s_sid)
				pidchecked = p2->p_session->s_sid;
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}

	/*
	 * RFHIGHPID does not mess with the nextpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		nextpid = trypid;

	p2 = newproc;
	p2->p_intr_nesting_level = 0;
	p2->p_pri.pri_native = PRI_MAX;
	p2->p_stat = SIDL;			/* protect against others */
	p2->p_pid = trypid;
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	ALLPROC_LOCK(AP_RELEASE);
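
	/*
	 * Worked example of the scan above (illustrative): if trypid starts
	 * at 95 while ids 95, 96 and 120 are in use, the while loop walks
	 * trypid forward to 97 and pidchecked settles at 120, so pids 97
	 * through 119 can be handed out on later forks without rescanning
	 * the process table.
	 */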

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	mtx_init(&p2->p_mtx, "process lock", MTX_DEF);
	p2->p_aioinfo = NULL;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 */
	p2->p_flag = 0;
	mtx_lock_spin(&sched_lock);
	p2->p_sflag = PS_INMEM;
	if (p1->p_sflag & PS_PROFIL)
		startprofclock(p2);
	mtx_unlock_spin(&sched_lock);
	MALLOC(p2->p_cred, struct pcred *, sizeof(struct pcred),
	    M_SUBPROC, M_WAITOK);
	bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
	p2->p_cred->p_refcnt = 1;
	crhold(p1->p_ucred);
	uihold(p1->p_cred->p_uidinfo);

	if (p2->p_args)
		p2->p_args->ar_ref++;

	if (flags & RFSIGSHARE) {
		p2->p_procsig = p1->p_procsig;
		p2->p_procsig->ps_refcnt++;
		if (p1->p_sigacts == &p1->p_addr->u_sigacts) {
			struct sigacts *newsigacts;
			int s;

			/* Create the shared sigacts structure. */
			MALLOC(newsigacts, struct sigacts *,
			    sizeof(struct sigacts), M_SUBPROC, M_WAITOK);
			s = splhigh();
			/*
			 * Set p_sigacts to the new shared structure.
			 * Note that this is updating p1->p_sigacts at the
			 * same time, since p_sigacts is just a pointer to
			 * the shared p_procsig->ps_sigacts.
			 */
			p2->p_sigacts = newsigacts;
			*p2->p_sigacts = p1->p_addr->u_sigacts;
			splx(s);
		}
	} else {
		MALLOC(p2->p_procsig, struct procsig *, sizeof(struct procsig),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
		p2->p_procsig->ps_refcnt = 1;
		p2->p_sigacts = NULL;		/* finished in vm_fork() */
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* Bump references to the text vnode (for procfs). */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);

	if (flags & RFCFDG)
		p2->p_fd = fdinit(p1);
	else if (flags & RFFDG)
		p2->p_fd = fdcopy(p1);
	else
		p2->p_fd = fdshare(p1);

	/*
	 * If p_limit is still copy-on-write, bump refcnt;
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
	if (p1->p_limit->p_lflags & PL_SHAREMOD)
		p2->p_limit = limcopy(p1->p_limit);
	else {
		p2->p_limit = p1->p_limit;
		p2->p_limit->p_refcnt++;
	}

	/*
	 * Preserve some more flags in the subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	LIST_INSERT_AFTER(p1, p2, p_pglist);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	PROCTREE_LOCK(PT_EXCLUSIVE);
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	PROCTREE_LOCK(PT_RELEASE);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_heldmtx);
	LIST_INIT(&p2->p_contested);

	callout_init(&p2->p_itcallout, 0);
	callout_init(&p2->p_slpcallout, 1);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	/*
	 * Set the priority of the child to be that of the parent.
	 */
	p2->p_estcpu = p1->p_estcpu;

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	PHOLD(p1);

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later (i.e., directly into user mode).
	 */
	vm_fork(p1, p2, flags);

	if (flags == (RFFDG | RFPROC)) {
		cnt.v_forks++;
		cnt.v_forkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		cnt.v_vforks++;
		cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		cnt.v_kthreads++;
		cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else {
		cnt.v_rforks++;
		cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up; now check if any loadable modules want
	 * to adjust anything.
	 * XXX: What if they have an error?
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * If RFSTOPPED was not requested, make the child runnable and
	 * add it to the run queue.
	 */
	microtime(&(p2->p_stats->p_start));
	p2->p_acflag = AFORK;
	if ((flags & RFSTOPPED) == 0) {
		splhigh();
		mtx_lock_spin(&sched_lock);
		p2->p_stat = SRUN;
		setrunqueue(p2);
		mtx_unlock_spin(&sched_lock);
		spl0();
	}

	/*
	 * Now can be swapped.
	 */
	PRELE(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	while (p2->p_flag & P_PPWAIT)
		tsleep(p1, PWAIT, "ppwait", 0);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
}
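
/*
 * Illustrative sketch (not part of the original file): an in-kernel
 * caller can pass RFSTOPPED to obtain a child that is not yet runnable,
 * finish setting it up, and only then place it on the run queue,
 * mirroring the SRUN/setrunqueue sequence in fork1() above.  The helper
 * name start_stopped_child() is hypothetical.
 */
#if 0
static int
start_stopped_child(p1)
	struct proc *p1;
{
	struct proc *p2;
	int error;

	error = fork1(p1, RFFDG | RFPROC | RFSTOPPED, &p2);
	if (error != 0)
		return (error);
	/* ... perform any extra setup on p2 here ... */
	mtx_lock_spin(&sched_lock);
	p2->p_stat = SRUN;
	setrunqueue(p2);
	mtx_unlock_spin(&sched_lock);
	return (0);
}
#endif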

/*
 * The next two functions are general routines to handle adding and
 * deleting items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list;
 * first, however, make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */

int
at_fork(function)
	forklist_fn function;
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* Let the programmer know if he's been stupid. */
	if (rm_at_fork(function))
		printf("WARNING: fork callout entry (%p) already present\n",
		    function);
#endif
	ep = malloc(sizeof(*ep), M_ATFORK, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */

int
rm_at_fork(function)
	forklist_fn function;
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			free(ep, M_ATFORK);
			return (1);
		}
	}
	return (0);
}
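
/*
 * Illustrative sketch (not part of the original file): a fork callout is
 * any function of type forklist_fn; it is invoked with the parent, the
 * new child and the fork1() flags once both processes are set up.  The
 * name example_fork_callout is made up.
 */
#if 0
static void
example_fork_callout(p1, p2, flags)
	struct proc *p1;			/* parent */
	struct proc *p2;			/* new child */
	int flags;
{

	printf("fork: pid %d created pid %d (flags %#x)\n",
	    p1->p_pid, p2->p_pid, flags);
}

/* Registered with at_fork(example_fork_callout) at module load and	*/
/* removed with rm_at_fork(example_fork_callout) at module unload.	*/
#endif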

/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(callout, arg, frame)
	void (*callout)(void *, struct trapframe *);
	void *arg;
	struct trapframe *frame;
{
	struct proc *p;

	/*
	 * Set up the sched_lock state so that we can release it.
	 */
	sched_lock.mtx_lock = (uintptr_t)curproc;
	sched_lock.mtx_recurse = 0;
	mtx_unlock_spin(&sched_lock);
	/*
	 * XXX: We really shouldn't have to do this.
	 */
	enable_intr();

#ifdef SMP
	if (PCPU_GET(switchtime.tv_sec) == 0)
		microuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);
#endif

	/*
	 * cpu_set_fork_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 * initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	p = CURPROC;
	if (p->p_flag & P_KTHREAD) {
		mtx_lock(&Giant);
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    p->p_comm, p->p_pid);
		kthread_exit(0);
	}
	mtx_assert(&Giant, MA_NOTOWNED);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.  This function is passed in to fork_exit() as the
 * first parameter and is called when returning to a new userland process.
 */
void
fork_return(p, frame)
	struct proc *p;
	struct trapframe *frame;
{

	userret(p, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		if (!mtx_owned(&Giant))
			mtx_lock(&Giant);
		ktrsysret(p->p_tracep, SYS_fork, 0, 0);
	}
#endif
	if (mtx_owned(&Giant))
		mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);
}
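
/*
 * Illustrative summary (restating the comments above, not new behavior):
 * the child created by fork1() first runs in the MD fork_trampoline(),
 * which calls fork_exit() with the callout selected at fork time; for an
 * ordinary fork(2) that callout is fork_return(), which completes the
 * transition into user mode via userret().
 */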