/*
 * work_fork.c - fork implementation for blocking worker child.
 */
#include <config.h>
#include "ntp_workimpl.h"

#ifdef WORK_FORK
#include <stdio.h>
#include <ctype.h>
#include <signal.h>

#include "iosignal.h"
#include "ntp_stdlib.h"
#include "ntp_malloc.h"
#include "ntp_syslog.h"
#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_assert.h"
#include "ntp_unixtime.h"
#include "ntp_worker.h"

/* === variables === */
	int			worker_process;
	addremove_io_fd_func	addremove_io_fd;
static	volatile int		worker_sighup_received;

/* === function prototypes === */
static	void		fork_blocking_child(blocking_child *);
static	RETSIGTYPE	worker_sighup(int);
static	void		send_worker_home_atexit(void);
static	void		cleanup_after_child(blocking_child *);

/* === functions === */
/*
 * exit_worker()
 *
 * On some systems _exit() is preferred to exit() for forked children.
 * For example, http://netbsd.gw.com/cgi-bin/man-cgi?fork++NetBSD-5.0
 * recommends _exit() to avoid double-flushing C runtime stream buffers
 * and also to avoid calling the parent's atexit() routines in the
 * child.  On those systems WORKER_CHILD_EXIT is _exit.  Since _exit
 * bypasses CRT cleanup, fflush() files we know might have output
 * buffered.
 */
void
exit_worker(
	int	exitcode
	)
{
	if (syslog_file != NULL)
		fflush(syslog_file);
	fflush(stdout);
	fflush(stderr);
	WORKER_CHILD_EXIT (exitcode);	/* space before ( required */
}


static RETSIGTYPE
worker_sighup(
	int sig
	)
{
	if (SIGHUP == sig)
		worker_sighup_received = 1;
}


int
worker_sleep(
	blocking_child *	c,
	time_t			seconds
	)
{
	u_int	sleep_remain;

	sleep_remain = (u_int)seconds;
	do {
		if (!worker_sighup_received)
			sleep_remain = sleep(sleep_remain);
		if (worker_sighup_received) {
			TRACE(1, ("worker SIGHUP with %us left to sleep",
				  sleep_remain));
			worker_sighup_received = 0;
			return -1;
		}
	} while (sleep_remain);

	return 0;
}


void
interrupt_worker_sleep(void)
{
	u_int			idx;
	blocking_child *	c;
	int			rc;

	for (idx = 0; idx < blocking_children_alloc; idx++) {
		c = blocking_children[idx];

		if (NULL == c || c->reusable == TRUE)
			continue;

		rc = kill(c->pid, SIGHUP);
		if (rc < 0)
			msyslog(LOG_ERR,
				"Unable to signal HUP to wake child pid %d: %m",
				c->pid);
	}
}


/*
 * req_child_exit() runs in the parent.
 */
int
req_child_exit(
	blocking_child *	c
	)
{
	if (-1 != c->req_write_pipe) {
		close(c->req_write_pipe);
		c->req_write_pipe = -1;
		return 0;
	}
	return -1;
}


/*
 * cleanup_after_child() runs in parent.
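 * It releases the parent's remaining descriptors for a worker that
 * has gone away and marks the slot reusable for a future fork.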
 */
static void
cleanup_after_child(
	blocking_child *	c
	)
{
	if (-1 != c->req_write_pipe) {
		close(c->req_write_pipe);
		c->req_write_pipe = -1;
	}
	if (-1 != c->resp_read_pipe) {
		(*addremove_io_fd)(c->resp_read_pipe, c->ispipe, TRUE);
		close(c->resp_read_pipe);
		c->resp_read_pipe = -1;
	}
	c->pid = 0;
	c->resp_read_ctx = NULL;
	DEBUG_INSIST(-1 == c->req_read_pipe);
	DEBUG_INSIST(-1 == c->resp_write_pipe);
	c->reusable = TRUE;
}


static void
send_worker_home_atexit(void)
{
	u_int			idx;
	blocking_child *	c;

	if (worker_process)
		return;

	for (idx = 0; idx < blocking_children_alloc; idx++) {
		c = blocking_children[idx];
		if (NULL == c)
			continue;
		req_child_exit(c);
	}
}


int
send_blocking_req_internal(
	blocking_child *	c,
	blocking_pipe_header *	hdr,
	void *			data
	)
{
	int	octets;
	int	rc;

	DEBUG_REQUIRE(hdr != NULL);
	DEBUG_REQUIRE(data != NULL);
	DEBUG_REQUIRE(BLOCKING_REQ_MAGIC == hdr->magic_sig);

	if (-1 == c->req_write_pipe) {
		fork_blocking_child(c);
		DEBUG_INSIST(-1 != c->req_write_pipe);
	}

	octets = sizeof(*hdr);
	rc = write(c->req_write_pipe, hdr, octets);

	if (rc == octets) {
		octets = hdr->octets - sizeof(*hdr);
		rc = write(c->req_write_pipe, data, octets);

		if (rc == octets)
			return 0;
	}

	if (rc < 0)
		msyslog(LOG_ERR,
			"send_blocking_req_internal: pipe write: %m");
	else
		msyslog(LOG_ERR,
			"send_blocking_req_internal: short write %d of %d",
			rc, octets);

	exit(1);	/* otherwise would be return -1 */
}


blocking_pipe_header *
receive_blocking_req_internal(
	blocking_child *	c
	)
{
	blocking_pipe_header	hdr;
	blocking_pipe_header *	req;
	int			rc;
	long			octets;

	DEBUG_REQUIRE(-1 != c->req_read_pipe);

	req = NULL;

	do {
		rc = read(c->req_read_pipe, &hdr, sizeof(hdr));
	} while (rc < 0 && EINTR == errno);

	if (rc < 0) {
		msyslog(LOG_ERR,
			"receive_blocking_req_internal: pipe read %m");
	} else if (0 == rc) {
		TRACE(4, ("parent closed request pipe, child %d terminating\n",
			  c->pid));
	} else if (rc != sizeof(hdr)) {
		msyslog(LOG_ERR,
			"receive_blocking_req_internal: short header read %d of %lu",
			rc, (u_long)sizeof(hdr));
	} else {
		INSIST(sizeof(hdr) < hdr.octets && hdr.octets < 4 * 1024);
		req = emalloc(hdr.octets);
		memcpy(req, &hdr, sizeof(*req));
		octets = hdr.octets - sizeof(hdr);
		rc = read(c->req_read_pipe, (char *)req + sizeof(*req),
			  octets);

		if (rc < 0)
			msyslog(LOG_ERR,
				"receive_blocking_req_internal: pipe data read %m");
		else if (rc != octets)
			msyslog(LOG_ERR,
				"receive_blocking_req_internal: short read %d of %ld",
				rc, octets);
		else if (BLOCKING_REQ_MAGIC != req->magic_sig)
			msyslog(LOG_ERR,
				"receive_blocking_req_internal: packet header mismatch (0x%x)",
				req->magic_sig);
		else
			return req;
	}

	if (req != NULL)
		free(req);

	return NULL;
}


int
send_blocking_resp_internal(
	blocking_child *	c,
	blocking_pipe_header *	resp
	)
{
	long	octets;
	int	rc;

	DEBUG_REQUIRE(-1 != c->resp_write_pipe);

	octets = resp->octets;
	rc = write(c->resp_write_pipe, resp, octets);
	free(resp);

	if (octets == rc)
		return 0;

	if (rc < 0)
		TRACE(1, ("send_blocking_resp_internal: pipe write %m\n"));
	else
		TRACE(1, ("send_blocking_resp_internal: short write %d of %ld\n",
			  rc, octets));

	return -1;
}
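

/*
 * receive_blocking_resp_internal() runs in the parent and collects a
 * worker's response.  A zero-length read means the child closed its
 * end of the response pipe (the normal exit indication).  Any path
 * that does not yield a complete response tears down the parent-side
 * state via cleanup_after_child() and returns NULL.
 */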
("send_blocking_resp_internal: pipe write %m\n")); 294 else 295 TRACE(1, ("send_blocking_resp_internal: short write %d of %ld\n", 296 rc, octets)); 297 298 return -1; 299 } 300 301 302 blocking_pipe_header * 303 receive_blocking_resp_internal( 304 blocking_child * c 305 ) 306 { 307 blocking_pipe_header hdr; 308 blocking_pipe_header * resp; 309 int rc; 310 long octets; 311 312 DEBUG_REQUIRE(c->resp_read_pipe != -1); 313 314 resp = NULL; 315 rc = read(c->resp_read_pipe, &hdr, sizeof(hdr)); 316 317 if (rc < 0) { 318 TRACE(1, ("receive_blocking_resp_internal: pipe read %m\n")); 319 } else if (0 == rc) { 320 /* this is the normal child exited indication */ 321 } else if (rc != sizeof(hdr)) { 322 TRACE(1, ("receive_blocking_resp_internal: short header read %d of %lu\n", 323 rc, (u_long)sizeof(hdr))); 324 } else if (BLOCKING_RESP_MAGIC != hdr.magic_sig) { 325 TRACE(1, ("receive_blocking_resp_internal: header mismatch (0x%x)\n", 326 hdr.magic_sig)); 327 } else { 328 INSIST(sizeof(hdr) < hdr.octets && 329 hdr.octets < 16 * 1024); 330 resp = emalloc(hdr.octets); 331 memcpy(resp, &hdr, sizeof(*resp)); 332 octets = hdr.octets - sizeof(hdr); 333 rc = read(c->resp_read_pipe, 334 (char *)resp + sizeof(*resp), 335 octets); 336 337 if (rc < 0) 338 TRACE(1, ("receive_blocking_resp_internal: pipe data read %m\n")); 339 else if (rc < octets) 340 TRACE(1, ("receive_blocking_resp_internal: short read %d of %ld\n", 341 rc, octets)); 342 else 343 return resp; 344 } 345 346 cleanup_after_child(c); 347 348 if (resp != NULL) 349 free(resp); 350 351 return NULL; 352 } 353 354 355 #if defined(HAVE_DROPROOT) && defined(WORK_FORK) 356 void 357 fork_deferred_worker(void) 358 { 359 u_int idx; 360 blocking_child * c; 361 362 REQUIRE(droproot && root_dropped); 363 364 for (idx = 0; idx < blocking_children_alloc; idx++) { 365 c = blocking_children[idx]; 366 if (NULL == c) 367 continue; 368 if (-1 != c->req_write_pipe && 0 == c->pid) 369 fork_blocking_child(c); 370 } 371 } 372 #endif 373 374 375 static void 376 fork_blocking_child( 377 blocking_child * c 378 ) 379 { 380 static int atexit_installed; 381 static int blocking_pipes[4] = { -1, -1, -1, -1 }; 382 int rc; 383 int was_pipe; 384 int is_pipe; 385 int saved_errno = 0; 386 int childpid; 387 int keep_fd; 388 int fd; 389 390 /* 391 * parent and child communicate via a pair of pipes. 392 * 393 * 0 child read request 394 * 1 parent write request 395 * 2 parent read response 396 * 3 child write response 397 */ 398 if (-1 == c->req_write_pipe) { 399 rc = pipe_socketpair(&blocking_pipes[0], &was_pipe); 400 if (0 != rc) { 401 saved_errno = errno; 402 } else { 403 rc = pipe_socketpair(&blocking_pipes[2], &is_pipe); 404 if (0 != rc) { 405 saved_errno = errno; 406 close(blocking_pipes[0]); 407 close(blocking_pipes[1]); 408 } else { 409 INSIST(was_pipe == is_pipe); 410 } 411 } 412 if (0 != rc) { 413 errno = saved_errno; 414 msyslog(LOG_ERR, "unable to create worker pipes: %m"); 415 exit(1); 416 } 417 418 /* 419 * Move the descriptors the parent will keep open out of the 420 * low descriptors preferred by C runtime buffered FILE *. 421 */ 422 c->req_write_pipe = move_fd(blocking_pipes[1]); 423 c->resp_read_pipe = move_fd(blocking_pipes[2]); 424 /* 425 * wake any worker child on orderly shutdown of the 426 * daemon so that it can notice the broken pipes and 427 * go away promptly. 
		 */
		if (!atexit_installed) {
			atexit(&send_worker_home_atexit);
			atexit_installed = TRUE;
		}
	}

#ifdef HAVE_DROPROOT
	/* defer the fork until after root is dropped */
	if (droproot && !root_dropped)
		return;
#endif
	if (syslog_file != NULL)
		fflush(syslog_file);
	fflush(stdout);
	fflush(stderr);

	signal_no_reset(SIGCHLD, SIG_IGN);

	childpid = fork();
	if (-1 == childpid) {
		msyslog(LOG_ERR, "unable to fork worker: %m");
		exit(1);
	}

	if (childpid) {
		/* this is the parent */
		TRACE(1, ("forked worker child (pid %d)\n", childpid));
		c->pid = childpid;
		c->ispipe = is_pipe;

		/* close the child's pipe descriptors. */
		close(blocking_pipes[0]);
		close(blocking_pipes[3]);

		memset(blocking_pipes, -1, sizeof(blocking_pipes));

		/* wire into I/O loop */
		(*addremove_io_fd)(c->resp_read_pipe, is_pipe, FALSE);

		return;		/* parent returns */
	}

	/*
	 * The parent gets the child pid as the return value of fork().
	 * The child must work for it.
	 */
	c->pid = getpid();
	worker_process = TRUE;

	/*
	 * In the child, close all files except stdin, stdout, stderr,
	 * and the two child ends of the pipes.
	 */
	DEBUG_INSIST(-1 == c->req_read_pipe);
	DEBUG_INSIST(-1 == c->resp_write_pipe);
	c->req_read_pipe = blocking_pipes[0];
	c->resp_write_pipe = blocking_pipes[3];

	kill_asyncio(0);
	closelog();
	if (syslog_file != NULL) {
		fclose(syslog_file);
		syslog_file = NULL;
		syslogit = TRUE;
	}
	keep_fd = max(c->req_read_pipe, c->resp_write_pipe);
	for (fd = 3; fd < keep_fd; fd++)
		if (fd != c->req_read_pipe &&
		    fd != c->resp_write_pipe)
			close(fd);
	close_all_beyond(keep_fd);
	/*
	 * We get signals from refclock serial I/O on NetBSD in the
	 * worker if we do not reset SIGIO's handler to the default.
	 * It is not conditionalized for NetBSD alone because on
	 * systems where it is not needed, it is harmless, and that
	 * allows us to handle unknown others with NetBSD behavior.
	 * [Bug 1386]
	 */
#if defined(USE_SIGIO)
	signal_no_reset(SIGIO, SIG_DFL);
#elif defined(USE_SIGPOLL)
	signal_no_reset(SIGPOLL, SIG_DFL);
#endif
	signal_no_reset(SIGHUP, worker_sighup);
	init_logging("ntp_intres", 0, FALSE);
	setup_logfile(NULL);

	/*
	 * And now back to the portable code
	 */
	exit_worker(blocking_child_common(c));
}


#else	/* !WORK_FORK follows */
char work_fork_nonempty_compilation_unit;
#endif