/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1980, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef lint
#endif /* not lint */

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/stat.h>

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include <protocols/dumprestore.h>

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include "dump.h"

ino_t	curino;			/* current inumber; used globally */
int	newtape;		/* new tape flag */
union	u_spcl u_spcl;		/* mapping of variables in a control block */

static	int tapefd;		/* tape file descriptor */
static	long asize;		/* number of 0.1" units written on cur tape */
static	int writesize;		/* size of malloc()ed buffer for tape */
static	int64_t lastspclrec = -1; /* tape block number of last written header */
static	int trecno = 0;		/* next record to write in current block */
static	long blocksthisvol;	/* number of blocks on current output file */
static	char *nexttape;
static	FILE *popenfp = NULL;

static	int atomic_read(int, void *, int);
static	int atomic_write(int, const void *, int);
static	void worker(int, int);
static	void create_workers(void);
static	void flushtape(void);
static	void killall(void);
static	void rollforward(void);

/*
 * Concurrent dump mods (Caltech) - disk block reading and tape writing
 * are exported to several worker processes.  While one worker writes the
 * tape, the others read disk blocks; they pass control of the tape in
 * a ring via signals.  The parent process traverses the file system and
 * sends writeheader()'s and lists of daddr's to the workers via pipes.
 * The following structure defines the instruction packets sent to workers.
 */
struct req {
	ufs2_daddr_t dblk;
	int count;
};
static int reqsiz;

#define WORKERS 3		/* 1 worker writing, 1 reading, 1 for slack */
static struct worker {
	int64_t tapea;		/* header number at start of this chunk */
	int64_t firstrec;	/* record number of this block */
	int count;		/* count to next header (used for TS_TAPE */
				/* after EOT) */
	int inode;		/* inode that we are currently dealing with */
	int fd;			/* FD for this worker */
	int pid;		/* PID for this worker */
	int sent;		/* 1 == we've sent this worker requests */
	char (*tblock)[TP_BSIZE]; /* buffer for data blocks */
	struct req *req;	/* buffer for requests */
} workers[WORKERS+1];
static struct worker *wp;

static char	(*nextblock)[TP_BSIZE];

static int master;	/* pid of master, for sending error signals */
static int tenths;	/* length of tape used per block written */
static volatile sig_atomic_t caught; /* have we caught the signal to proceed? */
static volatile sig_atomic_t ready; /* reached the lock point without having */
			/* received the SIGUSR2 signal from the prev worker? */
static jmp_buf jmpbuf;	/* where to jump to if we are ready when the */
			/* SIGUSR2 arrives from the previous worker */

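/*
 * Allocate the per-worker request/buffer pairs.  Returns 1 on success
 * and 0 if any of the buffers cannot be allocated.
 */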
int
alloctape(void)
{
	int pgoff = getpagesize() - 1;
	char *buf;
	int i;

	writesize = ntrec * TP_BSIZE;
	reqsiz = (ntrec + 1) * sizeof(struct req);
	/*
	 * CDC 92181's and 92185's make 0.8" gaps in 1600-bpi start/stop mode
	 * (see DEC TU80 User's Guide).  The shorter gaps of 6250-bpi require
	 * repositioning after stopping, i.e., streaming mode, where the gap is
	 * variable, 0.30" to 0.45".  The gap is maximal when the tape stops.
	 */
	if (blocksperfile == 0 && !unlimited)
		tenths = writesize / density +
		    (cartridge ? 16 : density == 625 ? 5 : 8);
	/*
	 * Allocate tape buffer contiguous with the array of instruction
	 * packets, so flushtape() can write them together with one write().
	 * Align tape buffer on page boundary to speed up tape write().
	 */
	for (i = 0; i <= WORKERS; i++) {
		buf = (char *)
		    malloc((unsigned)(reqsiz + writesize + pgoff + TP_BSIZE));
		if (buf == NULL)
			return(0);
		workers[i].tblock = (char (*)[TP_BSIZE])
		    (((long)&buf[ntrec + 1] + pgoff) &~ pgoff);
		workers[i].req = (struct req *)workers[i].tblock - ntrec - 1;
	}
	wp = &workers[0];
	wp->count = 1;
	wp->tapea = 0;
	wp->firstrec = 0;
	nextblock = wp->tblock;
	return(1);
}

void
writerec(char *dp, int isspcl)
{

	wp->req[trecno].dblk = (ufs2_daddr_t)0;
	wp->req[trecno].count = 1;
	/* Can't do a structure assignment due to alignment problems */
	bcopy(dp, *(nextblock)++, sizeof (union u_spcl));
	if (isspcl)
		lastspclrec = spcl.c_tapea;
	trecno++;
	spcl.c_tapea++;
	if (trecno >= ntrec)
		flushtape();
}

void
dumpblock(ufs2_daddr_t blkno, int size)
{
	int avail, tpblks;
	ufs2_daddr_t dblkno;

	dblkno = fsbtodb(sblock, blkno);
	tpblks = size >> tp_bshift;
	while ((avail = MIN(tpblks, ntrec - trecno)) > 0) {
		wp->req[trecno].dblk = dblkno;
		wp->req[trecno].count = avail;
		trecno += avail;
		spcl.c_tapea += avail;
		if (trecno >= ntrec)
			flushtape();
		dblkno += avail << (tp_bshift - dev_bshift);
		tpblks -= avail;
	}
}

int nogripe = 0;

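/*
 * Handler for SIGUSR1, sent by a worker that got a write error on the
 * tape.  Unless output is a pipe, close out the current volume and
 * exit with X_REWRITE so the checkpoint logic rewrites it on new media.
 */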
void
tperror(int signo __unused)
{

	if (pipeout) {
		msg("write error on %s\n", tape);
		quit("Cannot recover\n");
		/* NOTREACHED */
	}
	msg("write error %ld blocks into volume %d\n", blocksthisvol, tapeno);
	broadcast("DUMP WRITE ERROR!\n");
	if (!query("Do you want to restart?"))
		dumpabort(0);
	msg("Closing this volume.  Prepare to restart with new media;\n");
	msg("this dump volume will be rewritten.\n");
	killall();
	nogripe = 1;
	close_rewind();
	Exit(X_REWRITE);
}

void
sigpipe(int signo __unused)
{

	quit("Broken pipe\n");
}

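/*
 * Hand the queued requests to the current worker over its command pipe
 * and advance to the next worker in the ring, collecting that worker's
 * previous write status so end of tape can be detected.
 */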
static void
flushtape(void)
{
	int i, blks, got;
	int64_t lastfirstrec;

	int siz = (char *)nextblock - (char *)wp->req;

	wp->req[trecno].count = 0;	/* Sentinel */

	if (atomic_write(wp->fd, (const void *)wp->req, siz) != siz)
		quit("error writing command pipe: %s\n", strerror(errno));
	wp->sent = 1; /* we sent a request, read the response later */

	lastfirstrec = wp->firstrec;

	if (++wp >= &workers[WORKERS])
		wp = &workers[0];

	/* Read results back from next worker */
	if (wp->sent) {
		if (atomic_read(wp->fd, (void *)&got, sizeof got)
		    != sizeof got) {
			perror("  DUMP: error reading command pipe in master");
			dumpabort(0);
		}
		wp->sent = 0;

		/* Check for end of tape */
		if (got < writesize) {
			msg("End of tape detected\n");

			/*
			 * Drain the results, don't care what the values were.
			 * If we read them here then trewind won't...
			 */
			for (i = 0; i < WORKERS; i++) {
				if (workers[i].sent) {
					if (atomic_read(workers[i].fd,
					    (void *)&got, sizeof got)
					    != sizeof got) {
						perror("  DUMP: error reading command pipe in master");
						dumpabort(0);
					}
					workers[i].sent = 0;
				}
			}

			close_rewind();
			rollforward();
			return;
		}
	}

	blks = 0;
	if (spcl.c_type != TS_END && spcl.c_type != TS_CLRI &&
	    spcl.c_type != TS_BITS) {
		assert(spcl.c_count <= TP_NINDIR);
		for (i = 0; i < spcl.c_count; i++)
			if (spcl.c_addr[i] != 0)
				blks++;
	}
	wp->count = lastspclrec + blks + 1 - spcl.c_tapea;
	wp->tapea = spcl.c_tapea;
	wp->firstrec = lastfirstrec + ntrec;
	wp->inode = curino;
	nextblock = wp->tblock;
	trecno = 0;
	asize += tenths;
	blockswritten += ntrec;
	blocksthisvol += ntrec;
	if (!pipeout && !unlimited && (blocksperfile ?
	    (blocksthisvol >= blocksperfile) : (asize > tsize))) {
		close_rewind();
		startnewtape(0);
	}
	timeest();
}

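/*
 * Drain the remaining write statuses from the workers and close the
 * output.  For a real tape device, keep trying to reopen it until the
 * open succeeds, so the volume has finished rewinding before we return.
 */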
void
trewind(void)
{
	struct stat sb;
	int f;
	int got;

	for (f = 0; f < WORKERS; f++) {
		/*
		 * Drain the results, but unlike EOT we DO (or should) care
		 * what the return values were, since if we detect EOT after
		 * we think we've written the last blocks to the tape anyway,
		 * we have to replay those blocks with rollforward.
		 *
		 * fixme: punt for now.
		 */
		if (workers[f].sent) {
			if (atomic_read(workers[f].fd, (void *)&got, sizeof got)
			    != sizeof got) {
				perror("  DUMP: error reading command pipe in master");
				dumpabort(0);
			}
			workers[f].sent = 0;
			if (got != writesize) {
				msg("EOT detected in last 2 tape records!\n");
				msg("Use a longer tape, decrease the size estimate\n");
				quit("or use no size estimate at all.\n");
			}
		}
		(void) close(workers[f].fd);
	}
	while (wait((int *)NULL) >= 0)	/* wait for any signals from workers */
		/* void */;

	if (pipeout)
		return;

	msg("Closing %s\n", tape);

	if (popenout) {
		tapefd = -1;
		(void)pclose(popenfp);
		popenfp = NULL;
		return;
	}
#ifdef RDUMP
	if (host) {
		rmtclose();
		while (rmtopen(tape, 0) < 0)
			sleep(10);
		rmtclose();
		return;
	}
#endif
	if (fstat(tapefd, &sb) == 0 && S_ISFIFO(sb.st_mode)) {
		(void)close(tapefd);
		return;
	}
	(void) close(tapefd);
	while ((f = open(tape, 0)) < 0)
		sleep(10);
	(void) close(f);
}

void
close_rewind(void)
{
	time_t tstart_changevol, tend_changevol;

	trewind();
	if (nexttape)
		return;
	(void)time((time_t *)&(tstart_changevol));
	if (!nogripe) {
		msg("Change Volumes: Mount volume #%d\n", tapeno+1);
		broadcast("CHANGE DUMP VOLUMES!\a\a\n");
	}
	while (!query("Is the new volume mounted and ready to go?"))
		if (query("Do you want to abort?")) {
			dumpabort(0);
			/*NOTREACHED*/
		}
	(void)time((time_t *)&(tend_changevol));
	if ((tstart_changevol != (time_t)-1) && (tend_changevol != (time_t)-1))
		tstart_writing += (tend_changevol - tstart_changevol);
}

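/*
 * After end of tape was detected, replay the requests that were still
 * outstanding in the worker ring onto the start of the next volume.
 */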
void
rollforward(void)
{
	struct req *p, *q, *prev;
	struct worker *twp;
	int i, size, got;
	int64_t savedtapea;
	union u_spcl *ntb, *otb;
	twp = &workers[WORKERS];
	ntb = (union u_spcl *)twp->tblock[1];

	/*
	 * Each of the N workers should have requests that need to
	 * be replayed on the next tape.  Use the extra worker buffers
	 * (workers[WORKERS]) to construct request lists to be sent to
	 * each worker in turn.
	 */
	for (i = 0; i < WORKERS; i++) {
		q = &twp->req[1];
		otb = (union u_spcl *)wp->tblock;

		/*
		 * For each request in the current worker, copy it to twp.
		 */

		prev = NULL;
		for (p = wp->req; p->count > 0; p += p->count) {
			*q = *p;
			if (p->dblk == 0)
				*ntb++ = *otb++; /* copy the datablock also */
			prev = q;
			q += q->count;
		}
		if (prev == NULL)
			quit("rollforward: protocol botch");
		if (prev->dblk != 0)
			prev->count -= 1;
		else
			ntb--;
		q -= 1;
		q->count = 0;
		q = &twp->req[0];
		if (i == 0) {
			q->dblk = 0;
			q->count = 1;
			trecno = 0;
			nextblock = twp->tblock;
			savedtapea = spcl.c_tapea;
			spcl.c_tapea = wp->tapea;
			startnewtape(0);
			spcl.c_tapea = savedtapea;
			lastspclrec = savedtapea - 1;
		}
		size = (char *)ntb - (char *)q;
		if (atomic_write(wp->fd, (const void *)q, size) != size) {
			perror("  DUMP: error writing command pipe");
			dumpabort(0);
		}
		wp->sent = 1;
		if (++wp >= &workers[WORKERS])
			wp = &workers[0];

		q->count = 1;

		if (prev->dblk != 0) {
			/*
			 * If the last one was a disk block, make the
			 * first of this one be the last bit of that disk
			 * block...
			 */
			q->dblk = prev->dblk +
			    prev->count * (TP_BSIZE / DEV_BSIZE);
			ntb = (union u_spcl *)twp->tblock;
		} else {
			/*
			 * It wasn't a disk block.  Copy the data to its
			 * new location in the buffer.
			 */
			q->dblk = 0;
			*((union u_spcl *)twp->tblock) = *ntb;
			ntb = (union u_spcl *)twp->tblock[1];
		}
	}
	wp->req[0] = *q;
	nextblock = wp->tblock;
	if (q->dblk == 0)
		nextblock++;
	trecno = 1;

	/*
	 * Clear the first worker's response.  One hopes that it
	 * worked ok, otherwise the tape is much too short!
	 */
	if (wp->sent) {
		if (atomic_read(wp->fd, (void *)&got, sizeof got)
		    != sizeof got) {
			perror("  DUMP: error reading command pipe in master");
			dumpabort(0);
		}
		wp->sent = 0;

		if (got != writesize) {
			quit("EOT detected at start of the tape!\n");
		}
	}
}

/*
 * We implement taking and restoring checkpoints on the tape level.
 * When each tape is opened, a new process is created by forking; this
 * saves all of the necessary context in the parent.  The child
 * continues the dump; the parent waits around, saving the context.
 * If the child returns X_REWRITE, then it had problems writing that tape;
 * this causes the parent to fork again, duplicating the context, and
 * everything continues as if nothing had happened.
 */
void
startnewtape(int top)
{
	int parentpid;
	int childpid;
	int status;
	char *p;
	sig_t interrupt_save;

	interrupt_save = signal(SIGINT, SIG_IGN);
	parentpid = getpid();

restore_check_point:
	(void)signal(SIGINT, interrupt_save);
	/*
	 * All signals are inherited...
	 */
	setproctitle(NULL);	/* Restore the proctitle. */
	childpid = fork();
	if (childpid < 0) {
		msg("Context save fork fails in parent %d\n", parentpid);
		Exit(X_ABORT);
	}
	if (childpid != 0) {
		/*
		 * PARENT:
		 * save the context by waiting
		 * until the child doing all of the work returns.
		 * don't catch the interrupt
		 */
		signal(SIGINT, SIG_IGN);
#ifdef TDEBUG
		msg("Tape: %d; parent process: %d child process %d\n",
		    tapeno+1, parentpid, childpid);
#endif /* TDEBUG */
		if (waitpid(childpid, &status, 0) == -1)
			msg("Waiting for child %d: %s\n", childpid,
			    strerror(errno));
		if (status & 0xFF) {
			msg("Child %d returns LOB status %o\n",
			    childpid, status&0xFF);
		}
		status = (status >> 8) & 0xFF;
#ifdef TDEBUG
		switch(status) {
		case X_FINOK:
			msg("Child %d finishes X_FINOK\n", childpid);
			break;
		case X_ABORT:
			msg("Child %d finishes X_ABORT\n", childpid);
			break;
		case X_REWRITE:
			msg("Child %d finishes X_REWRITE\n", childpid);
			break;
		default:
			msg("Child %d finishes unknown %d\n",
			    childpid, status);
			break;
		}
#endif /* TDEBUG */
		switch(status) {
		case X_FINOK:
			Exit(X_FINOK);
		case X_ABORT:
			Exit(X_ABORT);
		case X_REWRITE:
			goto restore_check_point;
		default:
			msg("Bad return code from dump: %d\n", status);
			Exit(X_ABORT);
		}
		/*NOTREACHED*/
	} else {	/* we are the child; just continue */
#ifdef TDEBUG
		sleep(4);	/* allow time for parent's message to get out */
		msg("Child on Tape %d has parent %d, my pid = %d\n",
		    tapeno+1, parentpid, getpid());
#endif /* TDEBUG */
		/*
		 * If we have a name like "/dev/rmt0,/dev/rmt1",
		 * use the name before the comma first, and save
		 * the remaining names for subsequent volumes.
		 */
		tapeno++;		/* current tape sequence */
		if (nexttape || strchr(tape, ',')) {
			if (nexttape && *nexttape)
				tape = nexttape;
			if ((p = strchr(tape, ',')) != NULL) {
				*p = '\0';
				nexttape = p + 1;
			} else
				nexttape = NULL;
			msg("Dumping volume %d on %s\n", tapeno, tape);
		}
		if (pipeout) {
			tapefd = STDOUT_FILENO;
		} else if (popenout) {
			char volno[sizeof("2147483647")];

			(void)sprintf(volno, "%d", spcl.c_volume + 1);
			if (setenv("DUMP_VOLUME", volno, 1) == -1) {
				msg("Cannot set $DUMP_VOLUME.\n");
				dumpabort(0);
			}
			popenfp = popen(popenout, "w");
			if (popenfp == NULL) {
				msg("Cannot open output pipeline \"%s\".\n",
				    popenout);
				dumpabort(0);
			}
			tapefd = fileno(popenfp);
		} else {
#ifdef RDUMP
			while ((tapefd = (host ? rmtopen(tape, 2) :
			    open(tape, O_WRONLY|O_CREAT, 0666))) < 0)
#else
			while ((tapefd =
			    open(tape, O_WRONLY|O_CREAT, 0666)) < 0)
#endif
			{
				msg("Cannot open output \"%s\".\n", tape);
				if (!query("Do you want to retry the open?"))
					dumpabort(0);
			}
		}

		create_workers();  /* Share open tape file descriptor with workers */
		if (popenout)
			close(tapefd);	/* Give up our copy of it. */
		signal(SIGINFO, infosch);

		asize = 0;
		blocksthisvol = 0;
		if (top)
			newtape++;		/* new tape signal */
		spcl.c_count = wp->count;
		/*
		 * measure firstrec in TP_BSIZE units since restore doesn't
		 * know the correct ntrec value...
		 */
		spcl.c_firstrec = wp->firstrec;
		spcl.c_volume++;
		spcl.c_type = TS_TAPE;
		writeheader((ino_t)wp->inode);
		if (tapeno > 1)
			msg("Volume %d begins with blocks from inode %d\n",
			    tapeno, wp->inode);
	}
}

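/*
 * Abort the dump.  A worker forwards the request to the master via
 * SIGTERM; the master kills the workers and exits.
 */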
void
dumpabort(int signo __unused)
{

	if (master != 0 && master != getpid())
		/* Signals master to call dumpabort */
		(void) kill(master, SIGTERM);
	else {
		killall();
		msg("The ENTIRE dump is aborted.\n");
	}
#ifdef RDUMP
	rmtclose();
#endif
	Exit(X_ABORT);
}

void
Exit(int status)
{

#ifdef TDEBUG
	msg("pid = %d exits with status %d\n", getpid(), status);
#endif /* TDEBUG */
	exit(status);
}

/*
 * proceed - handler for SIGUSR2, used to synchronize IO between the workers.
 */
void
proceed(int signo __unused)
{

	if (ready)
		longjmp(jmpbuf, 1);
	caught++;
}

void
create_workers(void)
{
	int cmd[2];
	int i, j;

	master = getpid();

	signal(SIGTERM, dumpabort);  /* Worker sends SIGTERM on dumpabort() */
	signal(SIGPIPE, sigpipe);
	signal(SIGUSR1, tperror);    /* Worker sends SIGUSR1 on tape errors */
	signal(SIGUSR2, proceed);    /* Worker sends SIGUSR2 to next worker */

	for (i = 0; i < WORKERS; i++) {
		if (i == wp - &workers[0]) {
			caught = 1;
		} else {
			caught = 0;
		}

		if (socketpair(AF_UNIX, SOCK_STREAM, 0, cmd) < 0 ||
		    (workers[i].pid = fork()) < 0)
			quit("too many workers, %d (recompile smaller): %s\n",
			    i, strerror(errno));

		workers[i].fd = cmd[1];
		workers[i].sent = 0;
		if (workers[i].pid == 0) {	/* Worker starts up here */
			for (j = 0; j <= i; j++)
				(void) close(workers[j].fd);
			signal(SIGINT, SIG_IGN);    /* Master handles this */
			worker(cmd[0], i);
			Exit(X_FINOK);
		}
	}

	for (i = 0; i < WORKERS; i++)
		(void) atomic_write(workers[i].fd,
		    (const void *) &workers[(i + 1) % WORKERS].pid,
		    sizeof workers[0].pid);

	master = 0;
}

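/*
 * Forcibly terminate all worker processes.
 */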
void
killall(void)
{
	int i;

	for (i = 0; i < WORKERS; i++)
		if (workers[i].pid > 0) {
			(void) kill(workers[i].pid, SIGKILL);
			workers[i].sent = 0;
		}
}

/*
 * Synchronization - the workers pass control of the tape around a ring.
 * Each worker waits at the setjmp()/pause() lock point below until the
 * previous worker finishes its write and sends SIGUSR2; the proceed()
 * handler releases it.  When this worker's write completes, it signals
 * the next worker in turn.
 */
static void
worker(int cmd, int worker_number)
{
	int nread;
	int nextworker, size, wrote, eot_count;

	/*
	 * Need our own seek pointer.
	 */
	(void) close(diskfd);
	if ((diskfd = open(disk, O_RDONLY)) < 0)
		quit("worker couldn't reopen disk: %s\n", strerror(errno));

	/*
	 * Need the pid of the next worker in the loop...
	 */
	if ((nread = atomic_read(cmd, (void *)&nextworker, sizeof nextworker))
	    != sizeof nextworker) {
		quit("master/worker protocol botched - didn't get pid of next worker.\n");
	}

	/*
	 * Get list of blocks to dump, read the blocks into tape buffer
	 */
	while ((nread = atomic_read(cmd, (void *)wp->req, reqsiz)) == reqsiz) {
		struct req *p = wp->req;

		for (trecno = 0; trecno < ntrec;
		     trecno += p->count, p += p->count) {
			if (p->dblk) {
				blkread(p->dblk, wp->tblock[trecno],
				    p->count * TP_BSIZE);
			} else {
				if (p->count != 1 || atomic_read(cmd,
				    (void *)wp->tblock[trecno],
				    TP_BSIZE) != TP_BSIZE)
					quit("master/worker protocol botched.\n");
			}
		}
		if (setjmp(jmpbuf) == 0) {
			ready = 1;
			if (!caught)
				(void) pause();
		}
		ready = 0;
		caught = 0;

		/* Try to write the data... */
		eot_count = 0;
		size = 0;

		wrote = 0;
		while (eot_count < 10 && size < writesize) {
#ifdef RDUMP
			if (host)
				wrote = rmtwrite(wp->tblock[0]+size,
				    writesize-size);
			else
#endif
				wrote = write(tapefd, wp->tblock[0]+size,
				    writesize-size);
#ifdef WRITEDEBUG
			printf("worker %d wrote %d\n", worker_number, wrote);
#endif
			if (wrote < 0)
				break;
			if (wrote == 0)
				eot_count++;
			size += wrote;
		}

#ifdef WRITEDEBUG
		if (size != writesize)
			printf("worker %d only wrote %d out of %d bytes and gave up.\n",
			    worker_number, size, writesize);
#endif

		/*
		 * Handle ENOSPC as an EOT condition.
		 */
		if (wrote < 0 && errno == ENOSPC) {
			wrote = 0;
			eot_count++;
		}

		if (eot_count > 0)
			size = 0;

		if (wrote < 0) {
			(void) kill(master, SIGUSR1);
			for (;;)
				(void) sigpause(0);
		} else {
			/*
			 * pass size of write back to master
			 * (for EOT handling)
			 */
			(void)atomic_write(cmd, (const void *)&size,
			    sizeof size);
		}

		/*
		 * If partial write, don't want next worker to go.
		 * Also jolts him awake.
		 */
		(void) kill(nextworker, SIGUSR2);
	}
	if (nread != 0)
		quit("error reading command pipe: %s\n", strerror(errno));
}

/*
 * Since a read from a pipe may not return all we asked for,
 * loop until the count is satisfied (or error).
 */
static int
atomic_read(int fd, void *buf, int count)
{
	int got, need = count;

	while ((got = read(fd, buf, need)) > 0 && (need -= got) > 0)
		buf += got;
	return (got < 0 ? got : count - need);
}

/*
 * Since a write to a pipe may not write all we ask if we get a signal,
 * loop until the count is satisfied (or error).
 */
static int
atomic_write(int fd, const void *buf, int count)
{
	int got, need = count;

	while ((got = write(fd, buf, need)) > 0 && (need -= got) > 0)
		buf += got;
	return (got < 0 ? got : count - need);
}