/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1980, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef lint
#if 0
static char sccsid[] = "@(#)tape.c	8.4 (Berkeley) 5/1/95";
#endif
#endif /* not lint */

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/stat.h>

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include <protocols/dumprestore.h>

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include "dump.h"

ino_t	curino;			/* current inumber; used globally */
int	newtape;		/* new tape flag */
union	u_spcl u_spcl;		/* mapping of variables in a control block */

static	int tapefd;		/* tape file descriptor */
static	long asize;		/* number of 0.1" units written on cur tape */
static	int writesize;		/* size of malloc()ed buffer for tape */
static	int64_t lastspclrec = -1; /* tape block number of last written header */
static	int trecno = 0;		/* next record to write in current block */
static	long blocksthisvol;	/* number of blocks on current output file */
static	char *nexttape;
static	FILE *popenfp = NULL;

static	int atomic_read(int, void *, int);
static	int atomic_write(int, const void *, int);
static	void worker(int, int);
static	void create_workers(void);
static	void flushtape(void);
static	void killall(void);
static	void rollforward(void);

/*
 * Concurrent dump mods (Caltech) - disk block reading and tape writing
 * are exported to several worker processes.  While one worker writes the
 * tape, the others read disk blocks; they pass control of the tape in
 * a ring via signals.  The parent process traverses the file system and
 * sends writeheader()'s and lists of daddr's to the workers via pipes.
 * The following structure defines the instruction packets sent to workers.
 */
struct req {
	ufs2_daddr_t dblk;
	int count;
};
static int reqsiz;

#define	WORKERS 3		/* 1 worker writing, 1 reading, 1 for slack */
static struct worker {
	int64_t tapea;		/* header number at start of this chunk */
	int64_t firstrec;	/* record number of this block */
	int count;		/* count to next header (used for TS_TAPE */
				/* after EOT) */
	int inode;		/* inode that we are currently dealing with */
	int fd;			/* FD for this worker */
	int pid;		/* PID for this worker */
	int sent;		/* 1 == we've sent this worker requests */
	char (*tblock)[TP_BSIZE]; /* buffer for data blocks */
	struct req *req;	/* buffer for requests */
} workers[WORKERS+1];
static struct worker *wp;

static char	(*nextblock)[TP_BSIZE];

static int master;	/* pid of master, for sending error signals */
static int tenths;	/* length of tape used per block written */
static volatile sig_atomic_t caught; /* have we caught the signal to proceed? */
static volatile sig_atomic_t ready; /* reached the lock point without having */
				/* received the SIGUSR2 signal from the prev worker? */
static jmp_buf jmpbuf;	/* where to jump to if we are ready when the */
			/* SIGUSR2 arrives from the previous worker */

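/*
 * Allocate, for the master and each worker, an array of ntrec + 1 request
 * packets followed immediately by a page-aligned buffer of ntrec tape
 * records, so that flushtape() can hand a request list and its in-memory
 * records to a worker with a single write().  Also computes the per-buffer
 * tape length charge (tenths) when a tape size estimate is in use.
 * Returns 1 on success, 0 if any allocation fails.
 */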
int
alloctape(void)
{
	int pgoff = getpagesize() - 1;
	char *buf;
	int i;

	writesize = ntrec * TP_BSIZE;
	reqsiz = (ntrec + 1) * sizeof(struct req);
	/*
	 * CDC 92181's and 92185's make 0.8" gaps in 1600-bpi start/stop mode
	 * (see DEC TU80 User's Guide).  The shorter gaps of 6250-bpi require
	 * repositioning after stopping, i.e., streaming mode, where the gap is
	 * variable, 0.30" to 0.45".  The gap is maximal when the tape stops.
	 */
	if (blocksperfile == 0 && !unlimited)
		tenths = writesize / density +
		    (cartridge ? 16 : density == 625 ? 5 : 8);
	/*
	 * Allocate tape buffer contiguous with the array of instruction
	 * packets, so flushtape() can write them together with one write().
	 * Align tape buffer on page boundary to speed up tape write().
	 */
	for (i = 0; i <= WORKERS; i++) {
		buf = (char *)
		    malloc((unsigned)(reqsiz + writesize + pgoff + TP_BSIZE));
		if (buf == NULL)
			return(0);
		workers[i].tblock = (char (*)[TP_BSIZE])
		    (((long)&buf[ntrec + 1] + pgoff) &~ pgoff);
		workers[i].req = (struct req *)workers[i].tblock - ntrec - 1;
	}
	wp = &workers[0];
	wp->count = 1;
	wp->tapea = 0;
	wp->firstrec = 0;
	nextblock = wp->tblock;
	return(1);
}

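/*
 * Queue a TP_BSIZE record whose contents are already in memory (a dump
 * header or a map block): the data is copied into the current worker's
 * tape buffer and a request packet with a zero disk address is recorded
 * for it.  File system data blocks go through dumpblock() below, which
 * queues only disk addresses and lets the workers read the data.
 */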
void
writerec(char *dp, int isspcl)
{

	wp->req[trecno].dblk = (ufs2_daddr_t)0;
	wp->req[trecno].count = 1;
	/* Can't do a structure assignment due to alignment problems */
	bcopy(dp, *(nextblock)++, sizeof (union u_spcl));
	if (isspcl)
		lastspclrec = spcl.c_tapea;
	trecno++;
	spcl.c_tapea++;
	if (trecno >= ntrec)
		flushtape();
}

void
dumpblock(ufs2_daddr_t blkno, int size)
{
	int avail, tpblks;
	ufs2_daddr_t dblkno;

	dblkno = fsbtodb(sblock, blkno);
	tpblks = size >> tp_bshift;
	while ((avail = MIN(tpblks, ntrec - trecno)) > 0) {
		wp->req[trecno].dblk = dblkno;
		wp->req[trecno].count = avail;
		trecno += avail;
		spcl.c_tapea += avail;
		if (trecno >= ntrec)
			flushtape();
		dblkno += avail << (tp_bshift - dev_bshift);
		tpblks -= avail;
	}
}

int	nogripe = 0;

void
tperror(int signo __unused)
{

	if (pipeout) {
		msg("write error on %s\n", tape);
		quit("Cannot recover\n");
		/* NOTREACHED */
	}
	msg("write error %ld blocks into volume %d\n", blocksthisvol, tapeno);
	broadcast("DUMP WRITE ERROR!\n");
	if (!query("Do you want to restart?"))
		dumpabort(0);
	msg("Closing this volume.  Prepare to restart with new media;\n");
	msg("this dump volume will be rewritten.\n");
	killall();
	nogripe = 1;
	close_rewind();
	Exit(X_REWRITE);
}

void
sigpipe(int signo __unused)
{

	quit("Broken pipe\n");
}

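/*
 * Hand the current buffer to its worker: terminate the request list with a
 * zero-count sentinel, write the requests and any in-memory records down
 * the worker's command pipe in one write(), then advance to the next worker
 * in the ring.  If that worker still has an outstanding buffer, collect the
 * size it managed to write; a short write means end of tape, in which case
 * the remaining replies are drained and rollforward() replays the lost
 * buffers on a new volume.  Otherwise account for the blocks written and
 * start a new volume here if the size or block-count limit has been reached.
 */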
static void
flushtape(void)
{
	int i, blks, got;
	int64_t lastfirstrec;

	int siz = (char *)nextblock - (char *)wp->req;

	wp->req[trecno].count = 0;			/* Sentinel */

	if (atomic_write(wp->fd, (const void *)wp->req, siz) != siz)
		quit("error writing command pipe: %s\n", strerror(errno));
	wp->sent = 1; /* we sent a request, read the response later */

	lastfirstrec = wp->firstrec;

	if (++wp >= &workers[WORKERS])
		wp = &workers[0];

	/* Read results back from next worker */
	if (wp->sent) {
		if (atomic_read(wp->fd, (void *)&got, sizeof got)
		    != sizeof got) {
			perror(" DUMP: error reading command pipe in master");
			dumpabort(0);
		}
		wp->sent = 0;

		/* Check for end of tape */
		if (got < writesize) {
			msg("End of tape detected\n");

			/*
			 * Drain the results, don't care what the values were.
			 * If we read them here then trewind won't...
			 */
			for (i = 0; i < WORKERS; i++) {
				if (workers[i].sent) {
					if (atomic_read(workers[i].fd,
					    (void *)&got, sizeof got)
					    != sizeof got) {
						perror(" DUMP: error reading command pipe in master");
						dumpabort(0);
					}
					workers[i].sent = 0;
				}
			}

			close_rewind();
			rollforward();
			return;
		}
	}

	blks = 0;
	if (spcl.c_type != TS_END && spcl.c_type != TS_CLRI &&
	    spcl.c_type != TS_BITS) {
		assert(spcl.c_count <= TP_NINDIR);
		for (i = 0; i < spcl.c_count; i++)
			if (spcl.c_addr[i] != 0)
				blks++;
	}
	wp->count = lastspclrec + blks + 1 - spcl.c_tapea;
	wp->tapea = spcl.c_tapea;
	wp->firstrec = lastfirstrec + ntrec;
	wp->inode = curino;
	nextblock = wp->tblock;
	trecno = 0;
	asize += tenths;
	blockswritten += ntrec;
	blocksthisvol += ntrec;
	if (!pipeout && !unlimited && (blocksperfile ?
	    (blocksthisvol >= blocksperfile) : (asize > tsize))) {
		close_rewind();
		startnewtape(0);
	}
	timeest();
}

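/*
 * Finish the current output file: collect each worker's final write status
 * (a short count this late means the tape ran out under the last records,
 * which is not recoverable), close the command pipes, reap the worker
 * processes, and then close the output, reopening a real tape device until
 * it has finished rewinding.
 */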
void
trewind(void)
{
	struct stat sb;
	int f;
	int got;

	for (f = 0; f < WORKERS; f++) {
		/*
		 * Drain the results, but unlike EOT we DO (or should) care
		 * what the return values were, since if we detect EOT after
		 * we think we've written the last blocks to the tape anyway,
		 * we have to replay those blocks with rollforward.
		 *
		 * fixme: punt for now.
		 */
		if (workers[f].sent) {
			if (atomic_read(workers[f].fd, (void *)&got, sizeof got)
			    != sizeof got) {
				perror(" DUMP: error reading command pipe in master");
				dumpabort(0);
			}
			workers[f].sent = 0;
			if (got != writesize) {
				msg("EOT detected in last 2 tape records!\n");
				msg("Use a longer tape, decrease the size estimate\n");
				quit("or use no size estimate at all.\n");
			}
		}
		(void) close(workers[f].fd);
	}
	while (wait((int *)NULL) >= 0)	/* wait for any signals from workers */
		/* void */;

	if (pipeout)
		return;

	msg("Closing %s\n", tape);

	if (popenout) {
		tapefd = -1;
		(void)pclose(popenfp);
		popenfp = NULL;
		return;
	}
#ifdef RDUMP
	if (host) {
		rmtclose();
		while (rmtopen(tape, 0) < 0)
			sleep(10);
		rmtclose();
		return;
	}
#endif
	if (fstat(tapefd, &sb) == 0 && S_ISFIFO(sb.st_mode)) {
		(void)close(tapefd);
		return;
	}
	(void) close(tapefd);
	while ((f = open(tape, 0)) < 0)
		sleep (10);
	(void) close(f);
}

void
close_rewind()
{
	time_t tstart_changevol, tend_changevol;

	trewind();
	if (nexttape)
		return;
	(void)time((time_t *)&(tstart_changevol));
	if (!nogripe) {
		msg("Change Volumes: Mount volume #%d\n", tapeno+1);
		broadcast("CHANGE DUMP VOLUMES!\a\a\n");
	}
	while (!query("Is the new volume mounted and ready to go?"))
		if (query("Do you want to abort?")) {
			dumpabort(0);
			/*NOTREACHED*/
		}
	(void)time((time_t *)&(tend_changevol));
	if ((tstart_changevol != (time_t)-1) && (tend_changevol != (time_t)-1))
		tstart_writing += (tend_changevol - tstart_changevol);
}

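/*
 * Called after flushtape() detects end of tape: requests already handed to
 * the workers never made it onto the old volume, so rebuild those request
 * lists (using the spare workers[WORKERS] buffer as scratch space), start
 * the new volume, and resend them to the workers in order so the lost
 * records are written at the front of the new tape.
 */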
void
rollforward(void)
{
	struct req *p, *q, *prev;
	struct worker *twp;
	int i, size, got;
	int64_t savedtapea;
	union u_spcl *ntb, *otb;
	twp = &workers[WORKERS];
	ntb = (union u_spcl *)twp->tblock[1];

	/*
	 * Each of the N workers should have requests that need to
	 * be replayed on the next tape.  Use the extra worker buffers
	 * (workers[WORKERS]) to construct request lists to be sent to
	 * each worker in turn.
	 */
	for (i = 0; i < WORKERS; i++) {
		q = &twp->req[1];
		otb = (union u_spcl *)wp->tblock;

		/*
		 * For each request in the current worker, copy it to twp.
		 */

		prev = NULL;
		for (p = wp->req; p->count > 0; p += p->count) {
			*q = *p;
			if (p->dblk == 0)
				*ntb++ = *otb++; /* copy the datablock also */
			prev = q;
			q += q->count;
		}
		if (prev == NULL)
			quit("rollforward: protocol botch");
		if (prev->dblk != 0)
			prev->count -= 1;
		else
			ntb--;
		q -= 1;
		q->count = 0;
		q = &twp->req[0];
		if (i == 0) {
			q->dblk = 0;
			q->count = 1;
			trecno = 0;
			nextblock = twp->tblock;
			savedtapea = spcl.c_tapea;
			spcl.c_tapea = wp->tapea;
			startnewtape(0);
			spcl.c_tapea = savedtapea;
			lastspclrec = savedtapea - 1;
		}
		size = (char *)ntb - (char *)q;
		if (atomic_write(wp->fd, (const void *)q, size) != size) {
			perror(" DUMP: error writing command pipe");
			dumpabort(0);
		}
		wp->sent = 1;
		if (++wp >= &workers[WORKERS])
			wp = &workers[0];

		q->count = 1;

		if (prev->dblk != 0) {
			/*
			 * If the last one was a disk block, make the
			 * first of this one be the last bit of that disk
			 * block...
			 */
			q->dblk = prev->dblk +
			    prev->count * (TP_BSIZE / DEV_BSIZE);
			ntb = (union u_spcl *)twp->tblock;
		} else {
			/*
			 * It wasn't a disk block.  Copy the data to its
			 * new location in the buffer.
			 */
			q->dblk = 0;
			*((union u_spcl *)twp->tblock) = *ntb;
			ntb = (union u_spcl *)twp->tblock[1];
		}
	}
	wp->req[0] = *q;
	nextblock = wp->tblock;
	if (q->dblk == 0)
		nextblock++;
	trecno = 1;

	/*
	 * Clear the first worker's response.  One hopes that it
	 * worked ok, otherwise the tape is much too short!
	 */
	if (wp->sent) {
		if (atomic_read(wp->fd, (void *)&got, sizeof got)
		    != sizeof got) {
			perror(" DUMP: error reading command pipe in master");
			dumpabort(0);
		}
		wp->sent = 0;

		if (got != writesize) {
			quit("EOT detected at start of the tape!\n");
		}
	}
}

/*
 * We implement taking and restoring checkpoints on the tape level.
 * When each tape is opened, a new process is created by forking; this
 * saves all of the necessary context in the parent.  The child
 * continues the dump; the parent waits around, saving the context.
 * If the child returns X_REWRITE, then it had problems writing that tape;
 * this causes the parent to fork again, duplicating the context, and
 * everything continues as if nothing had happened.
 */
void
startnewtape(int top)
{
	int	parentpid;
	int	childpid;
	int	status;
	char	*p;
	sig_t	interrupt_save;

	interrupt_save = signal(SIGINT, SIG_IGN);
	parentpid = getpid();

restore_check_point:
	(void)signal(SIGINT, interrupt_save);
	/*
	 *	All signals are inherited...
	 */
	setproctitle(NULL);	/* Restore the proctitle. */
	childpid = fork();
	if (childpid < 0) {
		msg("Context save fork fails in parent %d\n", parentpid);
		Exit(X_ABORT);
	}
	if (childpid != 0) {
		/*
		 *	PARENT:
		 *	save the context by waiting
		 *	until the child doing all of the work returns.
		 *	don't catch the interrupt
		 */
		signal(SIGINT, SIG_IGN);
#ifdef TDEBUG
		msg("Tape: %d; parent process: %d child process %d\n",
			tapeno+1, parentpid, childpid);
#endif /* TDEBUG */
		if (waitpid(childpid, &status, 0) == -1)
			msg("Waiting for child %d: %s\n", childpid,
			    strerror(errno));
		if (status & 0xFF) {
			msg("Child %d returns LOB status %o\n",
				childpid, status&0xFF);
		}
		status = (status >> 8) & 0xFF;
#ifdef TDEBUG
		switch(status) {
			case X_FINOK:
				msg("Child %d finishes X_FINOK\n", childpid);
				break;
			case X_ABORT:
				msg("Child %d finishes X_ABORT\n", childpid);
				break;
			case X_REWRITE:
				msg("Child %d finishes X_REWRITE\n", childpid);
				break;
			default:
				msg("Child %d finishes unknown %d\n",
					childpid, status);
				break;
		}
#endif /* TDEBUG */
		switch(status) {
			case X_FINOK:
				Exit(X_FINOK);
			case X_ABORT:
				Exit(X_ABORT);
			case X_REWRITE:
				goto restore_check_point;
			default:
				msg("Bad return code from dump: %d\n", status);
				Exit(X_ABORT);
		}
		/*NOTREACHED*/
	} else {	/* we are the child; just continue */
#ifdef TDEBUG
		sleep(4);	/* allow time for parent's message to get out */
		msg("Child on Tape %d has parent %d, my pid = %d\n",
			tapeno+1, parentpid, getpid());
#endif /* TDEBUG */
		/*
		 * If we have a name like "/dev/rmt0,/dev/rmt1",
		 * use the name before the comma first, and save
		 * the remaining names for subsequent volumes.
		 */
		tapeno++;		/* current tape sequence */
		if (nexttape || strchr(tape, ',')) {
			if (nexttape && *nexttape)
				tape = nexttape;
			if ((p = strchr(tape, ',')) != NULL) {
				*p = '\0';
				nexttape = p + 1;
			} else
				nexttape = NULL;
			msg("Dumping volume %d on %s\n", tapeno, tape);
		}
		if (pipeout) {
			tapefd = STDOUT_FILENO;
		} else if (popenout) {
			char volno[sizeof("2147483647")];

			(void)sprintf(volno, "%d", spcl.c_volume + 1);
			if (setenv("DUMP_VOLUME", volno, 1) == -1) {
				msg("Cannot set $DUMP_VOLUME.\n");
				dumpabort(0);
			}
			popenfp = popen(popenout, "w");
			if (popenfp == NULL) {
				msg("Cannot open output pipeline \"%s\".\n",
				    popenout);
				dumpabort(0);
			}
			tapefd = fileno(popenfp);
		} else {
#ifdef RDUMP
			while ((tapefd = (host ? rmtopen(tape, 2) :
				open(tape, O_WRONLY|O_CREAT, 0666))) < 0)
#else
			while ((tapefd =
				open(tape, O_WRONLY|O_CREAT, 0666)) < 0)
#endif
			{
				msg("Cannot open output \"%s\".\n", tape);
				if (!query("Do you want to retry the open?"))
					dumpabort(0);
			}
		}

		create_workers();  /* Share open tape file descriptor with workers */
		if (popenout)
			close(tapefd);	/* Give up our copy of it. */
		signal(SIGINFO, infosch);

		asize = 0;
		blocksthisvol = 0;
		if (top)
			newtape++;		/* new tape signal */
		spcl.c_count = wp->count;
		/*
		 * measure firstrec in TP_BSIZE units since restore doesn't
		 * know the correct ntrec value...
		 */
		spcl.c_firstrec = wp->firstrec;
		spcl.c_volume++;
		spcl.c_type = TS_TAPE;
		writeheader((ino_t)wp->inode);
		if (tapeno > 1)
			msg("Volume %d begins with blocks from inode %d\n",
				tapeno, wp->inode);
	}
}

void
dumpabort(int signo __unused)
{

	if (master != 0 && master != getpid())
		/* Signals master to call dumpabort */
		(void) kill(master, SIGTERM);
	else {
		killall();
		msg("The ENTIRE dump is aborted.\n");
	}
#ifdef RDUMP
	rmtclose();
#endif
	Exit(X_ABORT);
}

void
Exit(int status)
{

#ifdef TDEBUG
	msg("pid = %d exits with status %d\n", getpid(), status);
#endif /* TDEBUG */
	exit(status);
}

/*
 * proceed - handler for SIGUSR2, used to synchronize IO between the workers.
 */
void
proceed(int signo __unused)
{

	if (ready)
		longjmp(jmpbuf, 1);
	caught++;
}

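/*
 * Fork the worker processes and wire up the tape-passing ring: the master
 * keeps one end of a socketpair per worker as its command pipe, marks the
 * worker that currently owns the tape as already signalled, and finally
 * sends each worker the pid of its successor so it knows whom to wake with
 * SIGUSR2 after each write.
 */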
void
create_workers(void)
{
	int cmd[2];
	int i, j;

	master = getpid();

	signal(SIGTERM, dumpabort);  /* Worker sends SIGTERM on dumpabort() */
	signal(SIGPIPE, sigpipe);
	signal(SIGUSR1, tperror);    /* Worker sends SIGUSR1 on tape errors */
	signal(SIGUSR2, proceed);    /* Worker sends SIGUSR2 to next worker */

	for (i = 0; i < WORKERS; i++) {
		if (i == wp - &workers[0]) {
			caught = 1;
		} else {
			caught = 0;
		}

		if (socketpair(AF_UNIX, SOCK_STREAM, 0, cmd) < 0 ||
		    (workers[i].pid = fork()) < 0)
			quit("too many workers, %d (recompile smaller): %s\n",
			    i, strerror(errno));

		workers[i].fd = cmd[1];
		workers[i].sent = 0;
		if (workers[i].pid == 0) {	    /* Worker starts up here */
			for (j = 0; j <= i; j++)
				(void) close(workers[j].fd);
			signal(SIGINT, SIG_IGN);    /* Master handles this */
			worker(cmd[0], i);
			Exit(X_FINOK);
		}
	}

	for (i = 0; i < WORKERS; i++)
		(void) atomic_write(workers[i].fd,
		    (const void *) &workers[(i + 1) % WORKERS].pid,
		    sizeof workers[0].pid);

	master = 0;
}

void
killall(void)
{
	int i;

	for (i = 0; i < WORKERS; i++)
		if (workers[i].pid > 0) {
			(void) kill(workers[i].pid, SIGKILL);
			workers[i].sent = 0;
		}
}

/*
 * Synchronization - each process has a lockfile, and shares file
 * descriptors to the following process's lockfile.  When our write
 * completes, we release our lock on the following process's lock-
 * file, allowing the following process to lock it and proceed.  We
 * get the lock back for the next cycle by swapping descriptors.
 */
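/*
 * Format of a buffer sent down a worker's command pipe by flushtape() and
 * rollforward(): the full array of ntrec + 1 request packets (a zero count
 * marks the end of the meaningful entries), followed by one TP_BSIZE record
 * of data for each packet whose dblk is zero.  Packets with a nonzero dblk
 * carry only a disk address and record count; the worker reads those blocks
 * from the raw disk itself.
 */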
static void
worker(int cmd, int worker_number)
{
	int nread;
	int nextworker, size, wrote, eot_count;

	/*
	 * Need our own seek pointer.
	 */
	(void) close(diskfd);
	if ((diskfd = open(disk, O_RDONLY)) < 0)
		quit("worker couldn't reopen disk: %s\n", strerror(errno));

	/*
	 * Need the pid of the next worker in the loop...
	 */
	if ((nread = atomic_read(cmd, (void *)&nextworker, sizeof nextworker))
	    != sizeof nextworker) {
		quit("master/worker protocol botched - didn't get pid of next worker.\n");
	}

	/*
	 * Get list of blocks to dump, read the blocks into tape buffer
	 */
	while ((nread = atomic_read(cmd, (void *)wp->req, reqsiz)) == reqsiz) {
		struct req *p = wp->req;

		for (trecno = 0; trecno < ntrec;
		     trecno += p->count, p += p->count) {
			if (p->dblk) {
				blkread(p->dblk, wp->tblock[trecno],
					p->count * TP_BSIZE);
			} else {
				if (p->count != 1 || atomic_read(cmd,
				    (void *)wp->tblock[trecno],
				    TP_BSIZE) != TP_BSIZE)
					quit("master/worker protocol botched.\n");
			}
		}
		if (setjmp(jmpbuf) == 0) {
			ready = 1;
			if (!caught)
				(void) pause();
		}
		ready = 0;
		caught = 0;

		/* Try to write the data... */
		eot_count = 0;
		size = 0;

		wrote = 0;
		while (eot_count < 10 && size < writesize) {
#ifdef RDUMP
			if (host)
				wrote = rmtwrite(wp->tblock[0]+size,
				    writesize-size);
			else
#endif
				wrote = write(tapefd, wp->tblock[0]+size,
				    writesize-size);
#ifdef WRITEDEBUG
			printf("worker %d wrote %d\n", worker_number, wrote);
#endif
			if (wrote < 0)
				break;
			if (wrote == 0)
				eot_count++;
			size += wrote;
		}

#ifdef WRITEDEBUG
		if (size != writesize)
			printf("worker %d only wrote %d out of %d bytes and gave up.\n",
			    worker_number, size, writesize);
#endif

		/*
		 * Handle ENOSPC as an EOT condition.
		 */
		if (wrote < 0 && errno == ENOSPC) {
			wrote = 0;
			eot_count++;
		}

		if (eot_count > 0)
			size = 0;

		if (wrote < 0) {
			(void) kill(master, SIGUSR1);
			for (;;)
				(void) sigpause(0);
		} else {
			/*
			 * pass size of write back to master
			 * (for EOT handling)
			 */
			(void)atomic_write(cmd, (const void *)&size,
			    sizeof size);
		}

		/*
		 * If partial write, don't want next worker to go.
		 * Also jolts him awake.
		 */
		(void) kill(nextworker, SIGUSR2);
	}
	if (nread != 0)
		quit("error reading command pipe: %s\n", strerror(errno));
}

/*
 * Since a read from a pipe may not return all we asked for,
 * loop until the count is satisfied (or error).
 */
static int
atomic_read(int fd, void *buf, int count)
{
	int got, need = count;

	while ((got = read(fd, buf, need)) > 0 && (need -= got) > 0)
		buf += got;
	return (got < 0 ? got : count - need);
}

/*
 * Since a write to a pipe may not write all we ask if we get a signal,
 * loop until the count is satisfied (or error).
 */
static int
atomic_write(int fd, const void *buf, int count)
{
	int got, need = count;

	while ((got = write(fd, buf, need)) > 0 && (need -= got) > 0)
		buf += got;
	return (got < 0 ? got : count - need);
}