/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1980, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if 0
#ifndef lint
static const char sccsid[] = "@(#)utilities.c	8.6 (Berkeley) 5/19/95";
#endif /* not lint */
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include <ufs/ufs/dinode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ffs/fs.h>

#include <err.h>
#include <errno.h>
#include <string.h>
#include <ctype.h>
#include <fstab.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <libufs.h>

#include "fsck.h"

static void slowio_start(void);
static void slowio_end(void);
static void printIOstats(void);

static long diskreads, totaldiskreads, totalreads; /* Disk cache statistics */
static struct timespec startpass, finishpass;
struct timeval slowio_starttime;
int slowio_delay_usec = 10000;	/* Initial IO delay for background fsck */
int slowio_pollcnt;
static struct bufarea cgblk;	/* backup buffer for cylinder group blocks */
static TAILQ_HEAD(buflist, bufarea) bufhead;	/* head of buffer cache list */
static int numbufs;		/* size of buffer cache */
static char *buftype[BT_NUMBUFTYPES] = BT_NAMES;
static struct bufarea *cgbufs;	/* header for cylinder group cache */
static int flushtries;		/* number of tries to reclaim memory */

void
fsutilinit(void)
{
	diskreads = totaldiskreads = totalreads = 0;
	bzero(&startpass, sizeof(struct timespec));
	bzero(&finishpass, sizeof(struct timespec));
	bzero(&slowio_starttime, sizeof(struct timeval));
	slowio_delay_usec = 10000;
	slowio_pollcnt = 0;
	bzero(&cgblk, sizeof(struct bufarea));
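	/* Reset the buffer cache list and the cylinder group cache state. */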
	TAILQ_INIT(&bufhead);
	numbufs = 0;
	/* buftype ? */
	cgbufs = NULL;
	flushtries = 0;
}

int
ftypeok(union dinode *dp)
{
	switch (DIP(dp, di_mode) & IFMT) {

	case IFDIR:
	case IFREG:
	case IFBLK:
	case IFCHR:
	case IFLNK:
	case IFSOCK:
	case IFIFO:
		return (1);

	default:
		if (debug)
			printf("bad file type 0%o\n", DIP(dp, di_mode));
		return (0);
	}
}

int
reply(const char *question)
{
	int persevere;
	char c;

	if (preen)
		pfatal("INTERNAL ERROR: GOT TO reply()");
	persevere = !strcmp(question, "CONTINUE");
	printf("\n");
	if (!persevere && (nflag || (fswritefd < 0 && bkgrdflag == 0))) {
		printf("%s? no\n\n", question);
		resolved = 0;
		return (0);
	}
	if (yflag || (persevere && nflag)) {
		printf("%s? yes\n\n", question);
		return (1);
	}
	do {
		printf("%s? [yn] ", question);
		(void) fflush(stdout);
		c = getc(stdin);
		while (c != '\n' && getc(stdin) != '\n') {
			if (feof(stdin)) {
				resolved = 0;
				return (0);
			}
		}
	} while (c != 'y' && c != 'Y' && c != 'n' && c != 'N');
	printf("\n");
	if (c == 'y' || c == 'Y')
		return (1);
	resolved = 0;
	return (0);
}

/*
 * Look up state information for an inode.
 */
struct inostat *
inoinfo(ino_t inum)
{
	static struct inostat unallocated = { USTATE, 0, 0 };
	struct inostatlist *ilp;
	int iloff;

	if (inum > maxino)
		errx(EEXIT, "inoinfo: inumber %ju out of range",
		    (uintmax_t)inum);
	ilp = &inostathead[inum / sblock.fs_ipg];
	iloff = inum % sblock.fs_ipg;
	if (iloff >= ilp->il_numalloced)
		return (&unallocated);
	return (&ilp->il_stat[iloff]);
}

/*
 * Malloc buffers and set up cache.
 */
void
bufinit(void)
{
	struct bufarea *bp;
	long bufcnt, i;
	char *bufp;

	pbp = pdirbp = (struct bufarea *)0;
	bufp = Malloc((unsigned int)sblock.fs_bsize);
	if (bufp == NULL)
		errx(EEXIT, "cannot allocate buffer pool");
	cgblk.b_un.b_buf = bufp;
	initbarea(&cgblk, BT_CYLGRP);
	TAILQ_INIT(&bufhead);
	bufcnt = MAXBUFS;
	if (bufcnt < MINBUFS)
		bufcnt = MINBUFS;
	for (i = 0; i < bufcnt; i++) {
		bp = (struct bufarea *)Malloc(sizeof(struct bufarea));
		bufp = Malloc((unsigned int)sblock.fs_bsize);
		if (bp == NULL || bufp == NULL) {
			if (i >= MINBUFS)
				break;
			errx(EEXIT, "cannot allocate buffer pool");
		}
		bp->b_un.b_buf = bufp;
		TAILQ_INSERT_HEAD(&bufhead, bp, b_list);
		initbarea(bp, BT_UNKNOWN);
	}
	numbufs = i;	/* save number of buffers */
	for (i = 0; i < BT_NUMBUFTYPES; i++) {
		readtime[i].tv_sec = totalreadtime[i].tv_sec = 0;
		readtime[i].tv_nsec = totalreadtime[i].tv_nsec = 0;
		readcnt[i] = totalreadcnt[i] = 0;
	}
}

/*
 * Manage cylinder group buffers.
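 * One bufarea per cylinder group is kept in the cgbufs array; when memory
 * for a group's buffer cannot be obtained, the shared cgblk buffer is used
 * as a fallback (see cglookup() below).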
 */
static struct bufarea *cgbufs;	/* header for cylinder group cache */
static int flushtries;		/* number of tries to reclaim memory */

struct bufarea *
cglookup(int cg)
{
	struct bufarea *cgbp;
	struct cg *cgp;

	if (cgbufs == NULL) {
		cgbufs = calloc(sblock.fs_ncg, sizeof(struct bufarea));
		if (cgbufs == NULL)
			errx(EEXIT, "cannot allocate cylinder group buffers");
	}
	cgbp = &cgbufs[cg];
	if (cgbp->b_un.b_cg != NULL)
		return (cgbp);
	cgp = NULL;
	if (flushtries == 0)
		cgp = malloc((unsigned int)sblock.fs_cgsize);
	if (cgp == NULL) {
		getblk(&cgblk, cgtod(&sblock, cg), sblock.fs_cgsize);
		return (&cgblk);
	}
	cgbp->b_un.b_cg = cgp;
	initbarea(cgbp, BT_CYLGRP);
	getblk(cgbp, cgtod(&sblock, cg), sblock.fs_cgsize);
	return (cgbp);
}

/*
 * Mark a cylinder group buffer as dirty.
 * Update its check-hash if check hashes are enabled.
 */
void
cgdirty(struct bufarea *cgbp)
{
	struct cg *cg;

	cg = cgbp->b_un.b_cg;
	if ((sblock.fs_metackhash & CK_CYLGRP) != 0) {
		cg->cg_ckhash = 0;
		cg->cg_ckhash =
		    calculate_crc32c(~0L, (void *)cg, sblock.fs_cgsize);
	}
	dirty(cgbp);
}

/*
 * Attempt to flush a cylinder group cache entry.
 * Return whether the flush was successful.
 */
int
flushentry(void)
{
	struct bufarea *cgbp;

	if (flushtries == sblock.fs_ncg || cgbufs == NULL)
		return (0);
	cgbp = &cgbufs[flushtries++];
	if (cgbp->b_un.b_cg == NULL)
		return (0);
	flush(fswritefd, cgbp);
	free(cgbp->b_un.b_buf);
	cgbp->b_un.b_buf = NULL;
	return (1);
}

/*
 * Manage a cache of directory blocks.
 */
struct bufarea *
getdatablk(ufs2_daddr_t blkno, long size, int type)
{
	struct bufarea *bp;

	TAILQ_FOREACH(bp, &bufhead, b_list)
		if (bp->b_bno == fsbtodb(&sblock, blkno))
			goto foundit;
	TAILQ_FOREACH_REVERSE(bp, &bufhead, buflist, b_list)
		if ((bp->b_flags & B_INUSE) == 0)
			break;
	if (bp == NULL)
		errx(EEXIT, "deadlocked buffer pool");
	bp->b_type = type;
	getblk(bp, blkno, size);
	/* fall through */
foundit:
	if (debug && bp->b_type != type)
		printf("Buffer type changed from %s to %s\n",
		    buftype[bp->b_type], buftype[type]);
	TAILQ_REMOVE(&bufhead, bp, b_list);
	TAILQ_INSERT_HEAD(&bufhead, bp, b_list);
	bp->b_flags |= B_INUSE;
	return (bp);
}

void
getblk(struct bufarea *bp, ufs2_daddr_t blk, long size)
{
	ufs2_daddr_t dblk;
	struct timespec start, finish;

	dblk = fsbtodb(&sblock, blk);
	if (bp->b_bno == dblk) {
		totalreads++;
	} else {
		flush(fswritefd, bp);
		if (debug) {
			readcnt[bp->b_type]++;
			clock_gettime(CLOCK_REALTIME_PRECISE, &start);
		}
		bp->b_errs = blread(fsreadfd, bp->b_un.b_buf, dblk, size);
		if (debug) {
			clock_gettime(CLOCK_REALTIME_PRECISE, &finish);
			timespecsub(&finish, &start, &finish);
			timespecadd(&readtime[bp->b_type], &finish,
			    &readtime[bp->b_type]);
		}
		bp->b_bno = dblk;
		bp->b_size = size;
	}
}

void
flush(int fd, struct bufarea *bp)
{

	if (!bp->b_dirty)
		return;
	bp->b_dirty = 0;
	if (fswritefd < 0) {
		pfatal("WRITING IN READ_ONLY MODE.\n");
		return;
	}
	if (bp->b_errs != 0)
		pfatal("WRITING %sZERO'ED BLOCK %lld TO DISK\n",
		    (bp->b_errs == bp->b_size / dev_bsize) ?
		    "" : "PARTIALLY ",
		    (long long)bp->b_bno);
	bp->b_errs = 0;
	/*
	 * Write using the appropriate function.
	 */
	switch (bp->b_type) {
	case BT_SUPERBLK:
		if (bp != &sblk)
			pfatal("BUFFER %p DOES NOT MATCH SBLK %p\n",
			    bp, &sblk);
		if (sbput(fd, bp->b_un.b_fs, 0) == 0)
			fsmodified = 1;
		break;
	case BT_CYLGRP:
		if (cgput(&disk, bp->b_un.b_cg) == 0)
			fsmodified = 1;
		break;
	default:
		blwrite(fd, bp->b_un.b_buf, bp->b_bno, bp->b_size);
		break;
	}
}

void
rwerror(const char *mesg, ufs2_daddr_t blk)
{

	if (bkgrdcheck)
		exit(EEXIT);
	if (preen == 0)
		printf("\n");
	pfatal("CANNOT %s: %ld", mesg, (long)blk);
	if (reply("CONTINUE") == 0)
		exit(EEXIT);
}

void
ckfini(int markclean)
{
	struct bufarea *bp, *nbp;
	int ofsmodified, cnt;

	if (bkgrdflag) {
		unlink(snapname);
		if ((!(sblock.fs_flags & FS_UNCLEAN)) != markclean) {
			cmd.value = FS_UNCLEAN;
			cmd.size = markclean ? -1 : 1;
			if (sysctlbyname("vfs.ffs.setflags", 0, 0,
			    &cmd, sizeof cmd) == -1)
				rwerror("SET FILE SYSTEM FLAGS", FS_UNCLEAN);
			if (!preen) {
				printf("\n***** FILE SYSTEM MARKED %s *****\n",
				    markclean ? "CLEAN" : "DIRTY");
				if (!markclean)
					rerun = 1;
			}
		} else if (!preen && !markclean) {
			printf("\n***** FILE SYSTEM STILL DIRTY *****\n");
			rerun = 1;
		}
	}
	if (debug && totalreads > 0)
		printf("cache with %d buffers missed %ld of %ld (%d%%)\n",
		    numbufs, totaldiskreads, totalreads,
		    (int)(totaldiskreads * 100 / totalreads));
	if (fswritefd < 0) {
		(void)close(fsreadfd);
		return;
	}
	flush(fswritefd, &sblk);
	if (havesb && cursnapshot == 0 && sblock.fs_magic == FS_UFS2_MAGIC &&
	    sblk.b_bno != sblock.fs_sblockloc / dev_bsize &&
	    !preen && reply("UPDATE STANDARD SUPERBLOCK")) {
		/* Change the write destination to standard superblock */
		sblock.fs_sblockactualloc = sblock.fs_sblockloc;
		sblk.b_bno = sblock.fs_sblockloc / dev_bsize;
		sbdirty();
		flush(fswritefd, &sblk);
	}
	flush(fswritefd, &cgblk);
	free(cgblk.b_un.b_buf);
	cnt = 0;
	TAILQ_FOREACH_REVERSE_SAFE(bp, &bufhead, buflist, b_list, nbp) {
		TAILQ_REMOVE(&bufhead, bp, b_list);
		cnt++;
		flush(fswritefd, bp);
		free(bp->b_un.b_buf);
		free((char *)bp);
	}
	if (numbufs != cnt)
		errx(EEXIT, "panic: lost %d buffers", numbufs - cnt);
	if (cgbufs != NULL) {
		for (cnt = 0; cnt < sblock.fs_ncg; cnt++) {
			if (cgbufs[cnt].b_un.b_cg == NULL)
				continue;
			flush(fswritefd, &cgbufs[cnt]);
			free(cgbufs[cnt].b_un.b_cg);
		}
		free(cgbufs);
	}
	pbp = pdirbp = (struct bufarea *)0;
	if (cursnapshot == 0 && sblock.fs_clean != markclean) {
		if ((sblock.fs_clean = markclean) != 0) {
			sblock.fs_flags &= ~(FS_UNCLEAN | FS_NEEDSFSCK);
			sblock.fs_pendingblocks = 0;
			sblock.fs_pendinginodes = 0;
		}
		sbdirty();
		ofsmodified = fsmodified;
		flush(fswritefd, &sblk);
		fsmodified = ofsmodified;
		if (!preen) {
			printf("\n***** FILE SYSTEM MARKED %s *****\n",
			    markclean ? "CLEAN" : "DIRTY");
			if (!markclean)
				rerun = 1;
		}
	} else if (!preen) {
		if (markclean) {
			printf("\n***** FILE SYSTEM IS CLEAN *****\n");
		} else {
			printf("\n***** FILE SYSTEM STILL DIRTY *****\n");
			rerun = 1;
		}
	}
	(void)close(fsreadfd);
	(void)close(fswritefd);
}

/*
 * Print out I/O statistics.
 */
void
IOstats(char *what)
{
	int i;

	if (debug == 0)
		return;
	if (diskreads == 0) {
		printf("%s: no I/O\n\n", what);
		return;
	}
	if (startpass.tv_sec == 0)
		startpass = startprog;
	printf("%s: I/O statistics\n", what);
	printIOstats();
	totaldiskreads += diskreads;
	diskreads = 0;
	for (i = 0; i < BT_NUMBUFTYPES; i++) {
		timespecadd(&totalreadtime[i], &readtime[i], &totalreadtime[i]);
		totalreadcnt[i] += readcnt[i];
		readtime[i].tv_sec = readtime[i].tv_nsec = 0;
		readcnt[i] = 0;
	}
	clock_gettime(CLOCK_REALTIME_PRECISE, &startpass);
}

void
finalIOstats(void)
{
	int i;

	if (debug == 0)
		return;
	printf("Final I/O statistics\n");
	totaldiskreads += diskreads;
	diskreads = totaldiskreads;
	startpass = startprog;
	for (i = 0; i < BT_NUMBUFTYPES; i++) {
		timespecadd(&totalreadtime[i], &readtime[i], &totalreadtime[i]);
		totalreadcnt[i] += readcnt[i];
		readtime[i] = totalreadtime[i];
		readcnt[i] = totalreadcnt[i];
	}
	printIOstats();
}

static void
printIOstats(void)
{
	long long msec, totalmsec;
	int i;

	clock_gettime(CLOCK_REALTIME_PRECISE, &finishpass);
	timespecsub(&finishpass, &startpass, &finishpass);
	printf("Running time: %jd.%03ld sec\n",
	    (intmax_t)finishpass.tv_sec, finishpass.tv_nsec / 1000000);
	printf("buffer reads by type:\n");
	for (totalmsec = 0, i = 0; i < BT_NUMBUFTYPES; i++)
		totalmsec += readtime[i].tv_sec * 1000 +
		    readtime[i].tv_nsec / 1000000;
	if (totalmsec == 0)
		totalmsec = 1;
	for (i = 0; i < BT_NUMBUFTYPES; i++) {
		if (readcnt[i] == 0)
			continue;
		msec =
		    readtime[i].tv_sec * 1000 + readtime[i].tv_nsec / 1000000;
		printf("%21s:%8ld %2ld.%ld%% %4jd.%03ld sec %2lld.%lld%%\n",
		    buftype[i], readcnt[i], readcnt[i] * 100 / diskreads,
		    (readcnt[i] * 1000 / diskreads) % 10,
		    (intmax_t)readtime[i].tv_sec, readtime[i].tv_nsec / 1000000,
		    msec * 100 / totalmsec, (msec * 1000 / totalmsec) % 10);
	}
	printf("\n");
}

int
blread(int fd, char *buf, ufs2_daddr_t blk, long size)
{
	char *cp;
	int i, errs;
	off_t offset;

	offset = blk;
	offset *= dev_bsize;
	if (bkgrdflag)
		slowio_start();
	totalreads++;
	diskreads++;
	if (lseek(fd, offset, 0) < 0)
		rwerror("SEEK BLK", blk);
	else if (read(fd, buf, (int)size) == size) {
		if (bkgrdflag)
			slowio_end();
		return (0);
	}

	/*
	 * This is handled specially here instead of in rwerror because
	 * rwerror is used for all sorts of errors, not just true read/write
	 * errors.  It should be refactored and fixed.
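	 * When not surrendering, the failed transfer is retried below one
	 * sector at a time so that each unreadable sector can be reported
	 * individually.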
	 */
	if (surrender) {
		pfatal("CANNOT READ_BLK: %ld", (long)blk);
		errx(EEXIT, "ABORTING DUE TO READ ERRORS");
	} else
		rwerror("READ BLK", blk);

	if (lseek(fd, offset, 0) < 0)
		rwerror("SEEK BLK", blk);
	errs = 0;
	memset(buf, 0, (size_t)size);
	printf("THE FOLLOWING DISK SECTORS COULD NOT BE READ:");
	for (cp = buf, i = 0; i < size; i += secsize, cp += secsize) {
		if (read(fd, cp, (int)secsize) != secsize) {
			(void)lseek(fd, offset + i + secsize, 0);
			if (secsize != dev_bsize && dev_bsize != 1)
				printf(" %jd (%jd),",
				    (intmax_t)(blk * dev_bsize + i) / secsize,
				    (intmax_t)blk + i / dev_bsize);
			else
				printf(" %jd,", (intmax_t)blk + i / dev_bsize);
			errs++;
		}
	}
	printf("\n");
	if (errs)
		resolved = 0;
	return (errs);
}

void
blwrite(int fd, char *buf, ufs2_daddr_t blk, ssize_t size)
{
	int i;
	char *cp;
	off_t offset;

	if (fd < 0)
		return;
	offset = blk;
	offset *= dev_bsize;
	if (lseek(fd, offset, 0) < 0)
		rwerror("SEEK BLK", blk);
	else if (write(fd, buf, size) == size) {
		fsmodified = 1;
		return;
	}
	resolved = 0;
	rwerror("WRITE BLK", blk);
	if (lseek(fd, offset, 0) < 0)
		rwerror("SEEK BLK", blk);
	printf("THE FOLLOWING SECTORS COULD NOT BE WRITTEN:");
	for (cp = buf, i = 0; i < size; i += dev_bsize, cp += dev_bsize)
		if (write(fd, cp, dev_bsize) != dev_bsize) {
			(void)lseek(fd, offset + i + dev_bsize, 0);
			printf(" %jd,", (intmax_t)blk + i / dev_bsize);
		}
	printf("\n");
	return;
}

void
blerase(int fd, ufs2_daddr_t blk, long size)
{
	off_t ioarg[2];

	if (fd < 0)
		return;
	ioarg[0] = blk * dev_bsize;
	ioarg[1] = size;
	ioctl(fd, DIOCGDELETE, ioarg);
	/* we don't really care if we succeed or not */
	return;
}

/*
 * Fill a contiguous region with all-zeroes.  Note ZEROBUFSIZE is by
 * definition a multiple of dev_bsize.
 */
void
blzero(int fd, ufs2_daddr_t blk, long size)
{
	static char *zero;
	off_t offset, len;

	if (fd < 0)
		return;
	if (zero == NULL) {
		zero = calloc(ZEROBUFSIZE, 1);
		if (zero == NULL)
			errx(EEXIT, "cannot allocate buffer pool");
	}
	offset = blk * dev_bsize;
	if (lseek(fd, offset, 0) < 0)
		rwerror("SEEK BLK", blk);
	while (size > 0) {
		len = MIN(ZEROBUFSIZE, size);
		if (write(fd, zero, len) != len)
			rwerror("WRITE BLK", blk);
		blk += len / dev_bsize;
		size -= len;
	}
}

/*
 * Verify cylinder group's magic number and other parameters.  If the
 * test fails, offer an option to rebuild the whole cylinder group.
 */
int
check_cgmagic(int cg, struct bufarea *cgbp)
{
	struct cg *cgp = cgbp->b_un.b_cg;

	/*
	 * Extended cylinder group checks.
	 */
	if (cg_chkmagic(cgp) &&
	    ((sblock.fs_magic == FS_UFS1_MAGIC &&
	      cgp->cg_old_niblk == sblock.fs_ipg &&
	      cgp->cg_ndblk <= sblock.fs_fpg &&
	      cgp->cg_old_ncyl <= sblock.fs_old_cpg) ||
	     (sblock.fs_magic == FS_UFS2_MAGIC &&
	      cgp->cg_niblk == sblock.fs_ipg &&
	      cgp->cg_ndblk <= sblock.fs_fpg &&
	      cgp->cg_initediblk <= sblock.fs_ipg))) {
		return (1);
	}
	pfatal("CYLINDER GROUP %d: BAD MAGIC NUMBER", cg);
	if (!reply("REBUILD CYLINDER GROUP")) {
		printf("YOU WILL NEED TO RERUN FSCK.\n");
		rerun = 1;
		return (1);
	}
	/*
	 * Zero out the cylinder group and then initialize critical fields.
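	 * The offsets computed below lay out the used-inode map, the free
	 * block map, and (when clustering is enabled) the cluster summary
	 * and cluster map within the rebuilt group.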
	 * Bit maps and summaries will be recalculated by later passes.
	 */
	memset(cgp, 0, (size_t)sblock.fs_cgsize);
	cgp->cg_magic = CG_MAGIC;
	cgp->cg_cgx = cg;
	cgp->cg_niblk = sblock.fs_ipg;
	cgp->cg_initediblk = MIN(sblock.fs_ipg, 2 * INOPB(&sblock));
	if (cgbase(&sblock, cg) + sblock.fs_fpg < sblock.fs_size)
		cgp->cg_ndblk = sblock.fs_fpg;
	else
		cgp->cg_ndblk = sblock.fs_size - cgbase(&sblock, cg);
	cgp->cg_iusedoff = &cgp->cg_space[0] - (u_char *)(&cgp->cg_firstfield);
	if (sblock.fs_magic == FS_UFS1_MAGIC) {
		cgp->cg_niblk = 0;
		cgp->cg_initediblk = 0;
		cgp->cg_old_ncyl = sblock.fs_old_cpg;
		cgp->cg_old_niblk = sblock.fs_ipg;
		cgp->cg_old_btotoff = cgp->cg_iusedoff;
		cgp->cg_old_boff = cgp->cg_old_btotoff +
		    sblock.fs_old_cpg * sizeof(int32_t);
		cgp->cg_iusedoff = cgp->cg_old_boff +
		    sblock.fs_old_cpg * sizeof(u_int16_t);
	}
	cgp->cg_freeoff = cgp->cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
	cgp->cg_nextfreeoff = cgp->cg_freeoff + howmany(sblock.fs_fpg,CHAR_BIT);
	if (sblock.fs_contigsumsize > 0) {
		cgp->cg_nclusterblks = cgp->cg_ndblk / sblock.fs_frag;
		cgp->cg_clustersumoff =
		    roundup(cgp->cg_nextfreeoff, sizeof(u_int32_t));
		cgp->cg_clustersumoff -= sizeof(u_int32_t);
		cgp->cg_clusteroff = cgp->cg_clustersumoff +
		    (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t);
		cgp->cg_nextfreeoff = cgp->cg_clusteroff +
		    howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
	}
	cgdirty(cgbp);
	return (0);
}

/*
 * allocate a data block with the specified number of fragments
 */
ufs2_daddr_t
allocblk(long frags)
{
	int i, j, k, cg, baseblk;
	struct bufarea *cgbp;
	struct cg *cgp;

	if (frags <= 0 || frags > sblock.fs_frag)
		return (0);
	for (i = 0; i < maxfsblock - sblock.fs_frag; i += sblock.fs_frag) {
		for (j = 0; j <= sblock.fs_frag - frags; j++) {
			if (testbmap(i + j))
				continue;
			for (k = 1; k < frags; k++)
				if (testbmap(i + j + k))
					break;
			if (k < frags) {
				j += k;
				continue;
			}
			cg = dtog(&sblock, i + j);
			cgbp = cglookup(cg);
			cgp = cgbp->b_un.b_cg;
			if (!check_cgmagic(cg, cgbp))
				return (0);
			baseblk = dtogd(&sblock, i + j);
			for (k = 0; k < frags; k++) {
				setbmap(i + j + k);
				clrbit(cg_blksfree(cgp), baseblk + k);
			}
			n_blks += frags;
			if (frags == sblock.fs_frag)
				cgp->cg_cs.cs_nbfree--;
			else
				cgp->cg_cs.cs_nffree -= frags;
			cgdirty(cgbp);
			return (i + j);
		}
	}
	return (0);
}

/*
 * Free a previously allocated block
 */
void
freeblk(ufs2_daddr_t blkno, long frags)
{
	struct inodesc idesc;

	idesc.id_blkno = blkno;
	idesc.id_numfrags = frags;
	(void)pass4check(&idesc);
}

/* Slow down IO so as to leave some disk bandwidth for other processes */
void
slowio_start()
{

	/* Delay one in every 8 operations */
	slowio_pollcnt = (slowio_pollcnt + 1) & 7;
	if (slowio_pollcnt == 0) {
		gettimeofday(&slowio_starttime, NULL);
	}
}

void
slowio_end()
{
	struct timeval tv;
	int delay_usec;

	if (slowio_pollcnt != 0)
		return;

	/* Update the slowdown interval.
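	 * slowio_delay_usec holds a running average of recent read times;
	 * each new sample is folded in below with a weight of 1/64.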
	 */
	gettimeofday(&tv, NULL);
	delay_usec = (tv.tv_sec - slowio_starttime.tv_sec) * 1000000 +
	    (tv.tv_usec - slowio_starttime.tv_usec);
	if (delay_usec < 64)
		delay_usec = 64;
	if (delay_usec > 2500000)
		delay_usec = 2500000;
	slowio_delay_usec = (slowio_delay_usec * 63 + delay_usec) >> 6;
	/* delay by 8 times the average IO delay */
	if (slowio_delay_usec > 64)
		usleep(slowio_delay_usec * 8);
}

/*
 * Find a pathname
 */
void
getpathname(char *namebuf, ino_t curdir, ino_t ino)
{
	int len;
	char *cp;
	struct inodesc idesc;
	static int busy = 0;

	if (curdir == ino && ino == UFS_ROOTINO) {
		(void)strcpy(namebuf, "/");
		return;
	}
	if (busy || !INO_IS_DVALID(curdir)) {
		(void)strcpy(namebuf, "?");
		return;
	}
	busy = 1;
	memset(&idesc, 0, sizeof(struct inodesc));
	idesc.id_type = DATA;
	idesc.id_fix = IGNORE;
	cp = &namebuf[MAXPATHLEN - 1];
	*cp = '\0';
	if (curdir != ino) {
		idesc.id_parent = curdir;
		goto namelookup;
	}
	while (ino != UFS_ROOTINO) {
		idesc.id_number = ino;
		idesc.id_func = findino;
		idesc.id_name = strdup("..");
		if ((ckinode(ginode(ino), &idesc) & FOUND) == 0)
			break;
	namelookup:
		idesc.id_number = idesc.id_parent;
		idesc.id_parent = ino;
		idesc.id_func = findname;
		idesc.id_name = namebuf;
		if ((ckinode(ginode(idesc.id_number), &idesc) & FOUND) == 0)
			break;
		len = strlen(namebuf);
		cp -= len;
		memmove(cp, namebuf, (size_t)len);
		*--cp = '/';
		if (cp < &namebuf[UFS_MAXNAMLEN])
			break;
		ino = idesc.id_number;
	}
	busy = 0;
	if (ino != UFS_ROOTINO)
		*--cp = '?';
	memmove(namebuf, cp, (size_t)(&namebuf[MAXPATHLEN] - cp));
}

void
catch(int sig __unused)
{

	ckfini(0);
	exit(12);
}

/*
 * When preening, allow a single quit to signal
 * a special exit after file system checks complete
 * so that reboot sequence may be interrupted.
 */
void
catchquit(int sig __unused)
{
	printf("returning to single-user after file system check\n");
	returntosingle = 1;
	(void)signal(SIGQUIT, SIG_DFL);
}

/*
 * determine whether an inode should be fixed.
 */
int
dofix(struct inodesc *idesc, const char *msg)
{

	switch (idesc->id_fix) {

	case DONTKNOW:
		if (idesc->id_type == DATA)
			direrror(idesc->id_number, msg);
		else
			pwarn("%s", msg);
		if (preen) {
			printf(" (SALVAGED)\n");
			idesc->id_fix = FIX;
			return (ALTERED);
		}
		if (reply("SALVAGE") == 0) {
			idesc->id_fix = NOFIX;
			return (0);
		}
		idesc->id_fix = FIX;
		return (ALTERED);

	case FIX:
		return (ALTERED);

	case NOFIX:
	case IGNORE:
		return (0);

	default:
		errx(EEXIT, "UNKNOWN INODESC FIX MODE %d", idesc->id_fix);
	}
	/* NOTREACHED */
	return (0);
}

#include <stdarg.h>

/*
 * An unexpected inconsistency occurred.
 * Die if preening or file system is running with soft dependency protocol,
 * otherwise just print message and continue.
 */
void
pfatal(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	if (!preen) {
		(void)vfprintf(stdout, fmt, ap);
		va_end(ap);
		if (usedsoftdep)
			(void)fprintf(stdout,
			    "\nUNEXPECTED SOFT UPDATE INCONSISTENCY\n");
		/*
		 * Force foreground fsck to clean up inconsistency.
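		 * The FS_NEEDSFSCK flag set below records that a full
		 * foreground check is required.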
		 */
		if (bkgrdflag) {
			cmd.value = FS_NEEDSFSCK;
			cmd.size = 1;
			if (sysctlbyname("vfs.ffs.setflags", 0, 0,
			    &cmd, sizeof cmd) == -1)
				pwarn("CANNOT SET FS_NEEDSFSCK FLAG\n");
			fprintf(stdout, "CANNOT RUN IN BACKGROUND\n");
			ckfini(0);
			exit(EEXIT);
		}
		return;
	}
	if (cdevname == NULL)
		cdevname = strdup("fsck");
	(void)fprintf(stdout, "%s: ", cdevname);
	(void)vfprintf(stdout, fmt, ap);
	(void)fprintf(stdout,
	    "\n%s: UNEXPECTED%sINCONSISTENCY; RUN fsck MANUALLY.\n",
	    cdevname, usedsoftdep ? " SOFT UPDATE " : " ");
	/*
	 * Force foreground fsck to clean up inconsistency.
	 */
	if (bkgrdflag) {
		cmd.value = FS_NEEDSFSCK;
		cmd.size = 1;
		if (sysctlbyname("vfs.ffs.setflags", 0, 0,
		    &cmd, sizeof cmd) == -1)
			pwarn("CANNOT SET FS_NEEDSFSCK FLAG\n");
	}
	ckfini(0);
	exit(EEXIT);
}

/*
 * Pwarn just prints a message when not preening or running soft dependency
 * protocol, or a warning (preceded by filename) when preening.
 */
void
pwarn(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	if (preen)
		(void)fprintf(stdout, "%s: ", cdevname);
	(void)vfprintf(stdout, fmt, ap);
	va_end(ap);
}

/*
 * Stub for routines from kernel.
 */
void
panic(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	pfatal("INTERNAL INCONSISTENCY:");
	(void)vfprintf(stdout, fmt, ap);
	va_end(ap);
	exit(EEXIT);
}