xref: /freebsd/sbin/fsck_ffs/fsutil.c (revision 094fc1ed0f2627525c7b0342efcbad5be7a8546a)
1 /*
2  * Copyright (c) 1980, 1986, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #if 0
31 #ifndef lint
32 static const char sccsid[] = "@(#)utilities.c	8.6 (Berkeley) 5/19/95";
33 #endif /* not lint */
34 #endif
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <sys/param.h>
39 #include <sys/time.h>
40 #include <sys/types.h>
41 #include <sys/sysctl.h>
42 #include <sys/disk.h>
43 #include <sys/disklabel.h>
44 #include <sys/ioctl.h>
45 #include <sys/stat.h>
46 
47 #include <ufs/ufs/dinode.h>
48 #include <ufs/ufs/dir.h>
49 #include <ufs/ffs/fs.h>
50 
51 #include <err.h>
52 #include <errno.h>
53 #include <string.h>
54 #include <ctype.h>
55 #include <fstab.h>
56 #include <stdint.h>
57 #include <stdio.h>
58 #include <stdlib.h>
59 #include <time.h>
60 #include <unistd.h>
61 #include <libufs.h>
62 
63 #include "fsck.h"
64 
65 static void slowio_start(void);
66 static void slowio_end(void);
67 static void printIOstats(void);
68 
69 static long diskreads, totaldiskreads, totalreads; /* Disk cache statistics */
70 static struct timespec startpass, finishpass;
71 struct timeval slowio_starttime;
72 int slowio_delay_usec = 10000;	/* Initial IO delay for background fsck */
73 int slowio_pollcnt;
74 static struct bufarea cgblk;	/* backup buffer for cylinder group blocks */
75 static TAILQ_HEAD(buflist, bufarea) bufhead;	/* head of buffer cache list */
76 static int numbufs;				/* size of buffer cache */
77 static char *buftype[BT_NUMBUFTYPES] = BT_NAMES;
78 static struct bufarea *cgbufs;	/* header for cylinder group cache */
79 static int flushtries;		/* number of tries to reclaim memory */
80 
81 void
82 fsutilinit(void)
83 {
84 	diskreads = totaldiskreads = totalreads = 0;
85 	bzero(&startpass, sizeof(struct timespec));
86 	bzero(&finishpass, sizeof(struct timespec));
87 	bzero(&slowio_starttime, sizeof(struct timeval));
88 	slowio_delay_usec = 10000;
89 	slowio_pollcnt = 0;
90 	bzero(&cgblk, sizeof(struct bufarea));
91 	TAILQ_INIT(&bufhead);
92 	numbufs = 0;
93 	/* buftype ? */
94 	cgbufs = NULL;
95 	flushtries = 0;
96 }
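
/*
 * A note on the reset above: fsutilinit() presumably exists so that the
 * checker can be run more than once in a single process (for example when
 * several file systems are checked in turn); it returns every piece of
 * module-level state to its power-on value.  The buftype name table is a
 * compile-time constant, so it is deliberately left alone.
 */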
97 
98 int
99 ftypeok(union dinode *dp)
100 {
101 	switch (DIP(dp, di_mode) & IFMT) {
102 
103 	case IFDIR:
104 	case IFREG:
105 	case IFBLK:
106 	case IFCHR:
107 	case IFLNK:
108 	case IFSOCK:
109 	case IFIFO:
110 		return (1);
111 
112 	default:
113 		if (debug)
114 			printf("bad file type 0%o\n", DIP(dp, di_mode));
115 		return (0);
116 	}
117 }
118 
119 int
120 reply(const char *question)
121 {
122 	int persevere;
123 	char c;
124 
125 	if (preen)
126 		pfatal("INTERNAL ERROR: GOT TO reply()");
127 	persevere = !strcmp(question, "CONTINUE");
128 	printf("\n");
129 	if (!persevere && (nflag || (fswritefd < 0 && bkgrdflag == 0))) {
130 		printf("%s? no\n\n", question);
131 		resolved = 0;
132 		return (0);
133 	}
134 	if (yflag || (persevere && nflag)) {
135 		printf("%s? yes\n\n", question);
136 		return (1);
137 	}
138 	do	{
139 		printf("%s? [yn] ", question);
140 		(void) fflush(stdout);
141 		c = getc(stdin);
142 		while (c != '\n' && getc(stdin) != '\n') {
143 			if (feof(stdin)) {
144 				resolved = 0;
145 				return (0);
146 			}
147 		}
148 	} while (c != 'y' && c != 'Y' && c != 'n' && c != 'N');
149 	printf("\n");
150 	if (c == 'y' || c == 'Y')
151 		return (1);
152 	resolved = 0;
153 	return (0);
154 }
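
/*
 * Illustrative use of reply() (a hypothetical caller, not code from this
 * file; the message text is made up):
 *
 *	pwarn("INCORRECT BLOCK COUNT I=%lu", (u_long)inumber);
 *	if (reply("CORRECT") == 1) {
 *		... apply the fix and mark the buffer dirty ...
 *	}
 *
 * reply() must never be reached while preening; it aborts with pfatal()
 * if it is, since preen-mode callers are expected to decide for themselves.
 */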
155 
156 /*
157  * Look up state information for an inode.
158  */
159 struct inostat *
160 inoinfo(ino_t inum)
161 {
162 	static struct inostat unallocated = { USTATE, 0, 0 };
163 	struct inostatlist *ilp;
164 	int iloff;
165 
166 	if (inum > maxino)
167 		errx(EEXIT, "inoinfo: inumber %ju out of range",
168 		    (uintmax_t)inum);
169 	ilp = &inostathead[inum / sblock.fs_ipg];
170 	iloff = inum % sblock.fs_ipg;
171 	if (iloff >= ilp->il_numalloced)
172 		return (&unallocated);
173 	return (&ilp->il_stat[iloff]);
174 }
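
/*
 * Typical use, as a sketch (the inostat field names live in fsck.h and are
 * assumed here):
 *
 *	if (inoinfo(inumber)->ino_state == USTATE)
 *		... treat the inode as unallocated ...
 *
 * Inodes whose per-cylinder-group state array was never grown fall into
 * the static "unallocated" record above, so callers always receive a
 * valid pointer.
 */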
175 
176 /*
177  * Malloc buffers and set up cache.
178  */
179 void
180 bufinit(void)
181 {
182 	struct bufarea *bp;
183 	long bufcnt, i;
184 	char *bufp;
185 
186 	pbp = pdirbp = (struct bufarea *)0;
187 	bufp = Malloc((unsigned int)sblock.fs_bsize);
188 	if (bufp == NULL)
189 		errx(EEXIT, "cannot allocate buffer pool");
190 	cgblk.b_un.b_buf = bufp;
191 	initbarea(&cgblk, BT_CYLGRP);
192 	TAILQ_INIT(&bufhead);
193 	bufcnt = MAXBUFS;
194 	if (bufcnt < MINBUFS)
195 		bufcnt = MINBUFS;
196 	for (i = 0; i < bufcnt; i++) {
197 		bp = (struct bufarea *)Malloc(sizeof(struct bufarea));
198 		bufp = Malloc((unsigned int)sblock.fs_bsize);
199 		if (bp == NULL || bufp == NULL) {
200 			if (i >= MINBUFS)
201 				break;
202 			errx(EEXIT, "cannot allocate buffer pool");
203 		}
204 		bp->b_un.b_buf = bufp;
205 		TAILQ_INSERT_HEAD(&bufhead, bp, b_list);
206 		initbarea(bp, BT_UNKNOWN);
207 	}
208 	numbufs = i;	/* save number of buffers */
209 	for (i = 0; i < BT_NUMBUFTYPES; i++) {
210 		readtime[i].tv_sec = totalreadtime[i].tv_sec = 0;
211 		readtime[i].tv_nsec = totalreadtime[i].tv_nsec = 0;
212 		readcnt[i] = totalreadcnt[i] = 0;
213 	}
214 }
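
/*
 * Sizing note: the loop above asks for MAXBUFS buffers of fs_bsize bytes
 * each and settles for whatever it gets, provided at least MINBUFS could
 * be allocated.  With a hypothetical 32768-byte block size and 100
 * buffers that is a bit over 3 MB of cache, so falling short here is
 * unusual but handled.
 */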
215 
216 /*
217  * Manage cylinder group buffers.
218  */
221 
222 struct bufarea *
223 cgget(int cg)
224 {
225 	struct bufarea *cgbp;
226 	struct cg *cgp;
227 
228 	if (cgbufs == NULL) {
229 		cgbufs = calloc(sblock.fs_ncg, sizeof(struct bufarea));
230 		if (cgbufs == NULL)
231 			errx(EEXIT, "cannot allocate cylinder group buffers");
232 	}
233 	cgbp = &cgbufs[cg];
234 	if (cgbp->b_un.b_cg != NULL)
235 		return (cgbp);
236 	cgp = NULL;
237 	if (flushtries == 0)
238 		cgp = malloc((unsigned int)sblock.fs_cgsize);
239 	if (cgp == NULL) {
240 		getblk(&cgblk, cgtod(&sblock, cg), sblock.fs_cgsize);
241 		return (&cgblk);
242 	}
243 	cgbp->b_un.b_cg = cgp;
244 	initbarea(cgbp, BT_CYLGRP);
245 	getblk(cgbp, cgtod(&sblock, cg), sblock.fs_cgsize);
246 	return (cgbp);
247 }
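
/*
 * The fallback above matters: once flushentry() has begun reclaiming
 * memory (flushtries != 0), or when malloc() fails outright, the cylinder
 * group is read into the single shared cgblk buffer instead of being
 * cached, so the caller still gets correct data but no long-lived copy.
 */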
248 
249 /*
250  * Attempt to flush a cylinder group cache entry.
251  * Return whether the flush was successful.
252  */
253 int
254 flushentry(void)
255 {
256 	struct bufarea *cgbp;
257 
258 	if (flushtries == sblock.fs_ncg || cgbufs == NULL)
259 		return (0);
260 	cgbp = &cgbufs[flushtries++];
261 	if (cgbp->b_un.b_cg == NULL)
262 		return (0);
263 	flush(fswritefd, cgbp);
264 	free(cgbp->b_un.b_buf);
265 	cgbp->b_un.b_buf = NULL;
266 	return (1);
267 }
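
/*
 * flushentry() is presumably invoked from the allocation wrappers in
 * fsck.h when memory runs short: each call writes back and frees one
 * cached cylinder group, and the caller can keep retrying until
 * flushentry() returns 0, meaning there is nothing left to release.
 */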
268 
269 /*
270  * Manage a cache of directory blocks.
271  */
272 struct bufarea *
273 getdatablk(ufs2_daddr_t blkno, long size, int type)
274 {
275 	struct bufarea *bp;
276 
277 	TAILQ_FOREACH(bp, &bufhead, b_list)
278 		if (bp->b_bno == fsbtodb(&sblock, blkno))
279 			goto foundit;
280 	TAILQ_FOREACH_REVERSE(bp, &bufhead, buflist, b_list)
281 		if ((bp->b_flags & B_INUSE) == 0)
282 			break;
283 	if (bp == NULL)
284 		errx(EEXIT, "deadlocked buffer pool");
285 	bp->b_type = type;
286 	getblk(bp, blkno, size);
287 	/* fall through */
288 foundit:
289 	if (debug && bp->b_type != type)
290 		printf("Buffer type changed from %s to %s\n",
291 		    buftype[bp->b_type], buftype[type]);
292 	TAILQ_REMOVE(&bufhead, bp, b_list);
293 	TAILQ_INSERT_HEAD(&bufhead, bp, b_list);
294 	bp->b_flags |= B_INUSE;
295 	return (bp);
296 }
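
/*
 * Sketch of the usual calling pattern (hypothetical caller; BT_DATA
 * stands in for whichever BT_* type from fsck.h is appropriate):
 *
 *	struct bufarea *bp;
 *
 *	bp = getdatablk(blkno, sblock.fs_bsize, BT_DATA);
 *	... examine or modify bp->b_un.b_buf, calling dirty(bp) if changed ...
 *	bp->b_flags &= ~B_INUSE;
 *
 * Clearing B_INUSE is what makes a buffer eligible for reuse by the
 * reverse scan above; forgetting it eventually trips the "deadlocked
 * buffer pool" check.
 */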
297 
298 /*
299  * Timespec operations (from <sys/time.h>).
300  */
301 #define	timespecsub(vvp, uvp)						\
302 	do {								\
303 		(vvp)->tv_sec -= (uvp)->tv_sec;				\
304 		(vvp)->tv_nsec -= (uvp)->tv_nsec;			\
305 		if ((vvp)->tv_nsec < 0) {				\
306 			(vvp)->tv_sec--;				\
307 			(vvp)->tv_nsec += 1000000000;			\
308 		}							\
309 	} while (0)
310 #define	timespecadd(vvp, uvp)						\
311 	do {								\
312 		(vvp)->tv_sec += (uvp)->tv_sec;				\
313 		(vvp)->tv_nsec += (uvp)->tv_nsec;			\
314 		if ((vvp)->tv_nsec >= 1000000000) {			\
315 			(vvp)->tv_sec++;				\
316 			(vvp)->tv_nsec -= 1000000000;			\
317 		}							\
318 	} while (0)
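
/*
 * Worked example of the borrow in timespecsub(): subtracting
 * {1, 900000000} from {2, 100000000} first yields {1, -800000000}; the
 * negative nanosecond field then borrows one second, giving the expected
 * {0, 200000000}, i.e. 0.2 seconds.
 */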
319 
320 void
321 getblk(struct bufarea *bp, ufs2_daddr_t blk, long size)
322 {
323 	ufs2_daddr_t dblk;
324 	struct timespec start, finish;
325 
326 	dblk = fsbtodb(&sblock, blk);
327 	if (bp->b_bno == dblk) {
328 		totalreads++;
329 	} else {
330 		flush(fswritefd, bp);
331 		if (debug) {
332 			readcnt[bp->b_type]++;
333 			clock_gettime(CLOCK_REALTIME_PRECISE, &start);
334 		}
335 		bp->b_errs = blread(fsreadfd, bp->b_un.b_buf, dblk, size);
336 		if (debug) {
337 			clock_gettime(CLOCK_REALTIME_PRECISE, &finish);
338 			timespecsub(&finish, &start);
339 			timespecadd(&readtime[bp->b_type], &finish);
340 		}
341 		bp->b_bno = dblk;
342 		bp->b_size = size;
343 	}
344 }
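
/*
 * A cache hit (bp->b_bno == dblk) only bumps the read statistics; a miss
 * writes back any dirty contents and rereads the buffer, timing the read
 * in debug mode so that printIOstats() can attribute the cost to each
 * buffer type.
 */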
345 
346 void
347 flush(int fd, struct bufarea *bp)
348 {
349 	int i, j;
350 
351 	if (!bp->b_dirty)
352 		return;
353 	/*
354 	 * Calculate any needed check hashes.
355 	 */
356 	switch (bp->b_type) {
357 	case BT_CYLGRP:
358 		if ((sblock.fs_metackhash & CK_CYLGRP) == 0)
359 			break;
360 		bp->b_un.b_cg->cg_ckhash = 0;
361 		bp->b_un.b_cg->cg_ckhash =
362 		    calculate_crc32c(~0L, bp->b_un.b_buf, bp->b_size);
363 		break;
364 	default:
365 		break;
366 	}
367 	bp->b_dirty = 0;
368 	if (fswritefd < 0) {
369 		pfatal("WRITING IN READ_ONLY MODE.\n");
370 		return;
371 	}
372 	if (bp->b_errs != 0)
373 		pfatal("WRITING %sZERO'ED BLOCK %lld TO DISK\n",
374 		    (bp->b_errs == bp->b_size / dev_bsize) ? "" : "PARTIALLY ",
375 		    (long long)bp->b_bno);
376 	bp->b_errs = 0;
377 	blwrite(fd, bp->b_un.b_buf, bp->b_bno, bp->b_size);
378 	if (bp != &sblk)
379 		return;
380 	for (i = 0, j = 0; i < sblock.fs_cssize; i += sblock.fs_bsize, j++) {
381 		blwrite(fswritefd, (char *)sblock.fs_csp + i,
382 		    fsbtodb(&sblock, sblock.fs_csaddr + j * sblock.fs_frag),
383 		    MIN(sblock.fs_cssize - i, sblock.fs_bsize));
384 	}
385 }
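
/*
 * Only the superblock buffer gets the extra treatment above: after the
 * superblock itself is written, the in-core cylinder group summary array
 * (fs_csp) is written back one file-system block at a time to the csum
 * area starting at fs_csaddr.
 */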
386 
387 void
388 rwerror(const char *mesg, ufs2_daddr_t blk)
389 {
390 
391 	if (bkgrdcheck)
392 		exit(EEXIT);
393 	if (preen == 0)
394 		printf("\n");
395 	pfatal("CANNOT %s: %ld", mesg, (long)blk);
396 	if (reply("CONTINUE") == 0)
397 		exit(EEXIT);
398 }
399 
400 void
401 ckfini(int markclean)
402 {
403 	struct bufarea *bp, *nbp;
404 	int ofsmodified, cnt;
405 
406 	if (bkgrdflag) {
407 		unlink(snapname);
408 		if ((!(sblock.fs_flags & FS_UNCLEAN)) != markclean) {
409 			cmd.value = FS_UNCLEAN;
410 			cmd.size = markclean ? -1 : 1;
411 			if (sysctlbyname("vfs.ffs.setflags", 0, 0,
412 			    &cmd, sizeof cmd) == -1)
413 				rwerror("SET FILE SYSTEM FLAGS", FS_UNCLEAN);
414 			if (!preen) {
415 				printf("\n***** FILE SYSTEM MARKED %s *****\n",
416 				    markclean ? "CLEAN" : "DIRTY");
417 				if (!markclean)
418 					rerun = 1;
419 			}
420 		} else if (!preen && !markclean) {
421 			printf("\n***** FILE SYSTEM STILL DIRTY *****\n");
422 			rerun = 1;
423 		}
424 	}
425 	if (debug && totalreads > 0)
426 		printf("cache with %d buffers missed %ld of %ld (%d%%)\n",
427 		    numbufs, totaldiskreads, totalreads,
428 		    (int)(totaldiskreads * 100 / totalreads));
429 	if (fswritefd < 0) {
430 		(void)close(fsreadfd);
431 		return;
432 	}
433 	flush(fswritefd, &sblk);
434 	if (havesb && cursnapshot == 0 && sblock.fs_magic == FS_UFS2_MAGIC &&
435 	    sblk.b_bno != sblock.fs_sblockloc / dev_bsize &&
436 	    !preen && reply("UPDATE STANDARD SUPERBLOCK")) {
437 		sblk.b_bno = sblock.fs_sblockloc / dev_bsize;
438 		sbdirty();
439 		flush(fswritefd, &sblk);
440 	}
441 	flush(fswritefd, &cgblk);
442 	free(cgblk.b_un.b_buf);
443 	cnt = 0;
444 	TAILQ_FOREACH_REVERSE_SAFE(bp, &bufhead, buflist, b_list, nbp) {
445 		TAILQ_REMOVE(&bufhead, bp, b_list);
446 		cnt++;
447 		flush(fswritefd, bp);
448 		free(bp->b_un.b_buf);
449 		free((char *)bp);
450 	}
451 	if (numbufs != cnt)
452 		errx(EEXIT, "panic: lost %d buffers", numbufs - cnt);
453 	if (cgbufs != NULL) {
454 		for (cnt = 0; cnt < sblock.fs_ncg; cnt++) {
455 			if (cgbufs[cnt].b_un.b_cg == NULL)
456 				continue;
457 			flush(fswritefd, &cgbufs[cnt]);
458 			free(cgbufs[cnt].b_un.b_cg);
459 		}
460 		free(cgbufs);
461 	}
462 	pbp = pdirbp = (struct bufarea *)0;
463 	if (cursnapshot == 0 && sblock.fs_clean != markclean) {
464 		if ((sblock.fs_clean = markclean) != 0) {
465 			sblock.fs_flags &= ~(FS_UNCLEAN | FS_NEEDSFSCK);
466 			sblock.fs_pendingblocks = 0;
467 			sblock.fs_pendinginodes = 0;
468 		}
469 		sbdirty();
470 		ofsmodified = fsmodified;
471 		flush(fswritefd, &sblk);
472 		fsmodified = ofsmodified;
473 		if (!preen) {
474 			printf("\n***** FILE SYSTEM MARKED %s *****\n",
475 			    markclean ? "CLEAN" : "DIRTY");
476 			if (!markclean)
477 				rerun = 1;
478 		}
479 	} else if (!preen) {
480 		if (markclean) {
481 			printf("\n***** FILE SYSTEM IS CLEAN *****\n");
482 		} else {
483 			printf("\n***** FILE SYSTEM STILL DIRTY *****\n");
484 			rerun = 1;
485 		}
486 	}
487 	(void)close(fsreadfd);
488 	(void)close(fswritefd);
489 }
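
/*
 * Teardown order above: the superblock and the cylinder group backup
 * buffer are flushed first, then every buffer on the cache list (with the
 * count cross-checked against numbufs), then the cached cylinder groups,
 * and only then is the clean flag reconciled and written, so nothing
 * dirty can be left behind when the descriptors are closed.
 */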
490 
491 /*
492  * Print out I/O statistics.
493  */
494 void
495 IOstats(char *what)
496 {
497 	int i;
498 
499 	if (debug == 0)
500 		return;
501 	if (diskreads == 0) {
502 		printf("%s: no I/O\n\n", what);
503 		return;
504 	}
505 	if (startpass.tv_sec == 0)
506 		startpass = startprog;
507 	printf("%s: I/O statistics\n", what);
508 	printIOstats();
509 	totaldiskreads += diskreads;
510 	diskreads = 0;
511 	for (i = 0; i < BT_NUMBUFTYPES; i++) {
512 		timespecadd(&totalreadtime[i], &readtime[i]);
513 		totalreadcnt[i] += readcnt[i];
514 		readtime[i].tv_sec = readtime[i].tv_nsec = 0;
515 		readcnt[i] = 0;
516 	}
517 	clock_gettime(CLOCK_REALTIME_PRECISE, &startpass);
518 }
519 
520 void
521 finalIOstats(void)
522 {
523 	int i;
524 
525 	if (debug == 0)
526 		return;
527 	printf("Final I/O statistics\n");
528 	totaldiskreads += diskreads;
529 	diskreads = totaldiskreads;
530 	startpass = startprog;
531 	for (i = 0; i < BT_NUMBUFTYPES; i++) {
532 		timespecadd(&totalreadtime[i], &readtime[i]);
533 		totalreadcnt[i] += readcnt[i];
534 		readtime[i] = totalreadtime[i];
535 		readcnt[i] = totalreadcnt[i];
536 	}
537 	printIOstats();
538 }
539 
540 static void
printIOstats(void)
541 {
542 	long long msec, totalmsec;
543 	int i;
544 
545 	clock_gettime(CLOCK_REALTIME_PRECISE, &finishpass);
546 	timespecsub(&finishpass, &startpass);
547 	printf("Running time: %jd.%03ld sec\n",
548 		(intmax_t)finishpass.tv_sec, finishpass.tv_nsec / 1000000);
549 	printf("buffer reads by type:\n");
550 	for (totalmsec = 0, i = 0; i < BT_NUMBUFTYPES; i++)
551 		totalmsec += readtime[i].tv_sec * 1000 +
552 		    readtime[i].tv_nsec / 1000000;
553 	if (totalmsec == 0)
554 		totalmsec = 1;
555 	for (i = 0; i < BT_NUMBUFTYPES; i++) {
556 		if (readcnt[i] == 0)
557 			continue;
558 		msec =
559 		    readtime[i].tv_sec * 1000 + readtime[i].tv_nsec / 1000000;
560 		printf("%21s:%8ld %2ld.%ld%% %4jd.%03ld sec %2lld.%lld%%\n",
561 		    buftype[i], readcnt[i], readcnt[i] * 100 / diskreads,
562 		    (readcnt[i] * 1000 / diskreads) % 10,
563 		    (intmax_t)readtime[i].tv_sec, readtime[i].tv_nsec / 1000000,
564 		    msec * 100 / totalmsec, (msec * 1000 / totalmsec) % 10);
565 	}
566 	printf("\n");
567 }
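
/*
 * The percentages above use integer arithmetic; for example, with
 * readcnt[i] == 3 and diskreads == 8 the count column prints "37.5%",
 * since 3 * 100 / 8 == 37 and (3 * 1000 / 8) % 10 == 5.
 */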
568 
569 int
570 blread(int fd, char *buf, ufs2_daddr_t blk, long size)
571 {
572 	char *cp;
573 	int i, errs;
574 	off_t offset;
575 
576 	offset = blk;
577 	offset *= dev_bsize;
578 	if (bkgrdflag)
579 		slowio_start();
580 	totalreads++;
581 	diskreads++;
582 	if (lseek(fd, offset, 0) < 0)
583 		rwerror("SEEK BLK", blk);
584 	else if (read(fd, buf, (int)size) == size) {
585 		if (bkgrdflag)
586 			slowio_end();
587 		return (0);
588 	}
589 
590 	/*
591 	 * This is handled specially here instead of in rwerror because
592 	 * rwerror is used for all sorts of errors, not just true read/write
593 	 * errors.  It should be refactored and fixed.
594 	 */
595 	if (surrender) {
596 		pfatal("CANNOT READ_BLK: %ld", (long)blk);
597 		errx(EEXIT, "ABORTING DUE TO READ ERRORS");
598 	} else
599 		rwerror("READ BLK", blk);
600 
601 	if (lseek(fd, offset, 0) < 0)
602 		rwerror("SEEK BLK", blk);
603 	errs = 0;
604 	memset(buf, 0, (size_t)size);
605 	printf("THE FOLLOWING DISK SECTORS COULD NOT BE READ:");
606 	for (cp = buf, i = 0; i < size; i += secsize, cp += secsize) {
607 		if (read(fd, cp, (int)secsize) != secsize) {
608 			(void)lseek(fd, offset + i + secsize, 0);
609 			if (secsize != dev_bsize && dev_bsize != 1)
610 				printf(" %jd (%jd),",
611 				    (intmax_t)(blk * dev_bsize + i) / secsize,
612 				    (intmax_t)blk + i / dev_bsize);
613 			else
614 				printf(" %jd,", (intmax_t)blk + i / dev_bsize);
615 			errs++;
616 		}
617 	}
618 	printf("\n");
619 	if (errs)
620 		resolved = 0;
621 	return (errs);
622 }
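
/*
 * On a failed full-size read the loop above retries the transfer one
 * sector at a time, leaving unreadable sectors zero-filled (courtesy of
 * the memset) and listing their numbers.  Returning the error count lets
 * getblk() record partially valid buffers in bp->b_errs.
 */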
623 
624 void
625 blwrite(int fd, char *buf, ufs2_daddr_t blk, ssize_t size)
626 {
627 	int i;
628 	char *cp;
629 	off_t offset;
630 
631 	if (fd < 0)
632 		return;
633 	offset = blk;
634 	offset *= dev_bsize;
635 	if (lseek(fd, offset, 0) < 0)
636 		rwerror("SEEK BLK", blk);
637 	else if (write(fd, buf, size) == size) {
638 		fsmodified = 1;
639 		return;
640 	}
641 	resolved = 0;
642 	rwerror("WRITE BLK", blk);
643 	if (lseek(fd, offset, 0) < 0)
644 		rwerror("SEEK BLK", blk);
645 	printf("THE FOLLOWING SECTORS COULD NOT BE WRITTEN:");
646 	for (cp = buf, i = 0; i < size; i += dev_bsize, cp += dev_bsize)
647 		if (write(fd, cp, dev_bsize) != dev_bsize) {
648 			(void)lseek(fd, offset + i + dev_bsize, 0);
649 			printf(" %jd,", (intmax_t)blk + i / dev_bsize);
650 		}
651 	printf("\n");
652 	return;
653 }
654 
655 void
656 blerase(int fd, ufs2_daddr_t blk, long size)
657 {
658 	off_t ioarg[2];
659 
660 	if (fd < 0)
661 		return;
662 	ioarg[0] = blk * dev_bsize;
663 	ioarg[1] = size;
664 	ioctl(fd, DIOCGDELETE, ioarg);
665 	/* we don't really care if we succeed or not */
666 	return;
667 }
668 
669 /*
670  * Fill a contiguous region with all-zeroes.  Note ZEROBUFSIZE is by
671  * definition a multiple of dev_bsize.
672  */
673 void
674 blzero(int fd, ufs2_daddr_t blk, long size)
675 {
676 	static char *zero;
677 	off_t offset, len;
678 
679 	if (fd < 0)
680 		return;
681 	if (zero == NULL) {
682 		zero = calloc(ZEROBUFSIZE, 1);
683 		if (zero == NULL)
684 			errx(EEXIT, "cannot allocate buffer pool");
685 	}
686 	offset = blk * dev_bsize;
687 	if (lseek(fd, offset, 0) < 0)
688 		rwerror("SEEK BLK", blk);
689 	while (size > 0) {
690 		len = MIN(ZEROBUFSIZE, size);
691 		if (write(fd, zero, len) != len)
692 			rwerror("WRITE BLK", blk);
693 		blk += len / dev_bsize;
694 		size -= len;
695 	}
696 }
697 
698 /*
699  * Verify cylinder group's magic number and other parameters.  If the
700  * test fails, offer an option to rebuild the whole cylinder group.
701  */
702 int
703 check_cgmagic(int cg, struct bufarea *cgbp)
704 {
705 	struct cg *cgp = cgbp->b_un.b_cg;
706 
707 	/*
708 	 * Extended cylinder group checks.
709 	 */
710 	if (cg_chkmagic(cgp) &&
711 	    ((sblock.fs_magic == FS_UFS1_MAGIC &&
712 	      cgp->cg_old_niblk == sblock.fs_ipg &&
713 	      cgp->cg_ndblk <= sblock.fs_fpg &&
714 	      cgp->cg_old_ncyl <= sblock.fs_old_cpg) ||
715 	     (sblock.fs_magic == FS_UFS2_MAGIC &&
716 	      cgp->cg_niblk == sblock.fs_ipg &&
717 	      cgp->cg_ndblk <= sblock.fs_fpg &&
718 	      cgp->cg_initediblk <= sblock.fs_ipg))) {
719 		return (1);
720 	}
721 	pfatal("CYLINDER GROUP %d: BAD MAGIC NUMBER", cg);
722 	if (!reply("REBUILD CYLINDER GROUP")) {
723 		printf("YOU WILL NEED TO RERUN FSCK.\n");
724 		rerun = 1;
725 		return (1);
726 	}
727 	/*
728 	 * Zero out the cylinder group and then initialize critical fields.
729 	 * Bit maps and summaries will be recalculated by later passes.
730 	 */
731 	memset(cgp, 0, (size_t)sblock.fs_cgsize);
732 	cgp->cg_magic = CG_MAGIC;
733 	cgp->cg_cgx = cg;
734 	cgp->cg_niblk = sblock.fs_ipg;
735 	cgp->cg_initediblk = MIN(sblock.fs_ipg, 2 * INOPB(&sblock));
736 	if (cgbase(&sblock, cg) + sblock.fs_fpg < sblock.fs_size)
737 		cgp->cg_ndblk = sblock.fs_fpg;
738 	else
739 		cgp->cg_ndblk = sblock.fs_size - cgbase(&sblock, cg);
740 	cgp->cg_iusedoff = &cgp->cg_space[0] - (u_char *)(&cgp->cg_firstfield);
741 	if (sblock.fs_magic == FS_UFS1_MAGIC) {
742 		cgp->cg_niblk = 0;
743 		cgp->cg_initediblk = 0;
744 		cgp->cg_old_ncyl = sblock.fs_old_cpg;
745 		cgp->cg_old_niblk = sblock.fs_ipg;
746 		cgp->cg_old_btotoff = cgp->cg_iusedoff;
747 		cgp->cg_old_boff = cgp->cg_old_btotoff +
748 		    sblock.fs_old_cpg * sizeof(int32_t);
749 		cgp->cg_iusedoff = cgp->cg_old_boff +
750 		    sblock.fs_old_cpg * sizeof(u_int16_t);
751 	}
752 	cgp->cg_freeoff = cgp->cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
753 	cgp->cg_nextfreeoff = cgp->cg_freeoff + howmany(sblock.fs_fpg,CHAR_BIT);
754 	if (sblock.fs_contigsumsize > 0) {
755 		cgp->cg_nclusterblks = cgp->cg_ndblk / sblock.fs_frag;
756 		cgp->cg_clustersumoff =
757 		    roundup(cgp->cg_nextfreeoff, sizeof(u_int32_t));
758 		cgp->cg_clustersumoff -= sizeof(u_int32_t);
759 		cgp->cg_clusteroff = cgp->cg_clustersumoff +
760 		    (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t);
761 		cgp->cg_nextfreeoff = cgp->cg_clusteroff +
762 		    howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
763 	}
764 	dirty(cgbp);
765 	return (0);
766 }
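
/*
 * Note that declining the rebuild still returns 1: the caller proceeds
 * with the suspect cylinder group so the rest of the check can run, and
 * rerun is set so the operator is told to run fsck again.
 */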
767 
768 /*
769  * allocate a data block with the specified number of fragments
770  */
771 ufs2_daddr_t
772 allocblk(long frags)
773 {
774 	int i, j, k, cg, baseblk;
775 	struct bufarea *cgbp;
776 	struct cg *cgp;
777 
778 	if (frags <= 0 || frags > sblock.fs_frag)
779 		return (0);
780 	for (i = 0; i < maxfsblock - sblock.fs_frag; i += sblock.fs_frag) {
781 		for (j = 0; j <= sblock.fs_frag - frags; j++) {
782 			if (testbmap(i + j))
783 				continue;
784 			for (k = 1; k < frags; k++)
785 				if (testbmap(i + j + k))
786 					break;
787 			if (k < frags) {
788 				j += k;
789 				continue;
790 			}
791 			cg = dtog(&sblock, i + j);
792 			cgbp = cgget(cg);
793 			cgp = cgbp->b_un.b_cg;
794 			if (!check_cgmagic(cg, cgbp))
795 				return (0);
796 			baseblk = dtogd(&sblock, i + j);
797 			for (k = 0; k < frags; k++) {
798 				setbmap(i + j + k);
799 				clrbit(cg_blksfree(cgp), baseblk + k);
800 			}
801 			n_blks += frags;
802 			if (frags == sblock.fs_frag)
803 				cgp->cg_cs.cs_nbfree--;
804 			else
805 				cgp->cg_cs.cs_nffree -= frags;
806 			dirty(cgbp);
807 			return (i + j);
808 		}
809 	}
810 	return (0);
811 }
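
/*
 * allocblk() returns the number of the first fragment it allocated, or 0
 * on failure.  Returning 0 is presumably safe as a failure value because
 * fragment 0 lies in the boot block area and is never available as a
 * data block.
 */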
812 
813 /*
814  * Free a previously allocated block
815  */
816 void
817 freeblk(ufs2_daddr_t blkno, long frags)
818 {
819 	struct inodesc idesc;
820 
821 	idesc.id_blkno = blkno;
822 	idesc.id_numfrags = frags;
823 	(void)pass4check(&idesc);
824 }
825 
826 /* Slow down IO so as to leave some disk bandwidth for other processes */
827 static void
828 slowio_start(void)
829 {
830 
831 	/* Delay one in every 8 operations */
832 	slowio_pollcnt = (slowio_pollcnt + 1) & 7;
833 	if (slowio_pollcnt == 0) {
834 		gettimeofday(&slowio_starttime, NULL);
835 	}
836 }
837 
838 static void
839 slowio_end(void)
840 {
841 	struct timeval tv;
842 	int delay_usec;
843 
844 	if (slowio_pollcnt != 0)
845 		return;
846 
847 	/* Update the slowdown interval. */
848 	gettimeofday(&tv, NULL);
849 	delay_usec = (tv.tv_sec - slowio_starttime.tv_sec) * 1000000 +
850 	    (tv.tv_usec - slowio_starttime.tv_usec);
851 	if (delay_usec < 64)
852 		delay_usec = 64;
853 	if (delay_usec > 2500000)
854 		delay_usec = 2500000;
855 	slowio_delay_usec = (slowio_delay_usec * 63 + delay_usec) >> 6;
856 	/* delay by 8 times the average IO delay */
857 	if (slowio_delay_usec > 64)
858 		usleep(slowio_delay_usec * 8);
859 }
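
/*
 * The smoothing above is an exponentially weighted moving average:
 * slowio_delay_usec = (63 * previous + latest) / 64, with the sample
 * clamped to the 64..2500000 microsecond range.  Every eighth I/O then
 * sleeps for eight times that average, which roughly halves the
 * bandwidth a background fsck takes from the disk.
 */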
860 
861 /*
862  * Find a pathname
863  */
864 void
865 getpathname(char *namebuf, ino_t curdir, ino_t ino)
866 {
867 	int len;
868 	char *cp;
869 	struct inodesc idesc;
870 	static int busy = 0;
871 
872 	if (curdir == ino && ino == UFS_ROOTINO) {
873 		(void)strcpy(namebuf, "/");
874 		return;
875 	}
876 	if (busy || !INO_IS_DVALID(curdir)) {
877 		(void)strcpy(namebuf, "?");
878 		return;
879 	}
880 	busy = 1;
881 	memset(&idesc, 0, sizeof(struct inodesc));
882 	idesc.id_type = DATA;
883 	idesc.id_fix = IGNORE;
884 	cp = &namebuf[MAXPATHLEN - 1];
885 	*cp = '\0';
886 	if (curdir != ino) {
887 		idesc.id_parent = curdir;
888 		goto namelookup;
889 	}
890 	while (ino != UFS_ROOTINO) {
891 		idesc.id_number = ino;
892 		idesc.id_func = findino;
893 		idesc.id_name = strdup("..");
894 		if ((ckinode(ginode(ino), &idesc) & FOUND) == 0)
895 			break;
896 	namelookup:
897 		idesc.id_number = idesc.id_parent;
898 		idesc.id_parent = ino;
899 		idesc.id_func = findname;
900 		idesc.id_name = namebuf;
901 		if ((ckinode(ginode(idesc.id_number), &idesc)&FOUND) == 0)
902 			break;
903 		len = strlen(namebuf);
904 		cp -= len;
905 		memmove(cp, namebuf, (size_t)len);
906 		*--cp = '/';
907 		if (cp < &namebuf[UFS_MAXNAMLEN])
908 			break;
909 		ino = idesc.id_number;
910 	}
911 	busy = 0;
912 	if (ino != UFS_ROOTINO)
913 		*--cp = '?';
914 	memmove(namebuf, cp, (size_t)(&namebuf[MAXPATHLEN] - cp));
915 }
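
/*
 * The path is assembled right to left: each iteration finds the name of
 * "ino" inside its parent with findname() and prepends it to the tail of
 * namebuf, walking upward via ".." lookups until the root is reached.
 * If the walk fails partway, the partial path is prefixed with '?'.
 */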
916 
917 void
918 catch(int sig __unused)
919 {
920 
921 	ckfini(0);
922 	exit(12);
923 }
924 
925 /*
926  * When preening, allow a single quit to signal
927  * a special exit after file system checks complete
928  * so that the reboot sequence may be interrupted.
929  */
930 void
931 catchquit(int sig __unused)
932 {
933 	printf("returning to single-user after file system check\n");
934 	returntosingle = 1;
935 	(void)signal(SIGQUIT, SIG_DFL);
936 }
937 
938 /*
939  * determine whether an inode should be fixed.
940  */
941 int
942 dofix(struct inodesc *idesc, const char *msg)
943 {
944 
945 	switch (idesc->id_fix) {
946 
947 	case DONTKNOW:
948 		if (idesc->id_type == DATA)
949 			direrror(idesc->id_number, msg);
950 		else
951 			pwarn("%s", msg);
952 		if (preen) {
953 			printf(" (SALVAGED)\n");
954 			idesc->id_fix = FIX;
955 			return (ALTERED);
956 		}
957 		if (reply("SALVAGE") == 0) {
958 			idesc->id_fix = NOFIX;
959 			return (0);
960 		}
961 		idesc->id_fix = FIX;
962 		return (ALTERED);
963 
964 	case FIX:
965 		return (ALTERED);
966 
967 	case NOFIX:
968 	case IGNORE:
969 		return (0);
970 
971 	default:
972 		errx(EEXIT, "UNKNOWN INODESC FIX MODE %d", idesc->id_fix);
973 	}
974 	/* NOTREACHED */
975 	return (0);
976 }
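
/*
 * Sketch of how dofix() is usually driven (hypothetical caller): the
 * inodesc is set up with id_fix = DONTKNOW and a per-entry check routine
 * returns something like
 *
 *	return (dofix(idesc, "PROBLEM DESCRIPTION"));
 *
 * so the first bad entry asks the operator (or salvages automatically
 * when preening) and subsequent entries reuse the recorded FIX / NOFIX
 * answer.
 */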
977 
978 #include <stdarg.h>
979 
980 /*
981  * An unexpected inconsistency occurred.
982  * Die if preening or if running as a background check, otherwise just
983  * print the message and continue.
984  */
985 void
986 pfatal(const char *fmt, ...)
987 {
988 	va_list ap;
989 	va_start(ap, fmt);
990 	if (!preen) {
991 		(void)vfprintf(stdout, fmt, ap);
992 		va_end(ap);
993 		if (usedsoftdep)
994 			(void)fprintf(stdout,
995 			    "\nUNEXPECTED SOFT UPDATE INCONSISTENCY\n");
996 		/*
997 		 * Force foreground fsck to clean up inconsistency.
998 		 */
999 		if (bkgrdflag) {
1000 			cmd.value = FS_NEEDSFSCK;
1001 			cmd.size = 1;
1002 			if (sysctlbyname("vfs.ffs.setflags", 0, 0,
1003 			    &cmd, sizeof cmd) == -1)
1004 				pwarn("CANNOT SET FS_NEEDSFSCK FLAG\n");
1005 			fprintf(stdout, "CANNOT RUN IN BACKGROUND\n");
1006 			ckfini(0);
1007 			exit(EEXIT);
1008 		}
1009 		return;
1010 	}
1011 	if (cdevname == NULL)
1012 		cdevname = strdup("fsck");
1013 	(void)fprintf(stdout, "%s: ", cdevname);
1014 	(void)vfprintf(stdout, fmt, ap);
1015 	(void)fprintf(stdout,
1016 	    "\n%s: UNEXPECTED%sINCONSISTENCY; RUN fsck MANUALLY.\n",
1017 	    cdevname, usedsoftdep ? " SOFT UPDATE " : " ");
1018 	/*
1019 	 * Force foreground fsck to clean up inconsistency.
1020 	 */
1021 	if (bkgrdflag) {
1022 		cmd.value = FS_NEEDSFSCK;
1023 		cmd.size = 1;
1024 		if (sysctlbyname("vfs.ffs.setflags", 0, 0,
1025 		    &cmd, sizeof cmd) == -1)
1026 			pwarn("CANNOT SET FS_NEEDSFSCK FLAG\n");
1027 	}
1028 	ckfini(0);
1029 	exit(EEXIT);
1030 }
1031 
1032 /*
1033  * Pwarn just prints a message when not preening, or a warning (preceded
1034  * by the device name) when preening.
1035  */
1036 void
1037 pwarn(const char *fmt, ...)
1038 {
1039 	va_list ap;
1040 	va_start(ap, fmt);
1041 	if (preen)
1042 		(void)fprintf(stdout, "%s: ", cdevname);
1043 	(void)vfprintf(stdout, fmt, ap);
1044 	va_end(ap);
1045 }
1046 
1047 /*
1048  * Stub for routines from kernel.
1049  */
1050 void
1051 panic(const char *fmt, ...)
1052 {
1053 	va_list ap;
1054 	va_start(ap, fmt);
1055 	pfatal("INTERNAL INCONSISTENCY:");
1056 	(void)vfprintf(stdout, fmt, ap);
1057 	va_end(ap);
1058 	exit(EEXIT);
1059 }
1060