/* xref: /freebsd/sbin/fsck_ffs/fsutil.c (revision d8a0fe102c0cfdfcd5b818f850eff09d8536c9bc) */
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1980, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if 0
#ifndef lint
static const char sccsid[] = "@(#)utilities.c	8.6 (Berkeley) 5/19/95";
#endif /* not lint */
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include <ufs/ufs/dinode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ffs/fs.h>

#include <err.h>
#include <errno.h>
#include <string.h>
#include <ctype.h>
#include <fstab.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <libufs.h>

#include "fsck.h"

static void slowio_start(void);
static void slowio_end(void);
static void printIOstats(void);

static long diskreads, totaldiskreads, totalreads; /* Disk cache statistics */
static struct timespec startpass, finishpass;
struct timeval slowio_starttime;
int slowio_delay_usec = 10000;	/* Initial IO delay for background fsck */
int slowio_pollcnt;
static struct bufarea cgblk;	/* backup buffer for cylinder group blocks */
static TAILQ_HEAD(buflist, bufarea) bufhead;	/* head of buffer cache list */
static int numbufs;				/* size of buffer cache */
static char *buftype[BT_NUMBUFTYPES] = BT_NAMES;
static struct bufarea *cgbufs;	/* header for cylinder group cache */
static int flushtries;		/* number of tries to reclaim memory */

void
fsutilinit(void)
{
	diskreads = totaldiskreads = totalreads = 0;
	bzero(&startpass, sizeof(struct timespec));
	bzero(&finishpass, sizeof(struct timespec));
	bzero(&slowio_starttime, sizeof(struct timeval));
	slowio_delay_usec = 10000;
	slowio_pollcnt = 0;
	bzero(&cgblk, sizeof(struct bufarea));
	TAILQ_INIT(&bufhead);
	numbufs = 0;
	/* buftype[] is only ever read, so it needs no reset here. */
	cgbufs = NULL;
	flushtries = 0;
}

int
ftypeok(union dinode *dp)
{
	switch (DIP(dp, di_mode) & IFMT) {

	case IFDIR:
	case IFREG:
	case IFBLK:
	case IFCHR:
	case IFLNK:
	case IFSOCK:
	case IFIFO:
		return (1);

	default:
		if (debug)
			printf("bad file type 0%o\n", DIP(dp, di_mode));
		return (0);
	}
}

int
reply(const char *question)
{
	int persevere;
	char c;

	if (preen)
		pfatal("INTERNAL ERROR: GOT TO reply()");
	persevere = !strcmp(question, "CONTINUE");
	printf("\n");
	if (!persevere && (nflag || (fswritefd < 0 && bkgrdflag == 0))) {
		printf("%s? no\n\n", question);
		resolved = 0;
		return (0);
	}
	if (yflag || (persevere && nflag)) {
		printf("%s? yes\n\n", question);
		return (1);
	}
	do	{
		printf("%s? [yn] ", question);
		(void) fflush(stdout);
		c = getc(stdin);
		while (c != '\n' && getc(stdin) != '\n') {
			if (feof(stdin)) {
				resolved = 0;
				return (0);
			}
		}
	} while (c != 'y' && c != 'Y' && c != 'n' && c != 'N');
	printf("\n");
	if (c == 'y' || c == 'Y')
		return (1);
	resolved = 0;
	return (0);
}

/*
 * Look up state information for an inode.
 */
struct inostat *
inoinfo(ino_t inum)
{
	static struct inostat unallocated = { USTATE, 0, 0 };
	struct inostatlist *ilp;
	int iloff;

	if (inum > maxino)
		errx(EEXIT, "inoinfo: inumber %ju out of range",
		    (uintmax_t)inum);
	ilp = &inostathead[inum / sblock.fs_ipg];
	iloff = inum % sblock.fs_ipg;
	if (iloff >= ilp->il_numalloced)
		return (&unallocated);
	return (&ilp->il_stat[iloff]);
}

/*
 * Malloc buffers and set up cache.
 */
void
bufinit(void)
{
	struct bufarea *bp;
	long bufcnt, i;
	char *bufp;

	pbp = pdirbp = (struct bufarea *)0;
	bufp = Malloc((unsigned int)sblock.fs_bsize);
	if (bufp == NULL)
		errx(EEXIT, "cannot allocate buffer pool");
	cgblk.b_un.b_buf = bufp;
	initbarea(&cgblk, BT_CYLGRP);
	TAILQ_INIT(&bufhead);
	bufcnt = MAXBUFS;
	if (bufcnt < MINBUFS)
		bufcnt = MINBUFS;
	for (i = 0; i < bufcnt; i++) {
		bp = (struct bufarea *)Malloc(sizeof(struct bufarea));
		bufp = Malloc((unsigned int)sblock.fs_bsize);
		if (bp == NULL || bufp == NULL) {
			if (i >= MINBUFS)
				break;
			errx(EEXIT, "cannot allocate buffer pool");
		}
		bp->b_un.b_buf = bufp;
		TAILQ_INSERT_HEAD(&bufhead, bp, b_list);
		initbarea(bp, BT_UNKNOWN);
	}
	numbufs = i;	/* save number of buffers */
	for (i = 0; i < BT_NUMBUFTYPES; i++) {
		readtime[i].tv_sec = totalreadtime[i].tv_sec = 0;
		readtime[i].tv_nsec = totalreadtime[i].tv_nsec = 0;
		readcnt[i] = totalreadcnt[i] = 0;
	}
}

/*
 * Manage cylinder group buffers.
 */
static struct bufarea *cgbufs;	/* header for cylinder group cache */
static int flushtries;		/* number of tries to reclaim memory */

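/*
 * cgget() lazily allocates the per-cylinder-group buffer array on first
 * use.  If a buffer cannot be obtained for a group, or flushentry() has
 * already begun reclaiming memory, the group is read into the shared
 * cgblk buffer instead of being cached.
 */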
struct bufarea *
cgget(int cg)
{
	struct bufarea *cgbp;
	struct cg *cgp;

	if (cgbufs == NULL) {
		cgbufs = calloc(sblock.fs_ncg, sizeof(struct bufarea));
		if (cgbufs == NULL)
			errx(EEXIT, "cannot allocate cylinder group buffers");
	}
	cgbp = &cgbufs[cg];
	if (cgbp->b_un.b_cg != NULL)
		return (cgbp);
	cgp = NULL;
	if (flushtries == 0)
		cgp = malloc((unsigned int)sblock.fs_cgsize);
	if (cgp == NULL) {
		getblk(&cgblk, cgtod(&sblock, cg), sblock.fs_cgsize);
		return (&cgblk);
	}
	cgbp->b_un.b_cg = cgp;
	initbarea(cgbp, BT_CYLGRP);
	getblk(cgbp, cgtod(&sblock, cg), sblock.fs_cgsize);
	return (cgbp);
}

/*
 * Attempt to flush a cylinder group cache entry.
 * Return whether the flush was successful.
 */
int
flushentry(void)
{
	struct bufarea *cgbp;

	if (flushtries == sblock.fs_ncg || cgbufs == NULL)
		return (0);
	cgbp = &cgbufs[flushtries++];
	if (cgbp->b_un.b_cg == NULL)
		return (0);
	flush(fswritefd, cgbp);
	free(cgbp->b_un.b_buf);
	cgbp->b_un.b_buf = NULL;
	return (1);
}

/*
 * Manage a cache of directory blocks.
 */
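/*
 * The buffer list is kept in most-recently-used order: a lookup that hits
 * moves the buffer to the head of bufhead, and on a miss the reverse scan
 * below recycles the least recently used buffer not marked B_INUSE.
 */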
struct bufarea *
getdatablk(ufs2_daddr_t blkno, long size, int type)
{
	struct bufarea *bp;

	TAILQ_FOREACH(bp, &bufhead, b_list)
		if (bp->b_bno == fsbtodb(&sblock, blkno))
			goto foundit;
	TAILQ_FOREACH_REVERSE(bp, &bufhead, buflist, b_list)
		if ((bp->b_flags & B_INUSE) == 0)
			break;
	if (bp == NULL)
		errx(EEXIT, "deadlocked buffer pool");
	bp->b_type = type;
	getblk(bp, blkno, size);
	/* fall through */
foundit:
	if (debug && bp->b_type != type)
		printf("Buffer type changed from %s to %s\n",
		    buftype[bp->b_type], buftype[type]);
	TAILQ_REMOVE(&bufhead, bp, b_list);
	TAILQ_INSERT_HEAD(&bufhead, bp, b_list);
	bp->b_flags |= B_INUSE;
	return (bp);
}

/*
 * Timespec operations (from <sys/time.h>).
 */
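/*
 * Both macros update their first argument in place and keep tv_nsec
 * normalized to the range [0, 1000000000) by carrying into or borrowing
 * from tv_sec.
 */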
#define	timespecsub(vvp, uvp)						\
	do {								\
		(vvp)->tv_sec -= (uvp)->tv_sec;				\
		(vvp)->tv_nsec -= (uvp)->tv_nsec;			\
		if ((vvp)->tv_nsec < 0) {				\
			(vvp)->tv_sec--;				\
			(vvp)->tv_nsec += 1000000000;			\
		}							\
	} while (0)
#define	timespecadd(vvp, uvp)						\
	do {								\
		(vvp)->tv_sec += (uvp)->tv_sec;				\
		(vvp)->tv_nsec += (uvp)->tv_nsec;			\
		if ((vvp)->tv_nsec >= 1000000000) {			\
			(vvp)->tv_sec++;				\
			(vvp)->tv_nsec -= 1000000000;			\
		}							\
	} while (0)

void
getblk(struct bufarea *bp, ufs2_daddr_t blk, long size)
{
	ufs2_daddr_t dblk;
	struct timespec start, finish;

	dblk = fsbtodb(&sblock, blk);
	if (bp->b_bno == dblk) {
		totalreads++;
	} else {
		flush(fswritefd, bp);
		if (debug) {
			readcnt[bp->b_type]++;
			clock_gettime(CLOCK_REALTIME_PRECISE, &start);
		}
		bp->b_errs = blread(fsreadfd, bp->b_un.b_buf, dblk, size);
		if (debug) {
			clock_gettime(CLOCK_REALTIME_PRECISE, &finish);
			timespecsub(&finish, &start);
			timespecadd(&readtime[bp->b_type], &finish);
		}
		bp->b_bno = dblk;
		bp->b_size = size;
	}
}

void
flush(int fd, struct bufarea *bp)
{
	int i, j;

	if (!bp->b_dirty)
		return;
	/*
	 * Calculate any needed check hashes.
	 */
	switch (bp->b_type) {
	case BT_CYLGRP:
		if ((sblock.fs_metackhash & CK_CYLGRP) == 0)
			break;
		bp->b_un.b_cg->cg_ckhash = 0;
		bp->b_un.b_cg->cg_ckhash =
		    calculate_crc32c(~0L, bp->b_un.b_buf, bp->b_size);
		break;
	default:
		break;
	}
	bp->b_dirty = 0;
	if (fswritefd < 0) {
		pfatal("WRITING IN READ_ONLY MODE.\n");
		return;
	}
	if (bp->b_errs != 0)
		pfatal("WRITING %sZERO'ED BLOCK %lld TO DISK\n",
		    (bp->b_errs == bp->b_size / dev_bsize) ? "" : "PARTIALLY ",
		    (long long)bp->b_bno);
	bp->b_errs = 0;
	blwrite(fd, bp->b_un.b_buf, bp->b_bno, bp->b_size);
	if (bp != &sblk)
		return;
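	/*
	 * When the superblock itself is being flushed, also write back the
	 * in-core cylinder group summary area (fs_csp) to its on-disk
	 * location at fs_csaddr, one file system block at a time.
	 */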
	for (i = 0, j = 0; i < sblock.fs_cssize; i += sblock.fs_bsize, j++) {
		blwrite(fswritefd, (char *)sblock.fs_csp + i,
		    fsbtodb(&sblock, sblock.fs_csaddr + j * sblock.fs_frag),
		    MIN(sblock.fs_cssize - i, sblock.fs_bsize));
	}
}

void
rwerror(const char *mesg, ufs2_daddr_t blk)
{

	if (bkgrdcheck)
		exit(EEXIT);
	if (preen == 0)
		printf("\n");
	pfatal("CANNOT %s: %ld", mesg, (long)blk);
	if (reply("CONTINUE") == 0)
		exit(EEXIT);
}

void
ckfini(int markclean)
{
	struct bufarea *bp, *nbp;
	int ofsmodified, cnt;

	if (bkgrdflag) {
		unlink(snapname);
		if ((!(sblock.fs_flags & FS_UNCLEAN)) != markclean) {
			cmd.value = FS_UNCLEAN;
			cmd.size = markclean ? -1 : 1;
			if (sysctlbyname("vfs.ffs.setflags", 0, 0,
			    &cmd, sizeof cmd) == -1)
				rwerror("SET FILE SYSTEM FLAGS", FS_UNCLEAN);
			if (!preen) {
				printf("\n***** FILE SYSTEM MARKED %s *****\n",
				    markclean ? "CLEAN" : "DIRTY");
				if (!markclean)
					rerun = 1;
			}
		} else if (!preen && !markclean) {
			printf("\n***** FILE SYSTEM STILL DIRTY *****\n");
			rerun = 1;
		}
	}
	if (debug && totalreads > 0)
		printf("cache with %d buffers missed %ld of %ld (%d%%)\n",
		    numbufs, totaldiskreads, totalreads,
		    (int)(totaldiskreads * 100 / totalreads));
	if (fswritefd < 0) {
		(void)close(fsreadfd);
		return;
	}
	flush(fswritefd, &sblk);
	if (havesb && cursnapshot == 0 && sblock.fs_magic == FS_UFS2_MAGIC &&
	    sblk.b_bno != sblock.fs_sblockloc / dev_bsize &&
	    !preen && reply("UPDATE STANDARD SUPERBLOCK")) {
		sblk.b_bno = sblock.fs_sblockloc / dev_bsize;
		sbdirty();
		flush(fswritefd, &sblk);
	}
	flush(fswritefd, &cgblk);
	free(cgblk.b_un.b_buf);
	cnt = 0;
	TAILQ_FOREACH_REVERSE_SAFE(bp, &bufhead, buflist, b_list, nbp) {
		TAILQ_REMOVE(&bufhead, bp, b_list);
		cnt++;
		flush(fswritefd, bp);
		free(bp->b_un.b_buf);
		free((char *)bp);
	}
	if (numbufs != cnt)
		errx(EEXIT, "panic: lost %d buffers", numbufs - cnt);
	if (cgbufs != NULL) {
		for (cnt = 0; cnt < sblock.fs_ncg; cnt++) {
			if (cgbufs[cnt].b_un.b_cg == NULL)
				continue;
			flush(fswritefd, &cgbufs[cnt]);
			free(cgbufs[cnt].b_un.b_cg);
		}
		free(cgbufs);
	}
	pbp = pdirbp = (struct bufarea *)0;
	if (cursnapshot == 0 && sblock.fs_clean != markclean) {
		if ((sblock.fs_clean = markclean) != 0) {
			sblock.fs_flags &= ~(FS_UNCLEAN | FS_NEEDSFSCK);
			sblock.fs_pendingblocks = 0;
			sblock.fs_pendinginodes = 0;
		}
		sbdirty();
		ofsmodified = fsmodified;
		flush(fswritefd, &sblk);
		fsmodified = ofsmodified;
		if (!preen) {
			printf("\n***** FILE SYSTEM MARKED %s *****\n",
			    markclean ? "CLEAN" : "DIRTY");
			if (!markclean)
				rerun = 1;
		}
	} else if (!preen) {
		if (markclean) {
			printf("\n***** FILE SYSTEM IS CLEAN *****\n");
		} else {
			printf("\n***** FILE SYSTEM STILL DIRTY *****\n");
			rerun = 1;
		}
	}
	(void)close(fsreadfd);
	(void)close(fswritefd);
}

/*
 * Print out I/O statistics.
 */
void
IOstats(char *what)
{
	int i;

	if (debug == 0)
		return;
	if (diskreads == 0) {
		printf("%s: no I/O\n\n", what);
		return;
	}
	if (startpass.tv_sec == 0)
		startpass = startprog;
	printf("%s: I/O statistics\n", what);
	printIOstats();
	totaldiskreads += diskreads;
	diskreads = 0;
	for (i = 0; i < BT_NUMBUFTYPES; i++) {
		timespecadd(&totalreadtime[i], &readtime[i]);
		totalreadcnt[i] += readcnt[i];
		readtime[i].tv_sec = readtime[i].tv_nsec = 0;
		readcnt[i] = 0;
	}
	clock_gettime(CLOCK_REALTIME_PRECISE, &startpass);
}

void
finalIOstats(void)
{
	int i;

	if (debug == 0)
		return;
	printf("Final I/O statistics\n");
	totaldiskreads += diskreads;
	diskreads = totaldiskreads;
	startpass = startprog;
	for (i = 0; i < BT_NUMBUFTYPES; i++) {
		timespecadd(&totalreadtime[i], &readtime[i]);
		totalreadcnt[i] += readcnt[i];
		readtime[i] = totalreadtime[i];
		readcnt[i] = totalreadcnt[i];
	}
	printIOstats();
}

static void
printIOstats(void)
{
	long long msec, totalmsec;
	int i;

	clock_gettime(CLOCK_REALTIME_PRECISE, &finishpass);
	timespecsub(&finishpass, &startpass);
	printf("Running time: %jd.%03ld sec\n",
		(intmax_t)finishpass.tv_sec, finishpass.tv_nsec / 1000000);
	printf("buffer reads by type:\n");
	for (totalmsec = 0, i = 0; i < BT_NUMBUFTYPES; i++)
		totalmsec += readtime[i].tv_sec * 1000 +
		    readtime[i].tv_nsec / 1000000;
	if (totalmsec == 0)
		totalmsec = 1;
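	/*
	 * Percentages are printed with one decimal place using integer
	 * arithmetic: count * 100 / total gives the whole percent and
	 * (count * 1000 / total) % 10 gives the tenths digit.
	 */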
	for (i = 0; i < BT_NUMBUFTYPES; i++) {
		if (readcnt[i] == 0)
			continue;
		msec =
		    readtime[i].tv_sec * 1000 + readtime[i].tv_nsec / 1000000;
		printf("%21s:%8ld %2ld.%ld%% %4jd.%03ld sec %2lld.%lld%%\n",
		    buftype[i], readcnt[i], readcnt[i] * 100 / diskreads,
		    (readcnt[i] * 1000 / diskreads) % 10,
		    (intmax_t)readtime[i].tv_sec, readtime[i].tv_nsec / 1000000,
		    msec * 100 / totalmsec, (msec * 1000 / totalmsec) % 10);
	}
	printf("\n");
}

int
blread(int fd, char *buf, ufs2_daddr_t blk, long size)
{
	char *cp;
	int i, errs;
	off_t offset;

	offset = blk;
	offset *= dev_bsize;
	if (bkgrdflag)
		slowio_start();
	totalreads++;
	diskreads++;
	if (lseek(fd, offset, 0) < 0)
		rwerror("SEEK BLK", blk);
	else if (read(fd, buf, (int)size) == size) {
		if (bkgrdflag)
			slowio_end();
		return (0);
	}

	/*
	 * This is handled specially here instead of in rwerror because
	 * rwerror is used for all sorts of errors, not just true read/write
	 * errors.  It should be refactored and fixed.
	 */
	if (surrender) {
		pfatal("CANNOT READ_BLK: %ld", (long)blk);
		errx(EEXIT, "ABORTING DUE TO READ ERRORS");
	} else
		rwerror("READ BLK", blk);

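	/*
	 * The full-block read failed.  Zero the buffer, then retry one
	 * sector at a time so that only the unreadable sectors are left
	 * zeroed, and report each bad sector to the operator.
	 */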
	if (lseek(fd, offset, 0) < 0)
		rwerror("SEEK BLK", blk);
	errs = 0;
	memset(buf, 0, (size_t)size);
	printf("THE FOLLOWING DISK SECTORS COULD NOT BE READ:");
	for (cp = buf, i = 0; i < size; i += secsize, cp += secsize) {
		if (read(fd, cp, (int)secsize) != secsize) {
			(void)lseek(fd, offset + i + secsize, 0);
			if (secsize != dev_bsize && dev_bsize != 1)
				printf(" %jd (%jd),",
				    (intmax_t)(blk * dev_bsize + i) / secsize,
				    (intmax_t)blk + i / dev_bsize);
			else
				printf(" %jd,", (intmax_t)blk + i / dev_bsize);
			errs++;
		}
	}
	printf("\n");
	if (errs)
		resolved = 0;
	return (errs);
}

void
blwrite(int fd, char *buf, ufs2_daddr_t blk, ssize_t size)
{
	int i;
	char *cp;
	off_t offset;

	if (fd < 0)
		return;
	offset = blk;
	offset *= dev_bsize;
	if (lseek(fd, offset, 0) < 0)
		rwerror("SEEK BLK", blk);
	else if (write(fd, buf, size) == size) {
		fsmodified = 1;
		return;
	}
	resolved = 0;
	rwerror("WRITE BLK", blk);
	if (lseek(fd, offset, 0) < 0)
		rwerror("SEEK BLK", blk);
	printf("THE FOLLOWING SECTORS COULD NOT BE WRITTEN:");
	for (cp = buf, i = 0; i < size; i += dev_bsize, cp += dev_bsize)
		if (write(fd, cp, dev_bsize) != dev_bsize) {
			(void)lseek(fd, offset + i + dev_bsize, 0);
			printf(" %jd,", (intmax_t)blk + i / dev_bsize);
		}
	printf("\n");
	return;
}

void
blerase(int fd, ufs2_daddr_t blk, long size)
{
	off_t ioarg[2];

	if (fd < 0)
		return;
	ioarg[0] = blk * dev_bsize;
	ioarg[1] = size;
	ioctl(fd, DIOCGDELETE, ioarg);
	/* we don't really care if we succeed or not */
	return;
}

/*
 * Fill a contiguous region with all-zeroes.  Note ZEROBUFSIZE is by
 * definition a multiple of dev_bsize.
 */
void
blzero(int fd, ufs2_daddr_t blk, long size)
{
	static char *zero;
	off_t offset, len;

	if (fd < 0)
		return;
	if (zero == NULL) {
		zero = calloc(ZEROBUFSIZE, 1);
		if (zero == NULL)
			errx(EEXIT, "cannot allocate buffer pool");
	}
	offset = blk * dev_bsize;
	if (lseek(fd, offset, 0) < 0)
		rwerror("SEEK BLK", blk);
	while (size > 0) {
		len = MIN(ZEROBUFSIZE, size);
		if (write(fd, zero, len) != len)
			rwerror("WRITE BLK", blk);
		blk += len / dev_bsize;
		size -= len;
	}
}

/*
 * Verify cylinder group's magic number and other parameters.  If the
 * test fails, offer an option to rebuild the whole cylinder group.
 */
int
check_cgmagic(int cg, struct bufarea *cgbp)
{
	struct cg *cgp = cgbp->b_un.b_cg;

	/*
	 * Extended cylinder group checks.
	 */
	if (cg_chkmagic(cgp) &&
	    ((sblock.fs_magic == FS_UFS1_MAGIC &&
	      cgp->cg_old_niblk == sblock.fs_ipg &&
	      cgp->cg_ndblk <= sblock.fs_fpg &&
	      cgp->cg_old_ncyl <= sblock.fs_old_cpg) ||
	     (sblock.fs_magic == FS_UFS2_MAGIC &&
	      cgp->cg_niblk == sblock.fs_ipg &&
	      cgp->cg_ndblk <= sblock.fs_fpg &&
	      cgp->cg_initediblk <= sblock.fs_ipg))) {
		return (1);
	}
	pfatal("CYLINDER GROUP %d: BAD MAGIC NUMBER", cg);
	if (!reply("REBUILD CYLINDER GROUP")) {
		printf("YOU WILL NEED TO RERUN FSCK.\n");
		rerun = 1;
		return (1);
	}
	/*
	 * Zero out the cylinder group and then initialize critical fields.
	 * Bit maps and summaries will be recalculated by later passes.
	 */
	memset(cgp, 0, (size_t)sblock.fs_cgsize);
	cgp->cg_magic = CG_MAGIC;
	cgp->cg_cgx = cg;
	cgp->cg_niblk = sblock.fs_ipg;
	cgp->cg_initediblk = MIN(sblock.fs_ipg, 2 * INOPB(&sblock));
	if (cgbase(&sblock, cg) + sblock.fs_fpg < sblock.fs_size)
		cgp->cg_ndblk = sblock.fs_fpg;
	else
		cgp->cg_ndblk = sblock.fs_size - cgbase(&sblock, cg);
	cgp->cg_iusedoff = &cgp->cg_space[0] - (u_char *)(&cgp->cg_firstfield);
	if (sblock.fs_magic == FS_UFS1_MAGIC) {
		cgp->cg_niblk = 0;
		cgp->cg_initediblk = 0;
		cgp->cg_old_ncyl = sblock.fs_old_cpg;
		cgp->cg_old_niblk = sblock.fs_ipg;
		cgp->cg_old_btotoff = cgp->cg_iusedoff;
		cgp->cg_old_boff = cgp->cg_old_btotoff +
		    sblock.fs_old_cpg * sizeof(int32_t);
		cgp->cg_iusedoff = cgp->cg_old_boff +
		    sblock.fs_old_cpg * sizeof(u_int16_t);
	}
	cgp->cg_freeoff = cgp->cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
	cgp->cg_nextfreeoff = cgp->cg_freeoff + howmany(sblock.fs_fpg,CHAR_BIT);
	if (sblock.fs_contigsumsize > 0) {
		cgp->cg_nclusterblks = cgp->cg_ndblk / sblock.fs_frag;
		cgp->cg_clustersumoff =
		    roundup(cgp->cg_nextfreeoff, sizeof(u_int32_t));
		cgp->cg_clustersumoff -= sizeof(u_int32_t);
		cgp->cg_clusteroff = cgp->cg_clustersumoff +
		    (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t);
		cgp->cg_nextfreeoff = cgp->cg_clusteroff +
		    howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
	}
	dirty(cgbp);
	return (0);
}

/*
 * allocate a data block with the specified number of fragments
 */
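/*
 * The search walks the data blocks in full-block steps (fs_frag fragments
 * at a time) looking for a run of "frags" consecutive fragments that are
 * free in fsck's block map.  Once found, the fragments are marked in use
 * both in that map and in the cylinder group's free-fragment bitmap, and
 * the group's summary counts are adjusted to match.
 */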
ufs2_daddr_t
allocblk(long frags)
{
	int i, j, k, cg, baseblk;
	struct bufarea *cgbp;
	struct cg *cgp;

	if (frags <= 0 || frags > sblock.fs_frag)
		return (0);
	for (i = 0; i < maxfsblock - sblock.fs_frag; i += sblock.fs_frag) {
		for (j = 0; j <= sblock.fs_frag - frags; j++) {
			if (testbmap(i + j))
				continue;
			for (k = 1; k < frags; k++)
				if (testbmap(i + j + k))
					break;
			if (k < frags) {
				j += k;
				continue;
			}
			cg = dtog(&sblock, i + j);
			cgbp = cgget(cg);
			cgp = cgbp->b_un.b_cg;
			if (!check_cgmagic(cg, cgbp))
				return (0);
			baseblk = dtogd(&sblock, i + j);
			for (k = 0; k < frags; k++) {
				setbmap(i + j + k);
				clrbit(cg_blksfree(cgp), baseblk + k);
			}
			n_blks += frags;
			if (frags == sblock.fs_frag)
				cgp->cg_cs.cs_nbfree--;
			else
				cgp->cg_cs.cs_nffree -= frags;
			dirty(cgbp);
			return (i + j);
		}
	}
	return (0);
}

/*
 * Free a previously allocated block
 */
void
freeblk(ufs2_daddr_t blkno, long frags)
{
	struct inodesc idesc;

	idesc.id_blkno = blkno;
	idesc.id_numfrags = frags;
	(void)pass4check(&idesc);
}

/* Slow down IO so as to leave some disk bandwidth for other processes */
void
slowio_start()
{

	/* Delay one in every 8 operations */
	slowio_pollcnt = (slowio_pollcnt + 1) & 7;
	if (slowio_pollcnt == 0) {
		gettimeofday(&slowio_starttime, NULL);
	}
}

void
slowio_end()
{
	struct timeval tv;
	int delay_usec;

	if (slowio_pollcnt != 0)
		return;

	/* Update the slowdown interval. */
	gettimeofday(&tv, NULL);
	delay_usec = (tv.tv_sec - slowio_starttime.tv_sec) * 1000000 +
	    (tv.tv_usec - slowio_starttime.tv_usec);
	if (delay_usec < 64)
		delay_usec = 64;
	if (delay_usec > 2500000)
		delay_usec = 2500000;
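	/*
	 * Fold the new sample into a running average with weight 1/64,
	 * i.e. roughly an exponential moving average of recent delays.
	 */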
	slowio_delay_usec = (slowio_delay_usec * 63 + delay_usec) >> 6;
	/* delay by 8 times the average IO delay */
	if (slowio_delay_usec > 64)
		usleep(slowio_delay_usec * 8);
}

/*
 * Find a pathname
 */
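/*
 * The path is reconstructed from the bottom up: each iteration looks up
 * ".." to find the parent directory, then searches the parent for the name
 * of the current inode, prepending that component (and a '/') to the string
 * being built backwards from the end of namebuf.
 */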
void
getpathname(char *namebuf, ino_t curdir, ino_t ino)
{
	int len;
	char *cp;
	struct inodesc idesc;
	static int busy = 0;

	if (curdir == ino && ino == UFS_ROOTINO) {
		(void)strcpy(namebuf, "/");
		return;
	}
	if (busy || !INO_IS_DVALID(curdir)) {
		(void)strcpy(namebuf, "?");
		return;
	}
	busy = 1;
	memset(&idesc, 0, sizeof(struct inodesc));
	idesc.id_type = DATA;
	idesc.id_fix = IGNORE;
	cp = &namebuf[MAXPATHLEN - 1];
	*cp = '\0';
	if (curdir != ino) {
		idesc.id_parent = curdir;
		goto namelookup;
	}
	while (ino != UFS_ROOTINO) {
		idesc.id_number = ino;
		idesc.id_func = findino;
		idesc.id_name = strdup("..");
		if ((ckinode(ginode(ino), &idesc) & FOUND) == 0)
			break;
	namelookup:
		idesc.id_number = idesc.id_parent;
		idesc.id_parent = ino;
		idesc.id_func = findname;
		idesc.id_name = namebuf;
		if ((ckinode(ginode(idesc.id_number), &idesc)&FOUND) == 0)
			break;
		len = strlen(namebuf);
		cp -= len;
		memmove(cp, namebuf, (size_t)len);
		*--cp = '/';
		if (cp < &namebuf[UFS_MAXNAMLEN])
			break;
		ino = idesc.id_number;
	}
	busy = 0;
	if (ino != UFS_ROOTINO)
		*--cp = '?';
	memmove(namebuf, cp, (size_t)(&namebuf[MAXPATHLEN] - cp));
}

void
catch(int sig __unused)
{

	ckfini(0);
	exit(12);
}

/*
 * When preening, allow a single quit to signal
 * a special exit after file system checks complete
 * so that reboot sequence may be interrupted.
 */
void
catchquit(int sig __unused)
{
	printf("returning to single-user after file system check\n");
	returntosingle = 1;
	(void)signal(SIGQUIT, SIG_DFL);
}

/*
 * determine whether an inode should be fixed.
 */
int
dofix(struct inodesc *idesc, const char *msg)
{

	switch (idesc->id_fix) {

	case DONTKNOW:
		if (idesc->id_type == DATA)
			direrror(idesc->id_number, msg);
		else
			pwarn("%s", msg);
		if (preen) {
			printf(" (SALVAGED)\n");
			idesc->id_fix = FIX;
			return (ALTERED);
		}
		if (reply("SALVAGE") == 0) {
			idesc->id_fix = NOFIX;
			return (0);
		}
		idesc->id_fix = FIX;
		return (ALTERED);

	case FIX:
		return (ALTERED);

	case NOFIX:
	case IGNORE:
		return (0);

	default:
		errx(EEXIT, "UNKNOWN INODESC FIX MODE %d", idesc->id_fix);
	}
	/* NOTREACHED */
	return (0);
}

#include <stdarg.h>

/*
 * An unexpected inconsistency occurred.
 * Die if preening or file system is running with soft dependency protocol,
 * otherwise just print message and continue.
 */
void
pfatal(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	if (!preen) {
		(void)vfprintf(stdout, fmt, ap);
		va_end(ap);
		if (usedsoftdep)
			(void)fprintf(stdout,
			    "\nUNEXPECTED SOFT UPDATE INCONSISTENCY\n");
		/*
		 * Force foreground fsck to clean up inconsistency.
		 */
		if (bkgrdflag) {
			cmd.value = FS_NEEDSFSCK;
			cmd.size = 1;
			if (sysctlbyname("vfs.ffs.setflags", 0, 0,
			    &cmd, sizeof cmd) == -1)
				pwarn("CANNOT SET FS_NEEDSFSCK FLAG\n");
			fprintf(stdout, "CANNOT RUN IN BACKGROUND\n");
			ckfini(0);
			exit(EEXIT);
		}
		return;
	}
	if (cdevname == NULL)
		cdevname = strdup("fsck");
	(void)fprintf(stdout, "%s: ", cdevname);
	(void)vfprintf(stdout, fmt, ap);
	(void)fprintf(stdout,
	    "\n%s: UNEXPECTED%sINCONSISTENCY; RUN fsck MANUALLY.\n",
	    cdevname, usedsoftdep ? " SOFT UPDATE " : " ");
	/*
	 * Force foreground fsck to clean up inconsistency.
	 */
	if (bkgrdflag) {
		cmd.value = FS_NEEDSFSCK;
		cmd.size = 1;
		if (sysctlbyname("vfs.ffs.setflags", 0, 0,
		    &cmd, sizeof cmd) == -1)
			pwarn("CANNOT SET FS_NEEDSFSCK FLAG\n");
	}
	ckfini(0);
	exit(EEXIT);
}

/*
 * Pwarn just prints a message when not preening or running soft dependency
 * protocol, or a warning (preceded by filename) when preening.
 */
void
pwarn(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	if (preen)
		(void)fprintf(stdout, "%s: ", cdevname);
	(void)vfprintf(stdout, fmt, ap);
	va_end(ap);
}

/*
 * Stub for routines from kernel.
 */
void
panic(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	pfatal("INTERNAL INCONSISTENCY:");
	(void)vfprintf(stdout, fmt, ap);
	va_end(ap);
	exit(EEXIT);
}
1062