1 /* 2 * Copyright (c) 2000 Christoph Herrmann, Thomas-Henning von Kamptz 3 * Copyright (c) 1980, 1989, 1993 The Regents of the University of California. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * Christoph Herrmann and Thomas-Henning von Kamptz, Munich and Frankfurt. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgment: 19 * This product includes software developed by the University of 20 * California, Berkeley and its contributors, as well as Christoph 21 * Herrmann and Thomas-Henning von Kamptz. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 
37 * 38 * $TSHeader: src/sbin/growfs/growfs.c,v 1.5 2000/12/12 19:31:00 tomsoft Exp $ 39 * $FreeBSD$ 40 * 41 */ 42 43 #ifndef lint 44 static const char copyright[] = 45 "@(#) Copyright (c) 2000 Christoph Herrmann, Thomas-Henning von Kamptz\n\ 46 Copyright (c) 1980, 1989, 1993 The Regents of the University of California.\n\ 47 All rights reserved.\n"; 48 #endif /* not lint */ 49 50 #ifndef lint 51 static const char rcsid[] = 52 "$FreeBSD$"; 53 #endif /* not lint */ 54 55 /* ********************************************************** INCLUDES ***** */ 56 #include <sys/param.h> 57 #include <sys/disklabel.h> 58 #include <sys/ioctl.h> 59 #include <sys/stat.h> 60 61 #include <stdio.h> 62 #include <paths.h> 63 #include <ctype.h> 64 #include <err.h> 65 #include <fcntl.h> 66 #include <stdlib.h> 67 #include <string.h> 68 #include <unistd.h> 69 #include <ufs/ufs/dinode.h> 70 #include <ufs/ffs/fs.h> 71 72 #include "debug.h" 73 74 /* *************************************************** GLOBALS & TYPES ***** */ 75 #ifdef FS_DEBUG 76 int _dbg_lvl_ = (DL_INFO); /* DL_TRC */ 77 #endif /* FS_DEBUG */ 78 79 static union { 80 struct fs fs; 81 char pad[SBSIZE]; 82 } fsun1, fsun2; 83 #define sblock fsun1.fs /* the new superblock */ 84 #define osblock fsun2.fs /* the old superblock */ 85 86 static union { 87 struct cg cg; 88 char pad[MAXBSIZE]; 89 } cgun1, cgun2; 90 #define acg cgun1.cg /* a cylinder cgroup (new) */ 91 #define aocg cgun2.cg /* an old cylinder group */ 92 93 static char ablk[MAXBSIZE]; /* a block */ 94 static char i1blk[MAXBSIZE]; /* some indirect blocks */ 95 static char i2blk[MAXBSIZE]; 96 static char i3blk[MAXBSIZE]; 97 98 /* where to write back updated blocks */ 99 static daddr_t in_src, i1_src, i2_src, i3_src; 100 101 /* what object contains the reference */ 102 enum pointer_source { 103 GFS_PS_INODE, 104 GFS_PS_IND_BLK_LVL1, 105 GFS_PS_IND_BLK_LVL2, 106 GFS_PS_IND_BLK_LVL3 107 }; 108 109 static struct csum *fscs; /* cylinder summary */ 110 111 static struct dinode zino[MAXBSIZE/sizeof(struct dinode)]; /* some inodes */ 112 113 /* 114 * An array of elements of type struct gfs_bpp describes all blocks to 115 * be relocated in order to free the space needed for the cylinder group 116 * summary for all cylinder groups located in the first cylinder group. 
117 */ 118 struct gfs_bpp { 119 daddr_t old; /* old block number */ 120 daddr_t new; /* new block number */ 121 #define GFS_FL_FIRST 1 122 #define GFS_FL_LAST 2 123 unsigned long flags; /* special handling required */ 124 int found; /* how many references were updated */ 125 }; 126 127 /* ******************************************************** PROTOTYPES ***** */ 128 static void rdfs(daddr_t, int, char *, int); 129 static void wtfs(daddr_t, int, char *, int, int); 130 static daddr_t alloc(void); 131 static int charsperline(void); 132 static void usage(void); 133 static int isblock(struct fs *, unsigned char *, int); 134 static void clrblock(struct fs *, unsigned char *, int); 135 static void setblock(struct fs *, unsigned char *, int); 136 static void initcg(int, time_t, int, int); 137 static void updjcg(int, time_t, int, int, int); 138 static void updcsloc(time_t, int, int, int); 139 static struct disklabel *get_disklabel(int); 140 static void return_disklabel(int, struct disklabel *, int); 141 static struct dinode *ginode(ino_t, int, int); 142 static void frag_adjust(daddr_t, int); 143 static void cond_bl_upd(ufs_daddr_t *, struct gfs_bpp *, 144 enum pointer_source, int, int); 145 static void updclst(int); 146 static void updrefs(int, ino_t, struct gfs_bpp *, int, int, int); 147 148 /* ************************************************************ growfs ***** */ 149 /* 150 * Here we actually start growing the filesystem. We basically read the 151 * cylinder summary from the first cylinder group as we want to update 152 * this on the fly during our various operations. First we handle the 153 * changes in the former last cylinder group. Afterwards we create all new 154 * cylinder groups. Now we handle the cylinder group containing the 155 * cylinder summary which might result in a relocation of the whole 156 * structure. In the end we write back the updated cylinder summary, the 157 * new superblock, and slightly patched versions of the super block 158 * copies. 159 */ 160 static void 161 growfs(int fsi, int fso, int Nflag) 162 { 163 DBG_FUNC("growfs") 164 long i; 165 long cylno, j; 166 time_t utime; 167 int width; 168 char tmpbuf[100]; 169 #ifdef FSIRAND 170 static int randinit=0; 171 172 DBG_ENTER; 173 174 if (!randinit) { 175 randinit = 1; 176 srandomdev(); 177 } 178 #else /* not FSIRAND */ 179 180 DBG_ENTER; 181 182 #endif /* FSIRAND */ 183 time(&utime); 184 185 /* 186 * Get the cylinder summary into the memory. 187 */ 188 fscs = (struct csum *)calloc(1, (size_t)sblock.fs_cssize); 189 if(fscs == NULL) { 190 errx(1, "calloc failed"); 191 } 192 for (i = 0; i < osblock.fs_cssize; i += osblock.fs_bsize) { 193 rdfs(fsbtodb(&osblock, osblock.fs_csaddr + 194 numfrags(&osblock, i)), MIN(osblock.fs_cssize - i, 195 osblock.fs_bsize), ((char *)fscs) + i, fsi); 196 } 197 198 #ifdef FS_DEBUG 199 { 200 struct csum *dbg_csp; 201 int dbg_csc; 202 char dbg_line[80]; 203 204 dbg_csp=fscs; 205 for(dbg_csc=0; dbg_csc<osblock.fs_ncg; dbg_csc++) { 206 snprintf(dbg_line, 80, "%d. old csum in old location", dbg_csc); 207 DBG_DUMP_CSUM(&osblock, 208 dbg_line, 209 dbg_csp++); 210 } 211 } 212 #endif /* FS_DEBUG */ 213 DBG_PRINT0("fscs read\n"); 214 215 /* 216 * Do all needed changes in the former last cylinder group. 217 */ 218 updjcg(osblock.fs_ncg-1, utime, fsi, fso, Nflag); 219 220 /* 221 * Dump out summary information about file system. 
222 */ 223 printf("growfs:\t%d sectors in %d %s of %d tracks, %d sectors\n", 224 sblock.fs_size * NSPF(&sblock), sblock.fs_ncyl, 225 "cylinders", sblock.fs_ntrak, sblock.fs_nsect); 226 #define B2MBFACTOR (1 / (1024.0 * 1024.0)) 227 printf("\t%.1fMB in %d cyl groups (%d c/g, %.2fMB/g, %d i/g)\n", 228 (float)sblock.fs_size * sblock.fs_fsize * B2MBFACTOR, 229 sblock.fs_ncg, sblock.fs_cpg, 230 (float)sblock.fs_fpg * sblock.fs_fsize * B2MBFACTOR, 231 sblock.fs_ipg); 232 #undef B2MBFACTOR 233 234 /* 235 * Now build the cylinders group blocks and 236 * then print out indices of cylinder groups. 237 */ 238 printf("super-block backups (for fsck -b #) at:\n"); 239 i = 0; 240 width = charsperline(); 241 242 /* 243 * Iterate for only the new cylinder groups. 244 */ 245 for (cylno = osblock.fs_ncg; cylno < sblock.fs_ncg; cylno++) { 246 initcg(cylno, utime, fso, Nflag); 247 j = sprintf(tmpbuf, " %ld%s", 248 fsbtodb(&sblock, cgsblock(&sblock, cylno)), 249 cylno < (sblock.fs_ncg-1) ? "," : "" ); 250 if (i + j >= width) { 251 printf("\n"); 252 i = 0; 253 } 254 i += j; 255 printf("%s", tmpbuf); 256 fflush(stdout); 257 } 258 printf("\n"); 259 260 /* 261 * Do all needed changes in the first cylinder group. 262 * allocate blocks in new location 263 */ 264 updcsloc(utime, fsi, fso, Nflag); 265 266 /* 267 * Now write the cylinder summary back to disk. 268 */ 269 for (i = 0; i < sblock.fs_cssize; i += sblock.fs_bsize) { 270 wtfs(fsbtodb(&sblock, sblock.fs_csaddr + numfrags(&sblock, i)), 271 MIN(sblock.fs_cssize - i, sblock.fs_bsize), 272 ((char *)fscs) + i, fso, Nflag); 273 } 274 DBG_PRINT0("fscs written\n"); 275 276 #ifdef FS_DEBUG 277 { 278 struct csum *dbg_csp; 279 int dbg_csc; 280 char dbg_line[80]; 281 282 dbg_csp=fscs; 283 for(dbg_csc=0; dbg_csc<sblock.fs_ncg; dbg_csc++) { 284 snprintf(dbg_line, 80, "%d. new csum in new location", dbg_csc); 285 DBG_DUMP_CSUM(&sblock, 286 dbg_line, 287 dbg_csp++); 288 } 289 } 290 #endif /* FS_DEBUG */ 291 292 /* 293 * Now write the new superblock back to disk. 294 */ 295 sblock.fs_time = utime; 296 wtfs((int)SBOFF / DEV_BSIZE, SBSIZE, (char *)&sblock, fso, Nflag); 297 DBG_PRINT0("sblock written\n"); 298 DBG_DUMP_FS(&sblock, 299 "new initial sblock"); 300 301 /* 302 * Clean up the dynamic fields in our superblock copies. 303 */ 304 sblock.fs_fmod = 0; 305 sblock.fs_clean = 1; 306 sblock.fs_ronly = 0; 307 sblock.fs_cgrotor = 0; 308 sblock.fs_state = 0; 309 memset((void *)&sblock.fs_fsmnt, 0, sizeof(sblock.fs_fsmnt)); 310 sblock.fs_flags &= FS_DOSOFTDEP; 311 312 /* 313 * XXX 314 * The following fields are currently distributed from the superblock 315 * to the copies: 316 * fs_minfree 317 * fs_rotdelay 318 * fs_maxcontig 319 * fs_maxbpg 320 * fs_minfree, 321 * fs_optim 322 * fs_flags regarding SOFTPDATES 323 * 324 * We probably should rather change the summary for the cylinder group 325 * statistics here to the value of what would be in there, if the file 326 * system were created initially with the new size. Therefor we still 327 * need to find an easy way of calculating that. 328 * Possibly we can try to read the first superblock copy and apply the 329 * "diffed" stats between the old and new superblock by still copying 330 * certain parameters onto that. 331 */ 332 333 /* 334 * Write out the duplicate super blocks. 
335 */ 336 for (cylno = 0; cylno < sblock.fs_ncg; cylno++) { 337 wtfs(fsbtodb(&sblock, cgsblock(&sblock, cylno)), 338 SBSIZE, (char *)&sblock, fso, Nflag); 339 } 340 DBG_PRINT0("sblock copies written\n"); 341 DBG_DUMP_FS(&sblock, 342 "new other sblocks"); 343 344 DBG_LEAVE; 345 return; 346 } 347 348 /* ************************************************************ initcg ***** */ 349 /* 350 * This creates a new cylinder group structure, for more details please see 351 * the source of newfs(8), as this function is taken over almost unchanged. 352 * As this is never called for the first cylinder group, the special 353 * provisions for that case are removed here. 354 */ 355 static void 356 initcg(int cylno, time_t utime, int fso, int Nflag) 357 { 358 DBG_FUNC("initcg") 359 daddr_t cbase, d, dlower, dupper, dmax, blkno; 360 long i; 361 register struct csum *cs; 362 #ifdef FSIRAND 363 long j; 364 #endif 365 366 DBG_ENTER; 367 368 /* 369 * Determine block bounds for cylinder group. 370 */ 371 cbase = cgbase(&sblock, cylno); 372 dmax = cbase + sblock.fs_fpg; 373 if (dmax > sblock.fs_size) { 374 dmax = sblock.fs_size; 375 } 376 dlower = cgsblock(&sblock, cylno) - cbase; 377 dupper = cgdmin(&sblock, cylno) - cbase; 378 if (cylno == 0) { 379 dupper += howmany(sblock.fs_cssize, sblock.fs_fsize); 380 } 381 cs = fscs + cylno; 382 memset(&acg, 0, (size_t)sblock.fs_cgsize); 383 acg.cg_time = utime; 384 acg.cg_magic = CG_MAGIC; 385 acg.cg_cgx = cylno; 386 if (cylno == sblock.fs_ncg - 1) { 387 acg.cg_ncyl = sblock.fs_ncyl % sblock.fs_cpg; 388 } else { 389 acg.cg_ncyl = sblock.fs_cpg; 390 } 391 acg.cg_niblk = sblock.fs_ipg; 392 acg.cg_ndblk = dmax - cbase; 393 if (sblock.fs_contigsumsize > 0) { 394 acg.cg_nclusterblks = acg.cg_ndblk / sblock.fs_frag; 395 } 396 acg.cg_btotoff = &acg.cg_space[0] - (u_char *)(&acg.cg_firstfield); 397 acg.cg_boff = acg.cg_btotoff + sblock.fs_cpg * sizeof(int32_t); 398 acg.cg_iusedoff = acg.cg_boff + 399 sblock.fs_cpg * sblock.fs_nrpos * sizeof(u_int16_t); 400 acg.cg_freeoff = acg.cg_iusedoff + howmany(sblock.fs_ipg, NBBY); 401 if (sblock.fs_contigsumsize <= 0) { 402 acg.cg_nextfreeoff = acg.cg_freeoff + 403 howmany(sblock.fs_cpg* sblock.fs_spc/ NSPF(&sblock), NBBY); 404 } else { 405 acg.cg_clustersumoff = acg.cg_freeoff + howmany 406 (sblock.fs_cpg * sblock.fs_spc / NSPF(&sblock), NBBY) - 407 sizeof(u_int32_t); 408 acg.cg_clustersumoff = 409 roundup(acg.cg_clustersumoff, sizeof(u_int32_t)); 410 acg.cg_clusteroff = acg.cg_clustersumoff + 411 (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t); 412 acg.cg_nextfreeoff = acg.cg_clusteroff + howmany 413 (sblock.fs_cpg * sblock.fs_spc / NSPB(&sblock), NBBY); 414 } 415 if (acg.cg_nextfreeoff-(long)(&acg.cg_firstfield) > sblock.fs_cgsize) { 416 /* 417 * XXX This should never happen as we would have had that panic 418 * already on filesystem creation 419 */ 420 errx(37, "panic: cylinder group too big"); 421 } 422 acg.cg_cs.cs_nifree += sblock.fs_ipg; 423 if (cylno == 0) 424 for (i = 0; (size_t)i < ROOTINO; i++) { 425 setbit(cg_inosused(&acg), i); 426 acg.cg_cs.cs_nifree--; 427 } 428 for (i = 0; i < sblock.fs_ipg / INOPF(&sblock); i += sblock.fs_frag) { 429 #ifdef FSIRAND 430 for (j = 0; j < sblock.fs_bsize / sizeof(struct dinode); j++) { 431 zino[j].di_gen = random(); 432 } 433 #endif 434 wtfs(fsbtodb(&sblock, cgimin(&sblock, cylno) + i), 435 sblock.fs_bsize, (char *)zino, fso, Nflag); 436 } 437 for (d = 0; d < dlower; d += sblock.fs_frag) { 438 blkno = d / sblock.fs_frag; 439 setblock(&sblock, cg_blksfree(&acg), blkno); 440 if 
(sblock.fs_contigsumsize > 0) { 441 setbit(cg_clustersfree(&acg), blkno); 442 } 443 acg.cg_cs.cs_nbfree++; 444 cg_blktot(&acg)[cbtocylno(&sblock, d)]++; 445 cg_blks(&sblock, &acg, cbtocylno(&sblock, d)) 446 [cbtorpos(&sblock, d)]++; 447 } 448 sblock.fs_dsize += dlower; 449 sblock.fs_dsize += acg.cg_ndblk - dupper; 450 if ((i = dupper % sblock.fs_frag)) { 451 acg.cg_frsum[sblock.fs_frag - i]++; 452 for (d = dupper + sblock.fs_frag - i; dupper < d; dupper++) { 453 setbit(cg_blksfree(&acg), dupper); 454 acg.cg_cs.cs_nffree++; 455 } 456 } 457 for (d = dupper; d + sblock.fs_frag <= dmax - cbase; ) { 458 blkno = d / sblock.fs_frag; 459 setblock(&sblock, cg_blksfree(&acg), blkno); 460 if (sblock.fs_contigsumsize > 0) { 461 setbit(cg_clustersfree(&acg), blkno); 462 } 463 acg.cg_cs.cs_nbfree++; 464 cg_blktot(&acg)[cbtocylno(&sblock, d)]++; 465 cg_blks(&sblock, &acg, cbtocylno(&sblock, d)) 466 [cbtorpos(&sblock, d)]++; 467 d += sblock.fs_frag; 468 } 469 if (d < dmax - cbase) { 470 acg.cg_frsum[dmax - cbase - d]++; 471 for (; d < dmax - cbase; d++) { 472 setbit(cg_blksfree(&acg), d); 473 acg.cg_cs.cs_nffree++; 474 } 475 } 476 if (sblock.fs_contigsumsize > 0) { 477 int32_t *sump = cg_clustersum(&acg); 478 u_char *mapp = cg_clustersfree(&acg); 479 int map = *mapp++; 480 int bit = 1; 481 int run = 0; 482 483 for (i = 0; i < acg.cg_nclusterblks; i++) { 484 if ((map & bit) != 0) { 485 run++; 486 } else if (run != 0) { 487 if (run > sblock.fs_contigsumsize) { 488 run = sblock.fs_contigsumsize; 489 } 490 sump[run]++; 491 run = 0; 492 } 493 if ((i & (NBBY - 1)) != (NBBY - 1)) { 494 bit <<= 1; 495 } else { 496 map = *mapp++; 497 bit = 1; 498 } 499 } 500 if (run != 0) { 501 if (run > sblock.fs_contigsumsize) { 502 run = sblock.fs_contigsumsize; 503 } 504 sump[run]++; 505 } 506 } 507 sblock.fs_cstotal.cs_ndir += acg.cg_cs.cs_ndir; 508 sblock.fs_cstotal.cs_nffree += acg.cg_cs.cs_nffree; 509 sblock.fs_cstotal.cs_nbfree += acg.cg_cs.cs_nbfree; 510 sblock.fs_cstotal.cs_nifree += acg.cg_cs.cs_nifree; 511 *cs = acg.cg_cs; 512 wtfs(fsbtodb(&sblock, cgtod(&sblock, cylno)), 513 sblock.fs_bsize, (char *)&acg, fso, Nflag); 514 DBG_DUMP_CG(&sblock, 515 "new cg", 516 &acg); 517 518 DBG_LEAVE; 519 return; 520 } 521 522 /* ******************************************************* frag_adjust ***** */ 523 /* 524 * Here we add or subtract (sign +1/-1) the available fragments in a given 525 * block to or from the fragment statistics. By subtracting before and adding 526 * after an operation on the free frag map we can easy update the fragment 527 * statistic, which seems to be otherwise an rather complex operation. 528 */ 529 static void 530 frag_adjust(daddr_t frag, int sign) 531 { 532 DBG_FUNC("frag_adjust") 533 int fragsize; 534 int f; 535 536 DBG_ENTER; 537 538 fragsize=0; 539 /* 540 * Here frag only needs to point to any fragment in the block we want 541 * to examine. 542 */ 543 for(f=rounddown(frag, sblock.fs_frag); 544 f<roundup(frag+1, sblock.fs_frag); 545 f++) { 546 /* 547 * Count contiguos free fragments. 548 */ 549 if(isset(cg_blksfree(&acg), f)) { 550 fragsize++; 551 } else { 552 if(fragsize && fragsize<sblock.fs_frag) { 553 /* 554 * We found something in between. 555 */ 556 acg.cg_frsum[fragsize]+=sign; 557 DBG_PRINT2("frag_adjust [%d]+=%d\n", 558 fragsize, 559 sign); 560 } 561 fragsize=0; 562 } 563 } 564 if(fragsize && fragsize<sblock.fs_frag) { 565 /* 566 * We found something. 
567 */ 568 acg.cg_frsum[fragsize]+=sign; 569 DBG_PRINT2("frag_adjust [%d]+=%d\n", 570 fragsize, 571 sign); 572 } 573 DBG_PRINT2("frag_adjust [[%d]]+=%d\n", 574 fragsize, 575 sign); 576 577 DBG_LEAVE; 578 return; 579 } 580 581 /* ******************************************************* cond_bl_upd ***** */ 582 /* 583 * Here we conditionally update a pointer to a fragment. We check for all 584 * relocated blocks if any of it's fragments is referenced by the current 585 * field, and update the pointer to the respective fragment in our new 586 * block. If we find a reference we write back the block immediately, 587 * as there is no easy way for our general block reading engine to figure 588 * out if a write back operation is needed. 589 */ 590 static void 591 cond_bl_upd(ufs_daddr_t *block, struct gfs_bpp *field, 592 enum pointer_source source, int fso, int Nflag) 593 { 594 DBG_FUNC("cond_bl_upd") 595 struct gfs_bpp *f; 596 char *src; 597 daddr_t dst=0; 598 599 DBG_ENTER; 600 601 f=field; 602 while(f->old) { /* for all old blocks */ 603 if(*block/sblock.fs_frag == f->old) { 604 /* 605 * The fragment is part of the block, so update. 606 */ 607 *block=(f->new*sblock.fs_frag+(*block%sblock.fs_frag)); 608 f->found++; 609 DBG_PRINT3("scg (%d->%d)[%d] reference updated\n", 610 f->old, 611 f->new, 612 *block%sblock.fs_frag); 613 614 /* Write the block back to disk immediately */ 615 switch (source) { 616 case GFS_PS_INODE: 617 src=ablk; 618 dst=in_src; 619 break; 620 case GFS_PS_IND_BLK_LVL1: 621 src=i1blk; 622 dst=i1_src; 623 break; 624 case GFS_PS_IND_BLK_LVL2: 625 src=i2blk; 626 dst=i2_src; 627 break; 628 case GFS_PS_IND_BLK_LVL3: 629 src=i3blk; 630 dst=i3_src; 631 break; 632 default: /* error */ 633 src=NULL; 634 break; 635 } 636 if(src) { 637 /* 638 * XXX If src is not of type inode we have to 639 * implement copy on write here in case 640 * of active snapshots. 641 */ 642 wtfs(dst, sblock.fs_bsize, src, fso, Nflag); 643 } 644 645 /* 646 * The same block can't be found again in this loop. 647 */ 648 break; 649 } 650 f++; 651 } 652 653 DBG_LEAVE; 654 return; 655 } 656 657 /* ************************************************************ updjcg ***** */ 658 /* 659 * Here we do all needed work for the former last cylinder group. It has to be 660 * changed in any case, even if the filesystem ended exactly on the end of 661 * this group, as there is some slightly inconsistent handling of the number 662 * of cylinders in the cylinder group. We start again by reading the cylinder 663 * group from disk. If the last block was not fully available, we first handle 664 * the missing fragments, then we handle all new full blocks in that file 665 * system and finally we handle the new last fragmented block in the file 666 * system. We again have to handle the fragment statistics rotational layout 667 * tables and cluster summary during all those operations. 668 */ 669 static void 670 updjcg(int cylno, time_t utime, int fsi, int fso, int Nflag) 671 { 672 DBG_FUNC("updjcg") 673 daddr_t cbase, dmax, dupper; 674 struct csum *cs; 675 int i,k; 676 int j=0; 677 678 DBG_ENTER; 679 680 /* 681 * Read the former last (joining) cylinder group from disk, and make 682 * a copy. 
683 */ 684 rdfs(fsbtodb(&osblock, cgtod(&osblock, cylno)), osblock.fs_cgsize, 685 (char *)&aocg, fsi); 686 DBG_PRINT0("jcg read\n"); 687 DBG_DUMP_CG(&sblock, 688 "old joining cg", 689 &aocg); 690 691 memcpy((void *)&cgun1, (void *)&cgun2, sizeof(cgun2)); 692 693 /* 694 * If the cylinder group had already it's new final size almost 695 * nothing is to be done ... except: 696 * For some reason the value of cg_ncyl in the last cylinder group has 697 * to be zero instead of fs_cpg. As this is now no longer the last 698 * cylinder group we have to change that value now to fs_cpg. 699 */ 700 701 if(cgbase(&osblock, cylno+1) == osblock.fs_size) { 702 acg.cg_ncyl=sblock.fs_cpg; 703 704 wtfs(fsbtodb(&sblock, cgtod(&sblock, cylno)), sblock.fs_cgsize, 705 (char *)&acg, fso, Nflag); 706 DBG_PRINT0("jcg written\n"); 707 DBG_DUMP_CG(&sblock, 708 "new joining cg", 709 &acg); 710 711 DBG_LEAVE; 712 return; 713 } 714 715 /* 716 * Set up some variables needed later. 717 */ 718 cbase = cgbase(&sblock, cylno); 719 dmax = cbase + sblock.fs_fpg; 720 if (dmax > sblock.fs_size) 721 dmax = sblock.fs_size; 722 dupper = cgdmin(&sblock, cylno) - cbase; 723 if (cylno == 0) { 724 dupper += howmany(sblock.fs_cssize, sblock.fs_fsize); 725 } 726 727 /* 728 * Set pointer to the cylinder summary for our cylinder group. 729 */ 730 cs = fscs + cylno; 731 732 /* 733 * Touch the cylinder group, update all fields in the cylinder group as 734 * needed, update the free space in the superblock. 735 */ 736 acg.cg_time = utime; 737 if (cylno == sblock.fs_ncg - 1) { 738 /* 739 * This is still the last cylinder group. 740 */ 741 acg.cg_ncyl = sblock.fs_ncyl % sblock.fs_cpg; 742 } else { 743 acg.cg_ncyl = sblock.fs_cpg; 744 } 745 DBG_PRINT4("jcg dbg: %d %u %d %u\n", 746 cylno, 747 sblock.fs_ncg, 748 acg.cg_ncyl, 749 sblock.fs_cpg); 750 acg.cg_ndblk = dmax - cbase; 751 sblock.fs_dsize += acg.cg_ndblk-aocg.cg_ndblk; 752 if (sblock.fs_contigsumsize > 0) { 753 acg.cg_nclusterblks = acg.cg_ndblk / sblock.fs_frag; 754 } 755 756 /* 757 * Now we have to update the free fragment bitmap for our new free 758 * space. There again we have to handle the fragmentation and also 759 * the rotational layout tables and the cluster summary. This is 760 * also done per fragment for the first new block if the old file 761 * system end was not on a block boundary, per fragment for the new 762 * last block if the new file system end is not on a block boundary, 763 * and per block for all space in between. 764 * 765 * Handle the first new block here if it was partially available 766 * before. 767 */ 768 if(osblock.fs_size % sblock.fs_frag) { 769 if(roundup(osblock.fs_size, sblock.fs_frag)<=sblock.fs_size) { 770 /* 771 * The new space is enough to fill at least this 772 * block 773 */ 774 j=0; 775 for(i=roundup(osblock.fs_size-cbase, sblock.fs_frag)-1; 776 i>=osblock.fs_size-cbase; 777 i--) { 778 setbit(cg_blksfree(&acg), i); 779 acg.cg_cs.cs_nffree++; 780 j++; 781 } 782 783 /* 784 * Check if the fragment just created could join an 785 * already existing fragment at the former end of the 786 * file system. 
787 */ 788 if(isblock(&sblock, cg_blksfree(&acg), 789 ((osblock.fs_size - cgbase(&sblock, cylno))/ 790 sblock.fs_frag))) { 791 /* 792 * The block is now completely available 793 */ 794 DBG_PRINT0("block was\n"); 795 acg.cg_frsum[osblock.fs_size%sblock.fs_frag]--; 796 acg.cg_cs.cs_nbfree++; 797 acg.cg_cs.cs_nffree-=sblock.fs_frag; 798 k=rounddown(osblock.fs_size-cbase, 799 sblock.fs_frag); 800 cg_blktot(&acg)[cbtocylno(&sblock, k)]++; 801 cg_blks(&sblock, &acg, cbtocylno(&sblock, k)) 802 [cbtorpos(&sblock, k)]++; 803 updclst((osblock.fs_size-cbase)/sblock.fs_frag); 804 } else { 805 /* 806 * Lets rejoin a possible partially growed 807 * fragment. 808 */ 809 k=0; 810 while(isset(cg_blksfree(&acg), i) && 811 (i>=rounddown(osblock.fs_size-cbase, 812 sblock.fs_frag))) { 813 i--; 814 k++; 815 } 816 if(k) { 817 acg.cg_frsum[k]--; 818 } 819 acg.cg_frsum[k+j]++; 820 } 821 } else { 822 /* 823 * We only grow by some fragments within this last 824 * block. 825 */ 826 for(i=sblock.fs_size-cbase-1; 827 i>=osblock.fs_size-cbase; 828 i--) { 829 setbit(cg_blksfree(&acg), i); 830 acg.cg_cs.cs_nffree++; 831 j++; 832 } 833 /* 834 * Lets rejoin a possible partially growed fragment. 835 */ 836 k=0; 837 while(isset(cg_blksfree(&acg), i) && 838 (i>=rounddown(osblock.fs_size-cbase, 839 sblock.fs_frag))) { 840 i--; 841 k++; 842 } 843 if(k) { 844 acg.cg_frsum[k]--; 845 } 846 acg.cg_frsum[k+j]++; 847 } 848 } 849 850 /* 851 * Handle all new complete blocks here. 852 */ 853 for(i=roundup(osblock.fs_size-cbase, sblock.fs_frag); 854 i+sblock.fs_frag<=dmax-cbase; /* XXX <= or only < ? */ 855 i+=sblock.fs_frag) { 856 j = i / sblock.fs_frag; 857 setblock(&sblock, cg_blksfree(&acg), j); 858 updclst(j); 859 acg.cg_cs.cs_nbfree++; 860 cg_blktot(&acg)[cbtocylno(&sblock, i)]++; 861 cg_blks(&sblock, &acg, cbtocylno(&sblock, i)) 862 [cbtorpos(&sblock, i)]++; 863 } 864 865 /* 866 * Handle the last new block if there are stll some new fragments left. 867 * Here we don't have to bother about the cluster summary or the even 868 * the rotational layout table. 869 */ 870 if (i < (dmax - cbase)) { 871 acg.cg_frsum[dmax - cbase - i]++; 872 for (; i < dmax - cbase; i++) { 873 setbit(cg_blksfree(&acg), i); 874 acg.cg_cs.cs_nffree++; 875 } 876 } 877 878 sblock.fs_cstotal.cs_nffree += 879 (acg.cg_cs.cs_nffree - aocg.cg_cs.cs_nffree); 880 sblock.fs_cstotal.cs_nbfree += 881 (acg.cg_cs.cs_nbfree - aocg.cg_cs.cs_nbfree); 882 /* 883 * The following statistics are not changed here: 884 * sblock.fs_cstotal.cs_ndir 885 * sblock.fs_cstotal.cs_nifree 886 * As the statistics for this cylinder group are ready, copy it to 887 * the summary information array. 888 */ 889 *cs = acg.cg_cs; 890 891 /* 892 * Write the updated "joining" cylinder group back to disk. 893 */ 894 wtfs(fsbtodb(&sblock, cgtod(&sblock, cylno)), sblock.fs_cgsize, 895 (char *)&acg, fso, Nflag); 896 DBG_PRINT0("jcg written\n"); 897 DBG_DUMP_CG(&sblock, 898 "new joining cg", 899 &acg); 900 901 DBG_LEAVE; 902 return; 903 } 904 905 /* ********************************************************** updcsloc ***** */ 906 /* 907 * Here we update the location of the cylinder summary. We have two possible 908 * ways of growing the cylinder summary. 909 * (1) We can try to grow the summary in the current location, and relocate 910 * possibly used blocks within the current cylinder group. 911 * (2) Alternatively we can relocate the whole cylinder summary to the first 912 * new completely empty cylinder group. 
Once the cylinder summary is no 913 * longer in the beginning of the first cylinder group you should never 914 * use a version of fsck which is not aware of the possibility to have 915 * this structure in a non standard place. 916 * Option (1) is considered to be less intrusive to the structure of the file- 917 * system. So we try to stick to that whenever possible. If there is not enough 918 * space in the cylinder group containing the cylinder summary we have to use 919 * method (2). In case of active snapshots in the filesystem we probably can 920 * completely avoid implementing copy on write if we stick to method (2) only. 921 */ 922 static void 923 updcsloc(time_t utime, int fsi, int fso, int Nflag) 924 { 925 DBG_FUNC("updcsloc") 926 struct csum *cs; 927 int ocscg, ncscg; 928 int blocks; 929 daddr_t cbase, dupper, odupper, d, f, g; 930 int ind; 931 int cylno, inc; 932 struct gfs_bpp *bp; 933 int i, l; 934 int lcs=0; 935 int block; 936 937 DBG_ENTER; 938 939 if(howmany(sblock.fs_cssize, sblock.fs_fsize) == 940 howmany(osblock.fs_cssize, osblock.fs_fsize)) { 941 /* 942 * No new fragment needed. 943 */ 944 DBG_LEAVE; 945 return; 946 } 947 ocscg=dtog(&osblock, osblock.fs_csaddr); 948 cs=fscs+ocscg; 949 blocks = 1+howmany(sblock.fs_cssize, sblock.fs_bsize)- 950 howmany(osblock.fs_cssize, osblock.fs_bsize); 951 952 /* 953 * Read original cylinder group from disk, and make a copy. 954 */ 955 rdfs(fsbtodb(&osblock, cgtod(&osblock, ocscg)), osblock.fs_cgsize, 956 (char *)&aocg, fsi); 957 DBG_PRINT0("oscg read\n"); 958 DBG_DUMP_CG(&sblock, 959 "old summary cg", 960 &aocg); 961 962 memcpy((void *)&cgun1, (void *)&cgun2, sizeof(cgun2)); 963 964 /* 965 * Touch the cylinder group, set up local variables needed later 966 * and update the superblock. 967 */ 968 acg.cg_time = utime; 969 970 /* 971 * XXX In the case of having active snapshots we may need much more 972 * blocks for the copy on write. We need each block twice, and 973 * also up to 8*3 blocks for indirect blocks for all possible 974 * references. 975 */ 976 if(/*((int)sblock.fs_time&0x3)>0||*/ cs->cs_nbfree < blocks) { 977 /* 978 * There is not enough space in the old cylinder group to 979 * relocate all blocks as needed, so we relocate the whole 980 * cylinder group summary to a new group. We try to use the 981 * first complete new cylinder group just created. Within the 982 * cylinder group we allign the area immediately after the 983 * cylinder group information location in order to be as 984 * close as possible to the original implementation of ffs. 985 * 986 * First we have to make sure we'll find enough space in the 987 * new cylinder group. If not, then we currently give up. 988 * We start with freeing everything which was used by the 989 * fragments of the old cylinder summary in the current group. 990 * Now we write back the group meta data, read in the needed 991 * meta data from the new cylinder group, and start allocating 992 * within that group. Here we can assume, the group to be 993 * completely empty. Which makes the handling of fragments and 994 * clusters a lot easier. 995 */ 996 DBG_TRC; 997 if(sblock.fs_ncg-osblock.fs_ncg < 2) { 998 errx(2, "panic: not enough space"); 999 } 1000 1001 /* 1002 * Point "d" to the first fragment not used by the cylinder 1003 * summary. 1004 */ 1005 d=osblock.fs_csaddr+(osblock.fs_cssize/osblock.fs_fsize); 1006 1007 /* 1008 * Set up last cluster size ("lcs") already here. Calculate 1009 * the size for the trailing cluster just behind where "d" 1010 * points to. 
1011 */ 1012 if(sblock.fs_contigsumsize > 0) { 1013 for(block=howmany(d%sblock.fs_fpg, sblock.fs_frag), 1014 lcs=0; lcs<sblock.fs_contigsumsize; 1015 block++, lcs++) { 1016 if(isclr(cg_clustersfree(&acg), block)){ 1017 break; 1018 } 1019 } 1020 } 1021 1022 /* 1023 * Point "d" to the last frag used by the cylinder summary. 1024 */ 1025 d--; 1026 1027 DBG_PRINT1("d=%d\n", 1028 d); 1029 if((d+1)%sblock.fs_frag) { 1030 /* 1031 * The end of the cylinder summary is not a complete 1032 * block. 1033 */ 1034 DBG_TRC; 1035 frag_adjust(d%sblock.fs_fpg, -1); 1036 for(; (d+1)%sblock.fs_frag; d--) { 1037 DBG_PRINT1("d=%d\n", 1038 d); 1039 setbit(cg_blksfree(&acg), d%sblock.fs_fpg); 1040 acg.cg_cs.cs_nffree++; 1041 sblock.fs_cstotal.cs_nffree++; 1042 } 1043 /* 1044 * Point "d" to the last fragment of the last 1045 * (incomplete) block of the clinder summary. 1046 */ 1047 d++; 1048 frag_adjust(d%sblock.fs_fpg, 1); 1049 1050 if(isblock(&sblock, cg_blksfree(&acg), 1051 (d%sblock.fs_fpg)/sblock.fs_frag)) { 1052 DBG_PRINT1("d=%d\n", 1053 d); 1054 acg.cg_cs.cs_nffree-=sblock.fs_frag; 1055 acg.cg_cs.cs_nbfree++; 1056 sblock.fs_cstotal.cs_nffree-=sblock.fs_frag; 1057 sblock.fs_cstotal.cs_nbfree++; 1058 cg_blktot(&acg)[cbtocylno(&sblock, 1059 d%sblock.fs_fpg)]++; 1060 cg_blks(&sblock, &acg, cbtocylno(&sblock, 1061 d%sblock.fs_fpg))[cbtorpos(&sblock, 1062 d%sblock.fs_fpg)]++; 1063 if(sblock.fs_contigsumsize > 0) { 1064 setbit(cg_clustersfree(&acg), 1065 (d%sblock.fs_fpg)/sblock.fs_frag); 1066 if(lcs < sblock.fs_contigsumsize) { 1067 if(lcs) { 1068 cg_clustersum(&acg) 1069 [lcs]--; 1070 } 1071 lcs++; 1072 cg_clustersum(&acg)[lcs]++; 1073 } 1074 } 1075 } 1076 /* 1077 * Point "d" to the first fragment of the block before 1078 * the last incomplete block. 1079 */ 1080 d--; 1081 } 1082 1083 DBG_PRINT1("d=%d\n", 1084 d); 1085 for(d=rounddown(d, sblock.fs_frag); d >= osblock.fs_csaddr; 1086 d-=sblock.fs_frag) { 1087 DBG_TRC; 1088 DBG_PRINT1("d=%d\n", 1089 d); 1090 setblock(&sblock, cg_blksfree(&acg), 1091 (d%sblock.fs_fpg)/sblock.fs_frag); 1092 acg.cg_cs.cs_nbfree++; 1093 sblock.fs_cstotal.cs_nbfree++; 1094 cg_blktot(&acg)[cbtocylno(&sblock, d%sblock.fs_fpg)]++; 1095 cg_blks(&sblock, &acg, cbtocylno(&sblock, 1096 d%sblock.fs_fpg))[cbtorpos(&sblock, 1097 d%sblock.fs_fpg)]++; 1098 if(sblock.fs_contigsumsize > 0) { 1099 setbit(cg_clustersfree(&acg), 1100 (d%sblock.fs_fpg)/sblock.fs_frag); 1101 /* 1102 * The last cluster size is already set up. 1103 */ 1104 if(lcs < sblock.fs_contigsumsize) { 1105 if(lcs) { 1106 cg_clustersum(&acg)[lcs]--; 1107 } 1108 lcs++; 1109 cg_clustersum(&acg)[lcs]++; 1110 } 1111 } 1112 } 1113 *cs = acg.cg_cs; 1114 1115 /* 1116 * Now write the former cylinder group containing the cylinder 1117 * summary back to disk. 1118 */ 1119 wtfs(fsbtodb(&sblock, cgtod(&sblock, ocscg)), sblock.fs_cgsize, 1120 (char *)&acg, fso, Nflag); 1121 DBG_PRINT0("oscg written\n"); 1122 DBG_DUMP_CG(&sblock, 1123 "old summary cg", 1124 &acg); 1125 1126 /* 1127 * Find the beginning of the new cylinder group containing the 1128 * cylinder summary. 1129 */ 1130 sblock.fs_csaddr=cgdmin(&sblock, osblock.fs_ncg); 1131 ncscg=dtog(&sblock, sblock.fs_csaddr); 1132 cs=fscs+ncscg; 1133 1134 /* 1135 * Read the future cylinder group containing the cylinder 1136 * summary from disk, and make a copy. 
1137 */ 1138 rdfs(fsbtodb(&sblock, cgtod(&sblock, ncscg)), 1139 sblock.fs_cgsize, (char *)&aocg, fsi); 1140 DBG_PRINT0("nscg read\n"); 1141 DBG_DUMP_CG(&sblock, 1142 "new summary cg", 1143 &aocg); 1144 1145 memcpy((void *)&cgun1, (void *)&cgun2, sizeof(cgun2)); 1146 1147 /* 1148 * Allocate all complete blocks used by the new cylinder 1149 * summary. 1150 */ 1151 for(d=sblock.fs_csaddr; d+sblock.fs_frag <= 1152 sblock.fs_csaddr+(sblock.fs_cssize/sblock.fs_fsize); 1153 d+=sblock.fs_frag) { 1154 clrblock(&sblock, cg_blksfree(&acg), 1155 (d%sblock.fs_fpg)/sblock.fs_frag); 1156 acg.cg_cs.cs_nbfree--; 1157 sblock.fs_cstotal.cs_nbfree--; 1158 cg_blktot(&acg)[cbtocylno(&sblock, d%sblock.fs_fpg)]--; 1159 cg_blks(&sblock, &acg, cbtocylno(&sblock, 1160 d%sblock.fs_fpg))[cbtorpos(&sblock, 1161 d%sblock.fs_fpg)]--; 1162 if(sblock.fs_contigsumsize > 0) { 1163 clrbit(cg_clustersfree(&acg), 1164 (d%sblock.fs_fpg)/sblock.fs_frag); 1165 } 1166 } 1167 1168 /* 1169 * Allocate all fragments used by the cylinder summary in the 1170 * last block. 1171 */ 1172 if(d<sblock.fs_csaddr+(sblock.fs_cssize/sblock.fs_fsize)) { 1173 for(; d-sblock.fs_csaddr< 1174 sblock.fs_cssize/sblock.fs_fsize; 1175 d++) { 1176 clrbit(cg_blksfree(&acg), d%sblock.fs_fpg); 1177 acg.cg_cs.cs_nffree--; 1178 sblock.fs_cstotal.cs_nffree--; 1179 } 1180 acg.cg_cs.cs_nbfree--; 1181 acg.cg_cs.cs_nffree+=sblock.fs_frag; 1182 sblock.fs_cstotal.cs_nbfree--; 1183 sblock.fs_cstotal.cs_nffree+=sblock.fs_frag; 1184 cg_blktot(&acg)[cbtocylno(&sblock, d%sblock.fs_fpg)]--; 1185 cg_blks(&sblock, &acg, cbtocylno(&sblock, 1186 d%sblock.fs_fpg))[cbtorpos(&sblock, 1187 d%sblock.fs_fpg)]--; 1188 if(sblock.fs_contigsumsize > 0) { 1189 clrbit(cg_clustersfree(&acg), 1190 (d%sblock.fs_fpg)/sblock.fs_frag); 1191 } 1192 1193 frag_adjust(d%sblock.fs_fpg, +1); 1194 } 1195 /* 1196 * XXX Handle the cluster statistics here in the case this 1197 * cylinder group is now almost full, and the remaining 1198 * space is less then the maximum cluster size. This is 1199 * probably not needed, as you would hardly find a file 1200 * system which has only MAXCSBUFS+FS_MAXCONTIG of free 1201 * space right behind the cylinder group information in 1202 * any new cylinder group. 1203 */ 1204 1205 /* 1206 * Update our statistics in the cylinder summary. 1207 */ 1208 *cs = acg.cg_cs; 1209 1210 /* 1211 * Write the new cylinder group containing the cylinder summary 1212 * back to disk. 1213 */ 1214 wtfs(fsbtodb(&sblock, cgtod(&sblock, ncscg)), sblock.fs_cgsize, 1215 (char *)&acg, fso, Nflag); 1216 DBG_PRINT0("nscg written\n"); 1217 DBG_DUMP_CG(&sblock, 1218 "new summary cg", 1219 &acg); 1220 1221 DBG_LEAVE; 1222 return; 1223 } 1224 /* 1225 * We have got enough of space in the current cylinder group, so we 1226 * can relocate just a few blocks, and let the summary information 1227 * grow in place where it is right now. 1228 */ 1229 DBG_TRC; 1230 1231 cbase = cgbase(&osblock, ocscg); /* old and new are equal */ 1232 dupper = sblock.fs_csaddr - cbase + 1233 howmany(sblock.fs_cssize, sblock.fs_fsize); 1234 odupper = osblock.fs_csaddr - cbase + 1235 howmany(osblock.fs_cssize, osblock.fs_fsize); 1236 1237 sblock.fs_dsize -= dupper-odupper; 1238 1239 /* 1240 * Allocate the space for the array of blocks to be relocated. 
1241 */ 1242 bp=(struct gfs_bpp *)malloc(((dupper-odupper)/sblock.fs_frag+2)* 1243 sizeof(struct gfs_bpp)); 1244 if(bp == NULL) { 1245 errx(1, "malloc failed"); 1246 } 1247 memset((char *)bp, 0, sizeof(struct gfs_bpp)); 1248 1249 /* 1250 * Lock all new frags needed for the cylinder group summary. This is 1251 * done per fragment in the first and last block of the new required 1252 * area, and per block for all other blocks. 1253 * 1254 * Handle the first new block here (but only if some fragments where 1255 * already used for the cylinder summary). 1256 */ 1257 ind=0; 1258 frag_adjust(odupper, -1); 1259 for(d=odupper; ((d<dupper)&&(d%sblock.fs_frag)); d++) { 1260 DBG_PRINT1("scg first frag check loop d=%d\n", 1261 d); 1262 if(isclr(cg_blksfree(&acg), d)) { 1263 if (!ind) { 1264 bp[ind].old=d/sblock.fs_frag; 1265 bp[ind].flags|=GFS_FL_FIRST; 1266 if(roundup(d, sblock.fs_frag) >= dupper) { 1267 bp[ind].flags|=GFS_FL_LAST; 1268 } 1269 ind++; 1270 } 1271 } else { 1272 clrbit(cg_blksfree(&acg), d); 1273 acg.cg_cs.cs_nffree--; 1274 sblock.fs_cstotal.cs_nffree--; 1275 } 1276 /* 1277 * No cluster handling is needed here, as there was at least 1278 * one fragment in use by the cylinder summary in the old 1279 * file system. 1280 * No block-free counter handling here as this block was not 1281 * a free block. 1282 */ 1283 } 1284 frag_adjust(odupper, 1); 1285 1286 /* 1287 * Handle all needed complete blocks here. 1288 */ 1289 for(; d+sblock.fs_frag<=dupper; d+=sblock.fs_frag) { 1290 DBG_PRINT1("scg block check loop d=%d\n", 1291 d); 1292 if(!isblock(&sblock, cg_blksfree(&acg), d/sblock.fs_frag)) { 1293 for(f=d; f<d+sblock.fs_frag; f++) { 1294 if(isset(cg_blksfree(&aocg), f)) { 1295 acg.cg_cs.cs_nffree--; 1296 sblock.fs_cstotal.cs_nffree--; 1297 } 1298 } 1299 clrblock(&sblock, cg_blksfree(&acg), d/sblock.fs_frag); 1300 bp[ind].old=d/sblock.fs_frag; 1301 ind++; 1302 } else { 1303 clrblock(&sblock, cg_blksfree(&acg), d/sblock.fs_frag); 1304 acg.cg_cs.cs_nbfree--; 1305 sblock.fs_cstotal.cs_nbfree--; 1306 cg_blktot(&acg)[cbtocylno(&sblock, d)]--; 1307 cg_blks(&sblock, &acg, cbtocylno(&sblock, d)) 1308 [cbtorpos(&sblock, d)]--; 1309 if(sblock.fs_contigsumsize > 0) { 1310 clrbit(cg_clustersfree(&acg), d/sblock.fs_frag); 1311 for(lcs=0, l=(d/sblock.fs_frag)+1; 1312 lcs<sblock.fs_contigsumsize; 1313 l++, lcs++ ) { 1314 if(isclr(cg_clustersfree(&acg),l)){ 1315 break; 1316 } 1317 } 1318 if(lcs < sblock.fs_contigsumsize) { 1319 cg_clustersum(&acg)[lcs+1]--; 1320 if(lcs) { 1321 cg_clustersum(&acg)[lcs]++; 1322 } 1323 } 1324 } 1325 } 1326 /* 1327 * No fragment counter handling is needed here, as this finally 1328 * doesn't change after the relocation. 1329 */ 1330 } 1331 1332 /* 1333 * Handle all fragments needed in the last new affected block. 
1334 */ 1335 if(d<dupper) { 1336 frag_adjust(dupper-1, -1); 1337 1338 if(isblock(&sblock, cg_blksfree(&acg), d/sblock.fs_frag)) { 1339 acg.cg_cs.cs_nbfree--; 1340 sblock.fs_cstotal.cs_nbfree--; 1341 acg.cg_cs.cs_nffree+=sblock.fs_frag; 1342 sblock.fs_cstotal.cs_nffree+=sblock.fs_frag; 1343 cg_blktot(&acg)[cbtocylno(&sblock, d)]--; 1344 cg_blks(&sblock, &acg, cbtocylno(&sblock, d)) 1345 [cbtorpos(&sblock, d)]--; 1346 if(sblock.fs_contigsumsize > 0) { 1347 clrbit(cg_clustersfree(&acg), d/sblock.fs_frag); 1348 for(lcs=0, l=(d/sblock.fs_frag)+1; 1349 lcs<sblock.fs_contigsumsize; 1350 l++, lcs++ ) { 1351 if(isclr(cg_clustersfree(&acg),l)){ 1352 break; 1353 } 1354 } 1355 if(lcs < sblock.fs_contigsumsize) { 1356 cg_clustersum(&acg)[lcs+1]--; 1357 if(lcs) { 1358 cg_clustersum(&acg)[lcs]++; 1359 } 1360 } 1361 } 1362 } 1363 1364 for(; d<dupper; d++) { 1365 DBG_PRINT1("scg second frag check loop d=%d\n", 1366 d); 1367 if(isclr(cg_blksfree(&acg), d)) { 1368 bp[ind].old=d/sblock.fs_frag; 1369 bp[ind].flags|=GFS_FL_LAST; 1370 } else { 1371 clrbit(cg_blksfree(&acg), d); 1372 acg.cg_cs.cs_nffree--; 1373 sblock.fs_cstotal.cs_nffree--; 1374 } 1375 } 1376 if(bp[ind].flags & GFS_FL_LAST) { /* we have to advance here */ 1377 ind++; 1378 } 1379 frag_adjust(dupper-1, 1); 1380 } 1381 1382 /* 1383 * If we found a block to relocate just do so. 1384 */ 1385 if(ind) { 1386 for(i=0; i<ind; i++) { 1387 if(!bp[i].old) { /* no more blocks listed */ 1388 /* 1389 * XXX A relative blocknumber should not be 1390 * zero, which is not explicitly 1391 * guaranteed by our code. 1392 */ 1393 break; 1394 } 1395 /* 1396 * Allocate a complete block in the same (current) 1397 * cylinder group. 1398 */ 1399 bp[i].new=alloc()/sblock.fs_frag; 1400 1401 /* 1402 * There is no frag_adjust() needed for the new block 1403 * as it will have no fragments yet :-). 1404 */ 1405 for(f=bp[i].old*sblock.fs_frag, 1406 g=bp[i].new*sblock.fs_frag; 1407 f<(bp[i].old+1)*sblock.fs_frag; 1408 f++, g++) { 1409 if(isset(cg_blksfree(&aocg), f)) { 1410 setbit(cg_blksfree(&acg), g); 1411 acg.cg_cs.cs_nffree++; 1412 sblock.fs_cstotal.cs_nffree++; 1413 } 1414 } 1415 1416 /* 1417 * Special handling is required if this was the first 1418 * block. We have to consider the fragments which were 1419 * used by the cylinder summary in the original block 1420 * which re to be free in the copy of our block. We 1421 * have to be careful if this first block happens to 1422 * be also the last block to be relocated. 1423 */ 1424 if(bp[i].flags & GFS_FL_FIRST) { 1425 for(f=bp[i].old*sblock.fs_frag, 1426 g=bp[i].new*sblock.fs_frag; 1427 f<odupper; 1428 f++, g++) { 1429 setbit(cg_blksfree(&acg), g); 1430 acg.cg_cs.cs_nffree++; 1431 sblock.fs_cstotal.cs_nffree++; 1432 } 1433 if(!(bp[i].flags & GFS_FL_LAST)) { 1434 frag_adjust(bp[i].new*sblock.fs_frag,1); 1435 } 1436 1437 } 1438 1439 /* 1440 * Special handling is required if this is the last 1441 * block to be relocated. 1442 */ 1443 if(bp[i].flags & GFS_FL_LAST) { 1444 frag_adjust(bp[i].new*sblock.fs_frag, 1); 1445 frag_adjust(bp[i].old*sblock.fs_frag, -1); 1446 for(f=dupper; 1447 f<roundup(dupper, sblock.fs_frag); 1448 f++) { 1449 if(isclr(cg_blksfree(&acg), f)) { 1450 setbit(cg_blksfree(&acg), f); 1451 acg.cg_cs.cs_nffree++; 1452 sblock.fs_cstotal.cs_nffree++; 1453 } 1454 } 1455 frag_adjust(bp[i].old*sblock.fs_frag, 1); 1456 } 1457 1458 /* 1459 * !!! Attach the cylindergroup offset here. 1460 */ 1461 bp[i].old+=cbase/sblock.fs_frag; 1462 bp[i].new+=cbase/sblock.fs_frag; 1463 1464 /* 1465 * Copy the content of the block. 
1466 */ 1467 /* 1468 * XXX Here we will have to implement a copy on write 1469 * in the case we have any active snapshots. 1470 */ 1471 rdfs(fsbtodb(&sblock, bp[i].old*sblock.fs_frag), 1472 sblock.fs_bsize, (char *)&ablk, fsi); 1473 wtfs(fsbtodb(&sblock, bp[i].new*sblock.fs_frag), 1474 sblock.fs_bsize, (char *)&ablk, fso, Nflag); 1475 DBG_DUMP_HEX(&sblock, 1476 "copied full block", 1477 (unsigned char *)&ablk); 1478 1479 DBG_PRINT2("scg (%d->%d) block relocated\n", 1480 bp[i].old, 1481 bp[i].new); 1482 } 1483 1484 /* 1485 * Now we have to update all references to any fragment which 1486 * belongs to any block relocated. We iterate now over all 1487 * cylinder groups, within those over all non zero length 1488 * inodes. 1489 */ 1490 for(cylno=0; cylno<osblock.fs_ncg; cylno++) { 1491 DBG_PRINT1("scg doing cg (%d)\n", 1492 cylno); 1493 for(inc=osblock.fs_ipg-1 ; inc>=0 ; inc--) { 1494 updrefs(cylno, (ino_t)inc, bp, fsi, fso, Nflag); 1495 } 1496 } 1497 1498 /* 1499 * All inodes are checked, now make sure the number of 1500 * references found make sense. 1501 */ 1502 for(i=0; i<ind; i++) { 1503 if(!bp[i].found || (bp[i].found>sblock.fs_frag)) { 1504 warnx("error: %d refs found for block %d.", 1505 bp[i].found, bp[i].old); 1506 } 1507 1508 } 1509 } 1510 /* 1511 * The following statistics are not changed here: 1512 * sblock.fs_cstotal.cs_ndir 1513 * sblock.fs_cstotal.cs_nifree 1514 * The following statistics were already updated on the fly: 1515 * sblock.fs_cstotal.cs_nffree 1516 * sblock.fs_cstotal.cs_nbfree 1517 * As the statistics for this cylinder group are ready, copy it to 1518 * the summary information array. 1519 */ 1520 1521 *cs = acg.cg_cs; 1522 1523 /* 1524 * Write summary cylinder group back to disk. 1525 */ 1526 wtfs(fsbtodb(&sblock, cgtod(&sblock, ocscg)), sblock.fs_cgsize, 1527 (char *)&acg, fso, Nflag); 1528 DBG_PRINT0("scg written\n"); 1529 DBG_DUMP_CG(&sblock, 1530 "new summary cg", 1531 &acg); 1532 1533 DBG_LEAVE; 1534 return; 1535 } 1536 1537 /* ************************************************************** rdfs ***** */ 1538 /* 1539 * Here we read some block(s) from disk. 1540 */ 1541 static void 1542 rdfs(daddr_t bno, int size, char *bf, int fsi) 1543 { 1544 DBG_FUNC("rdfs") 1545 int n; 1546 1547 DBG_ENTER; 1548 1549 if (lseek(fsi, (off_t)bno * DEV_BSIZE, 0) < 0) { 1550 err(33, "rdfs: seek error: %ld", (long)bno); 1551 } 1552 n = read(fsi, bf, (size_t)size); 1553 if (n != size) { 1554 err(34, "rdfs: read error: %ld", (long)bno); 1555 } 1556 1557 DBG_LEAVE; 1558 return; 1559 } 1560 1561 /* ************************************************************** wtfs ***** */ 1562 /* 1563 * Here we write some block(s) to disk. 1564 */ 1565 static void 1566 wtfs(daddr_t bno, int size, char *bf, int fso, int Nflag) 1567 { 1568 DBG_FUNC("wtfs") 1569 int n; 1570 1571 DBG_ENTER; 1572 1573 if (Nflag) { 1574 DBG_LEAVE; 1575 return; 1576 } 1577 if (lseek(fso, (off_t)bno * DEV_BSIZE, SEEK_SET) < 0) { 1578 err(35, "wtfs: seek error: %ld", (long)bno); 1579 } 1580 n = write(fso, bf, (size_t)size); 1581 if (n != size) { 1582 err(36, "wtfs: write error: %ld", (long)bno); 1583 } 1584 1585 DBG_LEAVE; 1586 return; 1587 } 1588 1589 /* ************************************************************* alloc ***** */ 1590 /* 1591 * Here we allocate a free block in the current cylinder group. It is assumed, 1592 * that acg contains the current cylinder group. As we may take a block from 1593 * somewhere in the filesystem we have to handle cluster summary here. 
1594 */ 1595 static daddr_t 1596 alloc(void) 1597 { 1598 DBG_FUNC("alloc") 1599 daddr_t d, blkno; 1600 int lcs1, lcs2; 1601 int l; 1602 int csmin, csmax; 1603 int dlower, dupper, dmax; 1604 1605 DBG_ENTER; 1606 1607 if (acg.cg_magic != CG_MAGIC) { 1608 warnx("acg: bad magic number"); 1609 DBG_LEAVE; 1610 return (0); 1611 } 1612 if (acg.cg_cs.cs_nbfree == 0) { 1613 warnx("error: cylinder group ran out of space"); 1614 DBG_LEAVE; 1615 return (0); 1616 } 1617 /* 1618 * We start seeking for free blocks only from the space available after 1619 * the end of the new grown cylinder summary. Otherwise we allocate a 1620 * block here which we have to relocate a couple of seconds later again 1621 * again, and we are not prepared to to this anyway. 1622 */ 1623 blkno=-1; 1624 dlower=cgsblock(&sblock, acg.cg_cgx)-cgbase(&sblock, acg.cg_cgx); 1625 dupper=cgdmin(&sblock, acg.cg_cgx)-cgbase(&sblock, acg.cg_cgx); 1626 dmax=cgbase(&sblock, acg.cg_cgx)+sblock.fs_fpg; 1627 if (dmax > sblock.fs_size) { 1628 dmax = sblock.fs_size; 1629 } 1630 dmax-=cgbase(&sblock, acg.cg_cgx); /* retransform into cg */ 1631 csmin=sblock.fs_csaddr-cgbase(&sblock, acg.cg_cgx); 1632 csmax=csmin+howmany(sblock.fs_cssize, sblock.fs_fsize); 1633 DBG_PRINT3("seek range: dl=%d, du=%d, dm=%d\n", 1634 dlower, 1635 dupper, 1636 dmax); 1637 DBG_PRINT2("range cont: csmin=%d, csmax=%d\n", 1638 csmin, 1639 csmax); 1640 1641 for(d=0; (d<dlower && blkno==-1); d+=sblock.fs_frag) { 1642 if(d>=csmin && d<=csmax) { 1643 continue; 1644 } 1645 if(isblock(&sblock, cg_blksfree(&acg), fragstoblks(&sblock, 1646 d))) { 1647 blkno = fragstoblks(&sblock, d);/* Yeah found a block */ 1648 break; 1649 } 1650 } 1651 for(d=dupper; (d<dmax && blkno==-1); d+=sblock.fs_frag) { 1652 if(d>=csmin && d<=csmax) { 1653 continue; 1654 } 1655 if(isblock(&sblock, cg_blksfree(&acg), fragstoblks(&sblock, 1656 d))) { 1657 blkno = fragstoblks(&sblock, d);/* Yeah found a block */ 1658 break; 1659 } 1660 } 1661 if(blkno==-1) { 1662 warnx("internal error: couldn't find promised block in cg"); 1663 DBG_LEAVE; 1664 return (0); 1665 } 1666 1667 /* 1668 * This is needed if the block was found already in the first loop. 1669 */ 1670 d=blkstofrags(&sblock, blkno); 1671 1672 clrblock(&sblock, cg_blksfree(&acg), blkno); 1673 if (sblock.fs_contigsumsize > 0) { 1674 /* 1675 * Handle the cluster allocation bitmap. 1676 */ 1677 clrbit(cg_clustersfree(&acg), blkno); 1678 /* 1679 * We possibly have split a cluster here, so we have to do 1680 * recalculate the sizes of the remaining cluster halves now, 1681 * and use them for updating the cluster summary information. 1682 * 1683 * Lets start with the blocks before our allocated block ... 1684 */ 1685 for(lcs1=0, l=blkno-1; lcs1<sblock.fs_contigsumsize; 1686 l--, lcs1++ ) { 1687 if(isclr(cg_clustersfree(&acg),l)){ 1688 break; 1689 } 1690 } 1691 /* 1692 * ... and continue with the blocks right after our allocated 1693 * block. 1694 */ 1695 for(lcs2=0, l=blkno+1; lcs2<sblock.fs_contigsumsize; 1696 l++, lcs2++ ) { 1697 if(isclr(cg_clustersfree(&acg),l)){ 1698 break; 1699 } 1700 } 1701 1702 /* 1703 * Now update all counters. 1704 */ 1705 cg_clustersum(&acg)[MIN(lcs1+lcs2+1,sblock.fs_contigsumsize)]--; 1706 if(lcs1) { 1707 cg_clustersum(&acg)[lcs1]++; 1708 } 1709 if(lcs2) { 1710 cg_clustersum(&acg)[lcs2]++; 1711 } 1712 } 1713 /* 1714 * Update all statistics based on blocks. 
1715 */ 1716 acg.cg_cs.cs_nbfree--; 1717 sblock.fs_cstotal.cs_nbfree--; 1718 cg_blktot(&acg)[cbtocylno(&sblock, d)]--; 1719 cg_blks(&sblock, &acg, cbtocylno(&sblock, d))[cbtorpos(&sblock, d)]--; 1720 1721 DBG_LEAVE; 1722 return (d); 1723 } 1724 1725 /* *********************************************************** isblock ***** */ 1726 /* 1727 * Here we check if all frags of a block are free. For more details again 1728 * please see the source of newfs(8), as this function is taken over almost 1729 * unchanged. 1730 */ 1731 static int 1732 isblock(struct fs *fs, unsigned char *cp, int h) 1733 { 1734 DBG_FUNC("isblock") 1735 unsigned char mask; 1736 1737 DBG_ENTER; 1738 1739 switch (fs->fs_frag) { 1740 case 8: 1741 DBG_LEAVE; 1742 return (cp[h] == 0xff); 1743 case 4: 1744 mask = 0x0f << ((h & 0x1) << 2); 1745 DBG_LEAVE; 1746 return ((cp[h >> 1] & mask) == mask); 1747 case 2: 1748 mask = 0x03 << ((h & 0x3) << 1); 1749 DBG_LEAVE; 1750 return ((cp[h >> 2] & mask) == mask); 1751 case 1: 1752 mask = 0x01 << (h & 0x7); 1753 DBG_LEAVE; 1754 return ((cp[h >> 3] & mask) == mask); 1755 default: 1756 fprintf(stderr, "isblock bad fs_frag %d\n", fs->fs_frag); 1757 DBG_LEAVE; 1758 return (0); 1759 } 1760 } 1761 1762 /* ********************************************************** clrblock ***** */ 1763 /* 1764 * Here we allocate a complete block in the block map. For more details again 1765 * please see the source of newfs(8), as this function is taken over almost 1766 * unchanged. 1767 */ 1768 static void 1769 clrblock(struct fs *fs, unsigned char *cp, int h) 1770 { 1771 DBG_FUNC("clrblock") 1772 1773 DBG_ENTER; 1774 1775 switch ((fs)->fs_frag) { 1776 case 8: 1777 cp[h] = 0; 1778 break; 1779 case 4: 1780 cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2)); 1781 break; 1782 case 2: 1783 cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1)); 1784 break; 1785 case 1: 1786 cp[h >> 3] &= ~(0x01 << (h & 0x7)); 1787 break; 1788 default: 1789 warnx("clrblock bad fs_frag %d", fs->fs_frag); 1790 break; 1791 } 1792 1793 DBG_LEAVE; 1794 return; 1795 } 1796 1797 /* ********************************************************** setblock ***** */ 1798 /* 1799 * Here we free a complete block in the free block map. For more details again 1800 * please see the source of newfs(8), as this function is taken over almost 1801 * unchanged. 1802 */ 1803 static void 1804 setblock(struct fs *fs, unsigned char *cp, int h) 1805 { 1806 DBG_FUNC("setblock") 1807 1808 DBG_ENTER; 1809 1810 switch (fs->fs_frag) { 1811 case 8: 1812 cp[h] = 0xff; 1813 break; 1814 case 4: 1815 cp[h >> 1] |= (0x0f << ((h & 0x1) << 2)); 1816 break; 1817 case 2: 1818 cp[h >> 2] |= (0x03 << ((h & 0x3) << 1)); 1819 break; 1820 case 1: 1821 cp[h >> 3] |= (0x01 << (h & 0x7)); 1822 break; 1823 default: 1824 warnx("setblock bad fs_frag %d", fs->fs_frag); 1825 break; 1826 } 1827 1828 DBG_LEAVE; 1829 return; 1830 } 1831 1832 /* ************************************************************ ginode ***** */ 1833 /* 1834 * This function provides access to an individual inode. We find out in which 1835 * block the requested inode is located, read it from disk if needed, and 1836 * return the pointer into that block. We maintain a cache of one block to 1837 * not read the same block again and again if we iterate linearly over all 1838 * inodes. 
1839 */ 1840 static struct dinode * 1841 ginode(ino_t inumber, int fsi, int cg) 1842 { 1843 DBG_FUNC("ginode") 1844 ufs_daddr_t iblk; 1845 static ino_t startinum=0; /* first inode in cached block */ 1846 struct dinode *pi; 1847 1848 DBG_ENTER; 1849 1850 pi=(struct dinode *)ablk; 1851 inumber+=(cg * sblock.fs_ipg); 1852 if (startinum == 0 || inumber < startinum || 1853 inumber >= startinum + INOPB(&sblock)) { 1854 /* 1855 * The block needed is not cached, so we have to read it from 1856 * disk now. 1857 */ 1858 iblk = ino_to_fsba(&sblock, inumber); 1859 in_src=fsbtodb(&sblock, iblk); 1860 rdfs(in_src, sblock.fs_bsize, (char *)&ablk, fsi); 1861 startinum = (inumber / INOPB(&sblock)) * INOPB(&sblock); 1862 } 1863 1864 DBG_LEAVE; 1865 return (&(pi[inumber % INOPB(&sblock)])); 1866 } 1867 1868 /* ****************************************************** charsperline ***** */ 1869 /* 1870 * Figure out how many lines our current terminal has. For more details again 1871 * please see the source of newfs(8), as this function is taken over almost 1872 * unchanged. 1873 */ 1874 static int 1875 charsperline(void) 1876 { 1877 DBG_FUNC("charsperline") 1878 int columns; 1879 char *cp; 1880 struct winsize ws; 1881 1882 DBG_ENTER; 1883 1884 columns = 0; 1885 if (ioctl(0, TIOCGWINSZ, &ws) != -1) { 1886 columns = ws.ws_col; 1887 } 1888 if (columns == 0 && (cp = getenv("COLUMNS"))) { 1889 columns = atoi(cp); 1890 } 1891 if (columns == 0) { 1892 columns = 80; /* last resort */ 1893 } 1894 1895 DBG_LEAVE; 1896 return columns; 1897 } 1898 1899 /* ************************************************************** main ***** */ 1900 /* 1901 * growfs(8) is a utility which allows to increase the size of an existing 1902 * ufs filesystem. Currently this can only be done on unmounted file system. 1903 * It recognizes some command line options to specify the new desired size, 1904 * and it does some basic checkings. The old file system size is determined 1905 * and after some more checks like we can really access the new last block 1906 * on the disk etc. we calculate the new parameters for the superblock. After 1907 * having done this we just call growfs() which will do the work. Before 1908 * we finish the only thing left is to update the disklabel. 1909 * We still have to provide support for snapshots. Therefore we first have to 1910 * understand what data structures are always replicated in the snapshot on 1911 * creation, for all other blocks we touch during our procedure, we have to 1912 * keep the old blocks unchanged somewhere available for the snapshots. If we 1913 * are lucky, then we only have to handle our blocks to be relocated in that 1914 * way. 1915 * Also we have to consider in what order we actually update the critical 1916 * data structures of the filesystem to make sure, that in case of a disaster 1917 * fsck(8) is still able to restore any lost data. 1918 * The foreseen last step then will be to provide for growing even mounted 1919 * file systems. There we have to extend the mount() system call to provide 1920 * userland access to the file system locking facility. 
 */
int
main(int argc, char **argv)
{
	DBG_FUNC("main")
	char	*device, *special, *cp;
	char	ch;
	unsigned long	size=0;
	size_t	len;
	int	Nflag=0;
	int	ExpertFlag=0;
	struct stat	st;
	struct disklabel	*lp;
	struct partition	*pp;
	int	fsi,fso;
	char	reply[5];
#ifdef FSMAXSNAP
	int	j;
#endif /* FSMAXSNAP */

	DBG_ENTER;

	while((ch=getopt(argc, argv, "Ns:vy")) != -1) {
		switch(ch) {
		case 'N':
			Nflag=1;
			break;
		case 's':
			size=(size_t)atol(optarg);
			if(size<1) {
				usage();
			}
			break;
		case 'v': /* for compatibility to newfs */
			break;
		case 'y':
			ExpertFlag=1;
			break;
		case '?':
			/* FALLTHROUGH */
		default:
			usage();
		}
	}
	argc -= optind;
	argv += optind;

	if(argc != 1) {
		usage();
	}
	device=*argv;

	/*
	 * Now try to guess the (raw) device name.
	 */
	if (0 == strrchr(device, '/')) {
		/*
		 * No path prefix was given, so try in that order:
		 *     /dev/r%s
		 *     /dev/%s
		 *     /dev/vinum/r%s
		 *     /dev/vinum/%s.
		 *
		 * FreeBSD no longer distinguishes between raw and block
		 * devices, but it should still work this way.
		 */
		len=strlen(device)+strlen(_PATH_DEV)+2+strlen("vinum/");
		special=(char *)malloc(len);
		if(special == NULL) {
			errx(1, "malloc failed");
		}
		snprintf(special, len, "%sr%s", _PATH_DEV, device);
		if (stat(special, &st) == -1) {
			snprintf(special, len, "%s%s", _PATH_DEV, device);
			if (stat(special, &st) == -1) {
				snprintf(special, len, "%svinum/r%s",
				    _PATH_DEV, device);
				if (stat(special, &st) == -1) {
					/* For now this is the 'last resort' */
					snprintf(special, len, "%svinum/%s",
					    _PATH_DEV, device);
				}
			}
		}
		device = special;
	}

	/*
	 * Try to access our devices for writing ...
	 */
	if (Nflag) {
		fso = -1;
	} else {
		fso = open(device, O_WRONLY);
		if (fso < 0) {
			err(1, "%s", device);
		}
	}

	/*
	 * ... and reading.
	 */
	fsi = open(device, O_RDONLY);
	if (fsi < 0) {
		err(1, "%s", device);
	}

	/*
	 * Try to read a label and guess the slice if not specified. This
	 * code should guess the right thing and avoid bothering the user
	 * with the task of specifying the option -v on vinum volumes.
	 */
	cp=device+strlen(device)-1;
	lp = get_disklabel(fsi);
	if(lp->d_type == DTYPE_VINUM) {
		pp = &lp->d_partitions[0];
	} else if (isdigit(*cp)) {
		pp = &lp->d_partitions[2];
	} else if (*cp>='a' && *cp<='h') {
		pp = &lp->d_partitions[*cp - 'a'];
	} else {
		errx(1, "unknown device");
	}

	/*
	 * Check if that partition looks suited for growing a file system.
	 */
	if (pp->p_size < 1) {
		errx(1, "partition is unavailable");
	}
	if (pp->p_fstype != FS_BSDFFS) {
		errx(1, "partition not 4.2BSD");
	}

	/*
	 * Read the current superblock, and take a backup.
	 */
	rdfs((daddr_t)(SBOFF/DEV_BSIZE), SBSIZE, (char *)&(osblock), fsi);
	if (osblock.fs_magic != FS_MAGIC) {
		errx(1, "superblock not recognized");
	}
	memcpy((void *)&fsun1, (void *)&fsun2, sizeof(fsun2));

	DBG_OPEN("/tmp/growfs.debug"); /* already here we need a superblock */
	DBG_DUMP_FS(&sblock,
	    "old sblock");

	/*
	 * Determine size to grow to.  Default to the full size specified in
	 * the disk label.
	 */
	sblock.fs_size = dbtofsb(&osblock, pp->p_size);
	if (size != 0) {
		if (size > pp->p_size){
			errx(1, "There is not enough space (%d < %ld)",
			    pp->p_size, size);
		}
		sblock.fs_size = dbtofsb(&osblock, size);
	}

	/*
	 * Are we really growing?
	 */
	if(osblock.fs_size >= sblock.fs_size) {
		errx(1, "we are not growing (%d->%d)", osblock.fs_size,
		    sblock.fs_size);
	}

#ifdef FSMAXSNAP
	/*
	 * Check if we find an active snapshot.
	 */
	if(ExpertFlag == 0) {
		for(j=0; j<FSMAXSNAP; j++) {
			if(sblock.fs_snapinum[j]) {
				errx(1, "active snapshot found in filesystem\n"
				    "   please remove all snapshots before "
				    "using growfs\n");
			}
			if(!sblock.fs_snapinum[j]) {	/* list is dense */
				break;
			}
		}
	}
#endif

	if (ExpertFlag == 0 && Nflag == 0) {
		printf("We strongly recommend that you make a backup "
		    "before growing the file system\n\n"
		    " Did you back up your data (Yes/No) ? ");
		fgets(reply, sizeof(reply), stdin);
		if (strcmp(reply, "Yes\n")){
			printf("\n Nothing done \n");
			exit (0);
		}
	}

	printf("new file system size is: %d frags\n", sblock.fs_size);

	/*
	 * Try to access our new last block in the filesystem. Even if we
	 * later on realize we have to abort our operation, on that block
	 * there should be no data, so we can't destroy something yet.
	 */
	wtfs((daddr_t)pp->p_size-1, DEV_BSIZE, (char *)&sblock, fso, Nflag);

	/*
	 * Now calculate new superblock values and check for reasonable
	 * bound for new file system size:
	 *     fs_size:    is derived from label or user input
	 *     fs_dsize:   should get updated in the routines creating or
	 *                 updating the cylinder groups on the fly
	 *     fs_cstotal: should get updated in the routines creating or
	 *                 updating the cylinder groups
	 */

	/*
	 * Update the number of cylinders in the filesystem.
	 */
	sblock.fs_ncyl = sblock.fs_size * NSPF(&sblock) / sblock.fs_spc;
	if (sblock.fs_size * NSPF(&sblock) > sblock.fs_ncyl * sblock.fs_spc) {
		sblock.fs_ncyl++;
	}

	/*
	 * Update the number of cylinder groups in the filesystem.
	 */
	sblock.fs_ncg = sblock.fs_ncyl / sblock.fs_cpg;
	if (sblock.fs_ncyl % sblock.fs_cpg) {
		sblock.fs_ncg++;
	}

	if ((sblock.fs_size - (sblock.fs_ncg-1) * sblock.fs_fpg) <
	    sblock.fs_fpg && cgdmin(&sblock, (sblock.fs_ncg-1))-
	    cgbase(&sblock, (sblock.fs_ncg-1)) > (sblock.fs_size -
	    (sblock.fs_ncg-1) * sblock.fs_fpg )) {
		/*
		 * The space in the new last cylinder group is too small,
		 * so revert back.
		 */
		sblock.fs_ncg--;
#if 1 /* this is a bit more safe */
		sblock.fs_ncyl = sblock.fs_ncg * sblock.fs_cpg;
#else
		sblock.fs_ncyl -= sblock.fs_ncyl % sblock.fs_cpg;
#endif
		printf("Warning: %d sector(s) cannot be allocated.\n",
		    (sblock.fs_size-(sblock.fs_ncg)*sblock.fs_fpg) *
		    NSPF(&sblock));
		sblock.fs_size = sblock.fs_ncyl * sblock.fs_spc / NSPF(&sblock);
	}

	/*
	 * Update the space for the cylinder group summary information in the
	 * respective cylinder group data area.
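	 * As a rough illustration (assumed values): every cylinder group
	 * contributes one struct csum, which on UFS1 is 16 bytes, so a file
	 * system ending up with, say, 100 cylinder groups needs 1600 bytes of
	 * summary data; with a 1024 byte fragment size fragroundup() rounds
	 * that up to 2048 bytes, i.e. fs_cssize would come out as two
	 * fragments.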
2178 */ 2179 sblock.fs_cssize = 2180 fragroundup(&sblock, sblock.fs_ncg * sizeof(struct csum)); 2181 2182 if(osblock.fs_size >= sblock.fs_size) { 2183 errx(1, "not enough new space"); 2184 } 2185 2186 DBG_PRINT0("sblock calculated\n"); 2187 2188 /* 2189 * Ok, everything prepared, so now let's do the tricks. 2190 */ 2191 growfs(fsi, fso, Nflag); 2192 2193 /* 2194 * Update the disk label. 2195 */ 2196 pp->p_fsize = sblock.fs_fsize; 2197 pp->p_frag = sblock.fs_frag; 2198 pp->p_cpg = sblock.fs_cpg; 2199 2200 return_disklabel(fso, lp, Nflag); 2201 DBG_PRINT0("label rewritten\n"); 2202 2203 close(fsi); 2204 if(fso>-1) close(fso); 2205 2206 DBG_CLOSE; 2207 2208 DBG_LEAVE; 2209 return 0; 2210 } 2211 2212 /* ************************************************** return_disklabel ***** */ 2213 /* 2214 * Write the updated disklabel back to disk. 2215 */ 2216 static void 2217 return_disklabel(int fd, struct disklabel *lp, int Nflag) 2218 { 2219 DBG_FUNC("return_disklabel") 2220 u_short sum; 2221 u_short *ptr; 2222 2223 DBG_ENTER; 2224 2225 if(!lp) { 2226 DBG_LEAVE; 2227 return; 2228 } 2229 if(!Nflag) { 2230 lp->d_checksum=0; 2231 sum = 0; 2232 ptr=(u_short *)lp; 2233 2234 /* 2235 * recalculate checksum 2236 */ 2237 while(ptr < (u_short *)&lp->d_partitions[lp->d_npartitions]) { 2238 sum ^= *ptr++; 2239 } 2240 lp->d_checksum=sum; 2241 2242 if (ioctl(fd, DIOCWDINFO, (char *)lp) < 0) { 2243 errx(1, "DIOCWDINFO failed"); 2244 } 2245 } 2246 free(lp); 2247 2248 DBG_LEAVE; 2249 return ; 2250 } 2251 2252 /* ***************************************************** get_disklabel ***** */ 2253 /* 2254 * Read the disklabel from disk. 2255 */ 2256 static struct disklabel * 2257 get_disklabel(int fd) 2258 { 2259 DBG_FUNC("get_disklabel") 2260 static struct disklabel *lab; 2261 2262 DBG_ENTER; 2263 2264 lab=(struct disklabel *)malloc(sizeof(struct disklabel)); 2265 if (!lab) { 2266 errx(1, "malloc failed"); 2267 } 2268 if (ioctl(fd, DIOCGDINFO, (char *)lab) < 0) { 2269 errx(1, "DIOCGDINFO failed"); 2270 } 2271 2272 DBG_LEAVE; 2273 return (lab); 2274 } 2275 2276 2277 /* ************************************************************* usage ***** */ 2278 /* 2279 * Dump a line of usage. 2280 */ 2281 static void 2282 usage(void) 2283 { 2284 DBG_FUNC("usage") 2285 2286 DBG_ENTER; 2287 2288 fprintf(stderr, "usage: growfs [-Ny] [-s size] special\n"); 2289 2290 DBG_LEAVE; 2291 exit(1); 2292 } 2293 2294 /* *********************************************************** updclst ***** */ 2295 /* 2296 * This updates most paramters and the bitmap related to cluster. We have to 2297 * assume, that sblock, osblock, acg are set up. 
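 * As an example of the update done below (hypothetical map contents): if the
 * two blocks preceding "block" are already marked free in the cluster map,
 * then freeing "block" turns a free run of length 2 into one of length 3,
 * so cg_clustersum(&acg)[2] is decremented and cg_clustersum(&acg)[3] is
 * incremented. Runs that have already reached fs_contigsumsize are left
 * alone.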
2298 */ 2299 static void 2300 updclst(int block) 2301 { 2302 DBG_FUNC("updclst") 2303 static int lcs=0; 2304 2305 DBG_ENTER; 2306 2307 if(sblock.fs_contigsumsize < 1) { /* no clustering */ 2308 return; 2309 } 2310 /* 2311 * update cluster allocation map 2312 */ 2313 setbit(cg_clustersfree(&acg), block); 2314 2315 /* 2316 * update cluster summary table 2317 */ 2318 if(!lcs) { 2319 /* 2320 * calculate size for the trailing cluster 2321 */ 2322 for(block--; lcs<sblock.fs_contigsumsize; block--, lcs++ ) { 2323 if(isclr(cg_clustersfree(&acg), block)){ 2324 break; 2325 } 2326 } 2327 } 2328 if(lcs < sblock.fs_contigsumsize) { 2329 if(lcs) { 2330 cg_clustersum(&acg)[lcs]--; 2331 } 2332 lcs++; 2333 cg_clustersum(&acg)[lcs]++; 2334 } 2335 2336 DBG_LEAVE; 2337 return; 2338 } 2339 2340 /* *********************************************************** updrefs ***** */ 2341 /* 2342 * This updates all references to relocated blocks for the given inode. The 2343 * inode is given as number within the cylinder group, and the number of the 2344 * cylinder group. 2345 */ 2346 static void 2347 updrefs(int cg, ino_t in, struct gfs_bpp *bp, int fsi, int fso, int Nflag) 2348 { 2349 DBG_FUNC("updrefs") 2350 unsigned int ictr, ind2ctr, ind3ctr; 2351 ufs_daddr_t *iptr, *ind2ptr, *ind3ptr; 2352 struct dinode *ino; 2353 int remaining_blocks; 2354 2355 DBG_ENTER; 2356 2357 /* 2358 * XXX We should skip unused inodes even from beeing read from disk 2359 * here by using the bitmap. 2360 */ 2361 ino=ginode(in, fsi, cg); 2362 if(!((ino->di_mode & IFMT)==IFDIR || (ino->di_mode & IFMT)==IFREG || 2363 (ino->di_mode & IFMT)==IFLNK)) { 2364 DBG_LEAVE; 2365 return; /* only check DIR, FILE, LINK */ 2366 } 2367 if(((ino->di_mode & IFMT)==IFLNK) && (ino->di_size<MAXSYMLINKLEN)) { 2368 DBG_LEAVE; 2369 return; /* skip short symlinks */ 2370 } 2371 if(!ino->di_size) { 2372 DBG_LEAVE; 2373 return; /* skip empty file */ 2374 } 2375 if(!ino->di_blocks) { 2376 DBG_LEAVE; 2377 return; /* skip empty swiss cheesy file or old fastlink */ 2378 } 2379 DBG_PRINT2("scg checking inode (%d in %d)\n", 2380 in, 2381 cg); 2382 2383 /* 2384 * Start checking all direct blocks. 
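 * To give a feeling for the numbers involved (assuming, purely as an
 * example, an 8192 byte block size and 4 byte block pointers): the inode
 * itself holds NDADDR (12) direct block pointers, a single indirect block
 * holds another 8192 / 4 = 2048 pointers, the double indirect level covers
 * 2048 * 2048 blocks and the triple indirect level 2048^3 blocks.
 * remaining_blocks is decreased by the direct, single and double indirect
 * counts as those levels are worked off below.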
2385 */ 2386 remaining_blocks=howmany(ino->di_size, sblock.fs_bsize); 2387 for(ictr=0; ictr < MIN(NDADDR, (unsigned int)remaining_blocks); 2388 ictr++) { 2389 iptr=&(ino->di_db[ictr]); 2390 if(*iptr) { 2391 cond_bl_upd(iptr, bp, GFS_PS_INODE, fso, Nflag); 2392 } 2393 } 2394 DBG_PRINT0("~~scg direct blocks checked\n"); 2395 2396 remaining_blocks-=NDADDR; 2397 if(remaining_blocks<0) { 2398 DBG_LEAVE; 2399 return; 2400 } 2401 if(ino->di_ib[0]) { 2402 /* 2403 * Start checking first indirect block 2404 */ 2405 cond_bl_upd(&(ino->di_ib[0]), bp, GFS_PS_INODE, fso, Nflag); 2406 i1_src=fsbtodb(&sblock, ino->di_ib[0]); 2407 rdfs(i1_src, sblock.fs_bsize, (char *)&i1blk, fsi); 2408 for(ictr=0; ictr < MIN(howmany(sblock.fs_bsize, 2409 sizeof(ufs_daddr_t)), (unsigned int)remaining_blocks); 2410 ictr++) { 2411 iptr=&((ufs_daddr_t *)&i1blk)[ictr]; 2412 if(*iptr) { 2413 cond_bl_upd(iptr, bp, GFS_PS_IND_BLK_LVL1, 2414 fso, Nflag); 2415 } 2416 } 2417 } 2418 DBG_PRINT0("scg indirect_1 blocks checked\n"); 2419 2420 remaining_blocks-= howmany(sblock.fs_bsize, sizeof(ufs_daddr_t)); 2421 if(remaining_blocks<0) { 2422 DBG_LEAVE; 2423 return; 2424 } 2425 if(ino->di_ib[1]) { 2426 /* 2427 * Start checking second indirect block 2428 */ 2429 cond_bl_upd(&(ino->di_ib[1]), bp, GFS_PS_INODE, fso, Nflag); 2430 i2_src=fsbtodb(&sblock, ino->di_ib[1]); 2431 rdfs(i2_src, sblock.fs_bsize, (char *)&i2blk, fsi); 2432 for(ind2ctr=0; ind2ctr < howmany(sblock.fs_bsize, 2433 sizeof(ufs_daddr_t)); ind2ctr++) { 2434 ind2ptr=&((ufs_daddr_t *)&i2blk)[ind2ctr]; 2435 if(!*ind2ptr) { 2436 continue; 2437 } 2438 cond_bl_upd(ind2ptr, bp, GFS_PS_IND_BLK_LVL2, fso, 2439 Nflag); 2440 i1_src=fsbtodb(&sblock, *ind2ptr); 2441 rdfs(i1_src, sblock.fs_bsize, (char *)&i1blk, fsi); 2442 for(ictr=0; ictr<MIN(howmany((unsigned int) 2443 sblock.fs_bsize, sizeof(ufs_daddr_t)), 2444 (unsigned int)remaining_blocks); ictr++) { 2445 iptr=&((ufs_daddr_t *)&i1blk)[ictr]; 2446 if(*iptr) { 2447 cond_bl_upd(iptr, bp, 2448 GFS_PS_IND_BLK_LVL1, fso, Nflag); 2449 } 2450 } 2451 } 2452 } 2453 DBG_PRINT0("scg indirect_2 blocks checked\n"); 2454 2455 #define SQUARE(a) ((a)*(a)) 2456 remaining_blocks-=SQUARE(howmany(sblock.fs_bsize, sizeof(ufs_daddr_t))); 2457 #undef SQUARE 2458 if(remaining_blocks<0) { 2459 DBG_LEAVE; 2460 return; 2461 } 2462 2463 if(ino->di_ib[2]) { 2464 /* 2465 * Start checking third indirect block 2466 */ 2467 cond_bl_upd(&(ino->di_ib[2]), bp, GFS_PS_INODE, fso, Nflag); 2468 i3_src=fsbtodb(&sblock, ino->di_ib[2]); 2469 rdfs(i3_src, sblock.fs_bsize, (char *)&i3blk, fsi); 2470 for(ind3ctr=0; ind3ctr < howmany(sblock.fs_bsize, 2471 sizeof(ufs_daddr_t)); ind3ctr ++) { 2472 ind3ptr=&((ufs_daddr_t *)&i3blk)[ind3ctr]; 2473 if(!*ind3ptr) { 2474 continue; 2475 } 2476 cond_bl_upd(ind3ptr, bp, GFS_PS_IND_BLK_LVL3, fso, 2477 Nflag); 2478 i2_src=fsbtodb(&sblock, *ind3ptr); 2479 rdfs(i2_src, sblock.fs_bsize, (char *)&i2blk, fsi); 2480 for(ind2ctr=0; ind2ctr < howmany(sblock.fs_bsize, 2481 sizeof(ufs_daddr_t)); ind2ctr ++) { 2482 ind2ptr=&((ufs_daddr_t *)&i2blk)[ind2ctr]; 2483 if(!*ind2ptr) { 2484 continue; 2485 } 2486 cond_bl_upd(ind2ptr, bp, GFS_PS_IND_BLK_LVL2, 2487 fso, Nflag); 2488 i1_src=fsbtodb(&sblock, *ind2ptr); 2489 rdfs(i1_src, sblock.fs_bsize, (char *)&i1blk, 2490 fsi); 2491 for(ictr=0; ictr < MIN(howmany(sblock.fs_bsize, 2492 sizeof(ufs_daddr_t)), 2493 (unsigned int)remaining_blocks); ictr++) { 2494 iptr=&((ufs_daddr_t *)&i1blk)[ictr]; 2495 if(*iptr) { 2496 cond_bl_upd(iptr, bp, 2497 GFS_PS_IND_BLK_LVL1, fso, 2498 Nflag); 2499 } 2500 } 2501 } 2502 } 2503 } 
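	/*
	 * At this point every direct, single, double and triple indirect
	 * pointer of this inode has been handed to cond_bl_upd(), which
	 * rewrites it in case the referenced block shows up in the list of
	 * blocks to be relocated; all other pointers are left untouched.
	 */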
2504 2505 DBG_PRINT0("scg indirect_3 blocks checked\n"); 2506 2507 DBG_LEAVE; 2508 return; 2509 } 2510 2511