1 /*- 2 * modified for Lites 1.1 3 * 4 * Aug 1995, Godmar Back (gback@cs.utah.edu) 5 * University of Utah, Department of Computer Science 6 */ 7 /*- 8 * Copyright (c) 1982, 1986, 1989, 1993 9 * The Regents of the University of California. All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * @(#)ffs_alloc.c 8.8 (Berkeley) 2/21/94 36 * $FreeBSD$ 37 */ 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/conf.h> 42 #include <sys/vnode.h> 43 #include <sys/stat.h> 44 #include <sys/mount.h> 45 #include <sys/sysctl.h> 46 #include <sys/syslog.h> 47 #include <sys/buf.h> 48 #include <sys/endian.h> 49 50 #include <fs/ext2fs/fs.h> 51 #include <fs/ext2fs/inode.h> 52 #include <fs/ext2fs/ext2_mount.h> 53 #include <fs/ext2fs/ext2fs.h> 54 #include <fs/ext2fs/ext2_extern.h> 55 56 static daddr_t ext2_alloccg(struct inode *, int, daddr_t, int); 57 static daddr_t ext2_clusteralloc(struct inode *, int, daddr_t, int); 58 static u_long ext2_dirpref(struct inode *); 59 static e4fs_daddr_t ext2_hashalloc(struct inode *, int, long, int, 60 daddr_t (*)(struct inode *, int, daddr_t, 61 int)); 62 static daddr_t ext2_nodealloccg(struct inode *, int, daddr_t, int); 63 static daddr_t ext2_mapsearch(struct m_ext2fs *, char *, daddr_t); 64 65 /* 66 * Allocate a block in the filesystem. 67 * 68 * A preference may be optionally specified. If a preference is given 69 * the following hierarchy is used to allocate a block: 70 * 1) allocate the requested block. 71 * 2) allocate a rotationally optimal block in the same cylinder. 72 * 3) allocate a block in the same cylinder group. 73 * 4) quadradically rehash into other cylinder groups, until an 74 * available block is located. 75 * If no block preference is given the following hierarchy is used 76 * to allocate a block: 77 * 1) allocate a block in the cylinder group that contains the 78 * inode for the file. 79 * 2) quadradically rehash into other cylinder groups, until an 80 * available block is located. 
 */
int
ext2_alloc(struct inode *ip, daddr_t lbn, e4fs_daddr_t bpref, int size,
    struct ucred *cred, e4fs_daddr_t *bnp)
{
	struct m_ext2fs *fs;
	struct ext2mount *ump;
	e4fs_daddr_t bno;
	int cg;

	/*
	 * Called with the ext2mount mutex held (asserted below).  On every
	 * return path the mutex has been released: the nospace path drops it
	 * here, and on success ext2_alloccg drops it before bdwrite().
	 */
	*bnp = 0;
	fs = ip->i_e2fs;
	ump = ip->i_ump;
	mtx_assert(EXT2_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->e2fs_bsize || blkoff(fs, size) != 0) {
		vn_printf(ip->i_devvp, "bsize = %lu, size = %d, fs = %s\n",
		    (long unsigned int)fs->e2fs_bsize, size, fs->e2fs_fsmnt);
		panic("ext2_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ext2_alloc: missing credential");
#endif	/* INVARIANTS */
	/* Quick out-of-space checks against the superblock counters. */
	if (size == fs->e2fs_bsize && fs->e2fs->e2fs_fbcount == 0)
		goto nospace;
	/* Non-root callers may not dip into the reserved-block pool. */
	if (cred->cr_uid != 0 &&
	    fs->e2fs->e2fs_fbcount < fs->e2fs->e2fs_rbcount)
		goto nospace;
	/* An out-of-range preference is treated as "no preference". */
	if (bpref >= fs->e2fs->e2fs_bcount)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)ext2_hashalloc(ip, cg, bpref, fs->e2fs_bsize,
	    ext2_alloccg);
	if (bno > 0) {
		/* set next_alloc fields as done in block_getblk */
		ip->i_next_alloc_block = lbn;
		ip->i_next_alloc_goal = bno;

		ip->i_blocks += btodb(fs->e2fs_bsize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
	EXT2_UNLOCK(ump);
	ext2_fserr(fs, cred->cr_uid, "filesystem full");
	uprintf("\n%s: write failed, filesystem is full\n", fs->e2fs_fsmnt);
	return (ENOSPC);
}

/*
 * Allocate EA's block for inode.
 *
 * Returns the allocated block number, or 0 on failure.  Takes the
 * ext2mount mutex; note the asymmetric unlock: on failure (blk == 0) it
 * is dropped here, while on success ext2_alloccg has already dropped it.
 */
e4fs_daddr_t
ext2_alloc_meta(struct inode *ip)
{
	struct m_ext2fs *fs;
	daddr_t blk;

	fs = ip->i_e2fs;

	EXT2_LOCK(ip->i_ump);
	blk = ext2_hashalloc(ip, ino_to_cg(fs, ip->i_number), 0, fs->e2fs_bsize,
	    ext2_alloccg);
	if (0 == blk)
		EXT2_UNLOCK(ip->i_ump);

	return (blk);
}

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
 */

static SYSCTL_NODE(_vfs, OID_AUTO, ext2fs, CTLFLAG_RW, 0, "EXT2FS filesystem");

static int doasyncfree = 1;

SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
    "Use asychronous writes to update block pointers when freeing blocks");

/* Disabled by default; see the early-return below. */
static int doreallocblks = 0;

SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

int
ext2_reallocblks(struct vop_reallocblks_args *ap)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;		/* start/end indirect-block buffers */
	uint32_t *bap, *sbap, *ebap;	/* cursor, start map, end map */
	struct ext2mount *ump;
	struct cluster_save *buflist;
	struct indir start_ap[EXT2_NIADDR + 1], end_ap[EXT2_NIADDR + 1], *idp;
	e2fs_lbn_t start_lbn, end_lbn;
	int soff;			/* offset of first pointer in sbap */
	e2fs_daddr_t newblk, blkno;
	int i, len, start_lvl, end_lvl, pref, ssize;

	if (doreallocblks == 0)
		return (ENOSPC);

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_e2fs;
	ump = ip->i_ump;

	/* Clustering info is required; extent-based files are not handled. */
	if (fs->e2fs_contigsumsize <= 0 || ip->i_flag & IN_E4EXTENTS)
		return (ENOSPC);

	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ext2_reallocblks: non-cluster");
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block. Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < EXT2_NDADDR && end_lbn >= EXT2_NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ext2_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ext2_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 * Level 0 means the pointers live directly in the inode.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (u_int *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	ebap = NULL;
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ext2_reallocblks: start == end");
#endif
		/* ssize = number of pointers that live in the first map. */
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (u_int *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	EXT2_LOCK(ump);
	pref = ext2_blkpref(ip, start_lbn, soff, sbap, 0);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 * On success ext2_clusteralloc returns with the mutex released.
	 */
	if ((newblk = (e2fs_daddr_t)ext2_hashalloc(ip, dtog(fs, pref), pref,
	    len, ext2_clusteralloc)) == 0) {
		EXT2_UNLOCK(ump);
		goto fail;
	}
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
	    (uintmax_t)ip->i_number, (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif	/* DEBUG */
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->e2fs_fpb) {
		if (i == ssize) {
			/* Crossed into the second map. */
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap))
			panic("ext2_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		printf(" %d,", *bap);
#endif	/* DEBUG */
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practise
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ext2_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	printf("\n\tnew:");
#endif	/* DEBUG */
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->e2fs_fpb) {
		ext2_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
		    fs->e2fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef DEBUG
		printf(" %d,", blkno);
#endif	/* DEBUG */
	}
#ifdef DEBUG
	printf("\n");
#endif	/* DEBUG */
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 *
 * On success *vpp holds the locked vnode of the new inode; the in-core
 * inode fields are reset here rather than trusting stale on-disk data.
 */
int
ext2_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp)
{
	struct timespec ts;
	struct inode *pip;
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ext2mount *ump;
	ino_t ino, ipref;
	int error, cg;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_e2fs;
	ump = pip->i_ump;

	EXT2_LOCK(ump);
	if (fs->e2fs->e2fs_ficount == 0)
		goto noinodes;
	/*
	 * If it is a directory then obtain a cylinder group based on
	 * ext2_dirpref else obtain it using ino_to_cg. The preferred inode is
	 * always the next inode.
	 */
	if ((mode & IFMT) == IFDIR) {
		cg = ext2_dirpref(pip);
		/* Saturating counter of consecutive dirs in this group. */
		if (fs->e2fs_contigdirs[cg] < 255)
			fs->e2fs_contigdirs[cg]++;
	} else {
		cg = ino_to_cg(fs, pip->i_number);
		if (fs->e2fs_contigdirs[cg] > 0)
			fs->e2fs_contigdirs[cg]--;
	}
	ipref = cg * fs->e2fs->e2fs_ipg + 1;
	ino = (ino_t)ext2_hashalloc(pip, cg, (long)ipref, mode, ext2_nodealloccg);

	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
	if (error) {
		ext2_vfree(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*vpp);

	/*
	 * The question is whether using VGET was such good idea at all:
	 * Linux doesn't read the old inode in when it is allocating a
	 * new one. I will set at least i_size and i_blocks to zero.
	 */
	ip->i_flag = 0;
	ip->i_size = 0;
	ip->i_blocks = 0;
	ip->i_mode = 0;
	ip->i_flags = 0;
	if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_EXTENTS)
	    && (S_ISREG(mode) || S_ISDIR(mode)))
		ext4_ext_tree_init(ip);
	else
		memset(ip->i_data, 0, sizeof(ip->i_data));


	/*
	 * Set up a new generation number for this inode.
	 * Avoid zero values.
	 */
	do {
		ip->i_gen = arc4random();
	} while (ip->i_gen == 0);

	vfs_timestamp(&ts);
	ip->i_birthtime = ts.tv_sec;
	ip->i_birthnsec = ts.tv_nsec;

	/*
	printf("ext2_valloc: allocated inode %d\n", ino);
	*/
	return (0);
noinodes:
	EXT2_UNLOCK(ump);
	ext2_fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->e2fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files inodes
 * and data.
 * Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 *
 */
static u_long
ext2_dirpref(struct inode *pip)
{
	struct m_ext2fs *fs;
	int cg, prefcg, cgsize;
	u_int avgifree, avgbfree, avgndir, curdirsize;
	u_int minifree, minbfree, maxndir;
	u_int mincg, minndir;
	u_int dirsize, maxcontigdirs;

	/* Caller holds the ext2mount mutex; we only read/score group stats. */
	mtx_assert(EXT2_MTX(pip->i_ump), MA_OWNED);
	fs = pip->i_e2fs;

	/* Per-group averages of free inodes, free blocks, and dirs. */
	avgifree = fs->e2fs->e2fs_ficount / fs->e2fs_gcount;
	avgbfree = fs->e2fs->e2fs_fbcount / fs->e2fs_gcount;
	avgndir = fs->e2fs_total_dir / fs->e2fs_gcount;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 * Starting from a random group, pick the group with the fewest
	 * directories among those with at least average free space.
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ext2fs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->e2fs_gcount;
		mincg = prefcg;
		minndir = fs->e2fs_ipg;
		for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
			if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir &&
			    fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree &&
			    fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->e2fs_gd[cg].ext2bgd_ndirs;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir &&
			    fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree &&
			    fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->e2fs_gd[cg].ext2bgd_ndirs;
			}
		return (mincg);
	}
	/*
	 * Count various limits which used for
	 * optimal allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->e2fs_ipg / 16, fs->e2fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->e2fs_fsize * fs->e2fs_fpg;
	dirsize = AVGDIRSIZE;
	/* Estimate actual average dir footprint from used space per dir. */
	curdirsize = avgndir ? (cgsize - avgbfree * fs->e2fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	maxcontigdirs = min((avgbfree * fs->e2fs_bsize) / dirsize, 255);
	maxcontigdirs = min(maxcontigdirs, fs->e2fs_ipg / AFPDIR);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
		if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir &&
		    fs->e2fs_gd[cg].ext2bgd_nifree >= minifree &&
		    fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) {
			if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
				return (cg);
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir &&
		    fs->e2fs_gd[cg].ext2bgd_nifree >= minifree &&
		    fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) {
			if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
				return (cg);
		}
	/*
	 * This is a backstop when we have deficit in space.
	 */
	for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
		if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree)
			return (cg);
	for (cg = 0; cg < prefcg; cg++)
		if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree)
			break;
	return (cg);
}

/*
 * Select the desired position for the next block in a file.
565 * 566 * we try to mimic what Remy does in inode_getblk/block_getblk 567 * 568 * we note: blocknr == 0 means that we're about to allocate either 569 * a direct block or a pointer block at the first level of indirection 570 * (In other words, stuff that will go in i_db[] or i_ib[]) 571 * 572 * blocknr != 0 means that we're allocating a block that is none 573 * of the above. Then, blocknr tells us the number of the block 574 * that will hold the pointer 575 */ 576 e4fs_daddr_t 577 ext2_blkpref(struct inode *ip, e2fs_lbn_t lbn, int indx, e2fs_daddr_t *bap, 578 e2fs_daddr_t blocknr) 579 { 580 struct m_ext2fs *fs; 581 int tmp; 582 583 fs = ip->i_e2fs; 584 585 mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED); 586 587 /* 588 * If the next block is actually what we thought it is, then set the 589 * goal to what we thought it should be. 590 */ 591 if (ip->i_next_alloc_block == lbn && ip->i_next_alloc_goal != 0) 592 return ip->i_next_alloc_goal; 593 594 /* 595 * Now check whether we were provided with an array that basically 596 * tells us previous blocks to which we want to stay close. 597 */ 598 if (bap) 599 for (tmp = indx - 1; tmp >= 0; tmp--) 600 if (bap[tmp]) 601 return bap[tmp]; 602 603 /* 604 * Else lets fall back to the blocknr or, if there is none, follow 605 * the rule that a block should be allocated near its inode. 606 */ 607 return (blocknr ? blocknr : 608 (e2fs_daddr_t)(ip->i_block_group * 609 EXT2_BLOCKS_PER_GROUP(fs)) + fs->e2fs->e2fs_first_dblock); 610 } 611 612 /* 613 * Implement the cylinder overflow algorithm. 614 * 615 * The policy implemented by this algorithm is: 616 * 1) allocate the block in its requested cylinder group. 617 * 2) quadradically rehash on the cylinder group number. 618 * 3) brute force search for a free block. 
 */
static e4fs_daddr_t
ext2_hashalloc(struct inode *ip, int cg, long pref, int size,
    daddr_t (*allocator) (struct inode *, int, daddr_t, int))
{
	struct m_ext2fs *fs;
	e4fs_daddr_t result;
	int i, icg = cg;

	/*
	 * Called with the ext2mount mutex held; the allocator callbacks
	 * drop and retake it around their buffer I/O.
	 */
	mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_e2fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->e2fs_gcount; i *= 2) {
		cg += i;
		if (cg >= fs->e2fs_gcount)
			cg -= fs->e2fs_gcount;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->e2fs_gcount;
	for (i = 2; i < fs->e2fs_gcount; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->e2fs_gcount)
			cg = 0;
	}
	return (0);
}

/*
 * Number of group-descriptor blocks that live in cylinder group `cg'
 * (0 when the group carries no superblock backup / descriptors).
 * Handles both the classic layout and the META_BG layout.
 */
static unsigned long
ext2_cg_num_gdb(struct m_ext2fs *fs, int cg)
{
	int gd_per_block, metagroup, first, last;

	gd_per_block = fs->e2fs_bsize / sizeof(struct ext2_gd);
	metagroup = cg / gd_per_block;
	first = metagroup * gd_per_block;
	last = first + gd_per_block - 1;

	if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) ||
	    metagroup < fs->e2fs->e3fs_first_meta_bg) {
		if (!ext2_cg_has_sb(fs, cg))
			return (0);
		if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG))
			return (fs->e2fs->e3fs_first_meta_bg);
		return (fs->e2fs_gdbcount);
	}

	/* META_BG: descriptors are replicated in the first, second and
	 * last group of each metagroup. */
	if (cg == first || cg == first + 1 || cg == last)
		return (1);
	return (0);

}

/*
 * Count the filesystem metadata blocks (superblock backup, group
 * descriptors, reserved descriptor blocks) at the start of group `cg';
 * used to pre-mark them in an uninitialized block bitmap.
 */
static int
ext2_num_base_meta_blocks(struct m_ext2fs *fs, int cg)
{
	int num, gd_per_block;

	gd_per_block = fs->e2fs_bsize / sizeof(struct ext2_gd);
	/* 1 if this group holds a superblock backup, else 0. */
	num = ext2_cg_has_sb(fs, cg);

	if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) ||
	    cg < fs->e2fs->e3fs_first_meta_bg * gd_per_block) {
		if (num) {
			num += ext2_cg_num_gdb(fs, cg);
			num += fs->e2fs->e2fs_reserved_ngdb;
		}
	} else {
		num += ext2_cg_num_gdb(fs, cg);
	}

	return (num);
}

/*
 * Map a block number to a group-relative quantity used by the FLEX_BG
 * checks in ext2_cg_block_bitmap_init().
 * NOTE(review): despite the name, for the e2fs_bpg != bsize*8 case this
 * returns a block offset rather than a group index — verify against the
 * FLEX_BG comparisons in the caller.
 */
static int
ext2_get_cg_number(struct m_ext2fs *fs, daddr_t blk)
{
	int cg;

	if (fs->e2fs->e2fs_bpg == fs->e2fs_bsize * 8)
		cg = (blk - fs->e2fs->e2fs_first_dblock) / (fs->e2fs_bsize * 8);
	else
		cg = blk - fs->e2fs->e2fs_first_dblock;

	return (cg);
}

/*
 * Set bits [start_bit, end_bit) in `bitmap': first bit-by-bit up to the
 * next byte boundary, then whole bytes at a time via memset.
 */
static void
ext2_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		setbit(bitmap, i);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

/*
 * Lazily construct the block bitmap of an uninitialized group
 * (GDT_CSUM + BLOCK_UNINIT): mark the metadata blocks, the block and
 * inode bitmaps, the inode table, and the tail past e2fs_bpg as in use,
 * then clear the BLOCK_UNINIT flag.  Returns 0 or EINVAL.
 */
static int
ext2_cg_block_bitmap_init(struct m_ext2fs *fs, int cg, struct buf *bp)
{
	int bit, bit_max, inodes_per_block;
	uint32_t start, tmp;

	if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) ||
	    !(fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_BLOCK_UNINIT))
		return (0);

	memset(bp->b_data, 0, fs->e2fs_bsize);

	bit_max = ext2_num_base_meta_blocks(fs, cg);
	if ((bit_max >> 3) >= fs->e2fs_bsize)
		return (EINVAL);

	for (bit = 0; bit < bit_max; bit++)
		setbit(bp->b_data, bit);

	/* First block of this group, in filesystem block numbers. */
	start = cg * fs->e2fs->e2fs_bpg + fs->e2fs->e2fs_first_dblock;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = fs->e2fs_gd[cg].ext2bgd_b_bitmap;
	if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) ||
	    tmp == ext2_get_cg_number(fs, cg))
		setbit(bp->b_data, tmp - start);

	tmp = fs->e2fs_gd[cg].ext2bgd_i_bitmap;
	if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) ||
	    tmp == ext2_get_cg_number(fs, cg))
		setbit(bp->b_data, tmp - start);

	tmp = fs->e2fs_gd[cg].ext2bgd_i_tables;
	inodes_per_block = fs->e2fs_bsize/EXT2_INODE_SIZE(fs);
	while( tmp < fs->e2fs_gd[cg].ext2bgd_i_tables +
	    fs->e2fs->e2fs_ipg / inodes_per_block ) {
		if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) ||
		    tmp == ext2_get_cg_number(fs, cg))
			setbit(bp->b_data, tmp - start);
		tmp++;
	}

	/*
	 * Also if the number of blocks within the group is less than
	 * the blocksize * 8 ( which is the size of bitmap ), set rest
	 * of the block bitmap to 1
	 */
	ext2_mark_bitmap_end(fs->e2fs->e2fs_bpg, fs->e2fs_bsize * 8,
	    bp->b_data);

	/* Clean the flag */
	fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_BLOCK_UNINIT;

	return (0);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 *
 * Locking: entered with the ext2mount mutex held; it is dropped around
 * the bitmap bread() and retaken for the counter updates.  On success
 * the mutex is released before bdwrite(); on failure (return 0) it is
 * held again, matching what ext2_hashalloc's callers expect.
 */
static daddr_t
ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext2mount *ump;
	daddr_t bno, runstart, runlen;
	int bit, loc, end, error, start;
	char *bbp;
	/* XXX ondisk32 */
	fs = ip->i_e2fs;
	ump = ip->i_ump;
	if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0)
		return (0);
	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs,
	    fs->e2fs_gd[cg].ext2bgd_b_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) {
		error = ext2_cg_block_bitmap_init(fs, cg, bp);
		if (error) {
			brelse(bp);
			EXT2_LOCK(ump);
			return (0);
		}
	}
	if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) {
		/*
		 * Another thread allocated the last block in this
		 * group while we were waiting for the buffer.
		 */
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	bbp = (char *)bp->b_data;

	if (dtog(fs, bpref) != cg)
		bpref = 0;
	if (bpref != 0) {
		bpref = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (isclr(bbp, bpref)) {
			bno = bpref;
			goto gotit;
		}
	}
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 * first try to get 8 contigous blocks, then fall back to a single
	 * block.
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
retry:
	runlen = 0;
	runstart = 0;
	for (loc = start; loc < end; loc++) {
		/* Fully-allocated byte: any current run is broken. */
		if (bbp[loc] == (char)0xff) {
			runlen = 0;
			continue;
		}

		/* Start of a run, find the number of high clear bits. */
		if (runlen == 0) {
			bit = fls(bbp[loc]);
			runlen = NBBY - bit;
			runstart = loc * NBBY + bit;
		} else if (bbp[loc] == 0) {
			/* Continue a run. */
			runlen += NBBY;
		} else {
			/*
			 * Finish the current run. If it isn't long
			 * enough, start a new one.
			 */
			bit = ffs(bbp[loc]) - 1;
			runlen += bit;
			if (runlen >= 8) {
				bno = runstart;
				goto gotit;
			}

			/* Run was too short, start a new one. */
			bit = fls(bbp[loc]);
			runlen = NBBY - bit;
			runstart = loc * NBBY + bit;
		}

		/* If the current run is long enough, use it. */
		if (runlen >= 8) {
			bno = runstart;
			goto gotit;
		}
	}
	/* Wrap around and scan the part of the bitmap before `start'. */
	if (start != 0) {
		end = start;
		start = 0;
		goto retry;
	}
	/* No 8-block run anywhere: settle for a single free block. */
	bno = ext2_mapsearch(fs, bbp, bpref);
	if (bno < 0) {
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
gotit:
#ifdef INVARIANTS
	if (isset(bbp, bno)) {
		printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n",
		    cg, (intmax_t)bno, fs->e2fs_fsmnt);
		panic("ext2fs_alloccg: dup alloc");
	}
#endif
	setbit(bbp, bno);
	EXT2_LOCK(ump);
	ext2_clusteracct(fs, bbp, cg, bno, -1);
	fs->e2fs->e2fs_fbcount--;
	fs->e2fs_gd[cg].ext2bgd_nbfree--;
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);
	bdwrite(bp);
	return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * Same locking contract as ext2_alloccg: mutex held on entry and on
 * failure return; released before bdwrite() on success.
 */
static daddr_t
ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len)
{
	struct m_ext2fs *fs;
	struct ext2mount *ump;
	struct buf *bp;
	char *bbp;
	int bit, error, got, i, loc, run;
	int32_t *lp;
	daddr_t bno;

	fs = ip->i_e2fs;
	ump = ip->i_ump;

	/* Cached per-group maximum cluster size rules this group out. */
	if (fs->e2fs_maxcluster[cg] < len)
		return (0);

	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp,
	    fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error)
		goto fail_lock;

	bbp = (char *)bp->b_data;
	EXT2_LOCK(ump);
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &fs->e2fs_clustersum[cg].cs_sum[len];
	for (i = len; i <= fs->e2fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->e2fs_contigsumsize) {
		/*
		 * Update the cluster summary information to reflect
		 * the true maximum-sized cluster so that future cluster
		 * allocation requests can avoid reading the bitmap only
		 * to find no cluster.
		 */
		lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->e2fs_maxcluster[cg] = i;
		goto fail;
	}
	EXT2_UNLOCK(ump);

	/* Search the bitmap to find a big enough cluster like in FFS. */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	if (bpref != 0)
		bpref = dtogd(fs, bpref);
	loc = bpref / NBBY;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) {
		if ((bbp[loc] & bit) != 0)
			run = 0;
		else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1))
			bit <<= 1;
		else {
			loc++;
			bit = 1;
		}
	}

	if (got >= fs->e2fs->e2fs_fpg)
		goto fail_lock;

	/* Allocate the cluster that we found. */
	for (i = 1; i < len; i++)
		if (!isclr(bbp, got - run + i))
			panic("ext2_clusteralloc: map mismatch");

	bno = got - run + 1;
	if (bno >= fs->e2fs->e2fs_fpg)
		panic("ext2_clusteralloc: allocated out of group");

	EXT2_LOCK(ump);
	for (i = 0; i < len; i += fs->e2fs_fpb) {
		setbit(bbp, bno + i);
		ext2_clusteracct(fs, bbp, cg, bno + i, -1);
		fs->e2fs->e2fs_fbcount--;
		fs->e2fs_gd[cg].ext2bgd_nbfree--;
	}
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);

	bdwrite(bp);
	return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);

fail_lock:
	EXT2_LOCK(ump);
fail:
	brelse(bp);
	return (0);
}

/*
 * Zero the not-yet-used tail of group `cg's on-disk inode table
 * (GDT_CSUM lazy initialization), then mark the group INODE_ZEROED.
 * Returns 0, or EIO if a buffer could not be obtained.
 */
static int
ext2_zero_inode_table(struct inode *ip, int cg)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	int i, all_blks, used_blks;

	fs = ip->i_e2fs;

	if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_ZEROED)
		return (0);

	all_blks = fs->e2fs->e2fs_inode_size * fs->e2fs->e2fs_ipg /
	    fs->e2fs_bsize;

	used_blks = howmany(fs->e2fs->e2fs_ipg -
	    fs->e2fs_gd[cg].ext4bgd_i_unused,
	    fs->e2fs_bsize / EXT2_INODE_SIZE(fs));

	for (i = 0; i < all_blks - used_blks; i++) {
		bp = getblk(ip->i_devvp, fsbtodb(fs,
		    fs->e2fs_gd[cg].ext2bgd_i_tables + used_blks + i),
		    fs->e2fs_bsize, 0, 0, 0);
		if (!bp)
			return (EIO);

		vfs_bio_bzero_buf(bp, 0, fs->e2fs_bsize);
		bawrite(bp);
	}

	fs->e2fs_gd[cg].ext4bgd_flags |= EXT2_BG_INODE_ZEROED;

	return (0);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it from the inode bitmap of the specified cylinder group.
 *
 * Same locking contract as ext2_alloccg.  Returns the allocated inode
 * number (1-based, filesystem-wide) or 0 on failure.
 */
static daddr_t
ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext2mount *ump;
	int error, start, len;
	char *ibp, *loc;

	ipref--;	/* to avoid a lot of (ipref -1) */
	if (ipref == -1)
		ipref = 0;
	fs = ip->i_e2fs;
	ump = ip->i_ump;
	if (fs->e2fs_gd[cg].ext2bgd_nifree == 0)
		return (0);
	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs,
	    fs->e2fs_gd[cg].ext2bgd_i_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) {
		/* Lazily initialize an uninitialized inode bitmap/table. */
		if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_UNINIT) {
			memset(bp->b_data, 0, fs->e2fs_bsize);
			fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_INODE_UNINIT;
		}
		error = ext2_zero_inode_table(ip, cg);
		if (error) {
			brelse(bp);
			EXT2_LOCK(ump);
			return (0);
		}
	}
	if (fs->e2fs_gd[cg].ext2bgd_nifree == 0) {
		/*
		 * Another thread allocated the last i-node in this
		 * group while we were waiting for the buffer.
		 */
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	ibp = (char *)bp->b_data;
	if (ipref) {
		ipref %= fs->e2fs->e2fs_ipg;
		if (isclr(ibp, ipref))
			goto gotit;
	}
	/* Scan from the preference to the end, then wrap to the start. */
	start = ipref / NBBY;
	len = howmany(fs->e2fs->e2fs_ipg - ipref, NBBY);
	loc = memcchr(&ibp[start], 0xff, len);
	if (loc == NULL) {
		len = start + 1;
		start = 0;
		loc = memcchr(&ibp[start], 0xff, len);
		if (loc == NULL) {
			printf("cg = %d, ipref = %lld, fs = %s\n",
			    cg, (long long)ipref, fs->e2fs_fsmnt);
			panic("ext2fs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	ipref = (loc - ibp) * NBBY + ffs(~*loc) - 1;
gotit:
	setbit(ibp, ipref);
	EXT2_LOCK(ump);
	fs->e2fs_gd[cg].ext2bgd_nifree--;
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM))
		fs->e2fs_gd[cg].ext4bgd_i_unused--;
	fs->e2fs->e2fs_ficount--;
	fs->e2fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		fs->e2fs_gd[cg].ext2bgd_ndirs++;
		fs->e2fs_total_dir++;
	}
	EXT2_UNLOCK(ump);
	bdwrite(bp);
	return (cg * fs->e2fs->e2fs_ipg + ipref + 1);
}

/*
 * Free a block or fragment.
1170 * 1171 */ 1172 void 1173 ext2_blkfree(struct inode *ip, e4fs_daddr_t bno, long size) 1174 { 1175 struct m_ext2fs *fs; 1176 struct buf *bp; 1177 struct ext2mount *ump; 1178 int cg, error; 1179 char *bbp; 1180 1181 fs = ip->i_e2fs; 1182 ump = ip->i_ump; 1183 cg = dtog(fs, bno); 1184 if (bno >= fs->e2fs->e2fs_bcount) { 1185 printf("bad block %lld, ino %ju\n", (long long)bno, 1186 (uintmax_t)ip->i_number); 1187 ext2_fserr(fs, ip->i_uid, "bad block"); 1188 return; 1189 } 1190 error = bread(ip->i_devvp, 1191 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap), 1192 (int)fs->e2fs_bsize, NOCRED, &bp); 1193 if (error) { 1194 brelse(bp); 1195 return; 1196 } 1197 bbp = (char *)bp->b_data; 1198 bno = dtogd(fs, bno); 1199 if (isclr(bbp, bno)) { 1200 printf("block = %lld, fs = %s\n", 1201 (long long)bno, fs->e2fs_fsmnt); 1202 panic("ext2_blkfree: freeing free block"); 1203 } 1204 clrbit(bbp, bno); 1205 EXT2_LOCK(ump); 1206 ext2_clusteracct(fs, bbp, cg, bno, 1); 1207 fs->e2fs->e2fs_fbcount++; 1208 fs->e2fs_gd[cg].ext2bgd_nbfree++; 1209 fs->e2fs_fmod = 1; 1210 EXT2_UNLOCK(ump); 1211 bdwrite(bp); 1212 } 1213 1214 /* 1215 * Free an inode. 
1216 * 1217 */ 1218 int 1219 ext2_vfree(struct vnode *pvp, ino_t ino, int mode) 1220 { 1221 struct m_ext2fs *fs; 1222 struct inode *pip; 1223 struct buf *bp; 1224 struct ext2mount *ump; 1225 int error, cg; 1226 char *ibp; 1227 1228 pip = VTOI(pvp); 1229 fs = pip->i_e2fs; 1230 ump = pip->i_ump; 1231 if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount) 1232 panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s", 1233 pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt); 1234 1235 cg = ino_to_cg(fs, ino); 1236 error = bread(pip->i_devvp, 1237 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap), 1238 (int)fs->e2fs_bsize, NOCRED, &bp); 1239 if (error) { 1240 brelse(bp); 1241 return (0); 1242 } 1243 ibp = (char *)bp->b_data; 1244 ino = (ino - 1) % fs->e2fs->e2fs_ipg; 1245 if (isclr(ibp, ino)) { 1246 printf("ino = %llu, fs = %s\n", 1247 (unsigned long long)ino, fs->e2fs_fsmnt); 1248 if (fs->e2fs_ronly == 0) 1249 panic("ext2_vfree: freeing free inode"); 1250 } 1251 clrbit(ibp, ino); 1252 EXT2_LOCK(ump); 1253 fs->e2fs->e2fs_ficount++; 1254 fs->e2fs_gd[cg].ext2bgd_nifree++; 1255 if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) 1256 fs->e2fs_gd[cg].ext4bgd_i_unused++; 1257 if ((mode & IFMT) == IFDIR) { 1258 fs->e2fs_gd[cg].ext2bgd_ndirs--; 1259 fs->e2fs_total_dir--; 1260 } 1261 fs->e2fs_fmod = 1; 1262 EXT2_UNLOCK(ump); 1263 bdwrite(bp); 1264 return (0); 1265 } 1266 1267 /* 1268 * Find a block in the specified cylinder group. 1269 * 1270 * It is a panic if a request is made to find a block if none are 1271 * available. 
1272 */ 1273 static daddr_t 1274 ext2_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref) 1275 { 1276 char *loc; 1277 int start, len; 1278 1279 /* 1280 * find the fragment by searching through the free block 1281 * map for an appropriate bit pattern 1282 */ 1283 if (bpref) 1284 start = dtogd(fs, bpref) / NBBY; 1285 else 1286 start = 0; 1287 len = howmany(fs->e2fs->e2fs_fpg, NBBY) - start; 1288 loc = memcchr(&bbp[start], 0xff, len); 1289 if (loc == NULL) { 1290 len = start + 1; 1291 start = 0; 1292 loc = memcchr(&bbp[start], 0xff, len); 1293 if (loc == NULL) { 1294 printf("start = %d, len = %d, fs = %s\n", 1295 start, len, fs->e2fs_fsmnt); 1296 panic("ext2_mapsearch: map corrupted"); 1297 /* NOTREACHED */ 1298 } 1299 } 1300 return ((loc - bbp) * NBBY + ffs(~*loc) - 1); 1301 } 1302 1303 /* 1304 * Fserr prints the name of a filesystem with an error diagnostic. 1305 * 1306 * The form of the error message is: 1307 * fs: error message 1308 */ 1309 void 1310 ext2_fserr(struct m_ext2fs *fs, uid_t uid, char *cp) 1311 { 1312 1313 log(LOG_ERR, "uid %u on %s: %s\n", uid, fs->e2fs_fsmnt, cp); 1314 } 1315 1316 int 1317 ext2_cg_has_sb(struct m_ext2fs *fs, int cg) 1318 { 1319 int a3, a5, a7; 1320 1321 if (cg == 0) 1322 return (1); 1323 1324 if (EXT2_HAS_COMPAT_FEATURE(fs, EXT2F_COMPAT_SPARSESUPER2)) { 1325 if (cg == fs->e2fs->e4fs_backup_bgs[0] || 1326 cg == fs->e2fs->e4fs_backup_bgs[1]) 1327 return (1); 1328 return (0); 1329 } 1330 1331 if ((cg <= 1) || 1332 !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_SPARSESUPER)) 1333 return (1); 1334 1335 if (!(cg & 1)) 1336 return (0); 1337 1338 for (a3 = 3, a5 = 5, a7 = 7; 1339 a3 <= cg || a5 <= cg || a7 <= cg; 1340 a3 *= 3, a5 *= 5, a7 *= 7) 1341 if (cg == a3 || cg == a5 || cg == a7) 1342 return (1); 1343 return (0); 1344 } 1345