/*-
 * modified for Lites 1.1
 *
 * Aug 1995, Godmar Back (gback@cs.utah.edu)
 * University of Utah, Department of Computer Science
 */
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.8 (Berkeley) 2/21/94
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/buf.h>
#include <sys/endian.h>

#include <fs/ext2fs/fs.h>
#include <fs/ext2fs/inode.h>
#include <fs/ext2fs/ext2_mount.h>
#include <fs/ext2fs/ext2fs.h>
#include <fs/ext2fs/ext2_extern.h>

static daddr_t	ext2_alloccg(struct inode *, int, daddr_t, int);
static daddr_t	ext2_clusteralloc(struct inode *, int, daddr_t, int);
static u_long	ext2_dirpref(struct inode *);
static e4fs_daddr_t ext2_hashalloc(struct inode *, int, long, int,
		    daddr_t (*)(struct inode *, int, daddr_t, int));
static daddr_t	ext2_nodealloccg(struct inode *, int, daddr_t, int);
static daddr_t	ext2_mapsearch(struct m_ext2fs *, char *, daddr_t);

/*
 * Allocate a block in the filesystem.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ext2_alloc(struct inode *ip, daddr_t lbn, e4fs_daddr_t bpref, int size,
    struct ucred *cred, e4fs_daddr_t *bnp)
{
	struct m_ext2fs *fs;
	struct ext2mount *ump;
	e4fs_daddr_t bno;
	int cg;

	*bnp = 0;
	fs = ip->i_e2fs;
	ump = ip->i_ump;
	mtx_assert(EXT2_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->e2fs_bsize || blkoff(fs, size) != 0) {
		vn_printf(ip->i_devvp, "bsize = %lu, size = %d, fs = %s\n",
		    (long unsigned int)fs->e2fs_bsize, size, fs->e2fs_fsmnt);
		panic("ext2_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ext2_alloc: missing credential");
#endif /* INVARIANTS */
	if (size == fs->e2fs_bsize && fs->e2fs_fbcount == 0)
		goto nospace;
	if (cred->cr_uid != 0 &&
	    fs->e2fs_fbcount < fs->e2fs_rbcount)
		goto nospace;
	if (bpref >= fs->e2fs_bcount)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)ext2_hashalloc(ip, cg, bpref, fs->e2fs_bsize,
	    ext2_alloccg);
	if (bno > 0) {
		/* set next_alloc fields as done in block_getblk */
		ip->i_next_alloc_block = lbn;
		ip->i_next_alloc_goal = bno;

		ip->i_blocks += btodb(fs->e2fs_bsize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
	EXT2_UNLOCK(ump);
	ext2_fserr(fs, cred->cr_uid, "filesystem full");
	uprintf("\n%s: write failed, filesystem is full\n", fs->e2fs_fsmnt);
	return (ENOSPC);
}

/*
 * Allocate an EA (extended attribute) block for an inode.
 */
e4fs_daddr_t
ext2_alloc_meta(struct inode *ip)
{
	struct m_ext2fs *fs;
	daddr_t blk;

	fs = ip->i_e2fs;

	EXT2_LOCK(ip->i_ump);
	blk = ext2_hashalloc(ip, ino_to_cg(fs, ip->i_number), 0, fs->e2fs_bsize,
	    ext2_alloccg);
	if (0 == blk)
		EXT2_UNLOCK(ip->i_ump);

	return (blk);
}

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
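 *
 * Block reallocation is controlled by the vfs.ext2fs.doreallocblks sysctl
 * defined below (disabled by default) and is skipped for extent-mapped
 * (ext4) files.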
 */

static SYSCTL_NODE(_vfs, OID_AUTO, ext2fs, CTLFLAG_RW, 0, "EXT2FS filesystem");

static int doasyncfree = 1;

SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
    "Use asynchronous writes to update block pointers when freeing blocks");

static int doreallocblks = 0;

SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

int
ext2_reallocblks(struct vop_reallocblks_args *ap)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	uint32_t *bap, *sbap, *ebap;
	struct ext2mount *ump;
	struct cluster_save *buflist;
	struct indir start_ap[EXT2_NIADDR + 1], end_ap[EXT2_NIADDR + 1], *idp;
	e2fs_lbn_t start_lbn, end_lbn;
	int soff;
	e2fs_daddr_t newblk, blkno;
	int i, len, start_lvl, end_lvl, pref, ssize;

	if (doreallocblks == 0)
		return (ENOSPC);

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_e2fs;
	ump = ip->i_ump;

	if (fs->e2fs_contigsumsize <= 0 || ip->i_flag & IN_E4EXTENTS)
		return (ENOSPC);

	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ext2_reallocblks: non-cluster");
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block. Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < EXT2_NDADDR && end_lbn >= EXT2_NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ext2_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ext2_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (u_int *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	ebap = NULL;
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ext2_reallocblks: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (u_int *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	EXT2_LOCK(ump);
	pref = ext2_blkpref(ip, start_lbn, soff, sbap, 0);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (e2fs_daddr_t)ext2_hashalloc(ip, dtog(fs, pref), pref,
	    len, ext2_clusteralloc)) == 0) {
		EXT2_UNLOCK(ump);
		goto fail;
	}
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
	    (uintmax_t)ip->i_number, (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif /* DEBUG */
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->e2fs_fpb) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap))
			panic("ext2_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		printf(" %d,", *bap);
#endif /* DEBUG */
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ext2_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	printf("\n\tnew:");
#endif /* DEBUG */
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->e2fs_fpb) {
		ext2_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
		    fs->e2fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef DEBUG
		printf(" %d,", blkno);
#endif /* DEBUG */
	}
#ifdef DEBUG
	printf("\n");
#endif /* DEBUG */
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 */
int
ext2_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp)
{
	struct timespec ts;
	struct m_ext2fs *fs;
	struct ext2mount *ump;
	struct inode *pip;
	struct inode *ip;
	struct vnode *vp;
	struct thread *td;
	ino_t ino, ipref;
	int error, cg;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_e2fs;
	ump = pip->i_ump;

	EXT2_LOCK(ump);
	if (fs->e2fs->e2fs_ficount == 0)
		goto noinodes;
	/*
	 * If it is a directory then obtain a cylinder group based on
	 * ext2_dirpref; otherwise obtain it using ino_to_cg. The preferred
	 * inode is always the next inode.
	 */
	if ((mode & IFMT) == IFDIR) {
		cg = ext2_dirpref(pip);
		if (fs->e2fs_contigdirs[cg] < 255)
			fs->e2fs_contigdirs[cg]++;
	} else {
		cg = ino_to_cg(fs, pip->i_number);
		if (fs->e2fs_contigdirs[cg] > 0)
			fs->e2fs_contigdirs[cg]--;
	}
	ipref = cg * fs->e2fs->e2fs_ipg + 1;
	ino = (ino_t)ext2_hashalloc(pip, cg, (long)ipref, mode, ext2_nodealloccg);
	if (ino == 0)
		goto noinodes;

	td = curthread;
	error = vfs_hash_get(ump->um_mountp, ino, LK_EXCLUSIVE, td, vpp, NULL, NULL);
	if (error || *vpp != NULL) {
		EXT2_UNLOCK(ump);
		return (error);
	}

	ip = malloc(sizeof(struct inode), M_EXT2NODE, M_WAITOK | M_ZERO);
	if (ip == NULL) {
		EXT2_UNLOCK(ump);
		return (ENOMEM);
	}

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode("ext2fs", ump->um_mountp, &ext2_vnodeops, &vp)) != 0) {
		free(ip, M_EXT2NODE);
		EXT2_UNLOCK(ump);
		return (error);
	}

	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_e2fs = fs = ump->um_e2fs;
	ip->i_ump = ump;
	ip->i_number = ino;
	ip->i_block_group = ino_to_cg(fs, ino);
	ip->i_next_alloc_block = 0;
	ip->i_next_alloc_goal = 0;

	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	error = insmntque(vp, ump->um_mountp);
	if (error) {
		free(ip, M_EXT2NODE);
		EXT2_UNLOCK(ump);
		return (error);
	}

	error = vfs_hash_insert(vp, ino, LK_EXCLUSIVE, td, vpp, NULL, NULL);
	if (error || *vpp != NULL) {
		*vpp = NULL;
		free(ip, M_EXT2NODE);
		EXT2_UNLOCK(ump);
		return (error);
	}

	if ((error = ext2_vinit(ump->um_mountp, &ext2_fifoops, &vp)) != 0) {
		vput(vp);
		*vpp = NULL;
		free(ip, M_EXT2NODE);
		EXT2_UNLOCK(ump);
		return (error);
	}

	if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_EXTENTS)
	    && (S_ISREG(mode) || S_ISDIR(mode)))
		ext4_ext_tree_init(ip);
	else
		memset(ip->i_data, 0, sizeof(ip->i_data));

	/*
	 * Set up a new generation number for this inode.
	 * Avoid zero values.
	 */
	do {
		ip->i_gen = arc4random();
	} while (ip->i_gen == 0);

	vfs_timestamp(&ts);
	ip->i_birthtime = ts.tv_sec;
	ip->i_birthnsec = ts.tv_nsec;

	*vpp = vp;

	return (0);

noinodes:
	EXT2_UNLOCK(ump);
	ext2_fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->e2fs_fsmnt);
	return (ENOSPC);
}

/*
 * 64-bit compatible getters and setters for struct ext2_gd from ext2fs.h
 */
uint64_t
e2fs_gd_get_b_bitmap(struct ext2_gd *gd)
{

	return (((uint64_t)(gd->ext4bgd_b_bitmap_hi) << 32) |
	    gd->ext2bgd_b_bitmap);
}

uint64_t
e2fs_gd_get_i_bitmap(struct ext2_gd *gd)
{

	return (((uint64_t)(gd->ext4bgd_i_bitmap_hi) << 32) |
	    gd->ext2bgd_i_bitmap);
}

uint64_t
e2fs_gd_get_i_tables(struct ext2_gd *gd)
{

	return (((uint64_t)(gd->ext4bgd_i_tables_hi) << 32) |
	    gd->ext2bgd_i_tables);
}

static uint32_t
e2fs_gd_get_nbfree(struct ext2_gd *gd)
{

	return (((uint32_t)(gd->ext4bgd_nbfree_hi) << 16) |
	    gd->ext2bgd_nbfree);
}

static void
e2fs_gd_set_nbfree(struct ext2_gd *gd, uint32_t val)
{

	gd->ext2bgd_nbfree = val & 0xffff;
	gd->ext4bgd_nbfree_hi = val >> 16;
}

static uint32_t
e2fs_gd_get_nifree(struct ext2_gd *gd)
{

	return (((uint32_t)(gd->ext4bgd_nifree_hi) << 16) |
	    gd->ext2bgd_nifree);
}

static void
e2fs_gd_set_nifree(struct ext2_gd *gd, uint32_t val)
{

	gd->ext2bgd_nifree = val & 0xffff;
	gd->ext4bgd_nifree_hi = val >> 16;
}

uint32_t
e2fs_gd_get_ndirs(struct ext2_gd *gd)
{

	return (((uint32_t)(gd->ext4bgd_ndirs_hi) << 16) |
	    gd->ext2bgd_ndirs);
}

static void
e2fs_gd_set_ndirs(struct ext2_gd *gd, uint32_t val)
{

	gd->ext2bgd_ndirs = val & 0xffff;
	gd->ext4bgd_ndirs_hi = val >> 16;
}

static uint32_t
e2fs_gd_get_i_unused(struct ext2_gd *gd)
{
	return (((uint32_t)(gd->ext4bgd_i_unused_hi) << 16) |
	    gd->ext4bgd_i_unused);
}

static void
e2fs_gd_set_i_unused(struct ext2_gd *gd, uint32_t val)
{

	gd->ext4bgd_i_unused = val & 0xffff;
	gd->ext4bgd_i_unused_hi = val >> 16;
}

/*
 * Find a cylinder group in which to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for the inodes and data
 * of its files. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static u_long
ext2_dirpref(struct inode *pip)
{
	struct m_ext2fs *fs;
	int cg, prefcg, cgsize;
	uint64_t avgbfree, minbfree;
	u_int avgifree, avgndir, curdirsize;
	u_int minifree, maxndir;
	u_int mincg, minndir;
	u_int dirsize, maxcontigdirs;

	mtx_assert(EXT2_MTX(pip->i_ump), MA_OWNED);
	fs = pip->i_e2fs;

	avgifree = fs->e2fs->e2fs_ficount / fs->e2fs_gcount;
	avgbfree = fs->e2fs_fbcount / fs->e2fs_gcount;
	avgndir = fs->e2fs_total_dir / fs->e2fs_gcount;

	/*
	 * Force allocation in another cg if creating a first level dir.
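	 * A "first level" directory is one whose parent is the filesystem
	 * root, which is detected via the VV_ROOT flag on the parent vnode.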
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ext2fs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->e2fs_gcount;
		mincg = prefcg;
		minndir = fs->e2fs_ipg;
		for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
			if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < minndir &&
			    e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree &&
			    e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= avgbfree) {
				mincg = cg;
				minndir = e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]);
			}
		for (cg = 0; cg < prefcg; cg++)
			if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < minndir &&
			    e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree &&
			    e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= avgbfree) {
				mincg = cg;
				minndir = e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]);
			}
		return (mincg);
	}
	/*
	 * Count various limits which are used for
	 * optimal allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->e2fs_ipg / 16, fs->e2fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->e2fs_fsize * fs->e2fs_fpg;
	dirsize = AVGDIRSIZE;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->e2fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	maxcontigdirs = min((avgbfree * fs->e2fs_bsize) / dirsize, 255);
	maxcontigdirs = min(maxcontigdirs, fs->e2fs_ipg / AFPDIR);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
		if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < maxndir &&
		    e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= minifree &&
		    e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= minbfree) {
			if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
				return (cg);
		}
	for (cg = 0; cg < prefcg; cg++)
		if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < maxndir &&
		    e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= minifree &&
		    e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= minbfree) {
			if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
				return (cg);
		}
	/*
	 * This is a backstop when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
		if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree)
			return (cg);
	for (cg = 0; cg < prefcg; cg++)
		if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree)
			break;
	return (cg);
}

/*
 * Select the desired position for the next block in a file.
 *
 * We try to mimic what Remy does in inode_getblk/block_getblk.
 *
 * Note: blocknr == 0 means that we are about to allocate either a direct
 * block or a pointer block at the first level of indirection (in other
 * words, something that will go in i_db[] or i_ib[]).
 *
 * blocknr != 0 means that we are allocating a block that is none of the
 * above. In that case, blocknr tells us the number of the block that will
 * hold the pointer.
 */
e4fs_daddr_t
ext2_blkpref(struct inode *ip, e2fs_lbn_t lbn, int indx, e2fs_daddr_t *bap,
    e2fs_daddr_t blocknr)
{
	struct m_ext2fs *fs;
	int tmp;

	fs = ip->i_e2fs;

	mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);

	/*
	 * If the next block is actually what we thought it is, then set the
	 * goal to what we thought it should be.
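	 * (The i_next_alloc_block and i_next_alloc_goal hints are refreshed
	 * in ext2_alloc() after each successful block allocation.)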
	 */
	if (ip->i_next_alloc_block == lbn && ip->i_next_alloc_goal != 0)
		return (ip->i_next_alloc_goal);

	/*
	 * Now check whether we were provided with an array that basically
	 * tells us previous blocks to which we want to stay close.
	 */
	if (bap)
		for (tmp = indx - 1; tmp >= 0; tmp--)
			if (bap[tmp])
				return (bap[tmp]);

	/*
	 * Else lets fall back to the blocknr or, if there is none, follow
	 * the rule that a block should be allocated near its inode.
	 */
	return (blocknr ? blocknr :
	    (e2fs_daddr_t)(ip->i_block_group *
	    EXT2_BLOCKS_PER_GROUP(fs)) + fs->e2fs->e2fs_first_dblock);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
static e4fs_daddr_t
ext2_hashalloc(struct inode *ip, int cg, long pref, int size,
    daddr_t (*allocator) (struct inode *, int, daddr_t, int))
{
	struct m_ext2fs *fs;
	e4fs_daddr_t result;
	int i, icg = cg;

	mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_e2fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->e2fs_gcount; i *= 2) {
		cg += i;
		if (cg >= fs->e2fs_gcount)
			cg -= fs->e2fs_gcount;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->e2fs_gcount;
	for (i = 2; i < fs->e2fs_gcount; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->e2fs_gcount)
			cg = 0;
	}
	return (0);
}

static uint64_t
ext2_cg_number_gdb_nometa(struct m_ext2fs *fs, int cg)
{

	if (!ext2_cg_has_sb(fs, cg))
		return (0);

	if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG))
		return (fs->e2fs->e3fs_first_meta_bg);

	return ((fs->e2fs_gcount + EXT2_DESCS_PER_BLOCK(fs) - 1) /
	    EXT2_DESCS_PER_BLOCK(fs));
}

static uint64_t
ext2_cg_number_gdb_meta(struct m_ext2fs *fs, int cg)
{
	unsigned long metagroup;
	int first, last;

	metagroup = cg / EXT2_DESCS_PER_BLOCK(fs);
	first = metagroup * EXT2_DESCS_PER_BLOCK(fs);
	last = first + EXT2_DESCS_PER_BLOCK(fs) - 1;

	if (cg == first || cg == first + 1 || cg == last)
		return (1);

	return (0);
}

uint64_t
ext2_cg_number_gdb(struct m_ext2fs *fs, int cg)
{
	unsigned long first_meta_bg, metagroup;

	first_meta_bg = fs->e2fs->e3fs_first_meta_bg;
	metagroup = cg / EXT2_DESCS_PER_BLOCK(fs);

	if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) ||
	    metagroup < first_meta_bg)
		return (ext2_cg_number_gdb_nometa(fs, cg));

	return (ext2_cg_number_gdb_meta(fs, cg));
}

static int
ext2_number_base_meta_blocks(struct m_ext2fs *fs, int cg)
{
	int number;

	number = ext2_cg_has_sb(fs, cg);

	if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) ||
	    cg < fs->e2fs->e3fs_first_meta_bg * EXT2_DESCS_PER_BLOCK(fs)) {
		if (number) {
			number += ext2_cg_number_gdb(fs, cg);
			number +=
			    fs->e2fs->e2fs_reserved_ngdb;
		}
	} else {
		number += ext2_cg_number_gdb(fs, cg);
	}

	return (number);
}

static void
ext2_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		setbit(bitmap, i);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

static int
ext2_get_group_number(struct m_ext2fs *fs, e4fs_daddr_t block)
{

	return ((block - fs->e2fs->e2fs_first_dblock) / fs->e2fs_bsize);
}

static int
ext2_block_in_group(struct m_ext2fs *fs, e4fs_daddr_t block, int cg)
{

	return ((ext2_get_group_number(fs, block) == cg) ? 1 : 0);
}

static int
ext2_cg_block_bitmap_init(struct m_ext2fs *fs, int cg, struct buf *bp)
{
	int bit, bit_max, inodes_per_block;
	uint64_t start, tmp;

	if (!(fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_BLOCK_UNINIT))
		return (0);

	memset(bp->b_data, 0, fs->e2fs_bsize);

	bit_max = ext2_number_base_meta_blocks(fs, cg);
	if ((bit_max >> 3) >= fs->e2fs_bsize)
		return (EINVAL);

	for (bit = 0; bit < bit_max; bit++)
		setbit(bp->b_data, bit);

	start = (uint64_t)cg * fs->e2fs->e2fs_bpg + fs->e2fs->e2fs_first_dblock;

	/* Set bits for block and inode bitmaps, and inode table. */
	tmp = e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg]);
	if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) ||
	    ext2_block_in_group(fs, tmp, cg))
		setbit(bp->b_data, tmp - start);

	tmp = e2fs_gd_get_i_bitmap(&fs->e2fs_gd[cg]);
	if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) ||
	    ext2_block_in_group(fs, tmp, cg))
		setbit(bp->b_data, tmp - start);

	tmp = e2fs_gd_get_i_tables(&fs->e2fs_gd[cg]);
	inodes_per_block = fs->e2fs_bsize / EXT2_INODE_SIZE(fs);
	while (tmp < e2fs_gd_get_i_tables(&fs->e2fs_gd[cg]) +
	    fs->e2fs->e2fs_ipg / inodes_per_block) {
		if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) ||
		    ext2_block_in_group(fs, tmp, cg))
			setbit(bp->b_data, tmp - start);
		tmp++;
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * the blocksize * 8 (which is the size of the bitmap), set the
	 * rest of the block bitmap to 1.
	 */
	ext2_mark_bitmap_end(fs->e2fs->e2fs_bpg, fs->e2fs_bsize * 8,
	    bp->b_data);

	/* Clear the uninitialized flag. */
	fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_BLOCK_UNINIT;

	return (0);
}

static int
ext2_b_bitmap_validate(struct m_ext2fs *fs, struct buf *bp, int cg)
{
	struct ext2_gd *gd;
	uint64_t group_first_block;
	unsigned int offset, max_bit;

	if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG)) {
		/*
		 * It is not possible to check the block bitmap when this
		 * feature is enabled, because the inode and block bitmaps
		 * and the inode table blocks may not be in the group at
		 * all. So, skip the check in this case.
		 */
		return (0);
	}

	gd = &fs->e2fs_gd[cg];
	max_bit = fs->e2fs_fpg;
	group_first_block = ((uint64_t)cg) * fs->e2fs->e2fs_fpg +
	    fs->e2fs->e2fs_first_dblock;

	/* Check block bitmap block number */
	offset = e2fs_gd_get_b_bitmap(gd) - group_first_block;
	if (offset >= max_bit || !isset(bp->b_data, offset)) {
		printf("ext2fs: bad block bitmap, group %d\n", cg);
		return (EINVAL);
	}

	/* Check inode bitmap block number */
	offset = e2fs_gd_get_i_bitmap(gd) - group_first_block;
	if (offset >= max_bit || !isset(bp->b_data, offset)) {
		printf("ext2fs: bad inode bitmap, group %d\n", cg);
		return (EINVAL);
	}

	/* Check inode table */
	offset = e2fs_gd_get_i_tables(gd) - group_first_block;
	if (offset >= max_bit || offset + fs->e2fs_itpg >= max_bit) {
		printf("ext2fs: bad inode table, group %d\n", cg);
		return (EINVAL);
	}

	return (0);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext2mount *ump;
	daddr_t bno, runstart, runlen;
	int bit, loc, end, error, start;
	char *bbp;
	/* XXX ondisk32 */
	fs = ip->i_e2fs;
	ump = ip->i_ump;
	if (e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) == 0)
		return (0);

	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs,
	    e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg])),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error)
		goto fail;

	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) ||
	    EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) {
		error = ext2_cg_block_bitmap_init(fs, cg, bp);
		if (error)
			goto fail;

		ext2_gd_b_bitmap_csum_set(fs, cg, bp);
	}
	error = ext2_gd_b_bitmap_csum_verify(fs, cg, bp);
	if (error)
		goto fail;

	error = ext2_b_bitmap_validate(fs, bp, cg);
	if (error)
		goto fail;

	/*
	 * Check that another thread did not allocate the last block in this
	 * group while we were waiting for the buffer.
	 */
	if (e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) == 0)
		goto fail;

	bbp = (char *)bp->b_data;

	if (dtog(fs, bpref) != cg)
		bpref = 0;
	if (bpref != 0) {
		bpref = dtogd(fs, bpref);
		/*
		 * If the requested block is available, use it.
		 */
		if (isclr(bbp, bpref)) {
			bno = bpref;
			goto gotit;
		}
	}
	/*
	 * No blocks in the requested cylinder, so take the next
	 * available one in this cylinder group.
	 * First try to get 8 contiguous blocks, then fall back to a single
	 * block.
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
retry:
	runlen = 0;
	runstart = 0;
	for (loc = start; loc < end; loc++) {
		if (bbp[loc] == (char)0xff) {
			runlen = 0;
			continue;
		}

		/* Start of a run, find the number of high clear bits. */
		if (runlen == 0) {
			bit = fls(bbp[loc]);
			runlen = NBBY - bit;
			runstart = loc * NBBY + bit;
		} else if (bbp[loc] == 0) {
			/* Continue a run. */
			runlen += NBBY;
		} else {
			/*
			 * Finish the current run. If it isn't long
			 * enough, start a new one.
			 */
			bit = ffs(bbp[loc]) - 1;
			runlen += bit;
			if (runlen >= 8) {
				bno = runstart;
				goto gotit;
			}

			/* Run was too short, start a new one. */
			bit = fls(bbp[loc]);
			runlen = NBBY - bit;
			runstart = loc * NBBY + bit;
		}

		/* If the current run is long enough, use it. */
		if (runlen >= 8) {
			bno = runstart;
			goto gotit;
		}
	}
	if (start != 0) {
		end = start;
		start = 0;
		goto retry;
	}
	bno = ext2_mapsearch(fs, bbp, bpref);
	if (bno < 0)
		goto fail;

gotit:
#ifdef INVARIANTS
	if (isset(bbp, bno)) {
		printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n",
		    cg, (intmax_t)bno, fs->e2fs_fsmnt);
		panic("ext2fs_alloccg: dup alloc");
	}
#endif
	setbit(bbp, bno);
	EXT2_LOCK(ump);
	ext2_clusteracct(fs, bbp, cg, bno, -1);
	fs->e2fs_fbcount--;
	e2fs_gd_set_nbfree(&fs->e2fs_gd[cg],
	    e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) - 1);
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);
	ext2_gd_b_bitmap_csum_set(fs, cg, bp);
	bdwrite(bp);
	return (((uint64_t)cg) * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);

fail:
	brelse(bp);
	EXT2_LOCK(ump);
	return (0);
}

/*
 * Determine whether a cluster can be allocated.
 */
static daddr_t
ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len)
{
	struct m_ext2fs *fs;
	struct ext2mount *ump;
	struct buf *bp;
	char *bbp;
	int bit, error, got, i, loc, run;
	int32_t *lp;
	daddr_t bno;

	fs = ip->i_e2fs;
	ump = ip->i_ump;

	if (fs->e2fs_maxcluster[cg] < len)
		return (0);

	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp,
	    fsbtodb(fs, e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg])),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error)
		goto fail_lock;

	bbp = (char *)bp->b_data;
	EXT2_LOCK(ump);
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &fs->e2fs_clustersum[cg].cs_sum[len];
	for (i = len; i <= fs->e2fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->e2fs_contigsumsize) {
		/*
		 * Update the cluster summary information to reflect
		 * the true maximum-sized cluster so that future cluster
		 * allocation requests can avoid reading the bitmap only
		 * to find no cluster.
		 */
		lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->e2fs_maxcluster[cg] = i;
		goto fail;
	}
	EXT2_UNLOCK(ump);

	/* Search the bitmap to find a big enough cluster like in FFS. */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	if (bpref != 0)
		bpref = dtogd(fs, bpref);
	loc = bpref / NBBY;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) {
		if ((bbp[loc] & bit) != 0)
			run = 0;
		else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1))
			bit <<= 1;
		else {
			loc++;
			bit = 1;
		}
	}

	if (got >= fs->e2fs->e2fs_fpg)
		goto fail_lock;

	/* Allocate the cluster that we found.
	 */
	for (i = 1; i < len; i++)
		if (!isclr(bbp, got - run + i))
			panic("ext2_clusteralloc: map mismatch");

	bno = got - run + 1;
	if (bno >= fs->e2fs->e2fs_fpg)
		panic("ext2_clusteralloc: allocated out of group");

	EXT2_LOCK(ump);
	for (i = 0; i < len; i += fs->e2fs_fpb) {
		setbit(bbp, bno + i);
		ext2_clusteracct(fs, bbp, cg, bno + i, -1);
		fs->e2fs_fbcount--;
		e2fs_gd_set_nbfree(&fs->e2fs_gd[cg],
		    e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) - 1);
	}
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);

	bdwrite(bp);
	return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);

fail_lock:
	EXT2_LOCK(ump);
fail:
	brelse(bp);
	return (0);
}

static int
ext2_zero_inode_table(struct inode *ip, int cg)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	int i, all_blks, used_blks;

	fs = ip->i_e2fs;

	if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_ZEROED)
		return (0);

	all_blks = fs->e2fs->e2fs_inode_size * fs->e2fs->e2fs_ipg /
	    fs->e2fs_bsize;

	used_blks = howmany(fs->e2fs->e2fs_ipg -
	    e2fs_gd_get_i_unused(&fs->e2fs_gd[cg]),
	    fs->e2fs_bsize / EXT2_INODE_SIZE(fs));

	for (i = 0; i < all_blks - used_blks; i++) {
		bp = getblk(ip->i_devvp, fsbtodb(fs,
		    e2fs_gd_get_i_tables(&fs->e2fs_gd[cg]) + used_blks + i),
		    fs->e2fs_bsize, 0, 0, 0);
		if (!bp)
			return (EIO);

		vfs_bio_bzero_buf(bp, 0, fs->e2fs_bsize);
		bawrite(bp);
	}

	fs->e2fs_gd[cg].ext4bgd_flags |= EXT2_BG_INODE_ZEROED;

	return (0);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate an inode in the specified cylinder group.
 */
static daddr_t
ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext2mount *ump;
	int error, start, len, ifree;
	char *ibp, *loc;

	ipref--;	/* to avoid a lot of (ipref - 1) */
	if (ipref == -1)
		ipref = 0;
	fs = ip->i_e2fs;
	ump = ip->i_ump;
	if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) == 0)
		return (0);
	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs,
	    e2fs_gd_get_i_bitmap(&fs->e2fs_gd[cg])),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) ||
	    EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) {
		if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_UNINIT) {
			memset(bp->b_data, 0, fs->e2fs_bsize);
			fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_INODE_UNINIT;
		}
		ext2_gd_i_bitmap_csum_set(fs, cg, bp);
		error = ext2_zero_inode_table(ip, cg);
		if (error) {
			brelse(bp);
			EXT2_LOCK(ump);
			return (0);
		}
	}
	error = ext2_gd_i_bitmap_csum_verify(fs, cg, bp);
	if (error) {
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) == 0) {
		/*
		 * Another thread allocated the last i-node in this
		 * group while we were waiting for the buffer.
		 */
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	ibp = (char *)bp->b_data;
	if (ipref) {
		ipref %= fs->e2fs->e2fs_ipg;
		if (isclr(ibp, ipref))
			goto gotit;
	}
	start = ipref / NBBY;
	len = howmany(fs->e2fs->e2fs_ipg - ipref, NBBY);
	loc = memcchr(&ibp[start], 0xff, len);
	if (loc == NULL) {
		len = start + 1;
		start = 0;
		loc = memcchr(&ibp[start], 0xff, len);
		if (loc == NULL) {
			printf("ext2fs: inode bitmap corrupted: "
			    "cg = %d, ipref = %lld, fs = %s - run fsck\n",
			    cg, (long long)ipref, fs->e2fs_fsmnt);
			brelse(bp);
			EXT2_LOCK(ump);
			return (0);
		}
	}
	ipref = (loc - ibp) * NBBY + ffs(~*loc) - 1;
gotit:
	setbit(ibp, ipref);
	EXT2_LOCK(ump);
	e2fs_gd_set_nifree(&fs->e2fs_gd[cg],
	    e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) - 1);
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) ||
	    EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) {
		ifree = fs->e2fs->e2fs_ipg - e2fs_gd_get_i_unused(&fs->e2fs_gd[cg]);
		if (ipref + 1 > ifree)
			e2fs_gd_set_i_unused(&fs->e2fs_gd[cg],
			    fs->e2fs->e2fs_ipg - (ipref + 1));
	}
	fs->e2fs->e2fs_ficount--;
	fs->e2fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		e2fs_gd_set_ndirs(&fs->e2fs_gd[cg],
		    e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) + 1);
		fs->e2fs_total_dir++;
	}
	EXT2_UNLOCK(ump);
	ext2_gd_i_bitmap_csum_set(fs, cg, bp);
	bdwrite(bp);
	return ((uint64_t)cg * fs->e2fs_ipg + ipref + 1);
}

/*
 * Free a block or fragment.
 */
void
ext2_blkfree(struct inode *ip, e4fs_daddr_t bno, long size)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext2mount *ump;
	int cg, error;
	char *bbp;

	fs = ip->i_e2fs;
	ump = ip->i_ump;
	cg = dtog(fs, bno);
	if (bno >= fs->e2fs_bcount) {
		printf("bad block %lld, ino %ju\n", (long long)bno,
		    (uintmax_t)ip->i_number);
		ext2_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp,
	    fsbtodb(fs, e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg])),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	bbp = (char *)bp->b_data;
	bno = dtogd(fs, bno);
	if (isclr(bbp, bno)) {
		printf("block = %lld, fs = %s\n",
		    (long long)bno, fs->e2fs_fsmnt);
		panic("ext2_blkfree: freeing free block");
	}
	clrbit(bbp, bno);
	EXT2_LOCK(ump);
	ext2_clusteracct(fs, bbp, cg, bno, 1);
	fs->e2fs_fbcount++;
	e2fs_gd_set_nbfree(&fs->e2fs_gd[cg],
	    e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) + 1);
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);
	ext2_gd_b_bitmap_csum_set(fs, cg, bp);
	bdwrite(bp);
}

/*
 * Free an inode.
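 *
 * The inode's bit is cleared in the inode bitmap, and the free inode
 * counts in the group descriptor and the superblock summary are updated.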
 */
int
ext2_vfree(struct vnode *pvp, ino_t ino, int mode)
{
	struct m_ext2fs *fs;
	struct inode *pip;
	struct buf *bp;
	struct ext2mount *ump;
	int error, cg;
	char *ibp;

	pip = VTOI(pvp);
	fs = pip->i_e2fs;
	ump = pip->i_ump;
	if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount)
		panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s",
		    pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt);

	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp,
	    fsbtodb(fs, e2fs_gd_get_i_bitmap(&fs->e2fs_gd[cg])),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	ibp = (char *)bp->b_data;
	ino = (ino - 1) % fs->e2fs->e2fs_ipg;
	if (isclr(ibp, ino)) {
		printf("ino = %ju, fs = %s\n",
		    ino, fs->e2fs_fsmnt);
		if (fs->e2fs_ronly == 0)
			panic("ext2_vfree: freeing free inode");
	}
	clrbit(ibp, ino);
	EXT2_LOCK(ump);
	fs->e2fs->e2fs_ficount++;
	e2fs_gd_set_nifree(&fs->e2fs_gd[cg],
	    e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) + 1);
	if ((mode & IFMT) == IFDIR) {
		e2fs_gd_set_ndirs(&fs->e2fs_gd[cg],
		    e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) - 1);
		fs->e2fs_total_dir--;
	}
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);
	ext2_gd_i_bitmap_csum_set(fs, cg, bp);
	bdwrite(bp);
	return (0);
}

/*
 * Find a block in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block and
 * none are available.
 */
static daddr_t
ext2_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref)
{
	char *loc;
	int start, len;

	/*
	 * Find the fragment by searching through the free block
	 * map for an appropriate bit pattern.
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	len = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
	loc = memcchr(&bbp[start], 0xff, len);
	if (loc == NULL) {
		len = start + 1;
		start = 0;
		loc = memcchr(&bbp[start], 0xff, len);
		if (loc == NULL) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->e2fs_fsmnt);
			panic("ext2_mapsearch: map corrupted");
			/* NOTREACHED */
		}
	}
	return ((loc - bbp) * NBBY + ffs(~*loc) - 1);
}

/*
 * Fserr prints the name of a filesystem with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
void
ext2_fserr(struct m_ext2fs *fs, uid_t uid, char *cp)
{

	log(LOG_ERR, "uid %u on %s: %s\n", uid, fs->e2fs_fsmnt, cp);
}

int
ext2_cg_has_sb(struct m_ext2fs *fs, int cg)
{
	int a3, a5, a7;

	if (cg == 0)
		return (1);

	if (EXT2_HAS_COMPAT_FEATURE(fs, EXT2F_COMPAT_SPARSESUPER2)) {
		if (cg == fs->e2fs->e4fs_backup_bgs[0] ||
		    cg == fs->e2fs->e4fs_backup_bgs[1])
			return (1);
		return (0);
	}

	if ((cg <= 1) ||
	    !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_SPARSESUPER))
		return (1);

	if (!(cg & 1))
		return (0);

	for (a3 = 3, a5 = 5, a7 = 7;
	    a3 <= cg || a5 <= cg || a7 <= cg;
	    a3 *= 3, a5 *= 5, a7 *= 7)
		if (cg == a3 || cg == a5 || cg == a7)
			return (1);
	return (0);
}