/*-
 * modified for Lites 1.1
 *
 * Aug 1995, Godmar Back (gback@cs.utah.edu)
 * University of Utah, Department of Computer Science
 */
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.8 (Berkeley) 2/21/94
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/buf.h>

#include <fs/ext2fs/fs.h>
#include <fs/ext2fs/inode.h>
#include <fs/ext2fs/ext2_mount.h>
#include <fs/ext2fs/ext2fs.h>
#include <fs/ext2fs/ext2_extern.h>

static daddr_t	ext2_alloccg(struct inode *, int, daddr_t, int);
static daddr_t	ext2_clusteralloc(struct inode *, int, daddr_t, int);
static u_long	ext2_dirpref(struct inode *);
static void	ext2_fserr(struct m_ext2fs *, uid_t, char *);
static u_long	ext2_hashalloc(struct inode *, int, long, int,
		    daddr_t (*)(struct inode *, int, daddr_t, int));
static daddr_t	ext2_nodealloccg(struct inode *, int, daddr_t, int);
static daddr_t	ext2_mapsearch(struct m_ext2fs *, char *, daddr_t);

/*
 * Allocate a block in the filesystem.
 *
 * A preference may be optionally specified.  If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ext2_alloc(struct inode *ip, int32_t lbn, int32_t bpref, int size,
    struct ucred *cred, int32_t *bnp)
{
	struct m_ext2fs *fs;
	struct ext2mount *ump;
	int32_t bno;
	int cg;

	*bnp = 0;
	fs = ip->i_e2fs;
	ump = ip->i_ump;
	mtx_assert(EXT2_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->e2fs_bsize || blkoff(fs, size) != 0) {
		vn_printf(ip->i_devvp, "bsize = %lu, size = %d, fs = %s\n",
		    (long unsigned int)fs->e2fs_bsize, size, fs->e2fs_fsmnt);
		panic("ext2_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ext2_alloc: missing credential");
#endif	/* INVARIANTS */
	if (size == fs->e2fs_bsize && fs->e2fs->e2fs_fbcount == 0)
		goto nospace;
	if (cred->cr_uid != 0 &&
	    fs->e2fs->e2fs_fbcount < fs->e2fs->e2fs_rbcount)
		goto nospace;
	if (bpref >= fs->e2fs->e2fs_bcount)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)ext2_hashalloc(ip, cg, bpref, fs->e2fs_bsize,
	    ext2_alloccg);
	if (bno > 0) {
		/* set next_alloc fields as done in block_getblk */
		ip->i_next_alloc_block = lbn;
		ip->i_next_alloc_goal = bno;

		ip->i_blocks += btodb(fs->e2fs_bsize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
	EXT2_UNLOCK(ump);
	ext2_fserr(fs, cred->cr_uid, "filesystem full");
	uprintf("\n%s: write failed, filesystem is full\n", fs->e2fs_fsmnt);
	return (ENOSPC);
}
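
/*
 * Locking note, derived from the code above: ext2_alloc() is entered with
 * the per-mount EXT2 mutex held (see the mtx_assert()), and the lock is
 * dropped before the function returns on both the success path (inside the
 * cylinder group allocator) and the "nospace" path.
 */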

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous are given.  The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range.  If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation.  If unsuccessful, the allocation
 * is left unchanged.  The success of the reallocation is returned.
 * Note that the error return is not reflected back to the user.  Rather
 * the previous block allocation will be used.
 */

static SYSCTL_NODE(_vfs, OID_AUTO, ext2fs, CTLFLAG_RW, 0, "EXT2FS filesystem");

static int doasyncfree = 0;
SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
    "Use asynchronous writes to update block pointers when freeing blocks");

static int doreallocblks = 0;
SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");
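
/*
 * These knobs appear under the vfs.ext2fs sysctl node declared above, e.g.
 * "sysctl vfs.ext2fs.doreallocblks=1".  Both default to 0, so block pointer
 * updates are written synchronously and ext2_reallocblks() below bails out
 * immediately with ENOSPC unless doreallocblks is enabled.
 */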

int
ext2_reallocblks(struct vop_reallocblks_args *ap)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	uint32_t *bap, *sbap, *ebap = 0;
	struct ext2mount *ump;
	struct cluster_save *buflist;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	e2fs_lbn_t start_lbn, end_lbn;
	int32_t soff, newblk, blkno;
	int i, len, start_lvl, end_lvl, pref, ssize;

	if (doreallocblks == 0)
		return (ENOSPC);

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_e2fs;
	ump = ip->i_ump;

	if (fs->e2fs_contigsumsize <= 0)
		return (ENOSPC);

	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ext2_reallocblks: non-cluster");
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block.  Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block.  Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here.  We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < NDADDR && end_lbn >= NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ext2_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ext2_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (u_int *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ext2_reallocblks: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (u_int *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	EXT2_LOCK(ump);
	pref = ext2_blkpref(ip, start_lbn, soff, sbap, 0);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (int32_t)ext2_hashalloc(ip, dtog(fs, pref), pref,
	    len, ext2_clusteralloc)) == 0) {
		EXT2_UNLOCK(ump);
		goto fail;
	}
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	printf("realloc: ino %d, lbns %jd-%jd\n\told:", ip->i_number,
	    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif	/* DEBUG */
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->e2fs_fpb) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap))
			panic("ext2_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		printf(" %d,", *bap);
#endif	/* DEBUG */
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk.  In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written.  The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ext2_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	printf("\n\tnew:");
#endif	/* DEBUG */
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->e2fs_fpb) {
		ext2_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
		    fs->e2fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef DEBUG
		printf(" %d,", blkno);
#endif	/* DEBUG */
	}
#ifdef DEBUG
	printf("\n");
#endif	/* DEBUG */
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 */
int
ext2_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp)
{
	struct timespec ts;
	struct inode *pip;
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ext2mount *ump;
	ino_t ino, ipref;
	int i, error, cg;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_e2fs;
	ump = pip->i_ump;

	EXT2_LOCK(ump);
	if (fs->e2fs->e2fs_ficount == 0)
		goto noinodes;
	/*
	 * If it is a directory then obtain a cylinder group based on
	 * ext2_dirpref, else obtain it using ino_to_cg.  The preferred inode
	 * is always the next inode.
	 */
	if ((mode & IFMT) == IFDIR) {
		cg = ext2_dirpref(pip);
		if (fs->e2fs_contigdirs[cg] < 255)
			fs->e2fs_contigdirs[cg]++;
	} else {
		cg = ino_to_cg(fs, pip->i_number);
		if (fs->e2fs_contigdirs[cg] > 0)
			fs->e2fs_contigdirs[cg]--;
	}
	ipref = cg * fs->e2fs->e2fs_ipg + 1;
	ino = (ino_t)ext2_hashalloc(pip, cg, (long)ipref, mode, ext2_nodealloccg);

	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
	if (error) {
		ext2_vfree(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*vpp);

	/*
	 * The question is whether using VGET was such a good idea at all:
	 * Linux doesn't read the old inode in when it is allocating a
	 * new one.  I will set at least i_size and i_blocks to zero.
	 */
	ip->i_size = 0;
	ip->i_blocks = 0;
	ip->i_mode = 0;
	ip->i_flags = 0;
	/* now we want to make sure that the block pointers are zeroed out */
	for (i = 0; i < NDADDR; i++)
		ip->i_db[i] = 0;
	for (i = 0; i < NIADDR; i++)
		ip->i_ib[i] = 0;

	/*
	 * Set up a new generation number for this inode.
	 * XXX check if this makes sense in ext2
	 */
	if (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = random() / 2 + 1;

	vfs_timestamp(&ts);
	ip->i_birthtime = ts.tv_sec;
	ip->i_birthnsec = ts.tv_nsec;

	/*
	printf("ext2_valloc: allocated inode %d\n", ino);
	*/
	return (0);
noinodes:
	EXT2_UNLOCK(ump);
	ext2_fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->e2fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data.  Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static u_long
ext2_dirpref(struct inode *pip)
{
	struct m_ext2fs *fs;
	int cg, prefcg, dirsize, cgsize;
	u_int avgifree, avgbfree, avgndir, curdirsize;
	u_int minifree, minbfree, maxndir;
	u_int mincg, minndir;
	u_int maxcontigdirs;

	mtx_assert(EXT2_MTX(pip->i_ump), MA_OWNED);
	fs = pip->i_e2fs;

	avgifree = fs->e2fs->e2fs_ficount / fs->e2fs_gcount;
	avgbfree = fs->e2fs->e2fs_fbcount / fs->e2fs_gcount;
	avgndir = fs->e2fs_total_dir / fs->e2fs_gcount;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ext2fs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->e2fs_gcount;
		mincg = prefcg;
		minndir = fs->e2fs_ipg;
		for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
			if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir &&
			    fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree &&
			    fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->e2fs_gd[cg].ext2bgd_ndirs;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir &&
			    fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree &&
			    fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->e2fs_gd[cg].ext2bgd_ndirs;
			}

		return (mincg);
	}

	/*
	 * Count various limits which are used for
	 * optimal allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->e2fs_ipg / 16, fs->e2fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->e2fs_fsize * fs->e2fs_fpg;
	dirsize = AVGDIRSIZE;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->e2fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	if (dirsize <= 0)
		maxcontigdirs = 0;	/* dirsize overflowed */
	else
		maxcontigdirs = min((avgbfree * fs->e2fs_bsize) / dirsize, 255);
	maxcontigdirs = min(maxcontigdirs, fs->e2fs_ipg / AFPDIR);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
		if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir &&
		    fs->e2fs_gd[cg].ext2bgd_nifree >= minifree &&
		    fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) {
			if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
				return (cg);
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir &&
		    fs->e2fs_gd[cg].ext2bgd_nifree >= minifree &&
		    fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) {
			if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
				return (cg);
		}
	/*
	 * This is a backstop when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
		if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree)
			return (cg);
	for (cg = 0; cg < prefcg; cg++)
		if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree)
			break;
	return (cg);
}
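
/*
 * Note: e2fs_contigdirs[] tracks how many directories have recently been
 * placed in each group; ext2_valloc() above increments it for directory
 * allocations and decrements it for regular files, and the maxcontigdirs
 * comparison here is what throttles long runs of directories in one group.
 */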

/*
 * Select the desired position for the next block in a file.
 *
 * We try to mimic what Remy does in inode_getblk/block_getblk.
 *
 * Note: blocknr == 0 means that we're about to allocate either
 * a direct block or a pointer block at the first level of indirection
 * (in other words, stuff that will go in i_db[] or i_ib[]).
 *
 * blocknr != 0 means that we're allocating a block that is none
 * of the above.  Then, blocknr tells us the number of the block
 * that will hold the pointer.
 */
int32_t
ext2_blkpref(struct inode *ip, e2fs_lbn_t lbn, int indx, int32_t *bap,
    int32_t blocknr)
{
	int tmp;

	mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);

	/*
	 * If the next block is actually what we thought it is, then set
	 * the goal to what we thought it should be.
	 */
	if (ip->i_next_alloc_block == lbn && ip->i_next_alloc_goal != 0)
		return (ip->i_next_alloc_goal);

	/*
	 * Now check whether we were provided with an array that basically
	 * tells us previous blocks to which we want to stay close by.
	 */
	if (bap)
		for (tmp = indx - 1; tmp >= 0; tmp--)
			if (bap[tmp])
				return (bap[tmp]);

	/*
	 * Else fall back to blocknr or, if there is none, follow the rule
	 * that a block should be allocated near its inode.
	 */
	return (blocknr ? blocknr :
	    (int32_t)(ip->i_block_group *
	    EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
	    ip->i_e2fs->e2fs->e2fs_first_dblock);
}
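
/*
 * Summary of the preference order implemented above: (1) the goal recorded
 * by the last allocation, if this request continues it sequentially; (2) the
 * closest preceding block already recorded in the supplied block array;
 * (3) the caller-provided blocknr; (4) the first data block of the inode's
 * own block group.
 */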

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
static u_long
ext2_hashalloc(struct inode *ip, int cg, long pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
	struct m_ext2fs *fs;
	ino_t result;
	int i, icg = cg;

	mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_e2fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->e2fs_gcount; i *= 2) {
		cg += i;
		if (cg >= fs->e2fs_gcount)
			cg -= fs->e2fs_gcount;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->e2fs_gcount;
	for (i = 2; i < fs->e2fs_gcount; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->e2fs_gcount)
			cg = 0;
	}
	return (0);
}
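
/*
 * Probe order, for illustration: starting from group g, the quadratic
 * rehash above visits g+1, g+3, g+7, g+15, ... (mod the group count, since
 * the offset added doubles on each pass), and the brute force pass then
 * walks every group linearly beginning at g+2.
 */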

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext2mount *ump;
	daddr_t bno, runstart, runlen;
	int bit, loc, end, error, start;
	char *bbp;
	/* XXX ondisk32 */

	fs = ip->i_e2fs;
	ump = ip->i_ump;
	if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0)
		return (0);
	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs,
	    fs->e2fs_gd[cg].ext2bgd_b_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) {
		/*
		 * Another thread allocated the last block in this
		 * group while we were waiting for the buffer.
		 */
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	bbp = (char *)bp->b_data;

	if (dtog(fs, bpref) != cg)
		bpref = 0;
	if (bpref != 0) {
		bpref = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (isclr(bbp, bpref)) {
			bno = bpref;
			goto gotit;
		}
	}
	/*
	 * No blocks in the requested cylinder, so take the next
	 * available one in this cylinder group.
	 * First try to get 8 contiguous blocks, then fall back to a
	 * single block.
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
retry:
	runlen = 0;
	runstart = 0;
	for (loc = start; loc < end; loc++) {
		if (bbp[loc] == (char)0xff) {
			runlen = 0;
			continue;
		}

		/* Start of a run, find the number of high clear bits. */
		if (runlen == 0) {
			bit = fls(bbp[loc]);
			runlen = NBBY - bit;
			runstart = loc * NBBY + bit;
		} else if (bbp[loc] == 0) {
			/* Continue a run. */
			runlen += NBBY;
		} else {
			/*
			 * Finish the current run.  If it isn't long
			 * enough, start a new one.
			 */
			bit = ffs(bbp[loc]) - 1;
			runlen += bit;
			if (runlen >= 8) {
				bno = runstart;
				goto gotit;
			}

			/* Run was too short, start a new one. */
			bit = fls(bbp[loc]);
			runlen = NBBY - bit;
			runstart = loc * NBBY + bit;
		}

		/* If the current run is long enough, use it. */
		if (runlen >= 8) {
			bno = runstart;
			goto gotit;
		}
	}
	if (start != 0) {
		end = start;
		start = 0;
		goto retry;
	}

	bno = ext2_mapsearch(fs, bbp, bpref);
	if (bno < 0) {
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
gotit:
#ifdef INVARIANTS
	if (isset(bbp, bno)) {
		printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n",
		    cg, (intmax_t)bno, fs->e2fs_fsmnt);
		panic("ext2fs_alloccg: dup alloc");
	}
#endif
	setbit(bbp, bno);
	EXT2_LOCK(ump);
	ext2_clusteracct(fs, bbp, cg, bno, -1);
	fs->e2fs->e2fs_fbcount--;
	fs->e2fs_gd[cg].ext2bgd_nbfree--;
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);
	bdwrite(bp);
	return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);
}
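
/*
 * The value returned by ext2_alloccg() is a filesystem-wide block number:
 * the group's first block (cg * e2fs_fpg + e2fs_first_dblock) plus the
 * offset found in the group's block bitmap.  A return of 0 means no block
 * was allocated here, and the caller (ext2_hashalloc) moves on to another
 * group.
 */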

/*
 * Determine whether a cluster can be allocated.
 */
static daddr_t
ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len)
{
	struct m_ext2fs *fs;
	struct ext2mount *ump;
	struct buf *bp;
	char *bbp;
	int bit, error, got, i, loc, run;
	int32_t *lp;
	daddr_t bno;

	fs = ip->i_e2fs;
	ump = ip->i_ump;

	if (fs->e2fs_maxcluster[cg] < len)
		return (0);

	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp,
	    fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error)
		goto fail_lock;

	bbp = (char *)bp->b_data;
	EXT2_LOCK(ump);
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &fs->e2fs_clustersum[cg].cs_sum[len];
	for (i = len; i <= fs->e2fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->e2fs_contigsumsize) {
		/*
		 * Update the cluster summary information to reflect
		 * the true maximum-sized cluster so that future cluster
		 * allocation requests can avoid reading the bitmap only
		 * to find no cluster.
		 */
		lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->e2fs_maxcluster[cg] = i;
		goto fail;
	}
	EXT2_UNLOCK(ump);

	/* Search the bitmap to find a big enough cluster like in FFS. */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	if (bpref != 0)
		bpref = dtogd(fs, bpref);
	loc = bpref / NBBY;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) {
		if ((bbp[loc] & bit) != 0)
			run = 0;
		else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1))
			bit <<= 1;
		else {
			loc++;
			bit = 1;
		}
	}

	if (got >= fs->e2fs->e2fs_fpg)
		goto fail_lock;

	/* Allocate the cluster that we found. */
	for (i = 1; i < len; i++)
		if (!isclr(bbp, got - run + i))
			panic("ext2_clusteralloc: map mismatch");

	bno = got - run + 1;
	if (bno >= fs->e2fs->e2fs_fpg)
		panic("ext2_clusteralloc: allocated out of group");

	EXT2_LOCK(ump);
	for (i = 0; i < len; i += fs->e2fs_fpb) {
		setbit(bbp, bno + i);
		ext2_clusteracct(fs, bbp, cg, bno + i, -1);
		fs->e2fs->e2fs_fbcount--;
		fs->e2fs_gd[cg].ext2bgd_nbfree--;
	}
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);

	bdwrite(bp);
	return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);

fail_lock:
	EXT2_LOCK(ump);
fail:
	brelse(bp);
	return (0);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it in the specified cylinder group.
 */
static daddr_t
ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext2mount *ump;
	int error, start, len;
	char *ibp, *loc;

	ipref--;	/* to avoid a lot of (ipref - 1) */
	if (ipref == -1)
		ipref = 0;
	fs = ip->i_e2fs;
	ump = ip->i_ump;
	if (fs->e2fs_gd[cg].ext2bgd_nifree == 0)
		return (0);
	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs,
	    fs->e2fs_gd[cg].ext2bgd_i_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	if (fs->e2fs_gd[cg].ext2bgd_nifree == 0) {
		/*
		 * Another thread allocated the last i-node in this
		 * group while we were waiting for the buffer.
		 */
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	ibp = (char *)bp->b_data;
	if (ipref) {
		ipref %= fs->e2fs->e2fs_ipg;
		if (isclr(ibp, ipref))
			goto gotit;
	}
	start = ipref / NBBY;
	len = howmany(fs->e2fs->e2fs_ipg - ipref, NBBY);
	loc = memcchr(&ibp[start], 0xff, len);
	if (loc == NULL) {
		len = start + 1;
		start = 0;
		loc = memcchr(&ibp[start], 0xff, len);
		if (loc == NULL) {
			printf("cg = %d, ipref = %lld, fs = %s\n",
			    cg, (long long)ipref, fs->e2fs_fsmnt);
			panic("ext2fs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	ipref = (loc - ibp) * NBBY + ffs(~*loc) - 1;
gotit:
	setbit(ibp, ipref);
	EXT2_LOCK(ump);
	fs->e2fs_gd[cg].ext2bgd_nifree--;
	fs->e2fs->e2fs_ficount--;
	fs->e2fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		fs->e2fs_gd[cg].ext2bgd_ndirs++;
		fs->e2fs_total_dir++;
	}
	EXT2_UNLOCK(ump);
	bdwrite(bp);
	return (cg * fs->e2fs->e2fs_ipg + ipref + 1);
}
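
/*
 * Reminder: ext2 inode numbers are 1-based, which is why ext2_nodealloccg()
 * adds 1 to the bitmap offset in the value it returns and why it starts by
 * decrementing the caller's 1-based preference.
 */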

/*
 * Free a block or fragment.
 */
void
ext2_blkfree(struct inode *ip, int32_t bno, long size)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext2mount *ump;
	int cg, error;
	char *bbp;

	fs = ip->i_e2fs;
	ump = ip->i_ump;
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->e2fs->e2fs_bcount) {
		printf("bad block %lld, ino %llu\n", (long long)bno,
		    (unsigned long long)ip->i_number);
		ext2_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp,
	    fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	bbp = (char *)bp->b_data;
	bno = dtogd(fs, bno);
	if (isclr(bbp, bno)) {
		printf("block = %lld, fs = %s\n",
		    (long long)bno, fs->e2fs_fsmnt);
		panic("ext2_blkfree: freeing free block");
	}
	clrbit(bbp, bno);
	EXT2_LOCK(ump);
	ext2_clusteracct(fs, bbp, cg, bno, 1);
	fs->e2fs->e2fs_fbcount++;
	fs->e2fs_gd[cg].ext2bgd_nbfree++;
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);
	bdwrite(bp);
}

/*
 * Free an inode.
 */
int
ext2_vfree(struct vnode *pvp, ino_t ino, int mode)
{
	struct m_ext2fs *fs;
	struct inode *pip;
	struct buf *bp;
	struct ext2mount *ump;
	int error, cg;
	char *ibp;

	pip = VTOI(pvp);
	fs = pip->i_e2fs;
	ump = pip->i_ump;
	if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount)
		panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s",
		    pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt);

	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp,
	    fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	ibp = (char *)bp->b_data;
	ino = (ino - 1) % fs->e2fs->e2fs_ipg;
	if (isclr(ibp, ino)) {
		printf("ino = %llu, fs = %s\n",
		    (unsigned long long)ino, fs->e2fs_fsmnt);
		if (fs->e2fs_ronly == 0)
			panic("ext2_vfree: freeing free inode");
	}
	clrbit(ibp, ino);
	EXT2_LOCK(ump);
	fs->e2fs->e2fs_ficount++;
	fs->e2fs_gd[cg].ext2bgd_nifree++;
	if ((mode & IFMT) == IFDIR) {
		fs->e2fs_gd[cg].ext2bgd_ndirs--;
		fs->e2fs_total_dir--;
	}
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);
	bdwrite(bp);
	return (0);
}

/*
 * Find a block in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static daddr_t
ext2_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref)
{
	char *loc;
	int start, len;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	len = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
	loc = memcchr(&bbp[start], 0xff, len);
	if (loc == NULL) {
		len = start + 1;
		start = 0;
		loc = memcchr(&bbp[start], 0xff, len);
		if (loc == NULL) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->e2fs_fsmnt);
			panic("ext2_mapsearch: map corrupted");
			/* NOTREACHED */
		}
	}
	return ((loc - bbp) * NBBY + ffs(~*loc) - 1);
}

/*
 * Fserr prints the name of a filesystem with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ext2_fserr(struct m_ext2fs *fs, uid_t uid, char *cp)
{

	log(LOG_ERR, "uid %u on %s: %s\n", uid, fs->e2fs_fsmnt, cp);
}

int
cg_has_sb(int i)
{
	int a3, a5, a7;

	if (i == 0 || i == 1)
		return (1);
	for (a3 = 3, a5 = 5, a7 = 7;
	    a3 <= i || a5 <= i || a7 <= i;
	    a3 *= 3, a5 *= 5, a7 *= 7)
		if (i == a3 || i == a5 || i == a7)
			return (1);
	return (0);
}
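
/*
 * cg_has_sb() reflects the ext2 sparse superblock layout: backup copies of
 * the superblock live in groups 0 and 1 and in groups whose number is a
 * power of 3, 5, or 7.  For example, groups 0, 1, 3, 5, 7, 9, 25, 27 and 49
 * carry a backup while the other groups do not.
 */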