/*	$NetBSD: ffs_balloc.c,v 1.13 2004/06/20 22:20:18 jmc Exp $	*/
/* From NetBSD: ffs_balloc.c,v 1.25 2001/08/08 08:36:36 lukem Exp */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_balloc.c	8.8 (Berkeley) 6/16/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/time.h>

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include "ffs/ufs_bswap.h"
#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int ffs_balloc_ufs1(struct inode *, off_t, int, struct buf **);
static int ffs_balloc_ufs2(struct inode *, off_t, int, struct buf **);

/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 *
 * Assume: flags == B_SYNC | B_CLRBUF
 */

int
ffs_balloc(struct inode *ip, off_t offset, int bufsize, struct buf **bpp)
{
	if (ip->i_fs->fs_magic == FS_UFS2_MAGIC)
		return ffs_balloc_ufs2(ip, offset, bufsize, bpp);
	else
		return ffs_balloc_ufs1(ip, offset, bufsize, bpp);
}
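
#if 0
/*
 * Illustrative sketch, not part of the original file: one way a caller
 * (e.g. makefs's file writer) might use ffs_balloc() to back a logical
 * file offset with an on-disk block and push data through the buffer
 * shim.  The helper name and its bookkeeping are assumptions made for
 * this example; the chunk is assumed to stay within a single block,
 * since ffs_balloc() asserts blkoff(fs, offset) + bufsize <= fs_bsize.
 */
static int
example_write_chunk(struct inode *ip, off_t offset, const void *data, int len)
{
	struct buf *bp;
	int error;

	/* Allocate (or fetch) the block backing [offset, offset + len). */
	error = ffs_balloc(ip, offset, len, &bp);
	if (error != 0)
		return (error);

	/* Copy the payload into the buffer and write it out synchronously. */
	memcpy((char *)bp->b_data + blkoff(ip->i_fs, offset), data, len);
	return (bwrite(bp));
}
#endif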

static int
ffs_balloc_ufs1(struct inode *ip, off_t offset, int bufsize, struct buf **bpp)
{
	daddr_t lbn, lastlbn;
	int size;
	int32_t nb;
	struct buf *bp, *nbp;
	struct fs *fs = ip->i_fs;
	struct indir indirs[UFS_NIADDR + 2];
	daddr_t newb, pref;
	int32_t *bap;
	int osize, nsize, num, i, error;
	int32_t *allocblk, allociblk[UFS_NIADDR + 1];
	int32_t *allocib;
	const int needswap = UFS_FSNEEDSWAP(fs);
	struct vnode vp = { ip->i_fd, ip->i_fs, NULL, 0 };

	lbn = lblkno(fs, offset);
	size = blkoff(fs, offset) + bufsize;
	if (bpp != NULL) {
		*bpp = NULL;
	}

	assert(size <= fs->fs_bsize);
	if (lbn < 0)
		return (EFBIG);

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment
	 * this fragment has to be extended to be a full block.
	 */

	lastlbn = lblkno(fs, ip->i_ffs1_size);
	if (lastlbn < UFS_NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			warnx("need to ffs_realloccg; not supported!");
			abort();
		}
	}

	/*
	 * The first UFS_NDADDR blocks are direct blocks
	 */

	if (lbn < UFS_NDADDR) {
		nb = ufs_rw32(ip->i_ffs1_db[lbn], needswap);
		if (nb != 0 && ip->i_ffs1_size >=
		    (uint64_t)lblktosize(fs, lbn + 1)) {

			/*
			 * The block is an already-allocated direct block
			 * and the file already extends past this block,
			 * thus this must be a whole block.
			 * Just read the block (if requested).
			 */

			if (bpp != NULL) {
				error = bread(&vp, lbn, fs->fs_bsize, NULL,
				    bpp);
				if (error) {
					brelse(*bpp, 0);
					return (error);
				}
			}
			return (0);
		}
		if (nb != 0) {

			/*
			 * Consider need to reallocate a fragment.
			 */

			osize = fragroundup(fs, blkoff(fs, ip->i_ffs1_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {

				/*
				 * The existing block is already
				 * at least as big as we want.
				 * Just read the block (if requested).
				 */

				if (bpp != NULL) {
					error = bread(&vp, lbn, osize, NULL,
					    bpp);
					if (error) {
						brelse(*bpp, 0);
						return (error);
					}
				}
				return 0;
			} else {
				warnx("need to ffs_realloccg; not supported!");
				abort();
			}
		} else {

			/*
			 * the block was not previously allocated,
			 * allocate a new block or fragment.
			 */

			if (ip->i_ffs1_size < (uint64_t)lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs1(ip, lbn, (int)lbn,
				&ip->i_ffs1_db[0]),
			    nsize, &newb);
			if (error)
				return (error);
			if (bpp != NULL) {
				bp = getblk(&vp, lbn, nsize, 0, 0, 0);
				bp->b_blkno = fsbtodb(fs, newb);
				clrbuf(bp);
				*bpp = bp;
			}
		}
		ip->i_ffs1_db[lbn] = ufs_rw32((int32_t)newb, needswap);
		return (0);
	}

	/*
	 * Determine the number of levels of indirection.
	 */

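	/*
	 * Illustrative note (not from the original source): with UFS1 and,
	 * say, an 8 KB block size, an indirect block holds 8192 / 4 = 2048
	 * block pointers, so logical blocks 0..UFS_NDADDR - 1 are direct,
	 * the next 2048 need one level of indirection, the 2048 * 2048 after
	 * that need two, and so on.  (UFS2 stores 64-bit pointers, so each
	 * indirect block holds half as many.)  ufs_getlbns() computes that
	 * chain for lbn: it sets num to the number of indirect levels and
	 * fills indirs[] with the offset to follow at each level.
	 */
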
	pref = 0;
	if ((error = ufs_getlbns(ip, lbn, indirs, &num)) != 0)
		return (error);

	if (num < 1) {
		warnx("ffs_balloc: ufs_getlbns returned indirect block");
		abort();
	}

	/*
	 * Fetch the first indirect block allocating if necessary.
	 */

	--num;
	nb = ufs_rw32(ip->i_ffs1_ib[indirs[0].in_off], needswap);
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error)
			return error;
		nb = newb;
		*allocblk++ = nb;
		bp = getblk(&vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, 0);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0)
			return error;
		allocib = &ip->i_ffs1_ib[indirs[0].in_off];
		*allocib = ufs_rw32((int32_t)nb, needswap);
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */

	for (i = 1;;) {
		error = bread(&vp, indirs[i].in_lbn, fs->fs_bsize, NULL, &bp);
		if (error) {
			brelse(bp, 0);
			return error;
		}
		bap = (int32_t *)bp->b_data;
		nb = ufs_rw32(bap[indirs[i].in_off], needswap);
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp, 0);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp, 0);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(&vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */

		if ((error = bwrite(nbp)) != 0) {
			brelse(bp, 0);
			return error;
		}
		bap[indirs[i - 1].in_off] = ufs_rw32(nb, needswap);

		bwrite(bp);
	}

	/*
	 * Get the data block, allocating if necessary.
	 */

	if (nb == 0) {
		pref = ffs_blkpref_ufs1(ip, lbn, indirs[num].in_off, &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp, 0);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		if (bpp != NULL) {
			nbp = getblk(&vp, lbn, fs->fs_bsize, 0, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
			clrbuf(nbp);
			*bpp = nbp;
		}
		bap[indirs[num].in_off] = ufs_rw32(nb, needswap);

		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		bwrite(bp);
		return (0);
	}
	brelse(bp, 0);
	if (bpp != NULL) {
		error = bread(&vp, lbn, (int)fs->fs_bsize, NULL, &nbp);
		if (error) {
			brelse(nbp, 0);
			return error;
		}
		*bpp = nbp;
	}
	return (0);
}

static int
ffs_balloc_ufs2(struct inode *ip, off_t offset, int bufsize, struct buf **bpp)
{
	daddr_t lbn, lastlbn;
	int size;
	struct buf *bp, *nbp;
	struct fs *fs = ip->i_fs;
	struct indir indirs[UFS_NIADDR + 2];
	daddr_t newb, pref, nb;
	int64_t *bap;
	int osize, nsize, num, i, error;
	int64_t *allocblk, allociblk[UFS_NIADDR + 1];
	int64_t *allocib;
	const int needswap = UFS_FSNEEDSWAP(fs);
	struct vnode vp = { ip->i_fd, ip->i_fs, NULL, 0 };

	lbn = lblkno(fs, offset);
	size = blkoff(fs, offset) + bufsize;
	if (bpp != NULL) {
		*bpp = NULL;
	}

	assert(size <= fs->fs_bsize);
	if (lbn < 0)
		return (EFBIG);

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment
	 * this fragment has to be extended to be a full block.
	 */

	lastlbn = lblkno(fs, ip->i_ffs2_size);
	if (lastlbn < UFS_NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			warnx("need to ffs_realloccg; not supported!");
			abort();
		}
	}

	/*
	 * The first UFS_NDADDR blocks are direct blocks
	 */

	if (lbn < UFS_NDADDR) {
		nb = ufs_rw64(ip->i_ffs2_db[lbn], needswap);
		if (nb != 0 && ip->i_ffs2_size >=
		    (uint64_t)lblktosize(fs, lbn + 1)) {

			/*
			 * The block is an already-allocated direct block
			 * and the file already extends past this block,
			 * thus this must be a whole block.
			 * Just read the block (if requested).
			 */

			if (bpp != NULL) {
				error = bread(&vp, lbn, fs->fs_bsize, NULL,
				    bpp);
				if (error) {
					brelse(*bpp, 0);
					return (error);
				}
			}
			return (0);
		}
		if (nb != 0) {

			/*
			 * Consider need to reallocate a fragment.
			 */

			osize = fragroundup(fs, blkoff(fs, ip->i_ffs2_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {

				/*
				 * The existing block is already
				 * at least as big as we want.
				 * Just read the block (if requested).
				 */

				if (bpp != NULL) {
					error = bread(&vp, lbn, osize, NULL,
					    bpp);
					if (error) {
						brelse(*bpp, 0);
						return (error);
					}
				}
				return 0;
			} else {
				warnx("need to ffs_realloccg; not supported!");
				abort();
			}
		} else {

			/*
			 * the block was not previously allocated,
			 * allocate a new block or fragment.
			 */

			if (ip->i_ffs2_size < (uint64_t)lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs2(ip, lbn, (int)lbn,
				&ip->i_ffs2_db[0]),
			    nsize, &newb);
			if (error)
				return (error);
			if (bpp != NULL) {
				bp = getblk(&vp, lbn, nsize, 0, 0, 0);
				bp->b_blkno = fsbtodb(fs, newb);
				clrbuf(bp);
				*bpp = bp;
			}
		}
		ip->i_ffs2_db[lbn] = ufs_rw64(newb, needswap);
		return (0);
	}

	/*
	 * Determine the number of levels of indirection.
	 */

	pref = 0;
	if ((error = ufs_getlbns(ip, lbn, indirs, &num)) != 0)
		return (error);

	if (num < 1) {
		warnx("ffs_balloc: ufs_getlbns returned indirect block");
		abort();
	}

	/*
	 * Fetch the first indirect block allocating if necessary.
	 */

	--num;
	nb = ufs_rw64(ip->i_ffs2_ib[indirs[0].in_off], needswap);
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref_ufs2(ip, lbn, 0, (int64_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error)
			return error;
		nb = newb;
		*allocblk++ = nb;
		bp = getblk(&vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, 0);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0)
			return error;
		allocib = &ip->i_ffs2_ib[indirs[0].in_off];
		*allocib = ufs_rw64(nb, needswap);
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */

	for (i = 1;;) {
		error = bread(&vp, indirs[i].in_lbn, fs->fs_bsize, NULL, &bp);
		if (error) {
			brelse(bp, 0);
			return error;
		}
		bap = (int64_t *)bp->b_data;
		nb = ufs_rw64(bap[indirs[i].in_off], needswap);
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp, 0);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref_ufs2(ip, lbn, 0, (int64_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp, 0);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(&vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */

		if ((error = bwrite(nbp)) != 0) {
			brelse(bp, 0);
			return error;
		}
		bap[indirs[i - 1].in_off] = ufs_rw64(nb, needswap);

		bwrite(bp);
	}

	/*
	 * Get the data block, allocating if necessary.
	 */

	if (nb == 0) {
		pref = ffs_blkpref_ufs2(ip, lbn, indirs[num].in_off, &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp, 0);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		if (bpp != NULL) {
			nbp = getblk(&vp, lbn, fs->fs_bsize, 0, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
			clrbuf(nbp);
			*bpp = nbp;
		}
		bap[indirs[num].in_off] = ufs_rw64(nb, needswap);

		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		bwrite(bp);
		return (0);
	}
	brelse(bp, 0);
	if (bpp != NULL) {
		error = bread(&vp, lbn, (int)fs->fs_bsize, NULL, &nbp);
		if (error) {
			brelse(nbp, 0);
			return error;
		}
		*bpp = nbp;
	}
	return (0);
}