/*	$NetBSD: ffs_balloc.c,v 1.13 2004/06/20 22:20:18 jmc Exp $	*/
/* From NetBSD: ffs_balloc.c,v 1.25 2001/08/08 08:36:36 lukem Exp */

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_balloc.c	8.8 (Berkeley) 6/16/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/time.h>

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include "ffs/ufs_bswap.h"
#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int ffs_balloc_ufs1(struct inode *, off_t, int, struct m_buf **);
static int ffs_balloc_ufs2(struct inode *, off_t, int, struct m_buf **);

/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 *
 * Assume: flags == B_SYNC | B_CLRBUF
 */

int
ffs_balloc(struct inode *ip, off_t offset, int bufsize, struct m_buf **bpp)
{
	if (ip->i_fs->fs_magic == FS_UFS2_MAGIC)
		return ffs_balloc_ufs2(ip, offset, bufsize, bpp);
	else
		return ffs_balloc_ufs1(ip, offset, bufsize, bpp);
}

static int
ffs_balloc_ufs1(struct inode *ip, off_t offset, int bufsize,
    struct m_buf **bpp)
{
	daddr_t lbn, lastlbn;
	int size;
	int32_t nb;
	struct m_buf *bp, *nbp;
	struct fs *fs = ip->i_fs;
	struct indir indirs[UFS_NIADDR + 2];
	daddr_t newb, pref;
	int32_t *bap;
	int osize, nsize, num, i, error;
	int32_t *allocblk, allociblk[UFS_NIADDR + 1];
	int32_t *allocib;
	const int needswap = UFS_FSNEEDSWAP(fs);

	lbn = lblkno(fs, offset);
	size = blkoff(fs, offset) + bufsize;
	if (bpp != NULL) {
		*bpp = NULL;
	}

	assert(size <= fs->fs_bsize);
	if (lbn < 0)
		return (EFBIG);

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment
	 * this fragment has to be extended to be a full block.
	 */

	lastlbn = lblkno(fs, ip->i_ffs1_size);
	if (lastlbn < UFS_NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			warnx("need to ffs_realloccg; not supported!");
			abort();
		}
	}

	/*
	 * The first UFS_NDADDR blocks are direct blocks
	 */

	if (lbn < UFS_NDADDR) {
		nb = ufs_rw32(ip->i_ffs1_db[lbn], needswap);
		if (nb != 0 && ip->i_ffs1_size >=
		    (uint64_t)lblktosize(fs, lbn + 1)) {

			/*
			 * The block is an already-allocated direct block
			 * and the file already extends past this block,
			 * thus this must be a whole block.
			 * Just read the block (if requested).
			 */

			if (bpp != NULL) {
				error = bread((void *)ip->i_devvp, lbn,
				    fs->fs_bsize, NULL, bpp);
				if (error) {
					brelse(*bpp);
					return (error);
				}
			}
			return (0);
		}
		if (nb != 0) {

			/*
			 * Consider need to reallocate a fragment.
			 */

			osize = fragroundup(fs, blkoff(fs, ip->i_ffs1_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {

				/*
				 * The existing block is already
				 * at least as big as we want.
				 * Just read the block (if requested).
				 */

				if (bpp != NULL) {
					error = bread((void *)ip->i_devvp, lbn,
					    osize, NULL, bpp);
					if (error) {
						brelse(*bpp);
						return (error);
					}
				}
				return 0;
			} else {
				warnx("need to ffs_realloccg; not supported!");
				abort();
			}
		} else {

			/*
			 * the block was not previously allocated,
			 * allocate a new block or fragment.
			 */

			if (ip->i_ffs1_size < (uint64_t)lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs1(ip, lbn, (int)lbn,
				&ip->i_ffs1_db[0]),
			    nsize, &newb);
			if (error)
				return (error);
			if (bpp != NULL) {
				bp = getblk((void *)ip->i_devvp, lbn, nsize,
				    0, 0, 0);
				bp->b_blkno = fsbtodb(fs, newb);
				clrbuf(bp);
				*bpp = bp;
			}
		}
		ip->i_ffs1_db[lbn] = ufs_rw32((int32_t)newb, needswap);
		return (0);
	}

	/*
	 * Determine the number of levels of indirection.
	 */

	pref = 0;
	if ((error = ufs_getlbns(ip, lbn, indirs, &num)) != 0)
		return (error);

	if (num < 1) {
		warnx("ffs_balloc: ufs_getlbns returned indirect block");
		abort();
	}

	/*
	 * Fetch the first indirect block allocating if necessary.
	 */

	--num;
	nb = ufs_rw32(ip->i_ffs1_ib[indirs[0].in_off], needswap);
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error)
			return error;
		nb = newb;
		*allocblk++ = nb;
		bp = getblk((void *)ip->i_devvp, indirs[1].in_lbn,
		    fs->fs_bsize, 0, 0, 0);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0)
			return error;
		allocib = &ip->i_ffs1_ib[indirs[0].in_off];
		*allocib = ufs_rw32((int32_t)nb, needswap);
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */

	for (i = 1;;) {
		error = bread((void *)ip->i_devvp, indirs[i].in_lbn,
		    fs->fs_bsize, NULL, &bp);
		if (error) {
			brelse(bp);
			return error;
		}
		bap = (int32_t *)bp->b_data;
		nb = ufs_rw32(bap[indirs[i].in_off], needswap);
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk((void *)ip->i_devvp, indirs[i].in_lbn,
		    fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */

		if ((error = bwrite(nbp)) != 0) {
			brelse(bp);
			return error;
		}
		bap[indirs[i - 1].in_off] = ufs_rw32(nb, needswap);

		bwrite(bp);
	}

	/*
	 * Get the data block, allocating if necessary.
	 */

	if (nb == 0) {
		pref = ffs_blkpref_ufs1(ip, lbn, indirs[num].in_off, &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		if (bpp != NULL) {
			nbp = getblk((void *)ip->i_devvp, lbn, fs->fs_bsize,
			    0, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
			clrbuf(nbp);
			*bpp = nbp;
		}
		bap[indirs[num].in_off] = ufs_rw32(nb, needswap);

		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		bwrite(bp);
		return (0);
	}
	brelse(bp);
	if (bpp != NULL) {
		error = bread((void *)ip->i_devvp, lbn, (int)fs->fs_bsize,
		    NULL, &nbp);
		if (error) {
			brelse(nbp);
			return error;
		}
		*bpp = nbp;
	}
	return (0);
}

static int
ffs_balloc_ufs2(struct inode *ip, off_t offset, int bufsize,
    struct m_buf **bpp)
{
	daddr_t lbn, lastlbn;
	int size;
	struct m_buf *bp, *nbp;
	struct fs *fs = ip->i_fs;
	struct indir indirs[UFS_NIADDR + 2];
	daddr_t newb, pref, nb;
	int64_t *bap;
	int osize, nsize, num, i, error;
	int64_t *allocblk, allociblk[UFS_NIADDR + 1];
	int64_t *allocib;
	const int needswap = UFS_FSNEEDSWAP(fs);

	lbn = lblkno(fs, offset);
	size = blkoff(fs, offset) + bufsize;
	if (bpp != NULL) {
		*bpp = NULL;
	}

	assert(size <= fs->fs_bsize);
	if (lbn < 0)
		return (EFBIG);

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment
	 * this fragment has to be extended to be a full block.
	 */

	lastlbn = lblkno(fs, ip->i_ffs2_size);
	if (lastlbn < UFS_NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			warnx("need to ffs_realloccg; not supported!");
			abort();
		}
	}

	/*
	 * The first UFS_NDADDR blocks are direct blocks
	 */

	if (lbn < UFS_NDADDR) {
		nb = ufs_rw64(ip->i_ffs2_db[lbn], needswap);
		if (nb != 0 && ip->i_ffs2_size >=
		    (uint64_t)lblktosize(fs, lbn + 1)) {

			/*
			 * The block is an already-allocated direct block
			 * and the file already extends past this block,
			 * thus this must be a whole block.
			 * Just read the block (if requested).
			 */

			if (bpp != NULL) {
				error = bread((void *)ip->i_devvp, lbn,
				    fs->fs_bsize, NULL, bpp);
				if (error) {
					brelse(*bpp);
					return (error);
				}
			}
			return (0);
		}
		if (nb != 0) {

			/*
			 * Consider need to reallocate a fragment.
			 */

			osize = fragroundup(fs, blkoff(fs, ip->i_ffs2_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {

				/*
				 * The existing block is already
				 * at least as big as we want.
				 * Just read the block (if requested).
				 */

				if (bpp != NULL) {
					error = bread((void *)ip->i_devvp, lbn,
					    osize, NULL, bpp);
					if (error) {
						brelse(*bpp);
						return (error);
					}
				}
				return 0;
			} else {
				warnx("need to ffs_realloccg; not supported!");
				abort();
			}
		} else {

			/*
			 * the block was not previously allocated,
			 * allocate a new block or fragment.
			 */

			if (ip->i_ffs2_size < (uint64_t)lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs2(ip, lbn, (int)lbn,
				&ip->i_ffs2_db[0]),
			    nsize, &newb);
			if (error)
				return (error);
			if (bpp != NULL) {
				bp = getblk((void *)ip->i_devvp, lbn, nsize,
				    0, 0, 0);
				bp->b_blkno = fsbtodb(fs, newb);
				clrbuf(bp);
				*bpp = bp;
			}
		}
		ip->i_ffs2_db[lbn] = ufs_rw64(newb, needswap);
		return (0);
	}

	/*
	 * Determine the number of levels of indirection.
	 */

	pref = 0;
	if ((error = ufs_getlbns(ip, lbn, indirs, &num)) != 0)
		return (error);

	if (num < 1) {
		warnx("ffs_balloc: ufs_getlbns returned indirect block");
		abort();
	}

	/*
	 * Fetch the first indirect block allocating if necessary.
	 */

	--num;
	nb = ufs_rw64(ip->i_ffs2_ib[indirs[0].in_off], needswap);
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref_ufs2(ip, lbn, 0, (int64_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error)
			return error;
		nb = newb;
		*allocblk++ = nb;
		bp = getblk((void *)ip->i_devvp, indirs[1].in_lbn,
		    fs->fs_bsize, 0, 0, 0);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0)
			return error;
		allocib = &ip->i_ffs2_ib[indirs[0].in_off];
		*allocib = ufs_rw64(nb, needswap);
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */

	for (i = 1;;) {
		error = bread((void *)ip->i_devvp, indirs[i].in_lbn,
		    fs->fs_bsize, NULL, &bp);
		if (error) {
			brelse(bp);
			return error;
		}
		bap = (int64_t *)bp->b_data;
		nb = ufs_rw64(bap[indirs[i].in_off], needswap);
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref_ufs2(ip, lbn, 0, (int64_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk((void *)ip->i_devvp, indirs[i].in_lbn,
		    fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */

		if ((error = bwrite(nbp)) != 0) {
			brelse(bp);
			return error;
		}
		bap[indirs[i - 1].in_off] = ufs_rw64(nb, needswap);

		bwrite(bp);
	}

	/*
	 * Get the data block, allocating if necessary.
	 */

	if (nb == 0) {
		pref = ffs_blkpref_ufs2(ip, lbn, indirs[num].in_off, &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		if (bpp != NULL) {
			nbp = getblk((void *)ip->i_devvp, lbn, fs->fs_bsize,
			    0, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
			clrbuf(nbp);
			*bpp = nbp;
		}
		bap[indirs[num].in_off] = ufs_rw64(nb, needswap);

		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		bwrite(bp);
		return (0);
	}
	brelse(bp);
	if (bpp != NULL) {
		error = bread((void *)ip->i_devvp, lbn, (int)fs->fs_bsize,
		    NULL, &nbp);
		if (error) {
			brelse(nbp);
			return error;
		}
		*bpp = nbp;
	}
	return (0);
}
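
/*
 * Illustrative sketch only (kept under #if 0 so it is never compiled):
 * one way a caller, in the spirit of makefs's file-writing loop, might
 * drive ffs_balloc() above to lay out a file's data blocks.  The helper
 * name, its arguments, and the surrounding setup are hypothetical; only
 * ffs_balloc(), bwrite(), and struct m_buf come from this file.  It
 * assumes the starting offset is block aligned, since ffs_balloc()
 * asserts that a request never spans a block boundary
 * (blkoff(fs, offset) + bufsize <= fs_bsize), and that the caller has
 * already set the inode size (i_ffs1_size / i_ffs2_size), which
 * ffs_balloc() consults to choose between a fragment and a full block.
 */
#if 0
static int
example_write_file_data(struct inode *ip, off_t offset, void *src, size_t len)
{
	struct fs *fs = ip->i_fs;
	struct m_buf *bp;
	int chunk, error;

	while (len > 0) {
		/* Never let a single request cross a block boundary. */
		chunk = len > (size_t)fs->fs_bsize ?
		    fs->fs_bsize : (int)len;

		/* Allocate backing store and get a buffer for this block. */
		error = ffs_balloc(ip, offset, chunk, &bp);
		if (error != 0)
			return (error);

		memcpy(bp->b_data, src, chunk);

		/* ffs_balloc() assumes B_SYNC, so push the data out now. */
		error = bwrite(bp);
		if (error != 0)
			return (error);

		src = (char *)src + chunk;
		offset += chunk;
		len -= (size_t)chunk;
	}
	return (0);
}
#endif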