/*
 * Copyright (C) International Business Machines Corp., 2000-2004
 *
 * This program is free software;  you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY;  without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program;  if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_lock.h"
#include "jfs_metapage.h"
#include "jfs_debug.h"

/*
 *	SERIALIZATION of the Block Allocation Map.
 *
 *	the working state of the block allocation map is accessed in
 *	two directions:
 *
 *	1) allocation and free requests that start at the dmap
 *	   level and move up through the dmap control pages (i.e.
 *	   the vast majority of requests).
 *
 *	2) allocation requests that start at the dmap control page
 *	   level and work down towards the dmaps.
 *
 *	the serialization scheme used here is as follows.
 *
 *	requests which start at the bottom are serialized against each
 *	other through buffers and each request holds onto its buffers
 *	as it works its way up from a single dmap to the required level
 *	of dmap control page.
 *	requests that start at the top are serialized against each other
 *	and against requests that start from the bottom by the multiple
 *	read/single write inode lock of the bmap inode. requests starting
 *	at the top take this lock in write mode while requests starting
 *	at the bottom take the lock in read mode. a single top-down
 *	request may proceed exclusively while multiple bottom-up requests
 *	may proceed simultaneously (under the protection of busy buffers).
 *
 *	in addition to information found in dmaps and dmap control pages,
 *	the working state of the block allocation map also includes read/
 *	write information maintained in the bmap descriptor (i.e. total
 *	free block count, allocation group level free block counts).
 *	a single exclusive lock (BMAP_LOCK) is used to guard this
 *	information in the face of multiple bottom-up requests.
 *	(lock ordering: IREAD_LOCK, BMAP_LOCK);
 *
 *	accesses to the persistent state of the block allocation map
 *	(limited to the persistent bitmaps in dmaps) are guarded by
 *	(busy) buffers.
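 *
 *	as an illustration of that ordering (a sketch only, call sites
 *	simplified), a bottom-up request such as a free proceeds roughly:
 *
 *		IREAD_LOCK(ipbmap);   read mode: many bottom-up requests at once
 *		...walk the dmap(s), holding their buffers, up the dmtree...
 *		BMAP_LOCK(bmp);       guard db_nfree / db_agfree updates
 *		BMAP_UNLOCK(bmp);
 *		IREAD_UNLOCK(ipbmap);
 *
 *	a top-down request instead takes IWRITE_LOCK(ipbmap) and therefore
 *	runs exclusively of all bottom-up requests.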
 */

#define BMAP_LOCK_INIT(bmp)	init_MUTEX(&bmp->db_bmaplock)
#define BMAP_LOCK(bmp)		down(&bmp->db_bmaplock)
#define BMAP_UNLOCK(bmp)	up(&bmp->db_bmaplock)

/*
 * forward references
 */
static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
			int nblocks);
static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
static void dbBackSplit(dmtree_t * tp, int leafno);
static void dbJoin(dmtree_t * tp, int leafno, int newval);
static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
		    int level);
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks);
static int dbAllocNear(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks,
		       int l2nb, s64 * results);
static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks);
static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks,
			  int l2nb,
			  s64 * results);
static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb,
		     s64 * results);
static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
		      s64 * results);
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
static int dbFindBits(u32 word, int l2nb);
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks);
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
		      int nblocks);
static int dbMaxBud(u8 * cp);
s64 dbMapFileSizeToMapSize(struct inode *ipbmap);
static int blkstol2(s64 nb);

static int cntlz(u32 value);
static int cnttz(u32 word);

static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
			 int nblocks);
static int dbInitDmap(struct dmap * dp, s64 blkno, int nblocks);
static int dbInitDmapTree(struct dmap * dp);
static int dbInitTree(struct dmaptree * dtp);
static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i);
static int dbGetL2AGSize(s64 nblocks);

/*
 *	buddy table
 *
 *	table used for determining buddy sizes within characters of
 *	dmap bitmap words.  the characters themselves serve as indexes
 *	into the table, with the table elements yielding the maximum
 *	binary buddy of free bits within the character.
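 *
 *	for example (values read straight from the table below, where a
 *	zero bit in the working map means the block is free):
 *	budtab[0x00] = 3, since a fully free character holds a binary
 *	buddy of 2^3 = 8 free bits; budtab[0x0f] = 2, since only the
 *	high nibble is free (a buddy of 4); budtab[0xff] = -1 (NOFREE),
 *	since no bit of the character is free.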
127 */ 128 static s8 budtab[256] = { 129 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 130 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 131 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 132 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 133 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 134 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 135 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 136 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 137 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 138 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 139 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 140 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 141 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 142 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 143 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 144 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1 145 }; 146 147 148 /* 149 * NAME: dbMount() 150 * 151 * FUNCTION: initializate the block allocation map. 152 * 153 * memory is allocated for the in-core bmap descriptor and 154 * the in-core descriptor is initialized from disk. 155 * 156 * PARAMETERS: 157 * ipbmap - pointer to in-core inode for the block map. 158 * 159 * RETURN VALUES: 160 * 0 - success 161 * -ENOMEM - insufficient memory 162 * -EIO - i/o error 163 */ 164 int dbMount(struct inode *ipbmap) 165 { 166 struct bmap *bmp; 167 struct dbmap_disk *dbmp_le; 168 struct metapage *mp; 169 int i; 170 171 /* 172 * allocate/initialize the in-memory bmap descriptor 173 */ 174 /* allocate memory for the in-memory bmap descriptor */ 175 bmp = kmalloc(sizeof(struct bmap), GFP_KERNEL); 176 if (bmp == NULL) 177 return -ENOMEM; 178 179 /* read the on-disk bmap descriptor. */ 180 mp = read_metapage(ipbmap, 181 BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage, 182 PSIZE, 0); 183 if (mp == NULL) { 184 kfree(bmp); 185 return -EIO; 186 } 187 188 /* copy the on-disk bmap descriptor to its in-memory version. */ 189 dbmp_le = (struct dbmap_disk *) mp->data; 190 bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize); 191 bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree); 192 bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage); 193 bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag); 194 bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel); 195 bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag); 196 bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref); 197 bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel); 198 bmp->db_agheigth = le32_to_cpu(dbmp_le->dn_agheigth); 199 bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); 200 bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); 201 bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); 202 for (i = 0; i < MAXAG; i++) 203 bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]); 204 bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize); 205 bmp->db_maxfreebud = dbmp_le->dn_maxfreebud; 206 207 /* release the buffer. */ 208 release_metapage(mp); 209 210 /* bind the bmap inode and the bmap descriptor to each other. */ 211 bmp->db_ipbmap = ipbmap; 212 JFS_SBI(ipbmap->i_sb)->bmap = bmp; 213 214 memset(bmp->db_active, 0, sizeof(bmp->db_active)); 215 216 /* 217 * allocate/initialize the bmap lock 218 */ 219 BMAP_LOCK_INIT(bmp); 220 221 return (0); 222 } 223 224 225 /* 226 * NAME: dbUnmount() 227 * 228 * FUNCTION: terminate the block allocation map in preparation for 229 * file system unmount. 230 * 231 * the in-core bmap descriptor is written to disk and 232 * the memory for this descriptor is freed. 233 * 234 * PARAMETERS: 235 * ipbmap - pointer to in-core inode for the block map. 
236 * 237 * RETURN VALUES: 238 * 0 - success 239 * -EIO - i/o error 240 */ 241 int dbUnmount(struct inode *ipbmap, int mounterror) 242 { 243 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap; 244 245 if (!(mounterror || isReadOnly(ipbmap))) 246 dbSync(ipbmap); 247 248 /* 249 * Invalidate the page cache buffers 250 */ 251 truncate_inode_pages(ipbmap->i_mapping, 0); 252 253 /* free the memory for the in-memory bmap. */ 254 kfree(bmp); 255 256 return (0); 257 } 258 259 /* 260 * dbSync() 261 */ 262 int dbSync(struct inode *ipbmap) 263 { 264 struct dbmap_disk *dbmp_le; 265 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap; 266 struct metapage *mp; 267 int i; 268 269 /* 270 * write bmap global control page 271 */ 272 /* get the buffer for the on-disk bmap descriptor. */ 273 mp = read_metapage(ipbmap, 274 BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage, 275 PSIZE, 0); 276 if (mp == NULL) { 277 jfs_err("dbSync: read_metapage failed!"); 278 return -EIO; 279 } 280 /* copy the in-memory version of the bmap to the on-disk version */ 281 dbmp_le = (struct dbmap_disk *) mp->data; 282 dbmp_le->dn_mapsize = cpu_to_le64(bmp->db_mapsize); 283 dbmp_le->dn_nfree = cpu_to_le64(bmp->db_nfree); 284 dbmp_le->dn_l2nbperpage = cpu_to_le32(bmp->db_l2nbperpage); 285 dbmp_le->dn_numag = cpu_to_le32(bmp->db_numag); 286 dbmp_le->dn_maxlevel = cpu_to_le32(bmp->db_maxlevel); 287 dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag); 288 dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref); 289 dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel); 290 dbmp_le->dn_agheigth = cpu_to_le32(bmp->db_agheigth); 291 dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth); 292 dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart); 293 dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size); 294 for (i = 0; i < MAXAG; i++) 295 dbmp_le->dn_agfree[i] = cpu_to_le64(bmp->db_agfree[i]); 296 dbmp_le->dn_agsize = cpu_to_le64(bmp->db_agsize); 297 dbmp_le->dn_maxfreebud = bmp->db_maxfreebud; 298 299 /* write the buffer */ 300 write_metapage(mp); 301 302 /* 303 * write out dirty pages of bmap 304 */ 305 filemap_fdatawrite(ipbmap->i_mapping); 306 filemap_fdatawait(ipbmap->i_mapping); 307 308 ipbmap->i_state |= I_DIRTY; 309 diWriteSpecial(ipbmap, 0); 310 311 return (0); 312 } 313 314 315 /* 316 * NAME: dbFree() 317 * 318 * FUNCTION: free the specified block range from the working block 319 * allocation map. 320 * 321 * the blocks will be free from the working map one dmap 322 * at a time. 323 * 324 * PARAMETERS: 325 * ip - pointer to in-core inode; 326 * blkno - starting block number to be freed. 327 * nblocks - number of blocks to be freed. 328 * 329 * RETURN VALUES: 330 * 0 - success 331 * -EIO - i/o error 332 */ 333 int dbFree(struct inode *ip, s64 blkno, s64 nblocks) 334 { 335 struct metapage *mp; 336 struct dmap *dp; 337 int nb, rc; 338 s64 lblkno, rem; 339 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap; 340 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap; 341 342 IREAD_LOCK(ipbmap); 343 344 /* block to be freed better be within the mapsize. */ 345 if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) { 346 IREAD_UNLOCK(ipbmap); 347 printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n", 348 (unsigned long long) blkno, 349 (unsigned long long) nblocks); 350 jfs_error(ip->i_sb, 351 "dbFree: block to be freed is outside the map"); 352 return -EIO; 353 } 354 355 /* 356 * free the blocks a dmap at a time. 
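	 *
	 * for example (illustrative numbers), freeing nblocks = 10
	 * starting two blocks before a dmap boundary takes two passes:
	 * the first pass computes
	 *	nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1))) = 2
	 * and frees the tail of the first dmap; the second pass frees
	 * the remaining 8 blocks from the start of the following dmap.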
357 */ 358 mp = NULL; 359 for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) { 360 /* release previous dmap if any */ 361 if (mp) { 362 write_metapage(mp); 363 } 364 365 /* get the buffer for the current dmap. */ 366 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage); 367 mp = read_metapage(ipbmap, lblkno, PSIZE, 0); 368 if (mp == NULL) { 369 IREAD_UNLOCK(ipbmap); 370 return -EIO; 371 } 372 dp = (struct dmap *) mp->data; 373 374 /* determine the number of blocks to be freed from 375 * this dmap. 376 */ 377 nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1))); 378 379 /* free the blocks. */ 380 if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) { 381 release_metapage(mp); 382 IREAD_UNLOCK(ipbmap); 383 return (rc); 384 } 385 } 386 387 /* write the last buffer. */ 388 write_metapage(mp); 389 390 IREAD_UNLOCK(ipbmap); 391 392 return (0); 393 } 394 395 396 /* 397 * NAME: dbUpdatePMap() 398 * 399 * FUNCTION: update the allocation state (free or allocate) of the 400 * specified block range in the persistent block allocation map. 401 * 402 * the blocks will be updated in the persistent map one 403 * dmap at a time. 404 * 405 * PARAMETERS: 406 * ipbmap - pointer to in-core inode for the block map. 407 * free - TRUE if block range is to be freed from the persistent 408 * map; FALSE if it is to be allocated. 409 * blkno - starting block number of the range. 410 * nblocks - number of contiguous blocks in the range. 411 * tblk - transaction block; 412 * 413 * RETURN VALUES: 414 * 0 - success 415 * -EIO - i/o error 416 */ 417 int 418 dbUpdatePMap(struct inode *ipbmap, 419 int free, s64 blkno, s64 nblocks, struct tblock * tblk) 420 { 421 int nblks, dbitno, wbitno, rbits; 422 int word, nbits, nwords; 423 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap; 424 s64 lblkno, rem, lastlblkno; 425 u32 mask; 426 struct dmap *dp; 427 struct metapage *mp; 428 struct jfs_log *log; 429 int lsn, difft, diffp; 430 unsigned long flags; 431 432 /* the blocks better be within the mapsize. */ 433 if (blkno + nblocks > bmp->db_mapsize) { 434 printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n", 435 (unsigned long long) blkno, 436 (unsigned long long) nblocks); 437 jfs_error(ipbmap->i_sb, 438 "dbUpdatePMap: blocks are outside the map"); 439 return -EIO; 440 } 441 442 /* compute delta of transaction lsn from log syncpt */ 443 lsn = tblk->lsn; 444 log = (struct jfs_log *) JFS_SBI(tblk->sb)->log; 445 logdiff(difft, lsn, log); 446 447 /* 448 * update the block state a dmap at a time. 449 */ 450 mp = NULL; 451 lastlblkno = 0; 452 for (rem = nblocks; rem > 0; rem -= nblks, blkno += nblks) { 453 /* get the buffer for the current dmap. */ 454 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage); 455 if (lblkno != lastlblkno) { 456 if (mp) { 457 write_metapage(mp); 458 } 459 460 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 461 0); 462 if (mp == NULL) 463 return -EIO; 464 metapage_wait_for_io(mp); 465 } 466 dp = (struct dmap *) mp->data; 467 468 /* determine the bit number and word within the dmap of 469 * the starting block. also determine how many blocks 470 * are to be updated within this dmap. 471 */ 472 dbitno = blkno & (BPERDMAP - 1); 473 word = dbitno >> L2DBWORD; 474 nblks = min(rem, (s64)BPERDMAP - dbitno); 475 476 /* update the bits of the dmap words. the first and last 477 * words may only have a subset of their bits updated. if 478 * this is the case, we'll work against that word (i.e. 479 * partial first and/or last) only in a single pass. a 480 * single pass will also be used to update all words that 481 * are to have all their bits updated. 
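		 *
		 * for example (worked values, assuming the usual 32-bit
		 * map words): updating nbits = 4 bits starting at
		 * wbitno = 8 gives
		 *	mask = (ONES << (DBWORD - nbits)) >> wbitno
		 *	     = (0xffffffff << 28) >> 8 = 0x00f00000,
		 * i.e. exactly bits 8..11 counted from the most
		 * significant end of the word.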
482 */ 483 for (rbits = nblks; rbits > 0; 484 rbits -= nbits, dbitno += nbits) { 485 /* determine the bit number within the word and 486 * the number of bits within the word. 487 */ 488 wbitno = dbitno & (DBWORD - 1); 489 nbits = min(rbits, DBWORD - wbitno); 490 491 /* check if only part of the word is to be updated. */ 492 if (nbits < DBWORD) { 493 /* update (free or allocate) the bits 494 * in this word. 495 */ 496 mask = 497 (ONES << (DBWORD - nbits) >> wbitno); 498 if (free) 499 dp->pmap[word] &= 500 cpu_to_le32(~mask); 501 else 502 dp->pmap[word] |= 503 cpu_to_le32(mask); 504 505 word += 1; 506 } else { 507 /* one or more words are to have all 508 * their bits updated. determine how 509 * many words and how many bits. 510 */ 511 nwords = rbits >> L2DBWORD; 512 nbits = nwords << L2DBWORD; 513 514 /* update (free or allocate) the bits 515 * in these words. 516 */ 517 if (free) 518 memset(&dp->pmap[word], 0, 519 nwords * 4); 520 else 521 memset(&dp->pmap[word], (int) ONES, 522 nwords * 4); 523 524 word += nwords; 525 } 526 } 527 528 /* 529 * update dmap lsn 530 */ 531 if (lblkno == lastlblkno) 532 continue; 533 534 lastlblkno = lblkno; 535 536 if (mp->lsn != 0) { 537 /* inherit older/smaller lsn */ 538 logdiff(diffp, mp->lsn, log); 539 LOGSYNC_LOCK(log, flags); 540 if (difft < diffp) { 541 mp->lsn = lsn; 542 543 /* move bp after tblock in logsync list */ 544 list_move(&mp->synclist, &tblk->synclist); 545 } 546 547 /* inherit younger/larger clsn */ 548 logdiff(difft, tblk->clsn, log); 549 logdiff(diffp, mp->clsn, log); 550 if (difft > diffp) 551 mp->clsn = tblk->clsn; 552 LOGSYNC_UNLOCK(log, flags); 553 } else { 554 mp->log = log; 555 mp->lsn = lsn; 556 557 /* insert bp after tblock in logsync list */ 558 LOGSYNC_LOCK(log, flags); 559 560 log->count++; 561 list_add(&mp->synclist, &tblk->synclist); 562 563 mp->clsn = tblk->clsn; 564 LOGSYNC_UNLOCK(log, flags); 565 } 566 } 567 568 /* write the last buffer. */ 569 if (mp) { 570 write_metapage(mp); 571 } 572 573 return (0); 574 } 575 576 577 /* 578 * NAME: dbNextAG() 579 * 580 * FUNCTION: find the preferred allocation group for new allocations. 581 * 582 * Within the allocation groups, we maintain a preferred 583 * allocation group which consists of a group with at least 584 * average free space. It is the preferred group that we target 585 * new inode allocation towards. The tie-in between inode 586 * allocation and block allocation occurs as we allocate the 587 * first (data) block of an inode and specify the inode (block) 588 * as the allocation hint for this block. 589 * 590 * We try to avoid having more than one open file growing in 591 * an allocation group, as this will lead to fragmentation. 592 * This differs from the old OS/2 method of trying to keep 593 * empty ags around for large allocations. 594 * 595 * PARAMETERS: 596 * ipbmap - pointer to in-core inode for the block map. 597 * 598 * RETURN VALUES: 599 * the preferred allocation group number. 600 */ 601 int dbNextAG(struct inode *ipbmap) 602 { 603 s64 avgfree; 604 int agpref; 605 s64 hwm = 0; 606 int i; 607 int next_best = -1; 608 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap; 609 610 BMAP_LOCK(bmp); 611 612 /* determine the average number of free blocks within the ags. 
*/ 613 avgfree = (u32)bmp->db_nfree / bmp->db_numag; 614 615 /* 616 * if the current preferred ag does not have an active allocator 617 * and has at least average freespace, return it 618 */ 619 agpref = bmp->db_agpref; 620 if ((atomic_read(&bmp->db_active[agpref]) == 0) && 621 (bmp->db_agfree[agpref] >= avgfree)) 622 goto unlock; 623 624 /* From the last preferred ag, find the next one with at least 625 * average free space. 626 */ 627 for (i = 0 ; i < bmp->db_numag; i++, agpref++) { 628 if (agpref == bmp->db_numag) 629 agpref = 0; 630 631 if (atomic_read(&bmp->db_active[agpref])) 632 /* open file is currently growing in this ag */ 633 continue; 634 if (bmp->db_agfree[agpref] >= avgfree) { 635 /* Return this one */ 636 bmp->db_agpref = agpref; 637 goto unlock; 638 } else if (bmp->db_agfree[agpref] > hwm) { 639 /* Less than avg. freespace, but best so far */ 640 hwm = bmp->db_agfree[agpref]; 641 next_best = agpref; 642 } 643 } 644 645 /* 646 * If no inactive ag was found with average freespace, use the 647 * next best 648 */ 649 if (next_best != -1) 650 bmp->db_agpref = next_best; 651 /* else leave db_agpref unchanged */ 652 unlock: 653 BMAP_UNLOCK(bmp); 654 655 /* return the preferred group. 656 */ 657 return (bmp->db_agpref); 658 } 659 660 /* 661 * NAME: dbAlloc() 662 * 663 * FUNCTION: attempt to allocate a specified number of contiguous free 664 * blocks from the working allocation block map. 665 * 666 * the block allocation policy uses hints and a multi-step 667 * approach. 668 * 669 * for allocation requests smaller than the number of blocks 670 * per dmap, we first try to allocate the new blocks 671 * immediately following the hint. if these blocks are not 672 * available, we try to allocate blocks near the hint. if 673 * no blocks near the hint are available, we next try to 674 * allocate within the same dmap as contains the hint. 675 * 676 * if no blocks are available in the dmap or the allocation 677 * request is larger than the dmap size, we try to allocate 678 * within the same allocation group as contains the hint. if 679 * this does not succeed, we finally try to allocate anywhere 680 * within the aggregate. 681 * 682 * we also try to allocate anywhere within the aggregate for 683 * for allocation requests larger than the allocation group 684 * size or requests that specify no hint value. 685 * 686 * PARAMETERS: 687 * ip - pointer to in-core inode; 688 * hint - allocation hint. 689 * nblocks - number of contiguous blocks in the range. 690 * results - on successful return, set to the starting block number 691 * of the newly allocated contiguous range. 692 * 693 * RETURN VALUES: 694 * 0 - success 695 * -ENOSPC - insufficient disk resources 696 * -EIO - i/o error 697 */ 698 int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results) 699 { 700 int rc, agno; 701 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap; 702 struct bmap *bmp; 703 struct metapage *mp; 704 s64 lblkno, blkno; 705 struct dmap *dp; 706 int l2nb; 707 s64 mapSize; 708 int writers; 709 710 /* assert that nblocks is valid */ 711 assert(nblocks > 0); 712 713 #ifdef _STILL_TO_PORT 714 /* DASD limit check F226941 */ 715 if (OVER_LIMIT(ip, nblocks)) 716 return -ENOSPC; 717 #endif /* _STILL_TO_PORT */ 718 719 /* get the log2 number of blocks to be allocated. 720 * if the number of blocks is not a log2 multiple, 721 * it will be rounded up to the next log2 multiple. 
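	 *
	 * for example, nblocks = 5 is not a power of two, so l2nb
	 * becomes 3 (5 rounded up to 8 = 2^3); an exact power of two
	 * such as 16 gives l2nb = 4 with no rounding.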
722 */ 723 l2nb = BLKSTOL2(nblocks); 724 725 bmp = JFS_SBI(ip->i_sb)->bmap; 726 727 //retry: /* serialize w.r.t.extendfs() */ 728 mapSize = bmp->db_mapsize; 729 730 /* the hint should be within the map */ 731 if (hint >= mapSize) { 732 jfs_error(ip->i_sb, "dbAlloc: the hint is outside the map"); 733 return -EIO; 734 } 735 736 /* if the number of blocks to be allocated is greater than the 737 * allocation group size, try to allocate anywhere. 738 */ 739 if (l2nb > bmp->db_agl2size) { 740 IWRITE_LOCK(ipbmap); 741 742 rc = dbAllocAny(bmp, nblocks, l2nb, results); 743 744 goto write_unlock; 745 } 746 747 /* 748 * If no hint, let dbNextAG recommend an allocation group 749 */ 750 if (hint == 0) 751 goto pref_ag; 752 753 /* we would like to allocate close to the hint. adjust the 754 * hint to the block following the hint since the allocators 755 * will start looking for free space starting at this point. 756 */ 757 blkno = hint + 1; 758 759 if (blkno >= bmp->db_mapsize) 760 goto pref_ag; 761 762 agno = blkno >> bmp->db_agl2size; 763 764 /* check if blkno crosses over into a new allocation group. 765 * if so, check if we should allow allocations within this 766 * allocation group. 767 */ 768 if ((blkno & (bmp->db_agsize - 1)) == 0) 769 /* check if the AG is currenly being written to. 770 * if so, call dbNextAG() to find a non-busy 771 * AG with sufficient free space. 772 */ 773 if (atomic_read(&bmp->db_active[agno])) 774 goto pref_ag; 775 776 /* check if the allocation request size can be satisfied from a 777 * single dmap. if so, try to allocate from the dmap containing 778 * the hint using a tiered strategy. 779 */ 780 if (nblocks <= BPERDMAP) { 781 IREAD_LOCK(ipbmap); 782 783 /* get the buffer for the dmap containing the hint. 784 */ 785 rc = -EIO; 786 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage); 787 mp = read_metapage(ipbmap, lblkno, PSIZE, 0); 788 if (mp == NULL) 789 goto read_unlock; 790 791 dp = (struct dmap *) mp->data; 792 793 /* first, try to satisfy the allocation request with the 794 * blocks beginning at the hint. 795 */ 796 if ((rc = dbAllocNext(bmp, dp, blkno, (int) nblocks)) 797 != -ENOSPC) { 798 if (rc == 0) { 799 *results = blkno; 800 mark_metapage_dirty(mp); 801 } 802 803 release_metapage(mp); 804 goto read_unlock; 805 } 806 807 writers = atomic_read(&bmp->db_active[agno]); 808 if ((writers > 1) || 809 ((writers == 1) && (JFS_IP(ip)->active_ag != agno))) { 810 /* 811 * Someone else is writing in this allocation 812 * group. To avoid fragmenting, try another ag 813 */ 814 release_metapage(mp); 815 IREAD_UNLOCK(ipbmap); 816 goto pref_ag; 817 } 818 819 /* next, try to satisfy the allocation request with blocks 820 * near the hint. 821 */ 822 if ((rc = 823 dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results)) 824 != -ENOSPC) { 825 if (rc == 0) 826 mark_metapage_dirty(mp); 827 828 release_metapage(mp); 829 goto read_unlock; 830 } 831 832 /* try to satisfy the allocation request with blocks within 833 * the same dmap as the hint. 834 */ 835 if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results)) 836 != -ENOSPC) { 837 if (rc == 0) 838 mark_metapage_dirty(mp); 839 840 release_metapage(mp); 841 goto read_unlock; 842 } 843 844 release_metapage(mp); 845 IREAD_UNLOCK(ipbmap); 846 } 847 848 /* try to satisfy the allocation request with blocks within 849 * the same allocation group as the hint. 
850 */ 851 IWRITE_LOCK(ipbmap); 852 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC) 853 goto write_unlock; 854 855 IWRITE_UNLOCK(ipbmap); 856 857 858 pref_ag: 859 /* 860 * Let dbNextAG recommend a preferred allocation group 861 */ 862 agno = dbNextAG(ipbmap); 863 IWRITE_LOCK(ipbmap); 864 865 /* Try to allocate within this allocation group. if that fails, try to 866 * allocate anywhere in the map. 867 */ 868 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC) 869 rc = dbAllocAny(bmp, nblocks, l2nb, results); 870 871 write_unlock: 872 IWRITE_UNLOCK(ipbmap); 873 874 return (rc); 875 876 read_unlock: 877 IREAD_UNLOCK(ipbmap); 878 879 return (rc); 880 } 881 882 #ifdef _NOTYET 883 /* 884 * NAME: dbAllocExact() 885 * 886 * FUNCTION: try to allocate the requested extent; 887 * 888 * PARAMETERS: 889 * ip - pointer to in-core inode; 890 * blkno - extent address; 891 * nblocks - extent length; 892 * 893 * RETURN VALUES: 894 * 0 - success 895 * -ENOSPC - insufficient disk resources 896 * -EIO - i/o error 897 */ 898 int dbAllocExact(struct inode *ip, s64 blkno, int nblocks) 899 { 900 int rc; 901 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap; 902 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap; 903 struct dmap *dp; 904 s64 lblkno; 905 struct metapage *mp; 906 907 IREAD_LOCK(ipbmap); 908 909 /* 910 * validate extent request: 911 * 912 * note: defragfs policy: 913 * max 64 blocks will be moved. 914 * allocation request size must be satisfied from a single dmap. 915 */ 916 if (nblocks <= 0 || nblocks > BPERDMAP || blkno >= bmp->db_mapsize) { 917 IREAD_UNLOCK(ipbmap); 918 return -EINVAL; 919 } 920 921 if (nblocks > ((s64) 1 << bmp->db_maxfreebud)) { 922 /* the free space is no longer available */ 923 IREAD_UNLOCK(ipbmap); 924 return -ENOSPC; 925 } 926 927 /* read in the dmap covering the extent */ 928 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage); 929 mp = read_metapage(ipbmap, lblkno, PSIZE, 0); 930 if (mp == NULL) { 931 IREAD_UNLOCK(ipbmap); 932 return -EIO; 933 } 934 dp = (struct dmap *) mp->data; 935 936 /* try to allocate the requested extent */ 937 rc = dbAllocNext(bmp, dp, blkno, nblocks); 938 939 IREAD_UNLOCK(ipbmap); 940 941 if (rc == 0) 942 mark_metapage_dirty(mp); 943 944 release_metapage(mp); 945 946 return (rc); 947 } 948 #endif /* _NOTYET */ 949 950 /* 951 * NAME: dbReAlloc() 952 * 953 * FUNCTION: attempt to extend a current allocation by a specified 954 * number of blocks. 955 * 956 * this routine attempts to satisfy the allocation request 957 * by first trying to extend the existing allocation in 958 * place by allocating the additional blocks as the blocks 959 * immediately following the current allocation. if these 960 * blocks are not available, this routine will attempt to 961 * allocate a new set of contiguous blocks large enough 962 * to cover the existing allocation plus the additional 963 * number of blocks required. 964 * 965 * PARAMETERS: 966 * ip - pointer to in-core inode requiring allocation. 967 * blkno - starting block of the current allocation. 968 * nblocks - number of contiguous blocks within the current 969 * allocation. 970 * addnblocks - number of blocks to add to the allocation. 971 * results - on successful return, set to the starting block number 972 * of the existing allocation if the existing allocation 973 * was extended in place or to a newly allocated contiguous 974 * range if the existing allocation could not be extended 975 * in place. 
976 * 977 * RETURN VALUES: 978 * 0 - success 979 * -ENOSPC - insufficient disk resources 980 * -EIO - i/o error 981 */ 982 int 983 dbReAlloc(struct inode *ip, 984 s64 blkno, s64 nblocks, s64 addnblocks, s64 * results) 985 { 986 int rc; 987 988 /* try to extend the allocation in place. 989 */ 990 if ((rc = dbExtend(ip, blkno, nblocks, addnblocks)) == 0) { 991 *results = blkno; 992 return (0); 993 } else { 994 if (rc != -ENOSPC) 995 return (rc); 996 } 997 998 /* could not extend the allocation in place, so allocate a 999 * new set of blocks for the entire request (i.e. try to get 1000 * a range of contiguous blocks large enough to cover the 1001 * existing allocation plus the additional blocks.) 1002 */ 1003 return (dbAlloc 1004 (ip, blkno + nblocks - 1, addnblocks + nblocks, results)); 1005 } 1006 1007 1008 /* 1009 * NAME: dbExtend() 1010 * 1011 * FUNCTION: attempt to extend a current allocation by a specified 1012 * number of blocks. 1013 * 1014 * this routine attempts to satisfy the allocation request 1015 * by first trying to extend the existing allocation in 1016 * place by allocating the additional blocks as the blocks 1017 * immediately following the current allocation. 1018 * 1019 * PARAMETERS: 1020 * ip - pointer to in-core inode requiring allocation. 1021 * blkno - starting block of the current allocation. 1022 * nblocks - number of contiguous blocks within the current 1023 * allocation. 1024 * addnblocks - number of blocks to add to the allocation. 1025 * 1026 * RETURN VALUES: 1027 * 0 - success 1028 * -ENOSPC - insufficient disk resources 1029 * -EIO - i/o error 1030 */ 1031 static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks) 1032 { 1033 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); 1034 s64 lblkno, lastblkno, extblkno; 1035 uint rel_block; 1036 struct metapage *mp; 1037 struct dmap *dp; 1038 int rc; 1039 struct inode *ipbmap = sbi->ipbmap; 1040 struct bmap *bmp; 1041 1042 /* 1043 * We don't want a non-aligned extent to cross a page boundary 1044 */ 1045 if (((rel_block = blkno & (sbi->nbperpage - 1))) && 1046 (rel_block + nblocks + addnblocks > sbi->nbperpage)) 1047 return -ENOSPC; 1048 1049 /* get the last block of the current allocation */ 1050 lastblkno = blkno + nblocks - 1; 1051 1052 /* determine the block number of the block following 1053 * the existing allocation. 1054 */ 1055 extblkno = lastblkno + 1; 1056 1057 IREAD_LOCK(ipbmap); 1058 1059 /* better be within the file system */ 1060 bmp = sbi->bmap; 1061 if (lastblkno < 0 || lastblkno >= bmp->db_mapsize) { 1062 IREAD_UNLOCK(ipbmap); 1063 jfs_error(ip->i_sb, 1064 "dbExtend: the block is outside the filesystem"); 1065 return -EIO; 1066 } 1067 1068 /* we'll attempt to extend the current allocation in place by 1069 * allocating the additional blocks as the blocks immediately 1070 * following the current allocation. we only try to extend the 1071 * current allocation in place if the number of additional blocks 1072 * can fit into a dmap, the last block of the current allocation 1073 * is not the last block of the file system, and the start of the 1074 * inplace extension is not on an allocation group boundary. 1075 */ 1076 if (addnblocks > BPERDMAP || extblkno >= bmp->db_mapsize || 1077 (extblkno & (bmp->db_agsize - 1)) == 0) { 1078 IREAD_UNLOCK(ipbmap); 1079 return -ENOSPC; 1080 } 1081 1082 /* get the buffer for the dmap containing the first block 1083 * of the extension. 
1084 */ 1085 lblkno = BLKTODMAP(extblkno, bmp->db_l2nbperpage); 1086 mp = read_metapage(ipbmap, lblkno, PSIZE, 0); 1087 if (mp == NULL) { 1088 IREAD_UNLOCK(ipbmap); 1089 return -EIO; 1090 } 1091 1092 dp = (struct dmap *) mp->data; 1093 1094 /* try to allocate the blocks immediately following the 1095 * current allocation. 1096 */ 1097 rc = dbAllocNext(bmp, dp, extblkno, (int) addnblocks); 1098 1099 IREAD_UNLOCK(ipbmap); 1100 1101 /* were we successful ? */ 1102 if (rc == 0) 1103 write_metapage(mp); 1104 else 1105 /* we were not successful */ 1106 release_metapage(mp); 1107 1108 1109 return (rc); 1110 } 1111 1112 1113 /* 1114 * NAME: dbAllocNext() 1115 * 1116 * FUNCTION: attempt to allocate the blocks of the specified block 1117 * range within a dmap. 1118 * 1119 * PARAMETERS: 1120 * bmp - pointer to bmap descriptor 1121 * dp - pointer to dmap. 1122 * blkno - starting block number of the range. 1123 * nblocks - number of contiguous free blocks of the range. 1124 * 1125 * RETURN VALUES: 1126 * 0 - success 1127 * -ENOSPC - insufficient disk resources 1128 * -EIO - i/o error 1129 * 1130 * serialization: IREAD_LOCK(ipbmap) held on entry/exit; 1131 */ 1132 static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno, 1133 int nblocks) 1134 { 1135 int dbitno, word, rembits, nb, nwords, wbitno, nw; 1136 int l2size; 1137 s8 *leaf; 1138 u32 mask; 1139 1140 if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) { 1141 jfs_error(bmp->db_ipbmap->i_sb, 1142 "dbAllocNext: Corrupt dmap page"); 1143 return -EIO; 1144 } 1145 1146 /* pick up a pointer to the leaves of the dmap tree. 1147 */ 1148 leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx); 1149 1150 /* determine the bit number and word within the dmap of the 1151 * starting block. 1152 */ 1153 dbitno = blkno & (BPERDMAP - 1); 1154 word = dbitno >> L2DBWORD; 1155 1156 /* check if the specified block range is contained within 1157 * this dmap. 1158 */ 1159 if (dbitno + nblocks > BPERDMAP) 1160 return -ENOSPC; 1161 1162 /* check if the starting leaf indicates that anything 1163 * is free. 1164 */ 1165 if (leaf[word] == NOFREE) 1166 return -ENOSPC; 1167 1168 /* check the dmaps words corresponding to block range to see 1169 * if the block range is free. not all bits of the first and 1170 * last words may be contained within the block range. if this 1171 * is the case, we'll work against those words (i.e. partial first 1172 * and/or last) on an individual basis (a single pass) and examine 1173 * the actual bits to determine if they are free. a single pass 1174 * will be used for all dmap words fully contained within the 1175 * specified range. within this pass, the leaves of the dmap 1176 * tree will be examined to determine if the blocks are free. a 1177 * single leaf may describe the free space of multiple dmap 1178 * words, so we may visit only a subset of the actual leaves 1179 * corresponding to the dmap words of the block range. 1180 */ 1181 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) { 1182 /* determine the bit number within the word and 1183 * the number of bits within the word. 1184 */ 1185 wbitno = dbitno & (DBWORD - 1); 1186 nb = min(rembits, DBWORD - wbitno); 1187 1188 /* check if only part of the word is to be examined. 1189 */ 1190 if (nb < DBWORD) { 1191 /* check if the bits are free. 
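			 *
			 * a set bit in the working map means the block
			 * is allocated, so the test below,
			 *	(mask & ~wmap[word]) == mask,
			 * succeeds only if every bit selected by mask
			 * is clear (free); a single allocated block in
			 * the range shrinks the left side and we return
			 * -ENOSPC.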
1192 */ 1193 mask = (ONES << (DBWORD - nb) >> wbitno); 1194 if ((mask & ~le32_to_cpu(dp->wmap[word])) != mask) 1195 return -ENOSPC; 1196 1197 word += 1; 1198 } else { 1199 /* one or more dmap words are fully contained 1200 * within the block range. determine how many 1201 * words and how many bits. 1202 */ 1203 nwords = rembits >> L2DBWORD; 1204 nb = nwords << L2DBWORD; 1205 1206 /* now examine the appropriate leaves to determine 1207 * if the blocks are free. 1208 */ 1209 while (nwords > 0) { 1210 /* does the leaf describe any free space ? 1211 */ 1212 if (leaf[word] < BUDMIN) 1213 return -ENOSPC; 1214 1215 /* determine the l2 number of bits provided 1216 * by this leaf. 1217 */ 1218 l2size = 1219 min((int)leaf[word], NLSTOL2BSZ(nwords)); 1220 1221 /* determine how many words were handled. 1222 */ 1223 nw = BUDSIZE(l2size, BUDMIN); 1224 1225 nwords -= nw; 1226 word += nw; 1227 } 1228 } 1229 } 1230 1231 /* allocate the blocks. 1232 */ 1233 return (dbAllocDmap(bmp, dp, blkno, nblocks)); 1234 } 1235 1236 1237 /* 1238 * NAME: dbAllocNear() 1239 * 1240 * FUNCTION: attempt to allocate a number of contiguous free blocks near 1241 * a specified block (hint) within a dmap. 1242 * 1243 * starting with the dmap leaf that covers the hint, we'll 1244 * check the next four contiguous leaves for sufficient free 1245 * space. if sufficient free space is found, we'll allocate 1246 * the desired free space. 1247 * 1248 * PARAMETERS: 1249 * bmp - pointer to bmap descriptor 1250 * dp - pointer to dmap. 1251 * blkno - block number to allocate near. 1252 * nblocks - actual number of contiguous free blocks desired. 1253 * l2nb - log2 number of contiguous free blocks desired. 1254 * results - on successful return, set to the starting block number 1255 * of the newly allocated range. 1256 * 1257 * RETURN VALUES: 1258 * 0 - success 1259 * -ENOSPC - insufficient disk resources 1260 * -EIO - i/o error 1261 * 1262 * serialization: IREAD_LOCK(ipbmap) held on entry/exit; 1263 */ 1264 static int 1265 dbAllocNear(struct bmap * bmp, 1266 struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results) 1267 { 1268 int word, lword, rc; 1269 s8 *leaf; 1270 1271 if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) { 1272 jfs_error(bmp->db_ipbmap->i_sb, 1273 "dbAllocNear: Corrupt dmap page"); 1274 return -EIO; 1275 } 1276 1277 leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx); 1278 1279 /* determine the word within the dmap that holds the hint 1280 * (i.e. blkno). also, determine the last word in the dmap 1281 * that we'll include in our examination. 1282 */ 1283 word = (blkno & (BPERDMAP - 1)) >> L2DBWORD; 1284 lword = min(word + 4, LPERDMAP); 1285 1286 /* examine the leaves for sufficient free space. 1287 */ 1288 for (; word < lword; word++) { 1289 /* does the leaf describe sufficient free space ? 1290 */ 1291 if (leaf[word] < l2nb) 1292 continue; 1293 1294 /* determine the block number within the file system 1295 * of the first block described by this dmap word. 1296 */ 1297 blkno = le64_to_cpu(dp->start) + (word << L2DBWORD); 1298 1299 /* if not all bits of the dmap word are free, get the 1300 * starting bit number within the dmap word of the required 1301 * string of free bits and adjust the block number with the 1302 * value. 1303 */ 1304 if (leaf[word] < BUDMIN) 1305 blkno += 1306 dbFindBits(le32_to_cpu(dp->wmap[word]), l2nb); 1307 1308 /* allocate the blocks. 
1309 */ 1310 if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0) 1311 *results = blkno; 1312 1313 return (rc); 1314 } 1315 1316 return -ENOSPC; 1317 } 1318 1319 1320 /* 1321 * NAME: dbAllocAG() 1322 * 1323 * FUNCTION: attempt to allocate the specified number of contiguous 1324 * free blocks within the specified allocation group. 1325 * 1326 * unless the allocation group size is equal to the number 1327 * of blocks per dmap, the dmap control pages will be used to 1328 * find the required free space, if available. we start the 1329 * search at the highest dmap control page level which 1330 * distinctly describes the allocation group's free space 1331 * (i.e. the highest level at which the allocation group's 1332 * free space is not mixed in with that of any other group). 1333 * in addition, we start the search within this level at a 1334 * height of the dmapctl dmtree at which the nodes distinctly 1335 * describe the allocation group's free space. at this height, 1336 * the allocation group's free space may be represented by 1 1337 * or two sub-trees, depending on the allocation group size. 1338 * we search the top nodes of these subtrees left to right for 1339 * sufficient free space. if sufficient free space is found, 1340 * the subtree is searched to find the leftmost leaf that 1341 * has free space. once we have made it to the leaf, we 1342 * move the search to the next lower level dmap control page 1343 * corresponding to this leaf. we continue down the dmap control 1344 * pages until we find the dmap that contains or starts the 1345 * sufficient free space and we allocate at this dmap. 1346 * 1347 * if the allocation group size is equal to the dmap size, 1348 * we'll start at the dmap corresponding to the allocation 1349 * group and attempt the allocation at this level. 1350 * 1351 * the dmap control page search is also not performed if the 1352 * allocation group is completely free and we go to the first 1353 * dmap of the allocation group to do the allocation. this is 1354 * done because the allocation group may be part (not the first 1355 * part) of a larger binary buddy system, causing the dmap 1356 * control pages to indicate no free space (NOFREE) within 1357 * the allocation group. 1358 * 1359 * PARAMETERS: 1360 * bmp - pointer to bmap descriptor 1361 * agno - allocation group number. 1362 * nblocks - actual number of contiguous free blocks desired. 1363 * l2nb - log2 number of contiguous free blocks desired. 1364 * results - on successful return, set to the starting block number 1365 * of the newly allocated range. 1366 * 1367 * RETURN VALUES: 1368 * 0 - success 1369 * -ENOSPC - insufficient disk resources 1370 * -EIO - i/o error 1371 * 1372 * note: IWRITE_LOCK(ipmap) held on entry/exit; 1373 */ 1374 static int 1375 dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results) 1376 { 1377 struct metapage *mp; 1378 struct dmapctl *dcp; 1379 int rc, ti, i, k, m, n, agperlev; 1380 s64 blkno, lblkno; 1381 int budmin; 1382 1383 /* allocation request should not be for more than the 1384 * allocation group size. 1385 */ 1386 if (l2nb > bmp->db_agl2size) { 1387 jfs_error(bmp->db_ipbmap->i_sb, 1388 "dbAllocAG: allocation request is larger than the " 1389 "allocation group size"); 1390 return -EIO; 1391 } 1392 1393 /* determine the starting block number of the allocation 1394 * group. 
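	 *
	 * for example (illustrative sizes): with db_agl2size = 13,
	 * i.e. an allocation group of 2^13 blocks (a single dmap),
	 * agno = 3 yields
	 *	blkno = 3 << 13 = 24576,
	 * the first block of the fourth allocation group.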
1395 */ 1396 blkno = (s64) agno << bmp->db_agl2size; 1397 1398 /* check if the allocation group size is the minimum allocation 1399 * group size or if the allocation group is completely free. if 1400 * the allocation group size is the minimum size of BPERDMAP (i.e. 1401 * 1 dmap), there is no need to search the dmap control page (below) 1402 * that fully describes the allocation group since the allocation 1403 * group is already fully described by a dmap. in this case, we 1404 * just call dbAllocCtl() to search the dmap tree and allocate the 1405 * required space if available. 1406 * 1407 * if the allocation group is completely free, dbAllocCtl() is 1408 * also called to allocate the required space. this is done for 1409 * two reasons. first, it makes no sense searching the dmap control 1410 * pages for free space when we know that free space exists. second, 1411 * the dmap control pages may indicate that the allocation group 1412 * has no free space if the allocation group is part (not the first 1413 * part) of a larger binary buddy system. 1414 */ 1415 if (bmp->db_agsize == BPERDMAP 1416 || bmp->db_agfree[agno] == bmp->db_agsize) { 1417 rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results); 1418 if ((rc == -ENOSPC) && 1419 (bmp->db_agfree[agno] == bmp->db_agsize)) { 1420 printk(KERN_ERR "blkno = %Lx, blocks = %Lx\n", 1421 (unsigned long long) blkno, 1422 (unsigned long long) nblocks); 1423 jfs_error(bmp->db_ipbmap->i_sb, 1424 "dbAllocAG: dbAllocCtl failed in free AG"); 1425 } 1426 return (rc); 1427 } 1428 1429 /* the buffer for the dmap control page that fully describes the 1430 * allocation group. 1431 */ 1432 lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, bmp->db_aglevel); 1433 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0); 1434 if (mp == NULL) 1435 return -EIO; 1436 dcp = (struct dmapctl *) mp->data; 1437 budmin = dcp->budmin; 1438 1439 if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) { 1440 jfs_error(bmp->db_ipbmap->i_sb, 1441 "dbAllocAG: Corrupt dmapctl page"); 1442 release_metapage(mp); 1443 return -EIO; 1444 } 1445 1446 /* search the subtree(s) of the dmap control page that describes 1447 * the allocation group, looking for sufficient free space. to begin, 1448 * determine how many allocation groups are represented in a dmap 1449 * control page at the control page level (i.e. L0, L1, L2) that 1450 * fully describes an allocation group. next, determine the starting 1451 * tree index of this allocation group within the control page. 1452 */ 1453 agperlev = 1454 (1 << (L2LPERCTL - (bmp->db_agheigth << 1))) / bmp->db_agwidth; 1455 ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1)); 1456 1457 /* dmap control page trees fan-out by 4 and a single allocation 1458 * group may be described by 1 or 2 subtrees within the ag level 1459 * dmap control page, depending upon the ag size. examine the ag's 1460 * subtrees for sufficient free space, starting with the leftmost 1461 * subtree. 1462 */ 1463 for (i = 0; i < bmp->db_agwidth; i++, ti++) { 1464 /* is there sufficient free space ? 1465 */ 1466 if (l2nb > dcp->stree[ti]) 1467 continue; 1468 1469 /* sufficient free space found in a subtree. now search down 1470 * the subtree to find the leftmost leaf that describes this 1471 * free space. 
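		 *
		 * the dmapctl stree is stored as an array with a
		 * fan-out of four, so the children of the node at
		 * index ti sit at (ti << 2) + 1 through (ti << 2) + 4;
		 * node 0 has children 1..4, node 2 has children 9..12.
		 * the descent below simply takes the leftmost child
		 * whose value still satisfies l2nb.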
1472 */ 1473 for (k = bmp->db_agheigth; k > 0; k--) { 1474 for (n = 0, m = (ti << 2) + 1; n < 4; n++) { 1475 if (l2nb <= dcp->stree[m + n]) { 1476 ti = m + n; 1477 break; 1478 } 1479 } 1480 if (n == 4) { 1481 jfs_error(bmp->db_ipbmap->i_sb, 1482 "dbAllocAG: failed descending stree"); 1483 release_metapage(mp); 1484 return -EIO; 1485 } 1486 } 1487 1488 /* determine the block number within the file system 1489 * that corresponds to this leaf. 1490 */ 1491 if (bmp->db_aglevel == 2) 1492 blkno = 0; 1493 else if (bmp->db_aglevel == 1) 1494 blkno &= ~(MAXL1SIZE - 1); 1495 else /* bmp->db_aglevel == 0 */ 1496 blkno &= ~(MAXL0SIZE - 1); 1497 1498 blkno += 1499 ((s64) (ti - le32_to_cpu(dcp->leafidx))) << budmin; 1500 1501 /* release the buffer in preparation for going down 1502 * the next level of dmap control pages. 1503 */ 1504 release_metapage(mp); 1505 1506 /* check if we need to continue to search down the lower 1507 * level dmap control pages. we need to if the number of 1508 * blocks required is less than maximum number of blocks 1509 * described at the next lower level. 1510 */ 1511 if (l2nb < budmin) { 1512 1513 /* search the lower level dmap control pages to get 1514 * the starting block number of the the dmap that 1515 * contains or starts off the free space. 1516 */ 1517 if ((rc = 1518 dbFindCtl(bmp, l2nb, bmp->db_aglevel - 1, 1519 &blkno))) { 1520 if (rc == -ENOSPC) { 1521 jfs_error(bmp->db_ipbmap->i_sb, 1522 "dbAllocAG: control page " 1523 "inconsistent"); 1524 return -EIO; 1525 } 1526 return (rc); 1527 } 1528 } 1529 1530 /* allocate the blocks. 1531 */ 1532 rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results); 1533 if (rc == -ENOSPC) { 1534 jfs_error(bmp->db_ipbmap->i_sb, 1535 "dbAllocAG: unable to allocate blocks"); 1536 rc = -EIO; 1537 } 1538 return (rc); 1539 } 1540 1541 /* no space in the allocation group. release the buffer and 1542 * return -ENOSPC. 1543 */ 1544 release_metapage(mp); 1545 1546 return -ENOSPC; 1547 } 1548 1549 1550 /* 1551 * NAME: dbAllocAny() 1552 * 1553 * FUNCTION: attempt to allocate the specified number of contiguous 1554 * free blocks anywhere in the file system. 1555 * 1556 * dbAllocAny() attempts to find the sufficient free space by 1557 * searching down the dmap control pages, starting with the 1558 * highest level (i.e. L0, L1, L2) control page. if free space 1559 * large enough to satisfy the desired free space is found, the 1560 * desired free space is allocated. 1561 * 1562 * PARAMETERS: 1563 * bmp - pointer to bmap descriptor 1564 * nblocks - actual number of contiguous free blocks desired. 1565 * l2nb - log2 number of contiguous free blocks desired. 1566 * results - on successful return, set to the starting block number 1567 * of the newly allocated range. 1568 * 1569 * RETURN VALUES: 1570 * 0 - success 1571 * -ENOSPC - insufficient disk resources 1572 * -EIO - i/o error 1573 * 1574 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit; 1575 */ 1576 static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results) 1577 { 1578 int rc; 1579 s64 blkno = 0; 1580 1581 /* starting with the top level dmap control page, search 1582 * down the dmap control levels for sufficient free space. 1583 * if free space is found, dbFindCtl() returns the starting 1584 * block number of the dmap that contains or starts off the 1585 * range of free space. 1586 */ 1587 if ((rc = dbFindCtl(bmp, l2nb, bmp->db_maxlevel, &blkno))) 1588 return (rc); 1589 1590 /* allocate the blocks. 
1591 */ 1592 rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results); 1593 if (rc == -ENOSPC) { 1594 jfs_error(bmp->db_ipbmap->i_sb, 1595 "dbAllocAny: unable to allocate blocks"); 1596 return -EIO; 1597 } 1598 return (rc); 1599 } 1600 1601 1602 /* 1603 * NAME: dbFindCtl() 1604 * 1605 * FUNCTION: starting at a specified dmap control page level and block 1606 * number, search down the dmap control levels for a range of 1607 * contiguous free blocks large enough to satisfy an allocation 1608 * request for the specified number of free blocks. 1609 * 1610 * if sufficient contiguous free blocks are found, this routine 1611 * returns the starting block number within a dmap page that 1612 * contains or starts a range of contiqious free blocks that 1613 * is sufficient in size. 1614 * 1615 * PARAMETERS: 1616 * bmp - pointer to bmap descriptor 1617 * level - starting dmap control page level. 1618 * l2nb - log2 number of contiguous free blocks desired. 1619 * *blkno - on entry, starting block number for conducting the search. 1620 * on successful return, the first block within a dmap page 1621 * that contains or starts a range of contiguous free blocks. 1622 * 1623 * RETURN VALUES: 1624 * 0 - success 1625 * -ENOSPC - insufficient disk resources 1626 * -EIO - i/o error 1627 * 1628 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit; 1629 */ 1630 static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno) 1631 { 1632 int rc, leafidx, lev; 1633 s64 b, lblkno; 1634 struct dmapctl *dcp; 1635 int budmin; 1636 struct metapage *mp; 1637 1638 /* starting at the specified dmap control page level and block 1639 * number, search down the dmap control levels for the starting 1640 * block number of a dmap page that contains or starts off 1641 * sufficient free blocks. 1642 */ 1643 for (lev = level, b = *blkno; lev >= 0; lev--) { 1644 /* get the buffer of the dmap control page for the block 1645 * number and level (i.e. L0, L1, L2). 1646 */ 1647 lblkno = BLKTOCTL(b, bmp->db_l2nbperpage, lev); 1648 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0); 1649 if (mp == NULL) 1650 return -EIO; 1651 dcp = (struct dmapctl *) mp->data; 1652 budmin = dcp->budmin; 1653 1654 if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) { 1655 jfs_error(bmp->db_ipbmap->i_sb, 1656 "dbFindCtl: Corrupt dmapctl page"); 1657 release_metapage(mp); 1658 return -EIO; 1659 } 1660 1661 /* search the tree within the dmap control page for 1662 * sufficent free space. if sufficient free space is found, 1663 * dbFindLeaf() returns the index of the leaf at which 1664 * free space was found. 1665 */ 1666 rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx); 1667 1668 /* release the buffer. 1669 */ 1670 release_metapage(mp); 1671 1672 /* space found ? 1673 */ 1674 if (rc) { 1675 if (lev != level) { 1676 jfs_error(bmp->db_ipbmap->i_sb, 1677 "dbFindCtl: dmap inconsistent"); 1678 return -EIO; 1679 } 1680 return -ENOSPC; 1681 } 1682 1683 /* adjust the block number to reflect the location within 1684 * the dmap control page (i.e. the leaf) at which free 1685 * space was found. 1686 */ 1687 b += (((s64) leafidx) << budmin); 1688 1689 /* we stop the search at this dmap control page level if 1690 * the number of blocks required is greater than or equal 1691 * to the maximum number of blocks described at the next 1692 * (lower) level. 
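		 *
		 * budmin here is the log2 number of blocks described by
		 * one leaf of this control page, which is also the most
		 * that an entire page at the next lower level can
		 * describe. for example, once a request for a full
		 * dmap's worth of blocks has been matched against an L0
		 * leaf (whose span is exactly one dmap), there is
		 * nothing left to refine and the descent stops at the
		 * dmap that dbAllocCtl() will allocate from.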
1693 */ 1694 if (l2nb >= budmin) 1695 break; 1696 } 1697 1698 *blkno = b; 1699 return (0); 1700 } 1701 1702 1703 /* 1704 * NAME: dbAllocCtl() 1705 * 1706 * FUNCTION: attempt to allocate a specified number of contiguous 1707 * blocks starting within a specific dmap. 1708 * 1709 * this routine is called by higher level routines that search 1710 * the dmap control pages above the actual dmaps for contiguous 1711 * free space. the result of successful searches by these 1712 * routines are the starting block numbers within dmaps, with 1713 * the dmaps themselves containing the desired contiguous free 1714 * space or starting a contiguous free space of desired size 1715 * that is made up of the blocks of one or more dmaps. these 1716 * calls should not fail due to insufficent resources. 1717 * 1718 * this routine is called in some cases where it is not known 1719 * whether it will fail due to insufficient resources. more 1720 * specifically, this occurs when allocating from an allocation 1721 * group whose size is equal to the number of blocks per dmap. 1722 * in this case, the dmap control pages are not examined prior 1723 * to calling this routine (to save pathlength) and the call 1724 * might fail. 1725 * 1726 * for a request size that fits within a dmap, this routine relies 1727 * upon the dmap's dmtree to find the requested contiguous free 1728 * space. for request sizes that are larger than a dmap, the 1729 * requested free space will start at the first block of the 1730 * first dmap (i.e. blkno). 1731 * 1732 * PARAMETERS: 1733 * bmp - pointer to bmap descriptor 1734 * nblocks - actual number of contiguous free blocks to allocate. 1735 * l2nb - log2 number of contiguous free blocks to allocate. 1736 * blkno - starting block number of the dmap to start the allocation 1737 * from. 1738 * results - on successful return, set to the starting block number 1739 * of the newly allocated range. 1740 * 1741 * RETURN VALUES: 1742 * 0 - success 1743 * -ENOSPC - insufficient disk resources 1744 * -EIO - i/o error 1745 * 1746 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit; 1747 */ 1748 static int 1749 dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results) 1750 { 1751 int rc, nb; 1752 s64 b, lblkno, n; 1753 struct metapage *mp; 1754 struct dmap *dp; 1755 1756 /* check if the allocation request is confined to a single dmap. 1757 */ 1758 if (l2nb <= L2BPERDMAP) { 1759 /* get the buffer for the dmap. 1760 */ 1761 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage); 1762 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0); 1763 if (mp == NULL) 1764 return -EIO; 1765 dp = (struct dmap *) mp->data; 1766 1767 /* try to allocate the blocks. 1768 */ 1769 rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results); 1770 if (rc == 0) 1771 mark_metapage_dirty(mp); 1772 1773 release_metapage(mp); 1774 1775 return (rc); 1776 } 1777 1778 /* allocation request involving multiple dmaps. it must start on 1779 * a dmap boundary. 1780 */ 1781 assert((blkno & (BPERDMAP - 1)) == 0); 1782 1783 /* allocate the blocks dmap by dmap. 1784 */ 1785 for (n = nblocks, b = blkno; n > 0; n -= nb, b += nb) { 1786 /* get the buffer for the dmap. 1787 */ 1788 lblkno = BLKTODMAP(b, bmp->db_l2nbperpage); 1789 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0); 1790 if (mp == NULL) { 1791 rc = -EIO; 1792 goto backout; 1793 } 1794 dp = (struct dmap *) mp->data; 1795 1796 /* the dmap better be all free. 
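		 *
		 * "all free" is read off the dmtree root: the root
		 * holds the log2 size of the largest free binary buddy
		 * in the dmap, and a value of L2BPERDMAP (the whole
		 * dmap in one free buddy) is only possible when none of
		 * the dmap's blocks are allocated.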
1797 */ 1798 if (dp->tree.stree[ROOT] != L2BPERDMAP) { 1799 release_metapage(mp); 1800 jfs_error(bmp->db_ipbmap->i_sb, 1801 "dbAllocCtl: the dmap is not all free"); 1802 rc = -EIO; 1803 goto backout; 1804 } 1805 1806 /* determine how many blocks to allocate from this dmap. 1807 */ 1808 nb = min(n, (s64)BPERDMAP); 1809 1810 /* allocate the blocks from the dmap. 1811 */ 1812 if ((rc = dbAllocDmap(bmp, dp, b, nb))) { 1813 release_metapage(mp); 1814 goto backout; 1815 } 1816 1817 /* write the buffer. 1818 */ 1819 write_metapage(mp); 1820 } 1821 1822 /* set the results (starting block number) and return. 1823 */ 1824 *results = blkno; 1825 return (0); 1826 1827 /* something failed in handling an allocation request involving 1828 * multiple dmaps. we'll try to clean up by backing out any 1829 * allocation that has already happened for this request. if 1830 * we fail in backing out the allocation, we'll mark the file 1831 * system to indicate that blocks have been leaked. 1832 */ 1833 backout: 1834 1835 /* try to backout the allocations dmap by dmap. 1836 */ 1837 for (n = nblocks - n, b = blkno; n > 0; 1838 n -= BPERDMAP, b += BPERDMAP) { 1839 /* get the buffer for this dmap. 1840 */ 1841 lblkno = BLKTODMAP(b, bmp->db_l2nbperpage); 1842 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0); 1843 if (mp == NULL) { 1844 /* could not back out. mark the file system 1845 * to indicate that we have leaked blocks. 1846 */ 1847 jfs_error(bmp->db_ipbmap->i_sb, 1848 "dbAllocCtl: I/O Error: Block Leakage."); 1849 continue; 1850 } 1851 dp = (struct dmap *) mp->data; 1852 1853 /* free the blocks is this dmap. 1854 */ 1855 if (dbFreeDmap(bmp, dp, b, BPERDMAP)) { 1856 /* could not back out. mark the file system 1857 * to indicate that we have leaked blocks. 1858 */ 1859 release_metapage(mp); 1860 jfs_error(bmp->db_ipbmap->i_sb, 1861 "dbAllocCtl: Block Leakage."); 1862 continue; 1863 } 1864 1865 /* write the buffer. 1866 */ 1867 write_metapage(mp); 1868 } 1869 1870 return (rc); 1871 } 1872 1873 1874 /* 1875 * NAME: dbAllocDmapLev() 1876 * 1877 * FUNCTION: attempt to allocate a specified number of contiguous blocks 1878 * from a specified dmap. 1879 * 1880 * this routine checks if the contiguous blocks are available. 1881 * if so, nblocks of blocks are allocated; otherwise, ENOSPC is 1882 * returned. 1883 * 1884 * PARAMETERS: 1885 * mp - pointer to bmap descriptor 1886 * dp - pointer to dmap to attempt to allocate blocks from. 1887 * l2nb - log2 number of contiguous block desired. 1888 * nblocks - actual number of contiguous block desired. 1889 * results - on successful return, set to the starting block number 1890 * of the newly allocated range. 1891 * 1892 * RETURN VALUES: 1893 * 0 - success 1894 * -ENOSPC - insufficient disk resources 1895 * -EIO - i/o error 1896 * 1897 * serialization: IREAD_LOCK(ipbmap), e.g., from dbAlloc(), or 1898 * IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit; 1899 */ 1900 static int 1901 dbAllocDmapLev(struct bmap * bmp, 1902 struct dmap * dp, int nblocks, int l2nb, s64 * results) 1903 { 1904 s64 blkno; 1905 int leafidx, rc; 1906 1907 /* can't be more than a dmaps worth of blocks */ 1908 assert(l2nb <= L2BPERDMAP); 1909 1910 /* search the tree within the dmap page for sufficient 1911 * free space. if sufficient free space is found, dbFindLeaf() 1912 * returns the index of the leaf at which free space was found. 
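	 *
	 * each leaf of the dmap tree covers one bitmap word of DBWORD
	 * blocks, so the leaf index found is converted to a block
	 * offset by a shift of L2DBWORD; for example, with the usual
	 * 32-bit map words, leafidx = 5 maps to
	 *	blkno = dp->start + (5 << L2DBWORD) = dp->start + 160,
	 * the first block described by the sixth bitmap word.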
1913 */
1914 if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
1915 return -ENOSPC;
1916
1917 /* determine the block number within the file system corresponding
1918 * to the leaf at which free space was found.
1919 */
1920 blkno = le64_to_cpu(dp->start) + (leafidx << L2DBWORD);
1921
1922 /* if not all bits of the dmap word are free, get the starting
1923 * bit number within the dmap word of the required string of free
1924 * bits and adjust the block number with this value.
1925 */
1926 if (dp->tree.stree[leafidx + LEAFIND] < BUDMIN)
1927 blkno += dbFindBits(le32_to_cpu(dp->wmap[leafidx]), l2nb);
1928
1929 /* allocate the blocks */
1930 if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
1931 *results = blkno;
1932
1933 return (rc);
1934 }
1935
1936
1937 /*
1938 * NAME: dbAllocDmap()
1939 *
1940 * FUNCTION: adjust the disk allocation map to reflect the allocation
1941 * of a specified block range within a dmap.
1942 *
1943 * this routine allocates the specified blocks from the dmap
1944 * through a call to dbAllocBits(). if the allocation of the
1945 * block range causes the maximum string of free blocks within
1946 * the dmap to change (i.e. the value of the root of the dmap's
1947 * dmtree), this routine will cause this change to be reflected
1948 * up through the appropriate levels of the dmap control pages
1949 * by a call to dbAdjCtl() for the L0 dmap control page that
1950 * covers this dmap.
1951 *
1952 * PARAMETERS:
1953 * bmp - pointer to bmap descriptor
1954 * dp - pointer to dmap to allocate the block range from.
1955 * blkno - starting block number of the block to be allocated.
1956 * nblocks - number of blocks to be allocated.
1957 *
1958 * RETURN VALUES:
1959 * 0 - success
1960 * -EIO - i/o error
1961 *
1962 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
1963 */
1964 static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
1965 int nblocks)
1966 {
1967 s8 oldroot;
1968 int rc;
1969
1970 /* save the current value of the root (i.e. maximum free string)
1971 * of the dmap tree.
1972 */
1973 oldroot = dp->tree.stree[ROOT];
1974
1975 /* allocate the specified (blocks) bits */
1976 dbAllocBits(bmp, dp, blkno, nblocks);
1977
1978 /* if the root has not changed, done. */
1979 if (dp->tree.stree[ROOT] == oldroot)
1980 return (0);
1981
1982 /* root changed. bubble the change up to the dmap control pages.
1983 * if the adjustment of the upper level control pages fails,
1984 * backout the bit allocation (thus making everything consistent).
1985 */
1986 if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 1, 0)))
1987 dbFreeBits(bmp, dp, blkno, nblocks);
1988
1989 return (rc);
1990 }
1991
1992
1993 /*
1994 * NAME: dbFreeDmap()
1995 *
1996 * FUNCTION: adjust the disk allocation map to reflect the freeing
1997 * of a specified block range within a dmap.
1998 *
1999 * this routine frees the specified blocks from the dmap through
2000 * a call to dbFreeBits(). if the deallocation of the block range
2001 * causes the maximum string of free blocks within the dmap to
2002 * change (i.e. the value of the root of the dmap's dmtree), this
2003 * routine will cause this change to be reflected up through the
2004 * appropriate levels of the dmap control pages by a call to
2005 * dbAdjCtl() for the L0 dmap control page that covers this dmap.
2006 *
2007 * PARAMETERS:
2008 * bmp - pointer to bmap descriptor
2009 * dp - pointer to dmap to free the block range from.
2010 * blkno - starting block number of the block to be freed.
2011 * nblocks - number of blocks to be freed. 2012 * 2013 * RETURN VALUES: 2014 * 0 - success 2015 * -EIO - i/o error 2016 * 2017 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2018 */ 2019 static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno, 2020 int nblocks) 2021 { 2022 s8 oldroot; 2023 int rc, word; 2024 2025 /* save the current value of the root (i.e. maximum free string) 2026 * of the dmap tree. 2027 */ 2028 oldroot = dp->tree.stree[ROOT]; 2029 2030 /* free the specified (blocks) bits */ 2031 dbFreeBits(bmp, dp, blkno, nblocks); 2032 2033 /* if the root has not changed, done. */ 2034 if (dp->tree.stree[ROOT] == oldroot) 2035 return (0); 2036 2037 /* root changed. bubble the change up to the dmap control pages. 2038 * if the adjustment of the upper level control pages fails, 2039 * backout the deallocation. 2040 */ 2041 if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 0, 0))) { 2042 word = (blkno & (BPERDMAP - 1)) >> L2DBWORD; 2043 2044 /* as part of backing out the deallocation, we will have 2045 * to back split the dmap tree if the deallocation caused 2046 * the freed blocks to become part of a larger binary buddy 2047 * system. 2048 */ 2049 if (dp->tree.stree[word] == NOFREE) 2050 dbBackSplit((dmtree_t *) & dp->tree, word); 2051 2052 dbAllocBits(bmp, dp, blkno, nblocks); 2053 } 2054 2055 return (rc); 2056 } 2057 2058 2059 /* 2060 * NAME: dbAllocBits() 2061 * 2062 * FUNCTION: allocate a specified block range from a dmap. 2063 * 2064 * this routine updates the dmap to reflect the working 2065 * state allocation of the specified block range. it directly 2066 * updates the bits of the working map and causes the adjustment 2067 * of the binary buddy system described by the dmap's dmtree 2068 * leaves to reflect the bits allocated. it also causes the 2069 * dmap's dmtree, as a whole, to reflect the allocated range. 2070 * 2071 * PARAMETERS: 2072 * bmp - pointer to bmap descriptor 2073 * dp - pointer to dmap to allocate bits from. 2074 * blkno - starting block number of the bits to be allocated. 2075 * nblocks - number of bits to be allocated. 2076 * 2077 * RETURN VALUES: none 2078 * 2079 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2080 */ 2081 static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno, 2082 int nblocks) 2083 { 2084 int dbitno, word, rembits, nb, nwords, wbitno, nw, agno; 2085 dmtree_t *tp = (dmtree_t *) & dp->tree; 2086 int size; 2087 s8 *leaf; 2088 2089 /* pick up a pointer to the leaves of the dmap tree */ 2090 leaf = dp->tree.stree + LEAFIND; 2091 2092 /* determine the bit number and word within the dmap of the 2093 * starting block. 2094 */ 2095 dbitno = blkno & (BPERDMAP - 1); 2096 word = dbitno >> L2DBWORD; 2097 2098 /* block range better be within the dmap */ 2099 assert(dbitno + nblocks <= BPERDMAP); 2100 2101 /* allocate the bits of the dmap's words corresponding to the block 2102 * range. not all bits of the first and last words may be contained 2103 * within the block range. if this is the case, we'll work against 2104 * those words (i.e. partial first and/or last) on an individual basis 2105 * (a single pass), allocating the bits of interest by hand and 2106 * updating the leaf corresponding to the dmap word. a single pass 2107 * will be used for all dmap words fully contained within the 2108 * specified range. within this pass, the bits of all fully contained 2109 * dmap words will be marked as free in a single shot and the leaves 2110 * will be updated. 
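 * (in this allocation path the fully contained words are of course set
 * to ones, i.e. marked allocated. as an illustrative aside with assumed
 * values: a partial word with wbitno == 4 and nb == 3 is masked with
 * ONES << (DBWORD - nb) >> wbitno == 0xe0000000 >> 4 == 0x0e000000,
 * which sets bits 4..6 of the word counting from the most significant
 * bit.)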
a single leaf may describe the free space of 2111 * multiple dmap words, so we may update only a subset of the actual 2112 * leaves corresponding to the dmap words of the block range. 2113 */ 2114 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) { 2115 /* determine the bit number within the word and 2116 * the number of bits within the word. 2117 */ 2118 wbitno = dbitno & (DBWORD - 1); 2119 nb = min(rembits, DBWORD - wbitno); 2120 2121 /* check if only part of a word is to be allocated. 2122 */ 2123 if (nb < DBWORD) { 2124 /* allocate (set to 1) the appropriate bits within 2125 * this dmap word. 2126 */ 2127 dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb) 2128 >> wbitno); 2129 2130 /* update the leaf for this dmap word. in addition 2131 * to setting the leaf value to the binary buddy max 2132 * of the updated dmap word, dbSplit() will split 2133 * the binary system of the leaves if need be. 2134 */ 2135 dbSplit(tp, word, BUDMIN, 2136 dbMaxBud((u8 *) & dp->wmap[word])); 2137 2138 word += 1; 2139 } else { 2140 /* one or more dmap words are fully contained 2141 * within the block range. determine how many 2142 * words and allocate (set to 1) the bits of these 2143 * words. 2144 */ 2145 nwords = rembits >> L2DBWORD; 2146 memset(&dp->wmap[word], (int) ONES, nwords * 4); 2147 2148 /* determine how many bits. 2149 */ 2150 nb = nwords << L2DBWORD; 2151 2152 /* now update the appropriate leaves to reflect 2153 * the allocated words. 2154 */ 2155 for (; nwords > 0; nwords -= nw) { 2156 if (leaf[word] < BUDMIN) { 2157 jfs_error(bmp->db_ipbmap->i_sb, 2158 "dbAllocBits: leaf page " 2159 "corrupt"); 2160 break; 2161 } 2162 2163 /* determine what the leaf value should be 2164 * updated to as the minimum of the l2 number 2165 * of bits being allocated and the l2 number 2166 * of bits currently described by this leaf. 2167 */ 2168 size = min((int)leaf[word], NLSTOL2BSZ(nwords)); 2169 2170 /* update the leaf to reflect the allocation. 2171 * in addition to setting the leaf value to 2172 * NOFREE, dbSplit() will split the binary 2173 * system of the leaves to reflect the current 2174 * allocation (size). 2175 */ 2176 dbSplit(tp, word, size, NOFREE); 2177 2178 /* get the number of dmap words handled */ 2179 nw = BUDSIZE(size, BUDMIN); 2180 word += nw; 2181 } 2182 } 2183 } 2184 2185 /* update the free count for this dmap */ 2186 dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) - nblocks); 2187 2188 BMAP_LOCK(bmp); 2189 2190 /* if this allocation group is completely free, 2191 * update the maximum allocation group number if this allocation 2192 * group is the new max. 2193 */ 2194 agno = blkno >> bmp->db_agl2size; 2195 if (agno > bmp->db_maxag) 2196 bmp->db_maxag = agno; 2197 2198 /* update the free count for the allocation group and map */ 2199 bmp->db_agfree[agno] -= nblocks; 2200 bmp->db_nfree -= nblocks; 2201 2202 BMAP_UNLOCK(bmp); 2203 } 2204 2205 2206 /* 2207 * NAME: dbFreeBits() 2208 * 2209 * FUNCTION: free a specified block range from a dmap. 2210 * 2211 * this routine updates the dmap to reflect the working 2212 * state allocation of the specified block range. it directly 2213 * updates the bits of the working map and causes the adjustment 2214 * of the binary buddy system described by the dmap's dmtree 2215 * leaves to reflect the bits freed. it also causes the dmap's 2216 * dmtree, as a whole, to reflect the deallocated range. 2217 * 2218 * PARAMETERS: 2219 * bmp - pointer to bmap descriptor 2220 * dp - pointer to dmap to free bits from. 
2221 * blkno - starting block number of the bits to be freed. 2222 * nblocks - number of bits to be freed. 2223 * 2224 * RETURN VALUES: none 2225 * 2226 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2227 */ 2228 static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno, 2229 int nblocks) 2230 { 2231 int dbitno, word, rembits, nb, nwords, wbitno, nw, agno; 2232 dmtree_t *tp = (dmtree_t *) & dp->tree; 2233 int size; 2234 2235 /* determine the bit number and word within the dmap of the 2236 * starting block. 2237 */ 2238 dbitno = blkno & (BPERDMAP - 1); 2239 word = dbitno >> L2DBWORD; 2240 2241 /* block range better be within the dmap. 2242 */ 2243 assert(dbitno + nblocks <= BPERDMAP); 2244 2245 /* free the bits of the dmaps words corresponding to the block range. 2246 * not all bits of the first and last words may be contained within 2247 * the block range. if this is the case, we'll work against those 2248 * words (i.e. partial first and/or last) on an individual basis 2249 * (a single pass), freeing the bits of interest by hand and updating 2250 * the leaf corresponding to the dmap word. a single pass will be used 2251 * for all dmap words fully contained within the specified range. 2252 * within this pass, the bits of all fully contained dmap words will 2253 * be marked as free in a single shot and the leaves will be updated. a 2254 * single leaf may describe the free space of multiple dmap words, 2255 * so we may update only a subset of the actual leaves corresponding 2256 * to the dmap words of the block range. 2257 * 2258 * dbJoin() is used to update leaf values and will join the binary 2259 * buddy system of the leaves if the new leaf values indicate this 2260 * should be done. 2261 */ 2262 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) { 2263 /* determine the bit number within the word and 2264 * the number of bits within the word. 2265 */ 2266 wbitno = dbitno & (DBWORD - 1); 2267 nb = min(rembits, DBWORD - wbitno); 2268 2269 /* check if only part of a word is to be freed. 2270 */ 2271 if (nb < DBWORD) { 2272 /* free (zero) the appropriate bits within this 2273 * dmap word. 2274 */ 2275 dp->wmap[word] &= 2276 cpu_to_le32(~(ONES << (DBWORD - nb) 2277 >> wbitno)); 2278 2279 /* update the leaf for this dmap word. 2280 */ 2281 dbJoin(tp, word, 2282 dbMaxBud((u8 *) & dp->wmap[word])); 2283 2284 word += 1; 2285 } else { 2286 /* one or more dmap words are fully contained 2287 * within the block range. determine how many 2288 * words and free (zero) the bits of these words. 2289 */ 2290 nwords = rembits >> L2DBWORD; 2291 memset(&dp->wmap[word], 0, nwords * 4); 2292 2293 /* determine how many bits. 2294 */ 2295 nb = nwords << L2DBWORD; 2296 2297 /* now update the appropriate leaves to reflect 2298 * the freed words. 2299 */ 2300 for (; nwords > 0; nwords -= nw) { 2301 /* determine what the leaf value should be 2302 * updated to as the minimum of the l2 number 2303 * of bits being freed and the l2 (max) number 2304 * of bits that can be described by this leaf. 2305 */ 2306 size = 2307 min(LITOL2BSZ 2308 (word, L2LPERDMAP, BUDMIN), 2309 NLSTOL2BSZ(nwords)); 2310 2311 /* update the leaf. 2312 */ 2313 dbJoin(tp, word, size); 2314 2315 /* get the number of dmap words handled. 2316 */ 2317 nw = BUDSIZE(size, BUDMIN); 2318 word += nw; 2319 } 2320 } 2321 } 2322 2323 /* update the free count for this dmap. 
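 * (dp->nfree is stored little-endian on disk, hence the
 * le32_to_cpu()/cpu_to_le32() round trip on the update just below.)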
2324 */ 2325 dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) + nblocks); 2326 2327 BMAP_LOCK(bmp); 2328 2329 /* update the free count for the allocation group and 2330 * map. 2331 */ 2332 agno = blkno >> bmp->db_agl2size; 2333 bmp->db_nfree += nblocks; 2334 bmp->db_agfree[agno] += nblocks; 2335 2336 /* check if this allocation group is not completely free and 2337 * if it is currently the maximum (rightmost) allocation group. 2338 * if so, establish the new maximum allocation group number by 2339 * searching left for the first allocation group with allocation. 2340 */ 2341 if ((bmp->db_agfree[agno] == bmp->db_agsize && agno == bmp->db_maxag) || 2342 (agno == bmp->db_numag - 1 && 2343 bmp->db_agfree[agno] == (bmp-> db_mapsize & (BPERDMAP - 1)))) { 2344 while (bmp->db_maxag > 0) { 2345 bmp->db_maxag -= 1; 2346 if (bmp->db_agfree[bmp->db_maxag] != 2347 bmp->db_agsize) 2348 break; 2349 } 2350 2351 /* re-establish the allocation group preference if the 2352 * current preference is right of the maximum allocation 2353 * group. 2354 */ 2355 if (bmp->db_agpref > bmp->db_maxag) 2356 bmp->db_agpref = bmp->db_maxag; 2357 } 2358 2359 BMAP_UNLOCK(bmp); 2360 } 2361 2362 2363 /* 2364 * NAME: dbAdjCtl() 2365 * 2366 * FUNCTION: adjust a dmap control page at a specified level to reflect 2367 * the change in a lower level dmap or dmap control page's 2368 * maximum string of free blocks (i.e. a change in the root 2369 * of the lower level object's dmtree) due to the allocation 2370 * or deallocation of a range of blocks with a single dmap. 2371 * 2372 * on entry, this routine is provided with the new value of 2373 * the lower level dmap or dmap control page root and the 2374 * starting block number of the block range whose allocation 2375 * or deallocation resulted in the root change. this range 2376 * is respresented by a single leaf of the current dmapctl 2377 * and the leaf will be updated with this value, possibly 2378 * causing a binary buddy system within the leaves to be 2379 * split or joined. the update may also cause the dmapctl's 2380 * dmtree to be updated. 2381 * 2382 * if the adjustment of the dmap control page, itself, causes its 2383 * root to change, this change will be bubbled up to the next dmap 2384 * control level by a recursive call to this routine, specifying 2385 * the new root value and the next dmap control page level to 2386 * be adjusted. 2387 * PARAMETERS: 2388 * bmp - pointer to bmap descriptor 2389 * blkno - the first block of a block range within a dmap. it is 2390 * the allocation or deallocation of this block range that 2391 * requires the dmap control page to be adjusted. 2392 * newval - the new value of the lower level dmap or dmap control 2393 * page root. 2394 * alloc - TRUE if adjustment is due to an allocation. 2395 * level - current level of dmap control page (i.e. L0, L1, L2) to 2396 * be adjusted. 2397 * 2398 * RETURN VALUES: 2399 * 0 - success 2400 * -EIO - i/o error 2401 * 2402 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2403 */ 2404 static int 2405 dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level) 2406 { 2407 struct metapage *mp; 2408 s8 oldroot; 2409 int oldval; 2410 s64 lblkno; 2411 struct dmapctl *dcp; 2412 int rc, leafno, ti; 2413 2414 /* get the buffer for the dmap control page for the specified 2415 * block number and control page level. 
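 * (each leaf of a dmapctl at this level summarizes 2**budmin blocks,
 * with budmin set to L2BPERDMAP + L2LPERCTL * level by dbInitDmapCtl();
 * roughly speaking, the leaf number computed below is just the block's
 * offset within this page's coverage measured in those leaf-sized
 * units.)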
2416 */ 2417 lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, level); 2418 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0); 2419 if (mp == NULL) 2420 return -EIO; 2421 dcp = (struct dmapctl *) mp->data; 2422 2423 if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) { 2424 jfs_error(bmp->db_ipbmap->i_sb, 2425 "dbAdjCtl: Corrupt dmapctl page"); 2426 release_metapage(mp); 2427 return -EIO; 2428 } 2429 2430 /* determine the leaf number corresponding to the block and 2431 * the index within the dmap control tree. 2432 */ 2433 leafno = BLKTOCTLLEAF(blkno, dcp->budmin); 2434 ti = leafno + le32_to_cpu(dcp->leafidx); 2435 2436 /* save the current leaf value and the current root level (i.e. 2437 * maximum l2 free string described by this dmapctl). 2438 */ 2439 oldval = dcp->stree[ti]; 2440 oldroot = dcp->stree[ROOT]; 2441 2442 /* check if this is a control page update for an allocation. 2443 * if so, update the leaf to reflect the new leaf value using 2444 * dbSplit(); otherwise (deallocation), use dbJoin() to udpate 2445 * the leaf with the new value. in addition to updating the 2446 * leaf, dbSplit() will also split the binary buddy system of 2447 * the leaves, if required, and bubble new values within the 2448 * dmapctl tree, if required. similarly, dbJoin() will join 2449 * the binary buddy system of leaves and bubble new values up 2450 * the dmapctl tree as required by the new leaf value. 2451 */ 2452 if (alloc) { 2453 /* check if we are in the middle of a binary buddy 2454 * system. this happens when we are performing the 2455 * first allocation out of an allocation group that 2456 * is part (not the first part) of a larger binary 2457 * buddy system. if we are in the middle, back split 2458 * the system prior to calling dbSplit() which assumes 2459 * that it is at the front of a binary buddy system. 2460 */ 2461 if (oldval == NOFREE) { 2462 dbBackSplit((dmtree_t *) dcp, leafno); 2463 oldval = dcp->stree[ti]; 2464 } 2465 dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval); 2466 } else { 2467 dbJoin((dmtree_t *) dcp, leafno, newval); 2468 } 2469 2470 /* check if the root of the current dmap control page changed due 2471 * to the update and if the current dmap control page is not at 2472 * the current top level (i.e. L0, L1, L2) of the map. if so (i.e. 2473 * root changed and this is not the top level), call this routine 2474 * again (recursion) for the next higher level of the mapping to 2475 * reflect the change in root for the current dmap control page. 2476 */ 2477 if (dcp->stree[ROOT] != oldroot) { 2478 /* are we below the top level of the map. if so, 2479 * bubble the root up to the next higher level. 2480 */ 2481 if (level < bmp->db_maxlevel) { 2482 /* bubble up the new root of this dmap control page to 2483 * the next level. 2484 */ 2485 if ((rc = 2486 dbAdjCtl(bmp, blkno, dcp->stree[ROOT], alloc, 2487 level + 1))) { 2488 /* something went wrong in bubbling up the new 2489 * root value, so backout the changes to the 2490 * current dmap control page. 2491 */ 2492 if (alloc) { 2493 dbJoin((dmtree_t *) dcp, leafno, 2494 oldval); 2495 } else { 2496 /* the dbJoin() above might have 2497 * caused a larger binary buddy system 2498 * to form and we may now be in the 2499 * middle of it. if this is the case, 2500 * back split the buddies. 2501 */ 2502 if (dcp->stree[ti] == NOFREE) 2503 dbBackSplit((dmtree_t *) 2504 dcp, leafno); 2505 dbSplit((dmtree_t *) dcp, leafno, 2506 dcp->budmin, oldval); 2507 } 2508 2509 /* release the buffer and return the error. 
2510 */ 2511 release_metapage(mp); 2512 return (rc); 2513 } 2514 } else { 2515 /* we're at the top level of the map. update 2516 * the bmap control page to reflect the size 2517 * of the maximum free buddy system. 2518 */ 2519 assert(level == bmp->db_maxlevel); 2520 if (bmp->db_maxfreebud != oldroot) { 2521 jfs_error(bmp->db_ipbmap->i_sb, 2522 "dbAdjCtl: the maximum free buddy is " 2523 "not the old root"); 2524 } 2525 bmp->db_maxfreebud = dcp->stree[ROOT]; 2526 } 2527 } 2528 2529 /* write the buffer. 2530 */ 2531 write_metapage(mp); 2532 2533 return (0); 2534 } 2535 2536 2537 /* 2538 * NAME: dbSplit() 2539 * 2540 * FUNCTION: update the leaf of a dmtree with a new value, splitting 2541 * the leaf from the binary buddy system of the dmtree's 2542 * leaves, as required. 2543 * 2544 * PARAMETERS: 2545 * tp - pointer to the tree containing the leaf. 2546 * leafno - the number of the leaf to be updated. 2547 * splitsz - the size the binary buddy system starting at the leaf 2548 * must be split to, specified as the log2 number of blocks. 2549 * newval - the new value for the leaf. 2550 * 2551 * RETURN VALUES: none 2552 * 2553 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2554 */ 2555 static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval) 2556 { 2557 int budsz; 2558 int cursz; 2559 s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx); 2560 2561 /* check if the leaf needs to be split. 2562 */ 2563 if (leaf[leafno] > tp->dmt_budmin) { 2564 /* the split occurs by cutting the buddy system in half 2565 * at the specified leaf until we reach the specified 2566 * size. pick up the starting split size (current size 2567 * - 1 in l2) and the corresponding buddy size. 2568 */ 2569 cursz = leaf[leafno] - 1; 2570 budsz = BUDSIZE(cursz, tp->dmt_budmin); 2571 2572 /* split until we reach the specified size. 2573 */ 2574 while (cursz >= splitsz) { 2575 /* update the buddy's leaf with its new value. 2576 */ 2577 dbAdjTree(tp, leafno ^ budsz, cursz); 2578 2579 /* on to the next size and buddy. 2580 */ 2581 cursz -= 1; 2582 budsz >>= 1; 2583 } 2584 } 2585 2586 /* adjust the dmap tree to reflect the specified leaf's new 2587 * value. 2588 */ 2589 dbAdjTree(tp, leafno, newval); 2590 } 2591 2592 2593 /* 2594 * NAME: dbBackSplit() 2595 * 2596 * FUNCTION: back split the binary buddy system of dmtree leaves 2597 * that hold a specified leaf until the specified leaf 2598 * starts its own binary buddy system. 2599 * 2600 * the allocators typically perform allocations at the start 2601 * of binary buddy systems and dbSplit() is used to accomplish 2602 * any required splits. in some cases, however, allocation 2603 * may occur in the middle of a binary system and requires a 2604 * back split, with the split proceeding out from the middle of 2605 * the system (less efficient) rather than the start of the 2606 * system (more efficient). the cases in which a back split 2607 * is required are rare and are limited to the first allocation 2608 * within an allocation group which is a part (not first part) 2609 * of a larger binary buddy system and a few exception cases 2610 * in which a previous join operation must be backed out. 2611 * 2612 * PARAMETERS: 2613 * tp - pointer to the tree containing the leaf. 2614 * leafno - the number of the leaf to be updated. 
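 *
 * illustrative example (assumed values): suppose leaves 0..3 had been
 * joined into one free buddy system, so that leaf 0 holds the combined
 * l2 size and leaves 1..3 hold NOFREE. back splitting leaf 2 splits
 * the system at leaf 0 in half, leaving leaves 0 and 2 each describing
 * a two-leaf system; at that point leaf 2 starts its own buddy system
 * and the loop below terminates.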
2615 * 2616 * RETURN VALUES: none 2617 * 2618 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2619 */ 2620 static void dbBackSplit(dmtree_t * tp, int leafno) 2621 { 2622 int budsz, bud, w, bsz, size; 2623 int cursz; 2624 s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx); 2625 2626 /* leaf should be part (not first part) of a binary 2627 * buddy system. 2628 */ 2629 assert(leaf[leafno] == NOFREE); 2630 2631 /* the back split is accomplished by iteratively finding the leaf 2632 * that starts the buddy system that contains the specified leaf and 2633 * splitting that system in two. this iteration continues until 2634 * the specified leaf becomes the start of a buddy system. 2635 * 2636 * determine maximum possible l2 size for the specified leaf. 2637 */ 2638 size = 2639 LITOL2BSZ(leafno, le32_to_cpu(tp->dmt_l2nleafs), 2640 tp->dmt_budmin); 2641 2642 /* determine the number of leaves covered by this size. this 2643 * is the buddy size that we will start with as we search for 2644 * the buddy system that contains the specified leaf. 2645 */ 2646 budsz = BUDSIZE(size, tp->dmt_budmin); 2647 2648 /* back split. 2649 */ 2650 while (leaf[leafno] == NOFREE) { 2651 /* find the leftmost buddy leaf. 2652 */ 2653 for (w = leafno, bsz = budsz;; bsz <<= 1, 2654 w = (w < bud) ? w : bud) { 2655 assert(bsz < le32_to_cpu(tp->dmt_nleafs)); 2656 2657 /* determine the buddy. 2658 */ 2659 bud = w ^ bsz; 2660 2661 /* check if this buddy is the start of the system. 2662 */ 2663 if (leaf[bud] != NOFREE) { 2664 /* split the leaf at the start of the 2665 * system in two. 2666 */ 2667 cursz = leaf[bud] - 1; 2668 dbSplit(tp, bud, cursz, cursz); 2669 break; 2670 } 2671 } 2672 } 2673 2674 assert(leaf[leafno] == size); 2675 } 2676 2677 2678 /* 2679 * NAME: dbJoin() 2680 * 2681 * FUNCTION: update the leaf of a dmtree with a new value, joining 2682 * the leaf with other leaves of the dmtree into a multi-leaf 2683 * binary buddy system, as required. 2684 * 2685 * PARAMETERS: 2686 * tp - pointer to the tree containing the leaf. 2687 * leafno - the number of the leaf to be updated. 2688 * newval - the new value for the leaf. 2689 * 2690 * RETURN VALUES: none 2691 */ 2692 static void dbJoin(dmtree_t * tp, int leafno, int newval) 2693 { 2694 int budsz, buddy; 2695 s8 *leaf; 2696 2697 /* can the new leaf value require a join with other leaves ? 2698 */ 2699 if (newval >= tp->dmt_budmin) { 2700 /* pickup a pointer to the leaves of the tree. 2701 */ 2702 leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx); 2703 2704 /* try to join the specified leaf into a large binary 2705 * buddy system. the join proceeds by attempting to join 2706 * the specified leafno with its buddy (leaf) at new value. 2707 * if the join occurs, we attempt to join the left leaf 2708 * of the joined buddies with its buddy at new value + 1. 2709 * we continue to join until we find a buddy that cannot be 2710 * joined (does not have a value equal to the size of the 2711 * last join) or until all leaves have been joined into a 2712 * single system. 2713 * 2714 * get the buddy size (number of words covered) of 2715 * the new value. 2716 */ 2717 budsz = BUDSIZE(newval, tp->dmt_budmin); 2718 2719 /* try to join. 2720 */ 2721 while (budsz < le32_to_cpu(tp->dmt_nleafs)) { 2722 /* get the buddy leaf. 2723 */ 2724 buddy = leafno ^ budsz; 2725 2726 /* if the leaf's new value is greater than its 2727 * buddy's value, we join no more. 
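 * (a buddy with a smaller value still has allocated blocks inside it,
 * so the pair cannot be described as a single larger free system.)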
2728 */ 2729 if (newval > leaf[buddy]) 2730 break; 2731 2732 assert(newval == leaf[buddy]); 2733 2734 /* check which (leafno or buddy) is the left buddy. 2735 * the left buddy gets to claim the blocks resulting 2736 * from the join while the right gets to claim none. 2737 * the left buddy is also eligable to participate in 2738 * a join at the next higher level while the right 2739 * is not. 2740 * 2741 */ 2742 if (leafno < buddy) { 2743 /* leafno is the left buddy. 2744 */ 2745 dbAdjTree(tp, buddy, NOFREE); 2746 } else { 2747 /* buddy is the left buddy and becomes 2748 * leafno. 2749 */ 2750 dbAdjTree(tp, leafno, NOFREE); 2751 leafno = buddy; 2752 } 2753 2754 /* on to try the next join. 2755 */ 2756 newval += 1; 2757 budsz <<= 1; 2758 } 2759 } 2760 2761 /* update the leaf value. 2762 */ 2763 dbAdjTree(tp, leafno, newval); 2764 } 2765 2766 2767 /* 2768 * NAME: dbAdjTree() 2769 * 2770 * FUNCTION: update a leaf of a dmtree with a new value, adjusting 2771 * the dmtree, as required, to reflect the new leaf value. 2772 * the combination of any buddies must already be done before 2773 * this is called. 2774 * 2775 * PARAMETERS: 2776 * tp - pointer to the tree to be adjusted. 2777 * leafno - the number of the leaf to be updated. 2778 * newval - the new value for the leaf. 2779 * 2780 * RETURN VALUES: none 2781 */ 2782 static void dbAdjTree(dmtree_t * tp, int leafno, int newval) 2783 { 2784 int lp, pp, k; 2785 int max; 2786 2787 /* pick up the index of the leaf for this leafno. 2788 */ 2789 lp = leafno + le32_to_cpu(tp->dmt_leafidx); 2790 2791 /* is the current value the same as the old value ? if so, 2792 * there is nothing to do. 2793 */ 2794 if (tp->dmt_stree[lp] == newval) 2795 return; 2796 2797 /* set the new value. 2798 */ 2799 tp->dmt_stree[lp] = newval; 2800 2801 /* bubble the new value up the tree as required. 2802 */ 2803 for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) { 2804 /* get the index of the first leaf of the 4 leaf 2805 * group containing the specified leaf (leafno). 2806 */ 2807 lp = ((lp - 1) & ~0x03) + 1; 2808 2809 /* get the index of the parent of this 4 leaf group. 2810 */ 2811 pp = (lp - 1) >> 2; 2812 2813 /* determine the maximum of the 4 leaves. 2814 */ 2815 max = TREEMAX(&tp->dmt_stree[lp]); 2816 2817 /* if the maximum of the 4 is the same as the 2818 * parent's value, we're done. 2819 */ 2820 if (tp->dmt_stree[pp] == max) 2821 break; 2822 2823 /* parent gets new value. 2824 */ 2825 tp->dmt_stree[pp] = max; 2826 2827 /* parent becomes leaf for next go-round. 2828 */ 2829 lp = pp; 2830 } 2831 } 2832 2833 2834 /* 2835 * NAME: dbFindLeaf() 2836 * 2837 * FUNCTION: search a dmtree_t for sufficient free blocks, returning 2838 * the index of a leaf describing the free blocks if 2839 * sufficient free blocks are found. 2840 * 2841 * the search starts at the top of the dmtree_t tree and 2842 * proceeds down the tree to the leftmost leaf with sufficient 2843 * free space. 2844 * 2845 * PARAMETERS: 2846 * tp - pointer to the tree to be searched. 2847 * l2nb - log2 number of free blocks to search for. 2848 * leafidx - return pointer to be set to the index of the leaf 2849 * describing at least l2nb free blocks if sufficient 2850 * free blocks are found. 2851 * 2852 * RETURN VALUES: 2853 * 0 - success 2854 * -ENOSPC - insufficient free blocks. 2855 */ 2856 static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx) 2857 { 2858 int ti, n = 0, k, x = 0; 2859 2860 /* first check the root of the tree to see if there is 2861 * sufficient free space. 
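 * (the root is the maximum over all leaves, i.e. the l2 size of the
 * largest free binary buddy anywhere under this tree, so a root smaller
 * than l2nb rules out the entire tree with a single comparison.)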
2862 */ 2863 if (l2nb > tp->dmt_stree[ROOT]) 2864 return -ENOSPC; 2865 2866 /* sufficient free space available. now search down the tree 2867 * starting at the next level for the leftmost leaf that 2868 * describes sufficient free space. 2869 */ 2870 for (k = le32_to_cpu(tp->dmt_height), ti = 1; 2871 k > 0; k--, ti = ((ti + n) << 2) + 1) { 2872 /* search the four nodes at this level, starting from 2873 * the left. 2874 */ 2875 for (x = ti, n = 0; n < 4; n++) { 2876 /* sufficient free space found. move to the next 2877 * level (or quit if this is the last level). 2878 */ 2879 if (l2nb <= tp->dmt_stree[x + n]) 2880 break; 2881 } 2882 2883 /* better have found something since the higher 2884 * levels of the tree said it was here. 2885 */ 2886 assert(n < 4); 2887 } 2888 2889 /* set the return to the leftmost leaf describing sufficient 2890 * free space. 2891 */ 2892 *leafidx = x + n - le32_to_cpu(tp->dmt_leafidx); 2893 2894 return (0); 2895 } 2896 2897 2898 /* 2899 * NAME: dbFindBits() 2900 * 2901 * FUNCTION: find a specified number of binary buddy free bits within a 2902 * dmap bitmap word value. 2903 * 2904 * this routine searches the bitmap value for (1 << l2nb) free 2905 * bits at (1 << l2nb) alignments within the value. 2906 * 2907 * PARAMETERS: 2908 * word - dmap bitmap word value. 2909 * l2nb - number of free bits specified as a log2 number. 2910 * 2911 * RETURN VALUES: 2912 * starting bit number of free bits. 2913 */ 2914 static int dbFindBits(u32 word, int l2nb) 2915 { 2916 int bitno, nb; 2917 u32 mask; 2918 2919 /* get the number of bits. 2920 */ 2921 nb = 1 << l2nb; 2922 assert(nb <= DBWORD); 2923 2924 /* complement the word so we can use a mask (i.e. 0s represent 2925 * free bits) and compute the mask. 2926 */ 2927 word = ~word; 2928 mask = ONES << (DBWORD - nb); 2929 2930 /* scan the word for nb free bits at nb alignments. 2931 */ 2932 for (bitno = 0; mask != 0; bitno += nb, mask >>= nb) { 2933 if ((mask & word) == mask) 2934 break; 2935 } 2936 2937 ASSERT(bitno < 32); 2938 2939 /* return the bit number. 2940 */ 2941 return (bitno); 2942 } 2943 2944 2945 /* 2946 * NAME: dbMaxBud(u8 *cp) 2947 * 2948 * FUNCTION: determine the largest binary buddy string of free 2949 * bits within 32-bits of the map. 2950 * 2951 * PARAMETERS: 2952 * cp - pointer to the 32-bit value. 2953 * 2954 * RETURN VALUES: 2955 * largest binary buddy of free bits within a dmap word. 2956 */ 2957 static int dbMaxBud(u8 * cp) 2958 { 2959 signed char tmp1, tmp2; 2960 2961 /* check if the wmap word is all free. if so, the 2962 * free buddy size is BUDMIN. 2963 */ 2964 if (*((uint *) cp) == 0) 2965 return (BUDMIN); 2966 2967 /* check if the wmap word is half free. if so, the 2968 * free buddy size is BUDMIN-1. 2969 */ 2970 if (*((u16 *) cp) == 0 || *((u16 *) cp + 1) == 0) 2971 return (BUDMIN - 1); 2972 2973 /* not all free or half free. determine the free buddy 2974 * size thru table lookup using quarters of the wmap word. 2975 */ 2976 tmp1 = max(budtab[cp[2]], budtab[cp[3]]); 2977 tmp2 = max(budtab[cp[0]], budtab[cp[1]]); 2978 return (max(tmp1, tmp2)); 2979 } 2980 2981 2982 /* 2983 * NAME: cnttz(uint word) 2984 * 2985 * FUNCTION: determine the number of trailing zeros within a 32-bit 2986 * value. 2987 * 2988 * PARAMETERS: 2989 * value - 32-bit value to be examined. 
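 *
 * illustrative examples (assumed inputs): cnttz(0x00000008) == 3, and
 * cnttz(0) == 32 since the scan below runs off the end of the word
 * without finding a set bit.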
2990 * 2991 * RETURN VALUES: 2992 * count of trailing zeros 2993 */ 2994 static int cnttz(u32 word) 2995 { 2996 int n; 2997 2998 for (n = 0; n < 32; n++, word >>= 1) { 2999 if (word & 0x01) 3000 break; 3001 } 3002 3003 return (n); 3004 } 3005 3006 3007 /* 3008 * NAME: cntlz(u32 value) 3009 * 3010 * FUNCTION: determine the number of leading zeros within a 32-bit 3011 * value. 3012 * 3013 * PARAMETERS: 3014 * value - 32-bit value to be examined. 3015 * 3016 * RETURN VALUES: 3017 * count of leading zeros 3018 */ 3019 static int cntlz(u32 value) 3020 { 3021 int n; 3022 3023 for (n = 0; n < 32; n++, value <<= 1) { 3024 if (value & HIGHORDER) 3025 break; 3026 } 3027 return (n); 3028 } 3029 3030 3031 /* 3032 * NAME: blkstol2(s64 nb) 3033 * 3034 * FUNCTION: convert a block count to its log2 value. if the block 3035 * count is not a l2 multiple, it is rounded up to the next 3036 * larger l2 multiple. 3037 * 3038 * PARAMETERS: 3039 * nb - number of blocks 3040 * 3041 * RETURN VALUES: 3042 * log2 number of blocks 3043 */ 3044 int blkstol2(s64 nb) 3045 { 3046 int l2nb; 3047 s64 mask; /* meant to be signed */ 3048 3049 mask = (s64) 1 << (64 - 1); 3050 3051 /* count the leading bits. 3052 */ 3053 for (l2nb = 0; l2nb < 64; l2nb++, mask >>= 1) { 3054 /* leading bit found. 3055 */ 3056 if (nb & mask) { 3057 /* determine the l2 value. 3058 */ 3059 l2nb = (64 - 1) - l2nb; 3060 3061 /* check if we need to round up. 3062 */ 3063 if (~mask & nb) 3064 l2nb++; 3065 3066 return (l2nb); 3067 } 3068 } 3069 assert(0); 3070 return 0; /* fix compiler warning */ 3071 } 3072 3073 3074 /* 3075 * NAME: dbAllocBottomUp() 3076 * 3077 * FUNCTION: alloc the specified block range from the working block 3078 * allocation map. 3079 * 3080 * the blocks will be alloc from the working map one dmap 3081 * at a time. 3082 * 3083 * PARAMETERS: 3084 * ip - pointer to in-core inode; 3085 * blkno - starting block number to be freed. 3086 * nblocks - number of blocks to be freed. 3087 * 3088 * RETURN VALUES: 3089 * 0 - success 3090 * -EIO - i/o error 3091 */ 3092 int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks) 3093 { 3094 struct metapage *mp; 3095 struct dmap *dp; 3096 int nb, rc; 3097 s64 lblkno, rem; 3098 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap; 3099 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap; 3100 3101 IREAD_LOCK(ipbmap); 3102 3103 /* block to be allocated better be within the mapsize. */ 3104 ASSERT(nblocks <= bmp->db_mapsize - blkno); 3105 3106 /* 3107 * allocate the blocks a dmap at a time. 3108 */ 3109 mp = NULL; 3110 for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) { 3111 /* release previous dmap if any */ 3112 if (mp) { 3113 write_metapage(mp); 3114 } 3115 3116 /* get the buffer for the current dmap. */ 3117 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage); 3118 mp = read_metapage(ipbmap, lblkno, PSIZE, 0); 3119 if (mp == NULL) { 3120 IREAD_UNLOCK(ipbmap); 3121 return -EIO; 3122 } 3123 dp = (struct dmap *) mp->data; 3124 3125 /* determine the number of blocks to be allocated from 3126 * this dmap. 3127 */ 3128 nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1))); 3129 3130 /* allocate the blocks. */ 3131 if ((rc = dbAllocDmapBU(bmp, dp, blkno, nb))) { 3132 release_metapage(mp); 3133 IREAD_UNLOCK(ipbmap); 3134 return (rc); 3135 } 3136 } 3137 3138 /* write the last buffer. 
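 * (this assumes the caller passed nblocks > 0, so the loop above read
 * at least one dmap page and mp is non-NULL here.)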
*/ 3139 write_metapage(mp); 3140 3141 IREAD_UNLOCK(ipbmap); 3142 3143 return (0); 3144 } 3145 3146 3147 static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno, 3148 int nblocks) 3149 { 3150 int rc; 3151 int dbitno, word, rembits, nb, nwords, wbitno, agno; 3152 s8 oldroot, *leaf; 3153 struct dmaptree *tp = (struct dmaptree *) & dp->tree; 3154 3155 /* save the current value of the root (i.e. maximum free string) 3156 * of the dmap tree. 3157 */ 3158 oldroot = tp->stree[ROOT]; 3159 3160 /* pick up a pointer to the leaves of the dmap tree */ 3161 leaf = tp->stree + LEAFIND; 3162 3163 /* determine the bit number and word within the dmap of the 3164 * starting block. 3165 */ 3166 dbitno = blkno & (BPERDMAP - 1); 3167 word = dbitno >> L2DBWORD; 3168 3169 /* block range better be within the dmap */ 3170 assert(dbitno + nblocks <= BPERDMAP); 3171 3172 /* allocate the bits of the dmap's words corresponding to the block 3173 * range. not all bits of the first and last words may be contained 3174 * within the block range. if this is the case, we'll work against 3175 * those words (i.e. partial first and/or last) on an individual basis 3176 * (a single pass), allocating the bits of interest by hand and 3177 * updating the leaf corresponding to the dmap word. a single pass 3178 * will be used for all dmap words fully contained within the 3179 * specified range. within this pass, the bits of all fully contained 3180 * dmap words will be marked as free in a single shot and the leaves 3181 * will be updated. a single leaf may describe the free space of 3182 * multiple dmap words, so we may update only a subset of the actual 3183 * leaves corresponding to the dmap words of the block range. 3184 */ 3185 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) { 3186 /* determine the bit number within the word and 3187 * the number of bits within the word. 3188 */ 3189 wbitno = dbitno & (DBWORD - 1); 3190 nb = min(rembits, DBWORD - wbitno); 3191 3192 /* check if only part of a word is to be allocated. 3193 */ 3194 if (nb < DBWORD) { 3195 /* allocate (set to 1) the appropriate bits within 3196 * this dmap word. 3197 */ 3198 dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb) 3199 >> wbitno); 3200 3201 word++; 3202 } else { 3203 /* one or more dmap words are fully contained 3204 * within the block range. determine how many 3205 * words and allocate (set to 1) the bits of these 3206 * words. 3207 */ 3208 nwords = rembits >> L2DBWORD; 3209 memset(&dp->wmap[word], (int) ONES, nwords * 4); 3210 3211 /* determine how many bits */ 3212 nb = nwords << L2DBWORD; 3213 word += nwords; 3214 } 3215 } 3216 3217 /* update the free count for this dmap */ 3218 dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) - nblocks); 3219 3220 /* reconstruct summary tree */ 3221 dbInitDmapTree(dp); 3222 3223 BMAP_LOCK(bmp); 3224 3225 /* if this allocation group is completely free, 3226 * update the highest active allocation group number 3227 * if this allocation group is the new max. 3228 */ 3229 agno = blkno >> bmp->db_agl2size; 3230 if (agno > bmp->db_maxag) 3231 bmp->db_maxag = agno; 3232 3233 /* update the free count for the allocation group and map */ 3234 bmp->db_agfree[agno] -= nblocks; 3235 bmp->db_nfree -= nblocks; 3236 3237 BMAP_UNLOCK(bmp); 3238 3239 /* if the root has not changed, done. */ 3240 if (tp->stree[ROOT] == oldroot) 3241 return (0); 3242 3243 /* root changed. bubble the change up to the dmap control pages. 
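 * (the root compared here is the one just recomputed by
 * dbInitDmapTree(); unlike dbAllocDmap(), this bottom-up variant
 * rebuilds the whole summary tree rather than splitting individual
 * leaves.)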
3244 * if the adjustment of the upper level control pages fails, 3245 * backout the bit allocation (thus making everything consistent). 3246 */ 3247 if ((rc = dbAdjCtl(bmp, blkno, tp->stree[ROOT], 1, 0))) 3248 dbFreeBits(bmp, dp, blkno, nblocks); 3249 3250 return (rc); 3251 } 3252 3253 3254 /* 3255 * NAME: dbExtendFS() 3256 * 3257 * FUNCTION: extend bmap from blkno for nblocks; 3258 * dbExtendFS() updates bmap ready for dbAllocBottomUp(); 3259 * 3260 * L2 3261 * | 3262 * L1---------------------------------L1 3263 * | | 3264 * L0---------L0---------L0 L0---------L0---------L0 3265 * | | | | | | 3266 * d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,.,dm; 3267 * L2L1L0d0,...,dnL0d0,...,dnL0d0,...,dnL1L0d0,...,dnL0d0,...,dnL0d0,..dm 3268 * 3269 * <---old---><----------------------------extend-----------------------> 3270 */ 3271 int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks) 3272 { 3273 struct jfs_sb_info *sbi = JFS_SBI(ipbmap->i_sb); 3274 int nbperpage = sbi->nbperpage; 3275 int i, i0 = TRUE, j, j0 = TRUE, k, n; 3276 s64 newsize; 3277 s64 p; 3278 struct metapage *mp, *l2mp, *l1mp = NULL, *l0mp = NULL; 3279 struct dmapctl *l2dcp, *l1dcp, *l0dcp; 3280 struct dmap *dp; 3281 s8 *l0leaf, *l1leaf, *l2leaf; 3282 struct bmap *bmp = sbi->bmap; 3283 int agno, l2agsize, oldl2agsize; 3284 s64 ag_rem; 3285 3286 newsize = blkno + nblocks; 3287 3288 jfs_info("dbExtendFS: blkno:%Ld nblocks:%Ld newsize:%Ld", 3289 (long long) blkno, (long long) nblocks, (long long) newsize); 3290 3291 /* 3292 * initialize bmap control page. 3293 * 3294 * all the data in bmap control page should exclude 3295 * the mkfs hidden dmap page. 3296 */ 3297 3298 /* update mapsize */ 3299 bmp->db_mapsize = newsize; 3300 bmp->db_maxlevel = BMAPSZTOLEV(bmp->db_mapsize); 3301 3302 /* compute new AG size */ 3303 l2agsize = dbGetL2AGSize(newsize); 3304 oldl2agsize = bmp->db_agl2size; 3305 3306 bmp->db_agl2size = l2agsize; 3307 bmp->db_agsize = 1 << l2agsize; 3308 3309 /* compute new number of AG */ 3310 agno = bmp->db_numag; 3311 bmp->db_numag = newsize >> l2agsize; 3312 bmp->db_numag += ((u32) newsize % (u32) bmp->db_agsize) ? 1 : 0; 3313 3314 /* 3315 * reconfigure db_agfree[] 3316 * from old AG configuration to new AG configuration; 3317 * 3318 * coalesce contiguous k (newAGSize/oldAGSize) AGs; 3319 * i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn; 3320 * note: new AG size = old AG size * (2**x). 
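 * illustrative example (sizes assumed): if l2agsize grew by one, then
 * k == 2 and the loop below folds the old groups pairwise: the new
 * agfree[0] covers old AG0 + AG1 (AG0's count being restored through
 * ag_rem), the new agfree[1] covers old AG2 + AG3, and so on.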
3321 */ 3322 if (l2agsize == oldl2agsize) 3323 goto extend; 3324 k = 1 << (l2agsize - oldl2agsize); 3325 ag_rem = bmp->db_agfree[0]; /* save agfree[0] */ 3326 for (i = 0, n = 0; i < agno; n++) { 3327 bmp->db_agfree[n] = 0; /* init collection point */ 3328 3329 /* coalesce cotiguous k AGs; */ 3330 for (j = 0; j < k && i < agno; j++, i++) { 3331 /* merge AGi to AGn */ 3332 bmp->db_agfree[n] += bmp->db_agfree[i]; 3333 } 3334 } 3335 bmp->db_agfree[0] += ag_rem; /* restore agfree[0] */ 3336 3337 for (; n < MAXAG; n++) 3338 bmp->db_agfree[n] = 0; 3339 3340 /* 3341 * update highest active ag number 3342 */ 3343 3344 bmp->db_maxag = bmp->db_maxag / k; 3345 3346 /* 3347 * extend bmap 3348 * 3349 * update bit maps and corresponding level control pages; 3350 * global control page db_nfree, db_agfree[agno], db_maxfreebud; 3351 */ 3352 extend: 3353 /* get L2 page */ 3354 p = BMAPBLKNO + nbperpage; /* L2 page */ 3355 l2mp = read_metapage(ipbmap, p, PSIZE, 0); 3356 if (!l2mp) { 3357 jfs_error(ipbmap->i_sb, "dbExtendFS: L2 page could not be read"); 3358 return -EIO; 3359 } 3360 l2dcp = (struct dmapctl *) l2mp->data; 3361 3362 /* compute start L1 */ 3363 k = blkno >> L2MAXL1SIZE; 3364 l2leaf = l2dcp->stree + CTLLEAFIND + k; 3365 p = BLKTOL1(blkno, sbi->l2nbperpage); /* L1 page */ 3366 3367 /* 3368 * extend each L1 in L2 3369 */ 3370 for (; k < LPERCTL; k++, p += nbperpage) { 3371 /* get L1 page */ 3372 if (j0) { 3373 /* read in L1 page: (blkno & (MAXL1SIZE - 1)) */ 3374 l1mp = read_metapage(ipbmap, p, PSIZE, 0); 3375 if (l1mp == NULL) 3376 goto errout; 3377 l1dcp = (struct dmapctl *) l1mp->data; 3378 3379 /* compute start L0 */ 3380 j = (blkno & (MAXL1SIZE - 1)) >> L2MAXL0SIZE; 3381 l1leaf = l1dcp->stree + CTLLEAFIND + j; 3382 p = BLKTOL0(blkno, sbi->l2nbperpage); 3383 j0 = FALSE; 3384 } else { 3385 /* assign/init L1 page */ 3386 l1mp = get_metapage(ipbmap, p, PSIZE, 0); 3387 if (l1mp == NULL) 3388 goto errout; 3389 3390 l1dcp = (struct dmapctl *) l1mp->data; 3391 3392 /* compute start L0 */ 3393 j = 0; 3394 l1leaf = l1dcp->stree + CTLLEAFIND; 3395 p += nbperpage; /* 1st L0 of L1.k */ 3396 } 3397 3398 /* 3399 * extend each L0 in L1 3400 */ 3401 for (; j < LPERCTL; j++) { 3402 /* get L0 page */ 3403 if (i0) { 3404 /* read in L0 page: (blkno & (MAXL0SIZE - 1)) */ 3405 3406 l0mp = read_metapage(ipbmap, p, PSIZE, 0); 3407 if (l0mp == NULL) 3408 goto errout; 3409 l0dcp = (struct dmapctl *) l0mp->data; 3410 3411 /* compute start dmap */ 3412 i = (blkno & (MAXL0SIZE - 1)) >> 3413 L2BPERDMAP; 3414 l0leaf = l0dcp->stree + CTLLEAFIND + i; 3415 p = BLKTODMAP(blkno, 3416 sbi->l2nbperpage); 3417 i0 = FALSE; 3418 } else { 3419 /* assign/init L0 page */ 3420 l0mp = get_metapage(ipbmap, p, PSIZE, 0); 3421 if (l0mp == NULL) 3422 goto errout; 3423 3424 l0dcp = (struct dmapctl *) l0mp->data; 3425 3426 /* compute start dmap */ 3427 i = 0; 3428 l0leaf = l0dcp->stree + CTLLEAFIND; 3429 p += nbperpage; /* 1st dmap of L0.j */ 3430 } 3431 3432 /* 3433 * extend each dmap in L0 3434 */ 3435 for (; i < LPERCTL; i++) { 3436 /* 3437 * reconstruct the dmap page, and 3438 * initialize corresponding parent L0 leaf 3439 */ 3440 if ((n = blkno & (BPERDMAP - 1))) { 3441 /* read in dmap page: */ 3442 mp = read_metapage(ipbmap, p, 3443 PSIZE, 0); 3444 if (mp == NULL) 3445 goto errout; 3446 n = min(nblocks, (s64)BPERDMAP - n); 3447 } else { 3448 /* assign/init dmap page */ 3449 mp = read_metapage(ipbmap, p, 3450 PSIZE, 0); 3451 if (mp == NULL) 3452 goto errout; 3453 3454 n = min(nblocks, (s64)BPERDMAP); 3455 } 3456 3457 dp = (struct dmap *) 
mp->data; 3458 *l0leaf = dbInitDmap(dp, blkno, n); 3459 3460 bmp->db_nfree += n; 3461 agno = le64_to_cpu(dp->start) >> l2agsize; 3462 bmp->db_agfree[agno] += n; 3463 3464 write_metapage(mp); 3465 3466 l0leaf++; 3467 p += nbperpage; 3468 3469 blkno += n; 3470 nblocks -= n; 3471 if (nblocks == 0) 3472 break; 3473 } /* for each dmap in a L0 */ 3474 3475 /* 3476 * build current L0 page from its leaves, and 3477 * initialize corresponding parent L1 leaf 3478 */ 3479 *l1leaf = dbInitDmapCtl(l0dcp, 0, ++i); 3480 write_metapage(l0mp); 3481 l0mp = NULL; 3482 3483 if (nblocks) 3484 l1leaf++; /* continue for next L0 */ 3485 else { 3486 /* more than 1 L0 ? */ 3487 if (j > 0) 3488 break; /* build L1 page */ 3489 else { 3490 /* summarize in global bmap page */ 3491 bmp->db_maxfreebud = *l1leaf; 3492 release_metapage(l1mp); 3493 release_metapage(l2mp); 3494 goto finalize; 3495 } 3496 } 3497 } /* for each L0 in a L1 */ 3498 3499 /* 3500 * build current L1 page from its leaves, and 3501 * initialize corresponding parent L2 leaf 3502 */ 3503 *l2leaf = dbInitDmapCtl(l1dcp, 1, ++j); 3504 write_metapage(l1mp); 3505 l1mp = NULL; 3506 3507 if (nblocks) 3508 l2leaf++; /* continue for next L1 */ 3509 else { 3510 /* more than 1 L1 ? */ 3511 if (k > 0) 3512 break; /* build L2 page */ 3513 else { 3514 /* summarize in global bmap page */ 3515 bmp->db_maxfreebud = *l2leaf; 3516 release_metapage(l2mp); 3517 goto finalize; 3518 } 3519 } 3520 } /* for each L1 in a L2 */ 3521 3522 jfs_error(ipbmap->i_sb, 3523 "dbExtendFS: function has not returned as expected"); 3524 errout: 3525 if (l0mp) 3526 release_metapage(l0mp); 3527 if (l1mp) 3528 release_metapage(l1mp); 3529 release_metapage(l2mp); 3530 return -EIO; 3531 3532 /* 3533 * finalize bmap control page 3534 */ 3535 finalize: 3536 3537 return 0; 3538 } 3539 3540 3541 /* 3542 * dbFinalizeBmap() 3543 */ 3544 void dbFinalizeBmap(struct inode *ipbmap) 3545 { 3546 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap; 3547 int actags, inactags, l2nl; 3548 s64 ag_rem, actfree, inactfree, avgfree; 3549 int i, n; 3550 3551 /* 3552 * finalize bmap control page 3553 */ 3554 //finalize: 3555 /* 3556 * compute db_agpref: preferred ag to allocate from 3557 * (the leftmost ag with average free space in it); 3558 */ 3559 //agpref: 3560 /* get the number of active ags and inacitve ags */ 3561 actags = bmp->db_maxag + 1; 3562 inactags = bmp->db_numag - actags; 3563 ag_rem = bmp->db_mapsize & (bmp->db_agsize - 1); /* ??? */ 3564 3565 /* determine how many blocks are in the inactive allocation 3566 * groups. in doing this, we must account for the fact that 3567 * the rightmost group might be a partial group (i.e. file 3568 * system size is not a multiple of the group size). 3569 */ 3570 inactfree = (inactags && ag_rem) ? 3571 ((inactags - 1) << bmp->db_agl2size) + ag_rem 3572 : inactags << bmp->db_agl2size; 3573 3574 /* determine how many free blocks are in the active 3575 * allocation groups plus the average number of free blocks 3576 * within the active ags. 3577 */ 3578 actfree = bmp->db_nfree - inactfree; 3579 avgfree = (u32) actfree / (u32) actags; 3580 3581 /* if the preferred allocation group has not average free space. 3582 * re-establish the preferred group as the leftmost 3583 * group with average free space. 
3584 */ 3585 if (bmp->db_agfree[bmp->db_agpref] < avgfree) { 3586 for (bmp->db_agpref = 0; bmp->db_agpref < actags; 3587 bmp->db_agpref++) { 3588 if (bmp->db_agfree[bmp->db_agpref] >= avgfree) 3589 break; 3590 } 3591 if (bmp->db_agpref >= bmp->db_numag) { 3592 jfs_error(ipbmap->i_sb, 3593 "cannot find ag with average freespace"); 3594 } 3595 } 3596 3597 /* 3598 * compute db_aglevel, db_agheigth, db_width, db_agstart: 3599 * an ag is covered in aglevel dmapctl summary tree, 3600 * at agheight level height (from leaf) with agwidth number of nodes 3601 * each, which starts at agstart index node of the smmary tree node 3602 * array; 3603 */ 3604 bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize); 3605 l2nl = 3606 bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL); 3607 bmp->db_agheigth = l2nl >> 1; 3608 bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheigth << 1)); 3609 for (i = 5 - bmp->db_agheigth, bmp->db_agstart = 0, n = 1; i > 0; 3610 i--) { 3611 bmp->db_agstart += n; 3612 n <<= 2; 3613 } 3614 3615 } 3616 3617 3618 /* 3619 * NAME: dbInitDmap()/ujfs_idmap_page() 3620 * 3621 * FUNCTION: initialize working/persistent bitmap of the dmap page 3622 * for the specified number of blocks: 3623 * 3624 * at entry, the bitmaps had been initialized as free (ZEROS); 3625 * The number of blocks will only account for the actually 3626 * existing blocks. Blocks which don't actually exist in 3627 * the aggregate will be marked as allocated (ONES); 3628 * 3629 * PARAMETERS: 3630 * dp - pointer to page of map 3631 * nblocks - number of blocks this page 3632 * 3633 * RETURNS: NONE 3634 */ 3635 static int dbInitDmap(struct dmap * dp, s64 Blkno, int nblocks) 3636 { 3637 int blkno, w, b, r, nw, nb, i; 3638 3639 /* starting block number within the dmap */ 3640 blkno = Blkno & (BPERDMAP - 1); 3641 3642 if (blkno == 0) { 3643 dp->nblocks = dp->nfree = cpu_to_le32(nblocks); 3644 dp->start = cpu_to_le64(Blkno); 3645 3646 if (nblocks == BPERDMAP) { 3647 memset(&dp->wmap[0], 0, LPERDMAP * 4); 3648 memset(&dp->pmap[0], 0, LPERDMAP * 4); 3649 goto initTree; 3650 } 3651 } else { 3652 dp->nblocks = 3653 cpu_to_le32(le32_to_cpu(dp->nblocks) + nblocks); 3654 dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) + nblocks); 3655 } 3656 3657 /* word number containing start block number */ 3658 w = blkno >> L2DBWORD; 3659 3660 /* 3661 * free the bits corresponding to the block range (ZEROS): 3662 * note: not all bits of the first and last words may be contained 3663 * within the block range. 3664 */ 3665 for (r = nblocks; r > 0; r -= nb, blkno += nb) { 3666 /* number of bits preceding range to be freed in the word */ 3667 b = blkno & (DBWORD - 1); 3668 /* number of bits to free in the word */ 3669 nb = min(r, DBWORD - b); 3670 3671 /* is partial word to be freed ? 
*/ 3672 if (nb < DBWORD) { 3673 /* free (set to 0) from the bitmap word */ 3674 dp->wmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb) 3675 >> b)); 3676 dp->pmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb) 3677 >> b)); 3678 3679 /* skip the word freed */ 3680 w++; 3681 } else { 3682 /* free (set to 0) contiguous bitmap words */ 3683 nw = r >> L2DBWORD; 3684 memset(&dp->wmap[w], 0, nw * 4); 3685 memset(&dp->pmap[w], 0, nw * 4); 3686 3687 /* skip the words freed */ 3688 nb = nw << L2DBWORD; 3689 w += nw; 3690 } 3691 } 3692 3693 /* 3694 * mark bits following the range to be freed (non-existing 3695 * blocks) as allocated (ONES) 3696 */ 3697 3698 if (blkno == BPERDMAP) 3699 goto initTree; 3700 3701 /* the first word beyond the end of existing blocks */ 3702 w = blkno >> L2DBWORD; 3703 3704 /* does nblocks fall on a 32-bit boundary ? */ 3705 b = blkno & (DBWORD - 1); 3706 if (b) { 3707 /* mark a partial word allocated */ 3708 dp->wmap[w] = dp->pmap[w] = cpu_to_le32(ONES >> b); 3709 w++; 3710 } 3711 3712 /* set the rest of the words in the page to allocated (ONES) */ 3713 for (i = w; i < LPERDMAP; i++) 3714 dp->pmap[i] = dp->wmap[i] = cpu_to_le32(ONES); 3715 3716 /* 3717 * init tree 3718 */ 3719 initTree: 3720 return (dbInitDmapTree(dp)); 3721 } 3722 3723 3724 /* 3725 * NAME: dbInitDmapTree()/ujfs_complete_dmap() 3726 * 3727 * FUNCTION: initialize summary tree of the specified dmap: 3728 * 3729 * at entry, bitmap of the dmap has been initialized; 3730 * 3731 * PARAMETERS: 3732 * dp - dmap to complete 3733 * blkno - starting block number for this dmap 3734 * treemax - will be filled in with max free for this dmap 3735 * 3736 * RETURNS: max free string at the root of the tree 3737 */ 3738 static int dbInitDmapTree(struct dmap * dp) 3739 { 3740 struct dmaptree *tp; 3741 s8 *cp; 3742 int i; 3743 3744 /* init fixed info of tree */ 3745 tp = &dp->tree; 3746 tp->nleafs = cpu_to_le32(LPERDMAP); 3747 tp->l2nleafs = cpu_to_le32(L2LPERDMAP); 3748 tp->leafidx = cpu_to_le32(LEAFIND); 3749 tp->height = cpu_to_le32(4); 3750 tp->budmin = BUDMIN; 3751 3752 /* init each leaf from corresponding wmap word: 3753 * note: leaf is set to NOFREE(-1) if all blocks of corresponding 3754 * bitmap word are allocated. 3755 */ 3756 cp = tp->stree + le32_to_cpu(tp->leafidx); 3757 for (i = 0; i < LPERDMAP; i++) 3758 *cp++ = dbMaxBud((u8 *) & dp->wmap[i]); 3759 3760 /* build the dmap's binary buddy summary tree */ 3761 return (dbInitTree(tp)); 3762 } 3763 3764 3765 /* 3766 * NAME: dbInitTree()/ujfs_adjtree() 3767 * 3768 * FUNCTION: initialize binary buddy summary tree of a dmap or dmapctl. 3769 * 3770 * at entry, the leaves of the tree has been initialized 3771 * from corresponding bitmap word or root of summary tree 3772 * of the child control page; 3773 * configure binary buddy system at the leaf level, then 3774 * bubble up the values of the leaf nodes up the tree. 
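 *
 * illustrative example (values assumed): two adjacent leaves that both
 * hold budmin are combined into one buddy of budmin + 1 kept in the
 * left leaf, with the right leaf set to -1 (NOFREE); the next pass
 * doubles the stride and combines budmin + 1 buddies into budmin + 2,
 * and so on up to the maximum possible combination.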
3775 * 3776 * PARAMETERS: 3777 * cp - Pointer to the root of the tree 3778 * l2leaves- Number of leaf nodes as a power of 2 3779 * l2min - Number of blocks that can be covered by a leaf 3780 * as a power of 2 3781 * 3782 * RETURNS: max free string at the root of the tree 3783 */ 3784 static int dbInitTree(struct dmaptree * dtp) 3785 { 3786 int l2max, l2free, bsize, nextb, i; 3787 int child, parent, nparent; 3788 s8 *tp, *cp, *cp1; 3789 3790 tp = dtp->stree; 3791 3792 /* Determine the maximum free string possible for the leaves */ 3793 l2max = le32_to_cpu(dtp->l2nleafs) + dtp->budmin; 3794 3795 /* 3796 * configure the leaf levevl into binary buddy system 3797 * 3798 * Try to combine buddies starting with a buddy size of 1 3799 * (i.e. two leaves). At a buddy size of 1 two buddy leaves 3800 * can be combined if both buddies have a maximum free of l2min; 3801 * the combination will result in the left-most buddy leaf having 3802 * a maximum free of l2min+1. 3803 * After processing all buddies for a given size, process buddies 3804 * at the next higher buddy size (i.e. current size * 2) and 3805 * the next maximum free (current free + 1). 3806 * This continues until the maximum possible buddy combination 3807 * yields maximum free. 3808 */ 3809 for (l2free = dtp->budmin, bsize = 1; l2free < l2max; 3810 l2free++, bsize = nextb) { 3811 /* get next buddy size == current buddy pair size */ 3812 nextb = bsize << 1; 3813 3814 /* scan each adjacent buddy pair at current buddy size */ 3815 for (i = 0, cp = tp + le32_to_cpu(dtp->leafidx); 3816 i < le32_to_cpu(dtp->nleafs); 3817 i += nextb, cp += nextb) { 3818 /* coalesce if both adjacent buddies are max free */ 3819 if (*cp == l2free && *(cp + bsize) == l2free) { 3820 *cp = l2free + 1; /* left take right */ 3821 *(cp + bsize) = -1; /* right give left */ 3822 } 3823 } 3824 } 3825 3826 /* 3827 * bubble summary information of leaves up the tree. 3828 * 3829 * Starting at the leaf node level, the four nodes described by 3830 * the higher level parent node are compared for a maximum free and 3831 * this maximum becomes the value of the parent node. 3832 * when all lower level nodes are processed in this fashion then 3833 * move up to the next level (parent becomes a lower level node) and 3834 * continue the process for that level. 3835 */ 3836 for (child = le32_to_cpu(dtp->leafidx), 3837 nparent = le32_to_cpu(dtp->nleafs) >> 2; 3838 nparent > 0; nparent >>= 2, child = parent) { 3839 /* get index of 1st node of parent level */ 3840 parent = (child - 1) >> 2; 3841 3842 /* set the value of the parent node as the maximum 3843 * of the four nodes of the current level. 3844 */ 3845 for (i = 0, cp = tp + child, cp1 = tp + parent; 3846 i < nparent; i++, cp += 4, cp1++) 3847 *cp1 = TREEMAX(cp); 3848 } 3849 3850 return (*tp); 3851 } 3852 3853 3854 /* 3855 * dbInitDmapCtl() 3856 * 3857 * function: initialize dmapctl page 3858 */ 3859 static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i) 3860 { /* start leaf index not covered by range */ 3861 s8 *cp; 3862 3863 dcp->nleafs = cpu_to_le32(LPERCTL); 3864 dcp->l2nleafs = cpu_to_le32(L2LPERCTL); 3865 dcp->leafidx = cpu_to_le32(CTLLEAFIND); 3866 dcp->height = cpu_to_le32(5); 3867 dcp->budmin = L2BPERDMAP + L2LPERCTL * level; 3868 3869 /* 3870 * initialize the leaves of current level that were not covered 3871 * by the specified input block range (i.e. the leaves have no 3872 * low level dmapctl or dmap). 
3873 */ 3874 cp = &dcp->stree[CTLLEAFIND + i]; 3875 for (; i < LPERCTL; i++) 3876 *cp++ = NOFREE; 3877 3878 /* build the dmap's binary buddy summary tree */ 3879 return (dbInitTree((struct dmaptree *) dcp)); 3880 } 3881 3882 3883 /* 3884 * NAME: dbGetL2AGSize()/ujfs_getagl2size() 3885 * 3886 * FUNCTION: Determine log2(allocation group size) from aggregate size 3887 * 3888 * PARAMETERS: 3889 * nblocks - Number of blocks in aggregate 3890 * 3891 * RETURNS: log2(allocation group size) in aggregate blocks 3892 */ 3893 static int dbGetL2AGSize(s64 nblocks) 3894 { 3895 s64 sz; 3896 s64 m; 3897 int l2sz; 3898 3899 if (nblocks < BPERDMAP * MAXAG) 3900 return (L2BPERDMAP); 3901 3902 /* round up aggregate size to power of 2 */ 3903 m = ((u64) 1 << (64 - 1)); 3904 for (l2sz = 64; l2sz >= 0; l2sz--, m >>= 1) { 3905 if (m & nblocks) 3906 break; 3907 } 3908 3909 sz = (s64) 1 << l2sz; 3910 if (sz < nblocks) 3911 l2sz += 1; 3912 3913 /* agsize = roundupSize/max_number_of_ag */ 3914 return (l2sz - L2MAXAG); 3915 } 3916 3917 3918 /* 3919 * NAME: dbMapFileSizeToMapSize() 3920 * 3921 * FUNCTION: compute number of blocks the block allocation map file 3922 * can cover from the map file size; 3923 * 3924 * RETURNS: Number of blocks which can be covered by this block map file; 3925 */ 3926 3927 /* 3928 * maximum number of map pages at each level including control pages 3929 */ 3930 #define MAXL0PAGES (1 + LPERCTL) 3931 #define MAXL1PAGES (1 + LPERCTL * MAXL0PAGES) 3932 #define MAXL2PAGES (1 + LPERCTL * MAXL1PAGES) 3933 3934 /* 3935 * convert number of map pages to the zero origin top dmapctl level 3936 */ 3937 #define BMAPPGTOLEV(npages) \ 3938 (((npages) <= 3 + MAXL0PAGES) ? 0 \ 3939 : ((npages) <= 2 + MAXL1PAGES) ? 1 : 2) 3940 3941 s64 dbMapFileSizeToMapSize(struct inode * ipbmap) 3942 { 3943 struct super_block *sb = ipbmap->i_sb; 3944 s64 nblocks; 3945 s64 npages, ndmaps; 3946 int level, i; 3947 int complete, factor; 3948 3949 nblocks = ipbmap->i_size >> JFS_SBI(sb)->l2bsize; 3950 npages = nblocks >> JFS_SBI(sb)->l2nbperpage; 3951 level = BMAPPGTOLEV(npages); 3952 3953 /* At each level, accumulate the number of dmap pages covered by 3954 * the number of full child levels below it; 3955 * repeat for the last incomplete child level. 3956 */ 3957 ndmaps = 0; 3958 npages--; /* skip the first global control page */ 3959 /* skip higher level control pages above top level covered by map */ 3960 npages -= (2 - level); 3961 npages--; /* skip top level's control page */ 3962 for (i = level; i >= 0; i--) { 3963 factor = 3964 (i == 2) ? MAXL1PAGES : ((i == 1) ? MAXL0PAGES : 1); 3965 complete = (u32) npages / factor; 3966 ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL 3967 : ((i == 1) ? LPERCTL : 1)); 3968 3969 /* pages in last/incomplete child */ 3970 npages = (u32) npages % factor; 3971 /* skip incomplete child's level control page */ 3972 npages--; 3973 } 3974 3975 /* convert the number of dmaps into the number of blocks 3976 * which can be covered by the dmaps; 3977 */ 3978 nblocks = ndmaps << L2BPERDMAP; 3979 3980 return (nblocks); 3981 } 3982