1 /* 2 * linux/fs/ufs/balloc.c 3 * 4 * Copyright (C) 1998 5 * Daniel Pirkl <daniel.pirkl@email.cz> 6 * Charles University, Faculty of Mathematics and Physics 7 */ 8 9 #include <linux/fs.h> 10 #include <linux/ufs_fs.h> 11 #include <linux/stat.h> 12 #include <linux/time.h> 13 #include <linux/string.h> 14 #include <linux/quotaops.h> 15 #include <linux/buffer_head.h> 16 #include <linux/capability.h> 17 #include <linux/sched.h> 18 #include <linux/bitops.h> 19 #include <asm/byteorder.h> 20 21 #include "swab.h" 22 #include "util.h" 23 24 static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *); 25 static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *); 26 static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *); 27 static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned); 28 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[]; 29 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int); 30 31 /* 32 * Free 'count' fragments from fragment number 'fragment' 33 */ 34 void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count) 35 { 36 struct super_block * sb; 37 struct ufs_sb_private_info * uspi; 38 struct ufs_super_block_first * usb1; 39 struct ufs_cg_private_info * ucpi; 40 struct ufs_cylinder_group * ucg; 41 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno; 42 43 sb = inode->i_sb; 44 uspi = UFS_SB(sb)->s_uspi; 45 usb1 = ubh_get_usb_first(uspi); 46 47 UFSD("ENTER, fragment %u, count %u\n", fragment, count); 48 49 if (ufs_fragnum(fragment) + count > uspi->s_fpg) 50 ufs_error (sb, "ufs_free_fragments", "internal error"); 51 52 lock_super(sb); 53 54 cgno = ufs_dtog(fragment); 55 bit = ufs_dtogd(fragment); 56 if (cgno >= uspi->s_ncg) { 57 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device"); 58 goto failed; 59 } 60 61 ucpi = ufs_load_cylinder (sb, cgno); 62 if (!ucpi) 63 goto failed; 64 ucg = ubh_get_ucg (UCPI_UBH(ucpi)); 65 if (!ufs_cg_chkmagic(sb, ucg)) { 66 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno); 67 goto failed; 68 } 69 70 end_bit = bit + count; 71 bbase = ufs_blknum (bit); 72 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase); 73 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1); 74 for (i = bit; i < end_bit; i++) { 75 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i)) 76 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i); 77 else 78 ufs_error (sb, "ufs_free_fragments", 79 "bit already cleared for fragment %u", i); 80 } 81 82 DQUOT_FREE_BLOCK (inode, count); 83 84 85 fs32_add(sb, &ucg->cg_cs.cs_nffree, count); 86 uspi->cs_total.cs_nffree += count; 87 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); 88 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase); 89 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1); 90 91 /* 92 * Trying to reassemble free fragments into block 93 */ 94 blkno = ufs_fragstoblks (bbase); 95 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) { 96 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb); 97 uspi->cs_total.cs_nffree -= uspi->s_fpb; 98 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb); 99 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 100 ufs_clusteracct (sb, ucpi, blkno, 1); 101 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); 102 uspi->cs_total.cs_nbfree++; 103 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1); 104 cylno = ufs_cbtocylno (bbase); 105 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1); 106 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1); 107 } 108 109 ubh_mark_buffer_dirty (USPI_UBH(uspi)); 110 ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); 111 if (sb->s_flags & MS_SYNCHRONOUS) { 112 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); 113 ubh_wait_on_buffer (UCPI_UBH(ucpi)); 114 } 115 sb->s_dirt = 1; 116 117 unlock_super (sb); 118 UFSD("EXIT\n"); 119 return; 120 121 failed: 122 unlock_super (sb); 123 UFSD("EXIT (FAILED)\n"); 124 return; 125 } 126 127 /* 128 * Free 'count' fragments from fragment number 'fragment' (free whole blocks) 129 */ 130 void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count) 131 { 132 struct super_block * sb; 133 struct ufs_sb_private_info * uspi; 134 struct ufs_super_block_first * usb1; 135 struct ufs_cg_private_info * ucpi; 136 struct ufs_cylinder_group * ucg; 137 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno; 138 139 sb = inode->i_sb; 140 uspi = UFS_SB(sb)->s_uspi; 141 usb1 = ubh_get_usb_first(uspi); 142 143 UFSD("ENTER, fragment %u, count %u\n", fragment, count); 144 145 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) { 146 ufs_error (sb, "ufs_free_blocks", "internal error, " 147 "fragment %u, count %u\n", fragment, count); 148 goto failed; 149 } 150 151 lock_super(sb); 152 153 do_more: 154 overflow = 0; 155 cgno = ufs_dtog (fragment); 156 bit = ufs_dtogd (fragment); 157 if (cgno >= uspi->s_ncg) { 158 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device"); 159 goto failed_unlock; 160 } 161 end_bit = bit + count; 162 if (end_bit > uspi->s_fpg) { 163 overflow = bit + count - uspi->s_fpg; 164 count -= overflow; 165 end_bit -= overflow; 166 } 167 168 ucpi = ufs_load_cylinder (sb, cgno); 169 if (!ucpi) 170 goto failed_unlock; 171 ucg = ubh_get_ucg (UCPI_UBH(ucpi)); 172 if (!ufs_cg_chkmagic(sb, ucg)) { 173 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno); 174 goto failed_unlock; 175 } 176 177 for (i = bit; i < end_bit; i += uspi->s_fpb) { 178 blkno = ufs_fragstoblks(i); 179 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) { 180 ufs_error(sb, "ufs_free_blocks", "freeing free fragment"); 181 } 182 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 183 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 184 ufs_clusteracct (sb, ucpi, blkno, 1); 185 DQUOT_FREE_BLOCK(inode, uspi->s_fpb); 186 187 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); 188 uspi->cs_total.cs_nbfree++; 189 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1); 190 cylno = ufs_cbtocylno(i); 191 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1); 192 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1); 193 } 194 195 ubh_mark_buffer_dirty (USPI_UBH(uspi)); 196 ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); 197 if (sb->s_flags & MS_SYNCHRONOUS) { 198 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); 199 ubh_wait_on_buffer (UCPI_UBH(ucpi)); 200 } 201 202 if (overflow) { 203 fragment += count; 204 count = overflow; 205 goto do_more; 206 } 207 208 sb->s_dirt = 1; 209 unlock_super (sb); 210 UFSD("EXIT\n"); 211 return; 212 213 failed_unlock: 214 unlock_super (sb); 215 failed: 216 UFSD("EXIT (FAILED)\n"); 217 return; 218 } 219 220 static struct page *ufs_get_locked_page(struct address_space *mapping, 221 unsigned long index) 222 { 223 struct page *page; 224 225 try_again: 226 page = find_lock_page(mapping, index); 227 if (!page) { 228 page = read_cache_page(mapping, index, 229 (filler_t*)mapping->a_ops->readpage, 230 NULL); 231 if (IS_ERR(page)) { 232 printk(KERN_ERR "ufs_change_blocknr: " 233 "read_cache_page error: ino %lu, index: %lu\n", 234 mapping->host->i_ino, index); 235 goto out; 236 } 237 238 lock_page(page); 239 240 if (!PageUptodate(page) || PageError(page)) { 241 unlock_page(page); 242 page_cache_release(page); 243 244 printk(KERN_ERR "ufs_change_blocknr: " 245 "can not read page: ino %lu, index: %lu\n", 246 mapping->host->i_ino, index); 247 248 page = ERR_PTR(-EIO); 249 goto out; 250 } 251 } 252 253 if (unlikely(!page->mapping || !page_has_buffers(page))) { 254 unlock_page(page); 255 page_cache_release(page); 256 goto try_again;/*we really need these buffers*/ 257 } 258 out: 259 return page; 260 } 261 262 /* 263 * Modify inode page cache in such way: 264 * have - blocks with b_blocknr equal to oldb...oldb+count-1 265 * get - blocks with b_blocknr equal to newb...newb+count-1 266 * also we suppose that oldb...oldb+count-1 blocks 267 * situated at the end of file. 268 * 269 * We can come here from ufs_writepage or ufs_prepare_write, 270 * locked_page is argument of these functions, so we already lock it. 271 */ 272 static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk, 273 unsigned int count, unsigned int oldb, 274 unsigned int newb, struct page *locked_page) 275 { 276 unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits); 277 struct address_space *mapping = inode->i_mapping; 278 pgoff_t index, cur_index = locked_page->index; 279 unsigned int i, j; 280 struct page *page; 281 struct buffer_head *head, *bh; 282 283 UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n", 284 inode->i_ino, count, oldb, newb); 285 286 BUG_ON(!PageLocked(locked_page)); 287 288 for (i = 0; i < count; i += blk_per_page) { 289 index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 290 291 if (likely(cur_index != index)) { 292 page = ufs_get_locked_page(mapping, index); 293 if (IS_ERR(page)) 294 continue; 295 } else 296 page = locked_page; 297 298 j = i; 299 head = page_buffers(page); 300 bh = head; 301 do { 302 if (likely(bh->b_blocknr == j + oldb && j < count)) { 303 unmap_underlying_metadata(bh->b_bdev, 304 bh->b_blocknr); 305 bh->b_blocknr = newb + j++; 306 mark_buffer_dirty(bh); 307 } 308 309 bh = bh->b_this_page; 310 } while (bh != head); 311 312 set_page_dirty(page); 313 314 if (likely(cur_index != index)) { 315 unlock_page(page); 316 page_cache_release(page); 317 } 318 } 319 UFSD("EXIT\n"); 320 } 321 322 unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment, 323 unsigned goal, unsigned count, int * err, struct page *locked_page) 324 { 325 struct super_block * sb; 326 struct ufs_sb_private_info * uspi; 327 struct ufs_super_block_first * usb1; 328 unsigned cgno, oldcount, newcount, tmp, request, result; 329 330 UFSD("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count); 331 332 sb = inode->i_sb; 333 uspi = UFS_SB(sb)->s_uspi; 334 usb1 = ubh_get_usb_first(uspi); 335 *err = -ENOSPC; 336 337 lock_super (sb); 338 339 tmp = fs32_to_cpu(sb, *p); 340 if (count + ufs_fragnum(fragment) > uspi->s_fpb) { 341 ufs_warning (sb, "ufs_new_fragments", "internal warning" 342 " fragment %u, count %u", fragment, count); 343 count = uspi->s_fpb - ufs_fragnum(fragment); 344 } 345 oldcount = ufs_fragnum (fragment); 346 newcount = oldcount + count; 347 348 /* 349 * Somebody else has just allocated our fragments 350 */ 351 if (oldcount) { 352 if (!tmp) { 353 ufs_error (sb, "ufs_new_fragments", "internal error, " 354 "fragment %u, tmp %u\n", fragment, tmp); 355 unlock_super (sb); 356 return (unsigned)-1; 357 } 358 if (fragment < UFS_I(inode)->i_lastfrag) { 359 UFSD("EXIT (ALREADY ALLOCATED)\n"); 360 unlock_super (sb); 361 return 0; 362 } 363 } 364 else { 365 if (tmp) { 366 UFSD("EXIT (ALREADY ALLOCATED)\n"); 367 unlock_super(sb); 368 return 0; 369 } 370 } 371 372 /* 373 * There is not enough space for user on the device 374 */ 375 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) { 376 unlock_super (sb); 377 UFSD("EXIT (FAILED)\n"); 378 return 0; 379 } 380 381 if (goal >= uspi->s_size) 382 goal = 0; 383 if (goal == 0) 384 cgno = ufs_inotocg (inode->i_ino); 385 else 386 cgno = ufs_dtog (goal); 387 388 /* 389 * allocate new fragment 390 */ 391 if (oldcount == 0) { 392 result = ufs_alloc_fragments (inode, cgno, goal, count, err); 393 if (result) { 394 *p = cpu_to_fs32(sb, result); 395 *err = 0; 396 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 397 } 398 unlock_super(sb); 399 UFSD("EXIT, result %u\n", result); 400 return result; 401 } 402 403 /* 404 * resize block 405 */ 406 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err); 407 if (result) { 408 *err = 0; 409 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 410 unlock_super(sb); 411 UFSD("EXIT, result %u\n", result); 412 return result; 413 } 414 415 /* 416 * allocate new block and move data 417 */ 418 switch (fs32_to_cpu(sb, usb1->fs_optim)) { 419 case UFS_OPTSPACE: 420 request = newcount; 421 if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree 422 > uspi->s_dsize * uspi->s_minfree / (2 * 100)) 423 break; 424 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 425 break; 426 default: 427 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 428 429 case UFS_OPTTIME: 430 request = uspi->s_fpb; 431 if (uspi->cs_total.cs_nffree < uspi->s_dsize * 432 (uspi->s_minfree - 2) / 100) 433 break; 434 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 435 break; 436 } 437 result = ufs_alloc_fragments (inode, cgno, goal, request, err); 438 if (result) { 439 ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp, 440 result, locked_page); 441 442 *p = cpu_to_fs32(sb, result); 443 *err = 0; 444 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 445 unlock_super(sb); 446 if (newcount < request) 447 ufs_free_fragments (inode, result + newcount, request - newcount); 448 ufs_free_fragments (inode, tmp, oldcount); 449 UFSD("EXIT, result %u\n", result); 450 return result; 451 } 452 453 unlock_super(sb); 454 UFSD("EXIT (FAILED)\n"); 455 return 0; 456 } 457 458 static unsigned 459 ufs_add_fragments (struct inode * inode, unsigned fragment, 460 unsigned oldcount, unsigned newcount, int * err) 461 { 462 struct super_block * sb; 463 struct ufs_sb_private_info * uspi; 464 struct ufs_super_block_first * usb1; 465 struct ufs_cg_private_info * ucpi; 466 struct ufs_cylinder_group * ucg; 467 unsigned cgno, fragno, fragoff, count, fragsize, i; 468 469 UFSD("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount); 470 471 sb = inode->i_sb; 472 uspi = UFS_SB(sb)->s_uspi; 473 usb1 = ubh_get_usb_first (uspi); 474 count = newcount - oldcount; 475 476 cgno = ufs_dtog(fragment); 477 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count) 478 return 0; 479 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb) 480 return 0; 481 ucpi = ufs_load_cylinder (sb, cgno); 482 if (!ucpi) 483 return 0; 484 ucg = ubh_get_ucg (UCPI_UBH(ucpi)); 485 if (!ufs_cg_chkmagic(sb, ucg)) { 486 ufs_panic (sb, "ufs_add_fragments", 487 "internal error, bad magic number on cg %u", cgno); 488 return 0; 489 } 490 491 fragno = ufs_dtogd (fragment); 492 fragoff = ufs_fragnum (fragno); 493 for (i = oldcount; i < newcount; i++) 494 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i)) 495 return 0; 496 /* 497 * Block can be extended 498 */ 499 ucg->cg_time = cpu_to_fs32(sb, get_seconds()); 500 for (i = newcount; i < (uspi->s_fpb - fragoff); i++) 501 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i)) 502 break; 503 fragsize = i - oldcount; 504 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize])) 505 ufs_panic (sb, "ufs_add_fragments", 506 "internal error or corrupted bitmap on cg %u", cgno); 507 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1); 508 if (fragsize != count) 509 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1); 510 for (i = oldcount; i < newcount; i++) 511 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i); 512 if(DQUOT_ALLOC_BLOCK(inode, count)) { 513 *err = -EDQUOT; 514 return 0; 515 } 516 517 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count); 518 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); 519 uspi->cs_total.cs_nffree -= count; 520 521 ubh_mark_buffer_dirty (USPI_UBH(uspi)); 522 ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); 523 if (sb->s_flags & MS_SYNCHRONOUS) { 524 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); 525 ubh_wait_on_buffer (UCPI_UBH(ucpi)); 526 } 527 sb->s_dirt = 1; 528 529 UFSD("EXIT, fragment %u\n", fragment); 530 531 return fragment; 532 } 533 534 #define UFS_TEST_FREE_SPACE_CG \ 535 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \ 536 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \ 537 goto cg_found; \ 538 for (k = count; k < uspi->s_fpb; k++) \ 539 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \ 540 goto cg_found; 541 542 static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno, 543 unsigned goal, unsigned count, int * err) 544 { 545 struct super_block * sb; 546 struct ufs_sb_private_info * uspi; 547 struct ufs_super_block_first * usb1; 548 struct ufs_cg_private_info * ucpi; 549 struct ufs_cylinder_group * ucg; 550 unsigned oldcg, i, j, k, result, allocsize; 551 552 UFSD("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count); 553 554 sb = inode->i_sb; 555 uspi = UFS_SB(sb)->s_uspi; 556 usb1 = ubh_get_usb_first(uspi); 557 oldcg = cgno; 558 559 /* 560 * 1. searching on preferred cylinder group 561 */ 562 UFS_TEST_FREE_SPACE_CG 563 564 /* 565 * 2. quadratic rehash 566 */ 567 for (j = 1; j < uspi->s_ncg; j *= 2) { 568 cgno += j; 569 if (cgno >= uspi->s_ncg) 570 cgno -= uspi->s_ncg; 571 UFS_TEST_FREE_SPACE_CG 572 } 573 574 /* 575 * 3. brute force search 576 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step ) 577 */ 578 cgno = (oldcg + 1) % uspi->s_ncg; 579 for (j = 2; j < uspi->s_ncg; j++) { 580 cgno++; 581 if (cgno >= uspi->s_ncg) 582 cgno = 0; 583 UFS_TEST_FREE_SPACE_CG 584 } 585 586 UFSD("EXIT (FAILED)\n"); 587 return 0; 588 589 cg_found: 590 ucpi = ufs_load_cylinder (sb, cgno); 591 if (!ucpi) 592 return 0; 593 ucg = ubh_get_ucg (UCPI_UBH(ucpi)); 594 if (!ufs_cg_chkmagic(sb, ucg)) 595 ufs_panic (sb, "ufs_alloc_fragments", 596 "internal error, bad magic number on cg %u", cgno); 597 ucg->cg_time = cpu_to_fs32(sb, get_seconds()); 598 599 if (count == uspi->s_fpb) { 600 result = ufs_alloccg_block (inode, ucpi, goal, err); 601 if (result == (unsigned)-1) 602 return 0; 603 goto succed; 604 } 605 606 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++) 607 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0) 608 break; 609 610 if (allocsize == uspi->s_fpb) { 611 result = ufs_alloccg_block (inode, ucpi, goal, err); 612 if (result == (unsigned)-1) 613 return 0; 614 goal = ufs_dtogd (result); 615 for (i = count; i < uspi->s_fpb; i++) 616 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); 617 i = uspi->s_fpb - count; 618 DQUOT_FREE_BLOCK(inode, i); 619 620 fs32_add(sb, &ucg->cg_cs.cs_nffree, i); 621 uspi->cs_total.cs_nffree += i; 622 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i); 623 fs32_add(sb, &ucg->cg_frsum[i], 1); 624 goto succed; 625 } 626 627 result = ufs_bitmap_search (sb, ucpi, goal, allocsize); 628 if (result == (unsigned)-1) 629 return 0; 630 if(DQUOT_ALLOC_BLOCK(inode, count)) { 631 *err = -EDQUOT; 632 return 0; 633 } 634 for (i = 0; i < count; i++) 635 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i); 636 637 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count); 638 uspi->cs_total.cs_nffree -= count; 639 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); 640 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1); 641 642 if (count != allocsize) 643 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1); 644 645 succed: 646 ubh_mark_buffer_dirty (USPI_UBH(uspi)); 647 ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); 648 if (sb->s_flags & MS_SYNCHRONOUS) { 649 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); 650 ubh_wait_on_buffer (UCPI_UBH(ucpi)); 651 } 652 sb->s_dirt = 1; 653 654 result += cgno * uspi->s_fpg; 655 UFSD("EXIT3, result %u\n", result); 656 return result; 657 } 658 659 static unsigned ufs_alloccg_block (struct inode * inode, 660 struct ufs_cg_private_info * ucpi, unsigned goal, int * err) 661 { 662 struct super_block * sb; 663 struct ufs_sb_private_info * uspi; 664 struct ufs_super_block_first * usb1; 665 struct ufs_cylinder_group * ucg; 666 unsigned result, cylno, blkno; 667 668 UFSD("ENTER, goal %u\n", goal); 669 670 sb = inode->i_sb; 671 uspi = UFS_SB(sb)->s_uspi; 672 usb1 = ubh_get_usb_first(uspi); 673 ucg = ubh_get_ucg(UCPI_UBH(ucpi)); 674 675 if (goal == 0) { 676 goal = ucpi->c_rotor; 677 goto norot; 678 } 679 goal = ufs_blknum (goal); 680 goal = ufs_dtogd (goal); 681 682 /* 683 * If the requested block is available, use it. 684 */ 685 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) { 686 result = goal; 687 goto gotit; 688 } 689 690 norot: 691 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb); 692 if (result == (unsigned)-1) 693 return (unsigned)-1; 694 ucpi->c_rotor = result; 695 gotit: 696 blkno = ufs_fragstoblks(result); 697 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 698 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 699 ufs_clusteracct (sb, ucpi, blkno, -1); 700 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) { 701 *err = -EDQUOT; 702 return (unsigned)-1; 703 } 704 705 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1); 706 uspi->cs_total.cs_nbfree--; 707 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1); 708 cylno = ufs_cbtocylno(result); 709 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1); 710 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1); 711 712 UFSD("EXIT, result %u\n", result); 713 714 return result; 715 } 716 717 static unsigned ubh_scanc(struct ufs_sb_private_info *uspi, 718 struct ufs_buffer_head *ubh, 719 unsigned begin, unsigned size, 720 unsigned char *table, unsigned char mask) 721 { 722 unsigned rest, offset; 723 unsigned char *cp; 724 725 726 offset = begin & ~uspi->s_fmask; 727 begin >>= uspi->s_fshift; 728 for (;;) { 729 if ((offset + size) < uspi->s_fsize) 730 rest = size; 731 else 732 rest = uspi->s_fsize - offset; 733 size -= rest; 734 cp = ubh->bh[begin]->b_data + offset; 735 while ((table[*cp++] & mask) == 0 && --rest) 736 ; 737 if (rest || !size) 738 break; 739 begin++; 740 offset = 0; 741 } 742 return (size + rest); 743 } 744 745 /* 746 * Find a block of the specified size in the specified cylinder group. 747 * @sp: pointer to super block 748 * @ucpi: pointer to cylinder group info 749 * @goal: near which block we want find new one 750 * @count: specified size 751 */ 752 static unsigned ufs_bitmap_search(struct super_block *sb, 753 struct ufs_cg_private_info *ucpi, 754 unsigned goal, unsigned count) 755 { 756 /* 757 * Bit patterns for identifying fragments in the block map 758 * used as ((map & mask_arr) == want_arr) 759 */ 760 static const int mask_arr[9] = { 761 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff 762 }; 763 static const int want_arr[9] = { 764 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe 765 }; 766 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 767 struct ufs_super_block_first *usb1; 768 struct ufs_cylinder_group *ucg; 769 unsigned start, length, loc, result; 770 unsigned pos, want, blockmap, mask, end; 771 772 UFSD("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count); 773 774 usb1 = ubh_get_usb_first (uspi); 775 ucg = ubh_get_ucg(UCPI_UBH(ucpi)); 776 777 if (goal) 778 start = ufs_dtogd(goal) >> 3; 779 else 780 start = ucpi->c_frotor >> 3; 781 782 length = ((uspi->s_fpg + 7) >> 3) - start; 783 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length, 784 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other, 785 1 << (count - 1 + (uspi->s_fpb & 7))); 786 if (loc == 0) { 787 length = start + 1; 788 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length, 789 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : 790 ufs_fragtable_other, 791 1 << (count - 1 + (uspi->s_fpb & 7))); 792 if (loc == 0) { 793 ufs_error(sb, "ufs_bitmap_search", 794 "bitmap corrupted on cg %u, start %u," 795 " length %u, count %u, freeoff %u\n", 796 ucpi->c_cgx, start, length, count, 797 ucpi->c_freeoff); 798 return (unsigned)-1; 799 } 800 start = 0; 801 } 802 result = (start + length - loc) << 3; 803 ucpi->c_frotor = result; 804 805 /* 806 * found the byte in the map 807 */ 808 809 for (end = result + 8; result < end; result += uspi->s_fpb) { 810 blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result); 811 blockmap <<= 1; 812 mask = mask_arr[count]; 813 want = want_arr[count]; 814 for (pos = 0; pos <= uspi->s_fpb - count; pos++) { 815 if ((blockmap & mask) == want) { 816 UFSD("EXIT, result %u\n", result); 817 return result + pos; 818 } 819 mask <<= 1; 820 want <<= 1; 821 } 822 } 823 824 ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n", 825 ucpi->c_cgx); 826 UFSD("EXIT (FAILED)\n"); 827 return (unsigned)-1; 828 } 829 830 static void ufs_clusteracct(struct super_block * sb, 831 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt) 832 { 833 struct ufs_sb_private_info * uspi; 834 int i, start, end, forw, back; 835 836 uspi = UFS_SB(sb)->s_uspi; 837 if (uspi->s_contigsumsize <= 0) 838 return; 839 840 if (cnt > 0) 841 ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno); 842 else 843 ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno); 844 845 /* 846 * Find the size of the cluster going forward. 847 */ 848 start = blkno + 1; 849 end = start + uspi->s_contigsumsize; 850 if ( end >= ucpi->c_nclusterblks) 851 end = ucpi->c_nclusterblks; 852 i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start); 853 if (i > end) 854 i = end; 855 forw = i - start; 856 857 /* 858 * Find the size of the cluster going backward. 859 */ 860 start = blkno - 1; 861 end = start - uspi->s_contigsumsize; 862 if (end < 0 ) 863 end = -1; 864 i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end); 865 if ( i < end) 866 i = end; 867 back = start - i; 868 869 /* 870 * Account for old cluster and the possibly new forward and 871 * back clusters. 872 */ 873 i = back + forw + 1; 874 if (i > uspi->s_contigsumsize) 875 i = uspi->s_contigsumsize; 876 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt); 877 if (back > 0) 878 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt); 879 if (forw > 0) 880 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt); 881 } 882 883 884 static unsigned char ufs_fragtable_8fpb[] = { 885 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08, 886 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10, 887 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, 888 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20, 889 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, 890 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11, 891 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A, 892 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40, 893 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, 894 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11, 895 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, 896 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21, 897 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A, 898 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12, 899 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C, 900 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80, 901 }; 902 903 static unsigned char ufs_fragtable_other[] = { 904 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A, 905 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E, 906 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E, 907 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA, 908 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E, 909 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E, 910 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE, 911 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE, 912 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E, 913 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E, 914 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E, 915 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE, 916 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA, 917 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE, 918 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE, 919 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A, 920 }; 921