/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_next_one_bit(addr, size, offset) \
	ext2_find_next_bit(addr, size, offset)

static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}

static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,
			  nr_groups);
		/* Indexing s_block_bitmap[] below would overrun the array */
		return -EIO;
	}

	if (bitmap->s_block_bitmap[block_group]) {
		return block_group;
	} else {
		retval = read_block_bitmap(sb, bitmap, block_group,
					   block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}

static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}

static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;

	if (!sbi->s_lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}

static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	struct udf_part_map *partmap;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
			  count, partmap->s_partition_len);
		goto error_return;
	}

	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group
		 * boundary.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %ld already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((char *)bh->b_data)[(bit + i) >> 3]);
			} else {
				udf_add_free_space(sb, sbi->s_partition, 1);
			}
		}
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}

static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		nr_groups = udf_compute_nr_groups(sb, partition);
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_clear_bit(bit, bh->b_data))
				goto out;
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

static int udf_bitmap_new_block(struct super_block *sb,
				struct inode *inode,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit((char *)bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	/*
	 * Walk back over up to 7 set (free) bits so the allocation starts
	 * at the beginning of a run of free blocks.
	 */
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}

static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *inode,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *partmap;
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	int i;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
			  count,
			  partmap->s_partition_len);
		goto error_return;
	}

	iinfo = UDF_I(table);
	udf_add_free_space(sb, sbi->s_partition, count);

	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
		      (elen >> sb->s_blocksize_bits)) == start)) {
			/* The freed run directly follows this extent;
			 * merge it in at the tail. */
			if ((0x3FFFFFFF - elen) <
			    (count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					 (count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			/* The freed run directly precedes this extent;
			 * merge it in at the head. */
			if ((0x3FFFFFFF - elen) <
			    (count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					 (count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			i = -1;
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;
		struct short_ad *sad = NULL;
		struct long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			unsigned char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			epos.bh = udf_tread(sb,
					udf_get_lb_pblock(sb, &epos.block, 0));
			if (!epos.bh) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = iinfo->i_ext.i_data + epos.offset -
					adsize;
				dptr = epos.bh->b_data +
					sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) +
					adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				if (oepos.bh) {
					sptr = oepos.bh->b_data + epos.offset;
					aed = (struct allocExtDesc *)
						oepos.bh->b_data;
					le32_add_cpu(&aed->lengthAllocDescs,
						     adsize);
				} else {
					sptr = iinfo->i_ext.i_data +
						epos.offset;
					iinfo->i_lenAlloc += adsize;
					mark_inode_dirty(table);
				}
				epos.offset = sizeof(struct allocExtDesc);
			}
			if (sbi->s_udfrev >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    3, 1, epos.block.logicalBlockNum,
					    sizeof(struct tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    2, 1, epos.block.logicalBlockNum,
					    sizeof(struct tag));

			switch (iinfo->i_alloc_type) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (struct short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				sad->extPosition =
					cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (struct long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				lad->extLocation =
					cpu_to_lelb(epos.block);
				break;
			}
			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else {
				mark_inode_dirty(table);
			}
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen) {
			udf_write_aext(table, &epos, &eloc, elen, 1);

			if (!epos.bh) {
				iinfo->i_lenAlloc += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				le32_add_cpu(&aed->lengthAllocDescs, adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}

static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *inode,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block,
				     uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body: udf_debug() compiles to nothing when
		   * UDF debugging is disabled */
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
				       (etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

static int udf_table_new_block(struct super_block *sb,
			       struct inode *inode,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/*
	 * We search for the closest matching block to goal. If we find
	 * an exact hit, we stop. Otherwise we keep going till we run out
	 * of extents. We store the buffer_head, bloc, and extoffset
	 * of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
				   (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					  (elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/*
	 * Only allocate blocks from the beginning of the extent.
	 * That way, we only delete (empty) extents, never have to insert an
	 * extent because of splitting.
	 */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}

void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
		     uint32_t count)
{
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		udf_table_free_blocks(sb, inode, map->s_uspace.s_table,
				      bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		udf_table_free_blocks(sb, inode, map->s_fspace.s_table,
				      bloc, offset, count);
	}
}

inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_uspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_uspace.s_table,
						 partition, first_block,
						 block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_fspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_fspace.s_table,
						 partition, first_block,
						 block_count);
	else
		return 0;
}

inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_uspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_uspace.s_table,
					   partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_fspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_fspace.s_table,
					   partition, goal, err);
	else {
		*err = -EIO;
		return 0;
	}
}
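/*
 * Illustrative caller sketch (hypothetical; the real callers live elsewhere
 * in fs/udf, and "bloc" below is a kernel_lb_addr the caller fills in):
 *
 *	int err;
 *	uint32_t block = udf_new_block(sb, inode, partition, goal, &err);
 *
 *	if (!block)
 *		return err;	// -ENOSPC or -EIO
 *	// ... on rollback, return the block to free space:
 *	udf_free_blocks(sb, inode, &bloc, 0, 1);
 */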