// SPDX-License-Identifier: GPL-2.0-only
/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	(C) 1999-2001 Ben Fennema
 *	(C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *	02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/bitops.h>
#include <linux/overflow.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit	__test_and_clear_bit_le
#define udf_set_bit	__test_and_set_bit_le
#define udf_test_bit	test_bit_le
#define udf_find_next_one_bit	find_next_bit_le

static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int i;
	int max_bits, off, count;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = sb_bread(sb, udf_get_lb_pblock(sb, &loc, block));
	bitmap->s_block_bitmap[bitmap_nr] = bh;
	if (!bh)
		return -EIO;

	/* Check consistency of Space Bitmap buffer. */
	max_bits = sb->s_blocksize * 8;
	if (!bitmap_nr) {
		off = sizeof(struct spaceBitmapDesc) << 3;
		count = min(max_bits - off, bitmap->s_nr_groups);
	} else {
		/*
		 * Rough check if bitmap number is too big to have any bitmap
		 * blocks reserved.
		 */
		if (bitmap_nr >
		    (bitmap->s_nr_groups >> (sb->s_blocksize_bits + 3)) + 2)
			return 0;
		off = 0;
		count = bitmap->s_nr_groups - bitmap_nr * max_bits +
				(sizeof(struct spaceBitmapDesc) << 3);
		count = min(count, max_bits);
	}

	for (i = 0; i < count; i++)
		if (udf_test_bit(i + off, bh->b_data)) {
			bitmap->s_block_bitmap[bitmap_nr] =
							ERR_PTR(-EFSCORRUPTED);
			brelse(bh);
			return -EFSCORRUPTED;
		}
	return 0;
}
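
/*
 * Load and cache the bitmap block for @block_group, verifying it on first
 * use. Returns the (non-negative) block group number on success and a
 * negative errno on failure; a bitmap that failed verification stays
 * cached as an ERR_PTR so it is never re-read.
 */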
static int load_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap,
			     unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%u) >= nr_groups (%d)\n",
			  block_group, nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group]) {
		/*
		 * The bitmap failed verification in the past. No point in
		 * trying again.
		 */
		if (IS_ERR(bitmap->s_block_bitmap[block_group]))
			return PTR_ERR(bitmap->s_block_bitmap[block_group]);
		return block_group;
	}

	retval = read_block_bitmap(sb, bitmap, block_group, block_group);
	if (retval < 0)
		return retval;

	return block_group;
}

/*
 * Update the free space counter in the Logical Volume Integrity Descriptor.
 * @cnt is added to the counter, so allocations pass a negative value cast
 * to u32 (le32_add_cpu() wraps around correctly).
 */
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;

	if (!sbi->s_lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}

static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	/* We make sure this cannot overflow when mounting the filesystem */
	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);
	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group
		 * boundary.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %lu already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((__u8 *)bh->b_data)[(bit + i) >> 3]);
			}
		}
		udf_add_free_space(sb, sbi->s_partition, count);
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}

static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group;
	int bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		/* Grab contiguous free blocks until one is already in use */
		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_clear_bit(bit, bh->b_data))
				goto out;
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
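
/*
 * Allocate a single block near @goal. Within the goal's block group the
 * search tries the goal bit itself, the rest of the goal's 64-bit word,
 * a bytewise memscan() for a fully free byte (0xFF == eight free blocks),
 * and finally any free bit. If that group is full, the other groups are
 * tried in two passes: a cheap byte scan first, then an exhaustive bit
 * search.
 */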
static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0;
	udf_pblk_t block;
	int block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	udf_pblk_t newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		/* Try the rest of the 64-bit word containing the goal bit */
		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	/* No luck in the goal group; try the remaining groups */
	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
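		/*
		 * First pass (i < nr_groups): cheap bytewise scan for a
		 * byte of eight free blocks. Second pass: exhaustive bit
		 * search within each group.
		 */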
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit(bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	/*
	 * Back up over up to 7 preceding free bits so an allocation does
	 * not start in the middle of a free byte.
	 */
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (newblock >= sbi->s_partmaps[partition].s_partition_len) {
		/*
		 * Ran off the end of the bitmap, and bits following are
		 * non-compliant (not all zero)
		 */
		udf_err(sb, "bitmap for partition %d corrupted (block %u marked as free, partition length is %u)\n",
			partition, newblock,
			sbi->s_partmaps[partition].s_partition_len);
		goto error_return;
	}

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}

static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	iinfo = UDF_I(table);
	udf_add_free_space(sb, sbi->s_partition, count);

	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if ((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start) {
			/*
			 * The freed range immediately follows this extent:
			 * append it, capping the 30-bit extent length at
			 * the largest block-aligned value that fits.
			 */
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
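			/*
			 * The freed range immediately precedes this extent:
			 * merge by moving the extent's start back, again
			 * subject to the 30-bit length limit.
			 */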
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we already hold
		 * s_alloc_mutex that would deadlock.
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			/* Steal a block from the extent being freed */
			udf_setup_indirect_aext(table, eloc.logicalBlockNum,
						&epos);

			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen)
			__udf_add_aext(table, &epos, &eloc, elen, 1);
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}

static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	/* Walk the free-space table looking for an extent at first_block */
	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%u, elen=%u, first_block=%u\n",
			  eloc.logicalBlockNum, elen, first_block);
	}
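	/*
	 * If an extent starts exactly at first_block, carve up to
	 * block_count blocks off its front; an extent no larger than the
	 * request is consumed and deleted outright.
	 */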
	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
				       (etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

static udf_pblk_t udf_table_new_block(struct super_block *sb,
				      struct inode *table, uint16_t partition,
				      uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	udf_pblk_t newblock = 0;
	uint32_t adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/*
	 * We search for the closest matching block to goal. If we find
	 * an exact hit, we stop. Otherwise we keep going till we run out
	 * of extents. We store the buffer_head, bloc, and extoffset
	 * of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
				  (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	/* We found no usable extent at all */
	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/*
	 * Only allocate blocks from the beginning of the extent. That way,
	 * we only delete (empty) extents, never have to insert an extent
	 * because of splitting.
	 */
	/* This works, but very poorly.... */
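	/*
	 * goal_elen packs the extent type in bits 31:30 and the byte length
	 * below them, so subtracting one block's worth of bytes shortens
	 * the extent without disturbing its type.
	 */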
	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos);
	brelse(goal_epos.bh);

	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}

void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
		     uint32_t count)
{
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	uint32_t blk;

	if (check_add_overflow(bloc->logicalBlockNum, offset, &blk) ||
	    check_add_overflow(blk, count, &blk) ||
	    bloc->logicalBlockNum + count > map->s_partition_len) {
		udf_debug("Invalid request to free blocks: (%d, %u), off %u, len %u, partition len %u\n",
			  partition, bloc->logicalBlockNum, offset, count,
			  map->s_partition_len);
		return;
	}

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		udf_table_free_blocks(sb, map->s_uspace.s_table,
				      bloc, offset, count);
	}

	if (inode) {
		inode_sub_bytes(inode,
				((sector_t)count) << sb->s_blocksize_bits);
	}
}

inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	int allocated;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		allocated = udf_bitmap_prealloc_blocks(sb,
						       map->s_uspace.s_bitmap,
						       partition, first_block,
						       block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		allocated = udf_table_prealloc_blocks(sb,
						      map->s_uspace.s_table,
						      partition, first_block,
						      block_count);
	else
		return 0;

	if (inode && allocated > 0)
		inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
	return allocated;
}

inline udf_pblk_t udf_new_block(struct super_block *sb,
				struct inode *inode,
				uint16_t partition, uint32_t goal, int *err)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	udf_pblk_t block;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		block = udf_bitmap_new_block(sb,
					     map->s_uspace.s_bitmap,
					     partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		block = udf_table_new_block(sb,
					    map->s_uspace.s_table,
					    partition, goal, err);
	else {
		*err = -EIO;
		return 0;
	}
	if (inode && block)
		inode_add_bytes(inode, sb->s_blocksize);
	return block;
}