1 /* 2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README 3 */ 4 5 #include <linux/time.h> 6 #include <linux/fs.h> 7 #include "reiserfs.h" 8 #include "acl.h" 9 #include "xattr.h" 10 #include <linux/exportfs.h> 11 #include <linux/pagemap.h> 12 #include <linux/highmem.h> 13 #include <linux/slab.h> 14 #include <linux/uaccess.h> 15 #include <asm/unaligned.h> 16 #include <linux/buffer_head.h> 17 #include <linux/mpage.h> 18 #include <linux/writeback.h> 19 #include <linux/quotaops.h> 20 #include <linux/swap.h> 21 #include <linux/uio.h> 22 #include <linux/bio.h> 23 24 int reiserfs_commit_write(struct file *f, struct page *page, 25 unsigned from, unsigned to); 26 27 void reiserfs_evict_inode(struct inode *inode) 28 { 29 /* 30 * We need blocks for transaction + (user+group) quota 31 * update (possibly delete) 32 */ 33 int jbegin_count = 34 JOURNAL_PER_BALANCE_CNT * 2 + 35 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb); 36 struct reiserfs_transaction_handle th; 37 int err; 38 39 if (!inode->i_nlink && !is_bad_inode(inode)) 40 dquot_initialize(inode); 41 42 truncate_inode_pages_final(&inode->i_data); 43 if (inode->i_nlink) 44 goto no_delete; 45 46 /* 47 * The = 0 happens when we abort creating a new inode 48 * for some reason like lack of space.. 49 * also handles bad_inode case 50 */ 51 if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { 52 53 reiserfs_delete_xattrs(inode); 54 55 reiserfs_write_lock(inode->i_sb); 56 57 if (journal_begin(&th, inode->i_sb, jbegin_count)) 58 goto out; 59 reiserfs_update_inode_transaction(inode); 60 61 reiserfs_discard_prealloc(&th, inode); 62 63 err = reiserfs_delete_object(&th, inode); 64 65 /* 66 * Do quota update inside a transaction for journaled quotas. 67 * We must do that after delete_object so that quota updates 68 * go into the same transaction as stat data deletion 69 */ 70 if (!err) { 71 int depth = reiserfs_write_unlock_nested(inode->i_sb); 72 dquot_free_inode(inode); 73 reiserfs_write_lock_nested(inode->i_sb, depth); 74 } 75 76 if (journal_end(&th)) 77 goto out; 78 79 /* 80 * check return value from reiserfs_delete_object after 81 * ending the transaction 82 */ 83 if (err) 84 goto out; 85 86 /* 87 * all items of file are deleted, so we can remove 88 * "save" link 89 * we can't do anything about an error here 90 */ 91 remove_save_link(inode, 0 /* not truncate */); 92 out: 93 reiserfs_write_unlock(inode->i_sb); 94 } else { 95 /* no object items are in the tree */ 96 ; 97 } 98 99 /* note this must go after the journal_end to prevent deadlock */ 100 clear_inode(inode); 101 102 dquot_drop(inode); 103 inode->i_blocks = 0; 104 return; 105 106 no_delete: 107 clear_inode(inode); 108 dquot_drop(inode); 109 } 110 111 static void _make_cpu_key(struct cpu_key *key, int version, __u32 dirid, 112 __u32 objectid, loff_t offset, int type, int length) 113 { 114 key->version = version; 115 116 key->on_disk_key.k_dir_id = dirid; 117 key->on_disk_key.k_objectid = objectid; 118 set_cpu_key_k_offset(key, offset); 119 set_cpu_key_k_type(key, type); 120 key->key_length = length; 121 } 122 123 /* 124 * take base of inode_key (it comes from inode always) (dirid, objectid) 125 * and version from an inode, set offset and type of key 126 */ 127 void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset, 128 int type, int length) 129 { 130 _make_cpu_key(key, get_inode_item_key_version(inode), 131 le32_to_cpu(INODE_PKEY(inode)->k_dir_id), 132 le32_to_cpu(INODE_PKEY(inode)->k_objectid), offset, type, 133 length); 134 } 135 136 /* when key 
is 0, do not set version and short key */
inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
			      int version,
			      loff_t offset, int type, int length,
			      int entry_count /*or ih_free_space */ )
{
	if (key) {
		ih->ih_key.k_dir_id = cpu_to_le32(key->on_disk_key.k_dir_id);
		ih->ih_key.k_objectid =
		    cpu_to_le32(key->on_disk_key.k_objectid);
	}
	put_ih_version(ih, version);
	set_le_ih_k_offset(ih, offset);
	set_le_ih_k_type(ih, type);
	put_ih_item_len(ih, length);
	/* set_ih_free_space (ih, 0); */
	/*
	 * for directory items it is entry count, for directs and stat
	 * datas - 0xffff, for indirects - 0
	 */
	put_ih_entry_count(ih, entry_count);
}

/*
 * FIXME: we might cache recently accessed indirect item
 * Ugh.  Not too eager for that....
 * I cut the code until such time as I see a convincing argument (benchmark).
 * I don't want a bloated inode struct..., and I don't like code complexity....
 */

/*
 * cutting the code is fine, since it really isn't in use yet and is easy
 * to add back in.  But, Vladimir has a really good idea here.  Think
 * about what happens for reading a file.  For each page, the VFS layer
 * calls reiserfs_read_folio, which searches the tree to find an indirect
 * item.  This indirect item has X number of pointers, where X is a big
 * number if we've done the block allocation right.  But, we only use one
 * or two of these pointers during each call to read_folio, needlessly
 * re-searching the tree again later on.
 *
 * The size of the cache could be dynamic based on the size of the file.
 *
 * I'd also like to see us cache the location of the stat data item, since
 * we are needlessly re-searching for that frequently.
 *
 * --chris
 */

/*
 * If this page has a file tail in it, and
 * it was read in by get_block_create_0, the page data is valid,
 * but tail is still sitting in a direct item, and we can't write to
 * it.  So, look through this page, and check all the mapped buffers
 * to make sure they have valid block numbers.  Any that don't have one
 * need to be unmapped, so that __block_write_begin will correctly call
 * reiserfs_get_block to convert the tail into an unformatted node
 */
static inline void fix_tail_page_for_writing(struct page *page)
{
	struct buffer_head *head, *next, *bh;

	if (page && page_has_buffers(page)) {
		head = page_buffers(page);
		bh = head;
		do {
			next = bh->b_this_page;
			if (buffer_mapped(bh) && bh->b_blocknr == 0) {
				reiserfs_unmap_buffer(bh);
			}
			bh = next;
		} while (bh != head);
	}
}

/*
 * reiserfs_get_block does not need to allocate a block if one has already
 * been allocated or a non-hole position has been found in the indirect item
 */
static inline int allocation_needed(int retval, b_blocknr_t allocated,
				    struct item_head *ih,
				    __le32 * item, int pos_in_item)
{
	if (allocated)
		return 0;
	if (retval == POSITION_FOUND && is_indirect_le_ih(ih) &&
	    get_block_num(item, pos_in_item))
		return 0;
	return 1;
}

static inline int indirect_item_found(int retval, struct item_head *ih)
{
	return (retval == POSITION_FOUND) && is_indirect_le_ih(ih);
}

static inline void set_block_dev_mapped(struct buffer_head *bh,
					b_blocknr_t block, struct inode *inode)
{
	map_bh(bh, inode->i_sb, block);
}

/*
 * files which were created in the earlier version cannot be longer
 * than 2 GB
 */
static int file_capable(struct inode *inode, sector_t block)
{
	/* it is new file. */
	if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 ||
	    /* old file, but 'block' is inside of 2gb */
	    block < (1 << (31 - inode->i_sb->s_blocksize_bits)))
		return 1;

	return 0;
}

static int restart_transaction(struct reiserfs_transaction_handle *th,
			       struct inode *inode, struct treepath *path)
{
	struct super_block *s = th->t_super;
	int err;

	BUG_ON(!th->t_trans_id);
	BUG_ON(!th->t_refcount);

	pathrelse(path);

	/* we cannot restart while nested */
	if (th->t_refcount > 1) {
		return 0;
	}
	reiserfs_update_sd(th, inode);
	err = journal_end(th);
	if (!err) {
		err = journal_begin(th, s, JOURNAL_PER_BALANCE_CNT * 6);
		if (!err)
			reiserfs_update_inode_transaction(inode);
	}
	return err;
}

/*
 * Called by reiserfs_get_block when create == 0.  Returns the block number
 * for the 'block'-th logical block of the file.  When it hits a direct item
 * it either returns 0 (when called from bmap) or reads the direct item into
 * the piece of the page given by bh_result.
284 */ 285 static int _get_block_create_0(struct inode *inode, sector_t block, 286 struct buffer_head *bh_result, int args) 287 { 288 INITIALIZE_PATH(path); 289 struct cpu_key key; 290 struct buffer_head *bh; 291 struct item_head *ih, tmp_ih; 292 b_blocknr_t blocknr; 293 char *p; 294 int chars; 295 int ret; 296 int result; 297 int done = 0; 298 unsigned long offset; 299 300 /* prepare the key to look for the 'block'-th block of file */ 301 make_cpu_key(&key, inode, 302 (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY, 303 3); 304 305 result = search_for_position_by_key(inode->i_sb, &key, &path); 306 if (result != POSITION_FOUND) { 307 pathrelse(&path); 308 if (result == IO_ERROR) 309 return -EIO; 310 /* 311 * We do not return -ENOENT if there is a hole but page is 312 * uptodate, because it means that there is some MMAPED data 313 * associated with it that is yet to be written to disk. 314 */ 315 if ((args & GET_BLOCK_NO_HOLE) 316 && !PageUptodate(bh_result->b_page)) { 317 return -ENOENT; 318 } 319 return 0; 320 } 321 322 bh = get_last_bh(&path); 323 ih = tp_item_head(&path); 324 if (is_indirect_le_ih(ih)) { 325 __le32 *ind_item = (__le32 *) ih_item_body(bh, ih); 326 327 /* 328 * FIXME: here we could cache indirect item or part of it in 329 * the inode to avoid search_by_key in case of subsequent 330 * access to file 331 */ 332 blocknr = get_block_num(ind_item, path.pos_in_item); 333 ret = 0; 334 if (blocknr) { 335 map_bh(bh_result, inode->i_sb, blocknr); 336 if (path.pos_in_item == 337 ((ih_item_len(ih) / UNFM_P_SIZE) - 1)) { 338 set_buffer_boundary(bh_result); 339 } 340 } else 341 /* 342 * We do not return -ENOENT if there is a hole but 343 * page is uptodate, because it means that there is 344 * some MMAPED data associated with it that is 345 * yet to be written to disk. 346 */ 347 if ((args & GET_BLOCK_NO_HOLE) 348 && !PageUptodate(bh_result->b_page)) { 349 ret = -ENOENT; 350 } 351 352 pathrelse(&path); 353 return ret; 354 } 355 /* requested data are in direct item(s) */ 356 if (!(args & GET_BLOCK_READ_DIRECT)) { 357 /* 358 * we are called by bmap. FIXME: we can not map block of file 359 * when it is stored in direct item(s) 360 */ 361 pathrelse(&path); 362 return -ENOENT; 363 } 364 365 /* 366 * if we've got a direct item, and the buffer or page was uptodate, 367 * we don't want to pull data off disk again. skip to the 368 * end, where we map the buffer and return 369 */ 370 if (buffer_uptodate(bh_result)) { 371 goto finished; 372 } else 373 /* 374 * grab_tail_page can trigger calls to reiserfs_get_block on 375 * up to date pages without any buffers. If the page is up 376 * to date, we don't want read old data off disk. Set the up 377 * to date bit on the buffer instead and jump to the end 378 */ 379 if (!bh_result->b_page || PageUptodate(bh_result->b_page)) { 380 set_buffer_uptodate(bh_result); 381 goto finished; 382 } 383 /* read file tail into part of page */ 384 offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1); 385 copy_item_head(&tmp_ih, ih); 386 387 /* 388 * we only want to kmap if we are reading the tail into the page. 389 * this is not the common case, so we don't kmap until we are 390 * sure we need to. But, this means the item might move if 391 * kmap schedules 392 */ 393 p = (char *)kmap(bh_result->b_page); 394 p += offset; 395 memset(p, 0, inode->i_sb->s_blocksize); 396 do { 397 if (!is_direct_le_ih(ih)) { 398 BUG(); 399 } 400 /* 401 * make sure we don't read more bytes than actually exist in 402 * the file. 
This can happen in odd cases where i_size isn't
		 * correct, and when direct item padding results in a few
		 * extra bytes at the end of the direct item
		 */
		if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
			break;
		if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
			chars =
			    inode->i_size - (le_ih_k_offset(ih) - 1) -
			    path.pos_in_item;
			done = 1;
		} else {
			chars = ih_item_len(ih) - path.pos_in_item;
		}
		memcpy(p, ih_item_body(bh, ih) + path.pos_in_item, chars);

		if (done)
			break;

		p += chars;

		/*
		 * we are done if the direct item we just read is not the
		 * last item of the node.  FIXME: we could try to check the
		 * right delimiting key to see whether the direct item
		 * continues in the right neighbor, or rely on i_size
		 */
		if (PATH_LAST_POSITION(&path) != (B_NR_ITEMS(bh) - 1))
			break;

		/* update key to look for the next piece */
		set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + chars);
		result = search_for_position_by_key(inode->i_sb, &key, &path);
		if (result != POSITION_FOUND)
			/* i/o error most likely */
			break;
		bh = get_last_bh(&path);
		ih = tp_item_head(&path);
	} while (1);

	flush_dcache_page(bh_result->b_page);
	kunmap(bh_result->b_page);

finished:
	pathrelse(&path);

	if (result == IO_ERROR)
		return -EIO;

	/*
	 * this buffer has valid data, but isn't valid for io.  mapping it to
	 * block #0 tells the rest of reiserfs it just has a tail in it
	 */
	map_bh(bh_result, inode->i_sb, 0);
	set_buffer_uptodate(bh_result);
	return 0;
}

/*
 * this is called to create the file map, so _get_block_create_0 will not
 * read the direct item
 */
static int reiserfs_bmap(struct inode *inode, sector_t block,
			 struct buffer_head *bh_result, int create)
{
	if (!file_capable(inode, block))
		return -EFBIG;

	reiserfs_write_lock(inode->i_sb);
	/* do not read the direct item */
	_get_block_create_0(inode, block, bh_result, 0);
	reiserfs_write_unlock(inode->i_sb);
	return 0;
}

/*
 * special version of get_block that is only used by grab_tail_page right
 * now.  It is sent to __block_write_begin, and when you try to get a
 * block past the end of the file (or a block from a hole) it returns
 * -ENOENT instead of a valid buffer.  __block_write_begin expects to
 * be able to do i/o on the buffers returned, unless an error value
 * is also returned.
 *
 * So, this allows __block_write_begin to be used for reading a single block
 * in a page, where it does not produce a valid page for holes or past the
 * end of the file.  This turns out to be exactly what we need for reading
 * tails for conversion.
 *
 * The point of the wrapper is forcing a certain value for create, even
 * though the VFS layer is calling this function with create==1.  If you
 * don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
 * don't use this function.
 */
static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
				       struct buffer_head *bh_result,
				       int create)
{
	return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
}

/*
 * This is a special helper for reiserfs_get_block in case we are executing
 * a direct_IO request.
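 *
 * (Note: the helper below clears bh_result->b_page before calling
 * reiserfs_get_block(); convert_tail_for_hole() and the tail-conversion
 * code in reiserfs_get_block() use a NULL b_page to recognize a direct
 * I/O request, e.g. such a buffer is never added to the tail flush list.)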
505 */ 506 static int reiserfs_get_blocks_direct_io(struct inode *inode, 507 sector_t iblock, 508 struct buffer_head *bh_result, 509 int create) 510 { 511 int ret; 512 513 bh_result->b_page = NULL; 514 515 /* 516 * We set the b_size before reiserfs_get_block call since it is 517 * referenced in convert_tail_for_hole() that may be called from 518 * reiserfs_get_block() 519 */ 520 bh_result->b_size = i_blocksize(inode); 521 522 ret = reiserfs_get_block(inode, iblock, bh_result, 523 create | GET_BLOCK_NO_DANGLE); 524 if (ret) 525 goto out; 526 527 /* don't allow direct io onto tail pages */ 528 if (buffer_mapped(bh_result) && bh_result->b_blocknr == 0) { 529 /* 530 * make sure future calls to the direct io funcs for this 531 * offset in the file fail by unmapping the buffer 532 */ 533 clear_buffer_mapped(bh_result); 534 ret = -EINVAL; 535 } 536 537 /* 538 * Possible unpacked tail. Flush the data before pages have 539 * disappeared 540 */ 541 if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) { 542 int err; 543 544 reiserfs_write_lock(inode->i_sb); 545 546 err = reiserfs_commit_for_inode(inode); 547 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask; 548 549 reiserfs_write_unlock(inode->i_sb); 550 551 if (err < 0) 552 ret = err; 553 } 554 out: 555 return ret; 556 } 557 558 /* 559 * helper function for when reiserfs_get_block is called for a hole 560 * but the file tail is still in a direct item 561 * bh_result is the buffer head for the hole 562 * tail_offset is the offset of the start of the tail in the file 563 * 564 * This calls prepare_write, which will start a new transaction 565 * you should not be in a transaction, or have any paths held when you 566 * call this. 567 */ 568 static int convert_tail_for_hole(struct inode *inode, 569 struct buffer_head *bh_result, 570 loff_t tail_offset) 571 { 572 unsigned long index; 573 unsigned long tail_end; 574 unsigned long tail_start; 575 struct page *tail_page; 576 struct page *hole_page = bh_result->b_page; 577 int retval = 0; 578 579 if ((tail_offset & (bh_result->b_size - 1)) != 1) 580 return -EIO; 581 582 /* always try to read until the end of the block */ 583 tail_start = tail_offset & (PAGE_SIZE - 1); 584 tail_end = (tail_start | (bh_result->b_size - 1)) + 1; 585 586 index = tail_offset >> PAGE_SHIFT; 587 /* 588 * hole_page can be zero in case of direct_io, we are sure 589 * that we cannot get here if we write with O_DIRECT into tail page 590 */ 591 if (!hole_page || index != hole_page->index) { 592 tail_page = grab_cache_page(inode->i_mapping, index); 593 retval = -ENOMEM; 594 if (!tail_page) { 595 goto out; 596 } 597 } else { 598 tail_page = hole_page; 599 } 600 601 /* 602 * we don't have to make sure the conversion did not happen while 603 * we were locking the page because anyone that could convert 604 * must first take i_mutex. 605 * 606 * We must fix the tail page for writing because it might have buffers 607 * that are mapped, but have a block number of 0. This indicates tail 608 * data that has been read directly into the page, and 609 * __block_write_begin won't trigger a get_block in this case. 
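	 *
	 * (The __reiserfs_write_begin()/reiserfs_commit_write() pair below is
	 * what actually triggers the direct->indirect conversion, via
	 * reiserfs_get_block(), and it starts its own transaction; that is
	 * why the comment above convert_tail_for_hole() says callers must not
	 * already hold a transaction or any tree paths.)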
610 */ 611 fix_tail_page_for_writing(tail_page); 612 retval = __reiserfs_write_begin(tail_page, tail_start, 613 tail_end - tail_start); 614 if (retval) 615 goto unlock; 616 617 /* tail conversion might change the data in the page */ 618 flush_dcache_page(tail_page); 619 620 retval = reiserfs_commit_write(NULL, tail_page, tail_start, tail_end); 621 622 unlock: 623 if (tail_page != hole_page) { 624 unlock_page(tail_page); 625 put_page(tail_page); 626 } 627 out: 628 return retval; 629 } 630 631 static inline int _allocate_block(struct reiserfs_transaction_handle *th, 632 sector_t block, 633 struct inode *inode, 634 b_blocknr_t * allocated_block_nr, 635 struct treepath *path, int flags) 636 { 637 BUG_ON(!th->t_trans_id); 638 639 #ifdef REISERFS_PREALLOCATE 640 if (!(flags & GET_BLOCK_NO_IMUX)) { 641 return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr, 642 path, block); 643 } 644 #endif 645 return reiserfs_new_unf_blocknrs(th, inode, allocated_block_nr, path, 646 block); 647 } 648 649 int reiserfs_get_block(struct inode *inode, sector_t block, 650 struct buffer_head *bh_result, int create) 651 { 652 int repeat, retval = 0; 653 /* b_blocknr_t is (unsigned) 32 bit int*/ 654 b_blocknr_t allocated_block_nr = 0; 655 INITIALIZE_PATH(path); 656 int pos_in_item; 657 struct cpu_key key; 658 struct buffer_head *bh, *unbh = NULL; 659 struct item_head *ih, tmp_ih; 660 __le32 *item; 661 int done; 662 int fs_gen; 663 struct reiserfs_transaction_handle *th = NULL; 664 /* 665 * space reserved in transaction batch: 666 * . 3 balancings in direct->indirect conversion 667 * . 1 block involved into reiserfs_update_sd() 668 * XXX in practically impossible worst case direct2indirect() 669 * can incur (much) more than 3 balancings. 670 * quota update for user, group 671 */ 672 int jbegin_count = 673 JOURNAL_PER_BALANCE_CNT * 3 + 1 + 674 2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb); 675 int version; 676 int dangle = 1; 677 loff_t new_offset = 678 (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1; 679 680 reiserfs_write_lock(inode->i_sb); 681 version = get_inode_item_key_version(inode); 682 683 if (!file_capable(inode, block)) { 684 reiserfs_write_unlock(inode->i_sb); 685 return -EFBIG; 686 } 687 688 /* 689 * if !create, we aren't changing the FS, so we don't need to 690 * log anything, so we don't need to start a transaction 691 */ 692 if (!(create & GET_BLOCK_CREATE)) { 693 int ret; 694 /* find number of block-th logical block of the file */ 695 ret = _get_block_create_0(inode, block, bh_result, 696 create | GET_BLOCK_READ_DIRECT); 697 reiserfs_write_unlock(inode->i_sb); 698 return ret; 699 } 700 701 /* 702 * if we're already in a transaction, make sure to close 703 * any new transactions we start in this func 704 */ 705 if ((create & GET_BLOCK_NO_DANGLE) || 706 reiserfs_transaction_running(inode->i_sb)) 707 dangle = 0; 708 709 /* 710 * If file is of such a size, that it might have a tail and 711 * tails are enabled we should mark it as possibly needing 712 * tail packing on close 713 */ 714 if ((have_large_tails(inode->i_sb) 715 && inode->i_size < i_block_size(inode) * 4) 716 || (have_small_tails(inode->i_sb) 717 && inode->i_size < i_block_size(inode))) 718 REISERFS_I(inode)->i_flags |= i_pack_on_close_mask; 719 720 /* set the key of the first byte in the 'block'-th block of file */ 721 make_cpu_key(&key, inode, new_offset, TYPE_ANY, 3 /*key length */ ); 722 if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) { 723 start_trans: 724 th = reiserfs_persistent_transaction(inode->i_sb, 
jbegin_count); 725 if (!th) { 726 retval = -ENOMEM; 727 goto failure; 728 } 729 reiserfs_update_inode_transaction(inode); 730 } 731 research: 732 733 retval = search_for_position_by_key(inode->i_sb, &key, &path); 734 if (retval == IO_ERROR) { 735 retval = -EIO; 736 goto failure; 737 } 738 739 bh = get_last_bh(&path); 740 ih = tp_item_head(&path); 741 item = tp_item_body(&path); 742 pos_in_item = path.pos_in_item; 743 744 fs_gen = get_generation(inode->i_sb); 745 copy_item_head(&tmp_ih, ih); 746 747 if (allocation_needed 748 (retval, allocated_block_nr, ih, item, pos_in_item)) { 749 /* we have to allocate block for the unformatted node */ 750 if (!th) { 751 pathrelse(&path); 752 goto start_trans; 753 } 754 755 repeat = 756 _allocate_block(th, block, inode, &allocated_block_nr, 757 &path, create); 758 759 /* 760 * restart the transaction to give the journal a chance to free 761 * some blocks. releases the path, so we have to go back to 762 * research if we succeed on the second try 763 */ 764 if (repeat == NO_DISK_SPACE || repeat == QUOTA_EXCEEDED) { 765 SB_JOURNAL(inode->i_sb)->j_next_async_flush = 1; 766 retval = restart_transaction(th, inode, &path); 767 if (retval) 768 goto failure; 769 repeat = 770 _allocate_block(th, block, inode, 771 &allocated_block_nr, NULL, create); 772 773 if (repeat != NO_DISK_SPACE && repeat != QUOTA_EXCEEDED) { 774 goto research; 775 } 776 if (repeat == QUOTA_EXCEEDED) 777 retval = -EDQUOT; 778 else 779 retval = -ENOSPC; 780 goto failure; 781 } 782 783 if (fs_changed(fs_gen, inode->i_sb) 784 && item_moved(&tmp_ih, &path)) { 785 goto research; 786 } 787 } 788 789 if (indirect_item_found(retval, ih)) { 790 b_blocknr_t unfm_ptr; 791 /* 792 * 'block'-th block is in the file already (there is 793 * corresponding cell in some indirect item). But it may be 794 * zero unformatted node pointer (hole) 795 */ 796 unfm_ptr = get_block_num(item, pos_in_item); 797 if (unfm_ptr == 0) { 798 /* use allocated block to plug the hole */ 799 reiserfs_prepare_for_journal(inode->i_sb, bh, 1); 800 if (fs_changed(fs_gen, inode->i_sb) 801 && item_moved(&tmp_ih, &path)) { 802 reiserfs_restore_prepared_buffer(inode->i_sb, 803 bh); 804 goto research; 805 } 806 set_buffer_new(bh_result); 807 if (buffer_dirty(bh_result) 808 && reiserfs_data_ordered(inode->i_sb)) 809 reiserfs_add_ordered_list(inode, bh_result); 810 put_block_num(item, pos_in_item, allocated_block_nr); 811 unfm_ptr = allocated_block_nr; 812 journal_mark_dirty(th, bh); 813 reiserfs_update_sd(th, inode); 814 } 815 set_block_dev_mapped(bh_result, unfm_ptr, inode); 816 pathrelse(&path); 817 retval = 0; 818 if (!dangle && th) 819 retval = reiserfs_end_persistent_transaction(th); 820 821 reiserfs_write_unlock(inode->i_sb); 822 823 /* 824 * the item was found, so new blocks were not added to the file 825 * there is no need to make sure the inode is updated with this 826 * transaction 827 */ 828 return retval; 829 } 830 831 if (!th) { 832 pathrelse(&path); 833 goto start_trans; 834 } 835 836 /* 837 * desired position is not found or is in the direct item. We have 838 * to append file with holes up to 'block'-th block converting 839 * direct items to indirect one if necessary 840 */ 841 done = 0; 842 do { 843 if (is_statdata_le_ih(ih)) { 844 __le32 unp = 0; 845 struct cpu_key tmp_key; 846 847 /* indirect item has to be inserted */ 848 make_le_item_head(&tmp_ih, &key, version, 1, 849 TYPE_INDIRECT, UNFM_P_SIZE, 850 0 /* free_space */ ); 851 852 /* 853 * we are going to add 'block'-th block to the file. 
854 * Use allocated block for that 855 */ 856 if (cpu_key_k_offset(&key) == 1) { 857 unp = cpu_to_le32(allocated_block_nr); 858 set_block_dev_mapped(bh_result, 859 allocated_block_nr, inode); 860 set_buffer_new(bh_result); 861 done = 1; 862 } 863 tmp_key = key; /* ;) */ 864 set_cpu_key_k_offset(&tmp_key, 1); 865 PATH_LAST_POSITION(&path)++; 866 867 retval = 868 reiserfs_insert_item(th, &path, &tmp_key, &tmp_ih, 869 inode, (char *)&unp); 870 if (retval) { 871 reiserfs_free_block(th, inode, 872 allocated_block_nr, 1); 873 /* 874 * retval == -ENOSPC, -EDQUOT or -EIO 875 * or -EEXIST 876 */ 877 goto failure; 878 } 879 } else if (is_direct_le_ih(ih)) { 880 /* direct item has to be converted */ 881 loff_t tail_offset; 882 883 tail_offset = 884 ((le_ih_k_offset(ih) - 885 1) & ~(inode->i_sb->s_blocksize - 1)) + 1; 886 887 /* 888 * direct item we just found fits into block we have 889 * to map. Convert it into unformatted node: use 890 * bh_result for the conversion 891 */ 892 if (tail_offset == cpu_key_k_offset(&key)) { 893 set_block_dev_mapped(bh_result, 894 allocated_block_nr, inode); 895 unbh = bh_result; 896 done = 1; 897 } else { 898 /* 899 * we have to pad file tail stored in direct 900 * item(s) up to block size and convert it 901 * to unformatted node. FIXME: this should 902 * also get into page cache 903 */ 904 905 pathrelse(&path); 906 /* 907 * ugly, but we can only end the transaction if 908 * we aren't nested 909 */ 910 BUG_ON(!th->t_refcount); 911 if (th->t_refcount == 1) { 912 retval = 913 reiserfs_end_persistent_transaction 914 (th); 915 th = NULL; 916 if (retval) 917 goto failure; 918 } 919 920 retval = 921 convert_tail_for_hole(inode, bh_result, 922 tail_offset); 923 if (retval) { 924 if (retval != -ENOSPC) 925 reiserfs_error(inode->i_sb, 926 "clm-6004", 927 "convert tail failed " 928 "inode %lu, error %d", 929 inode->i_ino, 930 retval); 931 if (allocated_block_nr) { 932 /* 933 * the bitmap, the super, 934 * and the stat data == 3 935 */ 936 if (!th) 937 th = reiserfs_persistent_transaction(inode->i_sb, 3); 938 if (th) 939 reiserfs_free_block(th, 940 inode, 941 allocated_block_nr, 942 1); 943 } 944 goto failure; 945 } 946 goto research; 947 } 948 retval = 949 direct2indirect(th, inode, &path, unbh, 950 tail_offset); 951 if (retval) { 952 reiserfs_unmap_buffer(unbh); 953 reiserfs_free_block(th, inode, 954 allocated_block_nr, 1); 955 goto failure; 956 } 957 /* 958 * it is important the set_buffer_uptodate is done 959 * after the direct2indirect. The buffer might 960 * contain valid data newer than the data on disk 961 * (read by read_folio, changed, and then sent here by 962 * writepage). direct2indirect needs to know if unbh 963 * was already up to date, so it can decide if the 964 * data in unbh needs to be replaced with data from 965 * the disk 966 */ 967 set_buffer_uptodate(unbh); 968 969 /* 970 * unbh->b_page == NULL in case of DIRECT_IO request, 971 * this means buffer will disappear shortly, so it 972 * should not be added to 973 */ 974 if (unbh->b_page) { 975 /* 976 * we've converted the tail, so we must 977 * flush unbh before the transaction commits 978 */ 979 reiserfs_add_tail_list(inode, unbh); 980 981 /* 982 * mark it dirty now to prevent commit_write 983 * from adding this buffer to the inode's 984 * dirty buffer list 985 */ 986 /* 987 * AKPM: changed __mark_buffer_dirty to 988 * mark_buffer_dirty(). 
It's still atomic, 989 * but it sets the page dirty too, which makes 990 * it eligible for writeback at any time by the 991 * VM (which was also the case with 992 * __mark_buffer_dirty()) 993 */ 994 mark_buffer_dirty(unbh); 995 } 996 } else { 997 /* 998 * append indirect item with holes if needed, when 999 * appending pointer to 'block'-th block use block, 1000 * which is already allocated 1001 */ 1002 struct cpu_key tmp_key; 1003 /* 1004 * We use this in case we need to allocate 1005 * only one block which is a fastpath 1006 */ 1007 unp_t unf_single = 0; 1008 unp_t *un; 1009 __u64 max_to_insert = 1010 MAX_ITEM_LEN(inode->i_sb->s_blocksize) / 1011 UNFM_P_SIZE; 1012 __u64 blocks_needed; 1013 1014 RFALSE(pos_in_item != ih_item_len(ih) / UNFM_P_SIZE, 1015 "vs-804: invalid position for append"); 1016 /* 1017 * indirect item has to be appended, 1018 * set up key of that position 1019 * (key type is unimportant) 1020 */ 1021 make_cpu_key(&tmp_key, inode, 1022 le_key_k_offset(version, 1023 &ih->ih_key) + 1024 op_bytes_number(ih, 1025 inode->i_sb->s_blocksize), 1026 TYPE_INDIRECT, 3); 1027 1028 RFALSE(cpu_key_k_offset(&tmp_key) > cpu_key_k_offset(&key), 1029 "green-805: invalid offset"); 1030 blocks_needed = 1031 1 + 1032 ((cpu_key_k_offset(&key) - 1033 cpu_key_k_offset(&tmp_key)) >> inode->i_sb-> 1034 s_blocksize_bits); 1035 1036 if (blocks_needed == 1) { 1037 un = &unf_single; 1038 } else { 1039 un = kcalloc(min(blocks_needed, max_to_insert), 1040 UNFM_P_SIZE, GFP_NOFS); 1041 if (!un) { 1042 un = &unf_single; 1043 blocks_needed = 1; 1044 max_to_insert = 0; 1045 } 1046 } 1047 if (blocks_needed <= max_to_insert) { 1048 /* 1049 * we are going to add target block to 1050 * the file. Use allocated block for that 1051 */ 1052 un[blocks_needed - 1] = 1053 cpu_to_le32(allocated_block_nr); 1054 set_block_dev_mapped(bh_result, 1055 allocated_block_nr, inode); 1056 set_buffer_new(bh_result); 1057 done = 1; 1058 } else { 1059 /* paste hole to the indirect item */ 1060 /* 1061 * If kcalloc failed, max_to_insert becomes 1062 * zero and it means we only have space for 1063 * one block 1064 */ 1065 blocks_needed = 1066 max_to_insert ? max_to_insert : 1; 1067 } 1068 retval = 1069 reiserfs_paste_into_item(th, &path, &tmp_key, inode, 1070 (char *)un, 1071 UNFM_P_SIZE * 1072 blocks_needed); 1073 1074 if (blocks_needed != 1) 1075 kfree(un); 1076 1077 if (retval) { 1078 reiserfs_free_block(th, inode, 1079 allocated_block_nr, 1); 1080 goto failure; 1081 } 1082 if (!done) { 1083 /* 1084 * We need to mark new file size in case 1085 * this function will be interrupted/aborted 1086 * later on. And we may do this only for 1087 * holes. 1088 */ 1089 inode->i_size += 1090 inode->i_sb->s_blocksize * blocks_needed; 1091 } 1092 } 1093 1094 if (done == 1) 1095 break; 1096 1097 /* 1098 * this loop could log more blocks than we had originally 1099 * asked for. So, we have to allow the transaction to end 1100 * if it is too big or too full. Update the inode so things 1101 * are consistent if we crash before the function returns 1102 * release the path so that anybody waiting on the path before 1103 * ending their transaction will be able to continue. 1104 */ 1105 if (journal_transaction_should_end(th, th->t_blocks_allocated)) { 1106 retval = restart_transaction(th, inode, &path); 1107 if (retval) 1108 goto failure; 1109 } 1110 /* 1111 * inserting indirect pointers for a hole can take a 1112 * long time. reschedule if needed and also release the write 1113 * lock for others. 
1114 */ 1115 reiserfs_cond_resched(inode->i_sb); 1116 1117 retval = search_for_position_by_key(inode->i_sb, &key, &path); 1118 if (retval == IO_ERROR) { 1119 retval = -EIO; 1120 goto failure; 1121 } 1122 if (retval == POSITION_FOUND) { 1123 reiserfs_warning(inode->i_sb, "vs-825", 1124 "%K should not be found", &key); 1125 retval = -EEXIST; 1126 if (allocated_block_nr) 1127 reiserfs_free_block(th, inode, 1128 allocated_block_nr, 1); 1129 pathrelse(&path); 1130 goto failure; 1131 } 1132 bh = get_last_bh(&path); 1133 ih = tp_item_head(&path); 1134 item = tp_item_body(&path); 1135 pos_in_item = path.pos_in_item; 1136 } while (1); 1137 1138 retval = 0; 1139 1140 failure: 1141 if (th && (!dangle || (retval && !th->t_trans_id))) { 1142 int err; 1143 if (th->t_trans_id) 1144 reiserfs_update_sd(th, inode); 1145 err = reiserfs_end_persistent_transaction(th); 1146 if (err) 1147 retval = err; 1148 } 1149 1150 reiserfs_write_unlock(inode->i_sb); 1151 reiserfs_check_path(&path); 1152 return retval; 1153 } 1154 1155 static void reiserfs_readahead(struct readahead_control *rac) 1156 { 1157 mpage_readahead(rac, reiserfs_get_block); 1158 } 1159 1160 /* 1161 * Compute real number of used bytes by file 1162 * Following three functions can go away when we'll have enough space in 1163 * stat item 1164 */ 1165 static int real_space_diff(struct inode *inode, int sd_size) 1166 { 1167 int bytes; 1168 loff_t blocksize = inode->i_sb->s_blocksize; 1169 1170 if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) 1171 return sd_size; 1172 1173 /* 1174 * End of file is also in full block with indirect reference, so round 1175 * up to the next block. 1176 * 1177 * there is just no way to know if the tail is actually packed 1178 * on the file, so we have to assume it isn't. When we pack the 1179 * tail, we add 4 bytes to pretend there really is an unformatted 1180 * node pointer 1181 */ 1182 bytes = 1183 ((inode->i_size + 1184 (blocksize - 1)) >> inode->i_sb->s_blocksize_bits) * UNFM_P_SIZE + 1185 sd_size; 1186 return bytes; 1187 } 1188 1189 static inline loff_t to_real_used_space(struct inode *inode, ulong blocks, 1190 int sd_size) 1191 { 1192 if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) { 1193 return inode->i_size + 1194 (loff_t) (real_space_diff(inode, sd_size)); 1195 } 1196 return ((loff_t) real_space_diff(inode, sd_size)) + 1197 (((loff_t) blocks) << 9); 1198 } 1199 1200 /* Compute number of blocks used by file in ReiserFS counting */ 1201 static inline ulong to_fake_used_blocks(struct inode *inode, int sd_size) 1202 { 1203 loff_t bytes = inode_get_bytes(inode); 1204 loff_t real_space = real_space_diff(inode, sd_size); 1205 1206 /* keeps fsck and non-quota versions of reiserfs happy */ 1207 if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) { 1208 bytes += (loff_t) 511; 1209 } 1210 1211 /* 1212 * files from before the quota patch might i_blocks such that 1213 * bytes < real_space. Deal with that here to prevent it from 1214 * going negative. 1215 */ 1216 if (bytes < real_space) 1217 return 0; 1218 return (bytes - real_space) >> 9; 1219 } 1220 1221 /* 1222 * BAD: new directories have stat data of new type and all other items 1223 * of old type. 
Version stored in the inode says about body items, so 1224 * in update_stat_data we can not rely on inode, but have to check 1225 * item version directly 1226 */ 1227 1228 /* called by read_locked_inode */ 1229 static void init_inode(struct inode *inode, struct treepath *path) 1230 { 1231 struct buffer_head *bh; 1232 struct item_head *ih; 1233 __u32 rdev; 1234 1235 bh = PATH_PLAST_BUFFER(path); 1236 ih = tp_item_head(path); 1237 1238 copy_key(INODE_PKEY(inode), &ih->ih_key); 1239 1240 INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list); 1241 REISERFS_I(inode)->i_flags = 0; 1242 REISERFS_I(inode)->i_prealloc_block = 0; 1243 REISERFS_I(inode)->i_prealloc_count = 0; 1244 REISERFS_I(inode)->i_trans_id = 0; 1245 REISERFS_I(inode)->i_jl = NULL; 1246 reiserfs_init_xattr_rwsem(inode); 1247 1248 if (stat_data_v1(ih)) { 1249 struct stat_data_v1 *sd = 1250 (struct stat_data_v1 *)ih_item_body(bh, ih); 1251 unsigned long blocks; 1252 1253 set_inode_item_key_version(inode, KEY_FORMAT_3_5); 1254 set_inode_sd_version(inode, STAT_DATA_V1); 1255 inode->i_mode = sd_v1_mode(sd); 1256 set_nlink(inode, sd_v1_nlink(sd)); 1257 i_uid_write(inode, sd_v1_uid(sd)); 1258 i_gid_write(inode, sd_v1_gid(sd)); 1259 inode->i_size = sd_v1_size(sd); 1260 inode_set_atime(inode, sd_v1_atime(sd), 0); 1261 inode_set_mtime(inode, sd_v1_mtime(sd), 0); 1262 inode_set_ctime(inode, sd_v1_ctime(sd), 0); 1263 1264 inode->i_blocks = sd_v1_blocks(sd); 1265 inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id); 1266 blocks = (inode->i_size + 511) >> 9; 1267 blocks = _ROUND_UP(blocks, inode->i_sb->s_blocksize >> 9); 1268 1269 /* 1270 * there was a bug in <=3.5.23 when i_blocks could take 1271 * negative values. Starting from 3.5.17 this value could 1272 * even be stored in stat data. For such files we set 1273 * i_blocks based on file size. Just 2 notes: this can be 1274 * wrong for sparse files. On-disk value will be only 1275 * updated if file's inode will ever change 1276 */ 1277 if (inode->i_blocks > blocks) { 1278 inode->i_blocks = blocks; 1279 } 1280 1281 rdev = sd_v1_rdev(sd); 1282 REISERFS_I(inode)->i_first_direct_byte = 1283 sd_v1_first_direct_byte(sd); 1284 1285 /* 1286 * an early bug in the quota code can give us an odd 1287 * number for the block count. This is incorrect, fix it here. 1288 */ 1289 if (inode->i_blocks & 1) { 1290 inode->i_blocks++; 1291 } 1292 inode_set_bytes(inode, 1293 to_real_used_space(inode, inode->i_blocks, 1294 SD_V1_SIZE)); 1295 /* 1296 * nopack is initially zero for v1 objects. 
For v2 objects, 1297 * nopack is initialised from sd_attrs 1298 */ 1299 REISERFS_I(inode)->i_flags &= ~i_nopack_mask; 1300 } else { 1301 /* 1302 * new stat data found, but object may have old items 1303 * (directories and symlinks) 1304 */ 1305 struct stat_data *sd = (struct stat_data *)ih_item_body(bh, ih); 1306 1307 inode->i_mode = sd_v2_mode(sd); 1308 set_nlink(inode, sd_v2_nlink(sd)); 1309 i_uid_write(inode, sd_v2_uid(sd)); 1310 inode->i_size = sd_v2_size(sd); 1311 i_gid_write(inode, sd_v2_gid(sd)); 1312 inode_set_mtime(inode, sd_v2_mtime(sd), 0); 1313 inode_set_atime(inode, sd_v2_atime(sd), 0); 1314 inode_set_ctime(inode, sd_v2_ctime(sd), 0); 1315 inode->i_blocks = sd_v2_blocks(sd); 1316 rdev = sd_v2_rdev(sd); 1317 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) 1318 inode->i_generation = 1319 le32_to_cpu(INODE_PKEY(inode)->k_dir_id); 1320 else 1321 inode->i_generation = sd_v2_generation(sd); 1322 1323 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 1324 set_inode_item_key_version(inode, KEY_FORMAT_3_5); 1325 else 1326 set_inode_item_key_version(inode, KEY_FORMAT_3_6); 1327 REISERFS_I(inode)->i_first_direct_byte = 0; 1328 set_inode_sd_version(inode, STAT_DATA_V2); 1329 inode_set_bytes(inode, 1330 to_real_used_space(inode, inode->i_blocks, 1331 SD_V2_SIZE)); 1332 /* 1333 * read persistent inode attributes from sd and initialise 1334 * generic inode flags from them 1335 */ 1336 REISERFS_I(inode)->i_attrs = sd_v2_attrs(sd); 1337 sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode); 1338 } 1339 1340 pathrelse(path); 1341 if (S_ISREG(inode->i_mode)) { 1342 inode->i_op = &reiserfs_file_inode_operations; 1343 inode->i_fop = &reiserfs_file_operations; 1344 inode->i_mapping->a_ops = &reiserfs_address_space_operations; 1345 } else if (S_ISDIR(inode->i_mode)) { 1346 inode->i_op = &reiserfs_dir_inode_operations; 1347 inode->i_fop = &reiserfs_dir_operations; 1348 } else if (S_ISLNK(inode->i_mode)) { 1349 inode->i_op = &reiserfs_symlink_inode_operations; 1350 inode_nohighmem(inode); 1351 inode->i_mapping->a_ops = &reiserfs_address_space_operations; 1352 } else { 1353 inode->i_blocks = 0; 1354 inode->i_op = &reiserfs_special_inode_operations; 1355 init_special_inode(inode, inode->i_mode, new_decode_dev(rdev)); 1356 } 1357 } 1358 1359 /* update new stat data with inode fields */ 1360 static void inode2sd(void *sd, struct inode *inode, loff_t size) 1361 { 1362 struct stat_data *sd_v2 = (struct stat_data *)sd; 1363 1364 set_sd_v2_mode(sd_v2, inode->i_mode); 1365 set_sd_v2_nlink(sd_v2, inode->i_nlink); 1366 set_sd_v2_uid(sd_v2, i_uid_read(inode)); 1367 set_sd_v2_size(sd_v2, size); 1368 set_sd_v2_gid(sd_v2, i_gid_read(inode)); 1369 set_sd_v2_mtime(sd_v2, inode_get_mtime_sec(inode)); 1370 set_sd_v2_atime(sd_v2, inode_get_atime_sec(inode)); 1371 set_sd_v2_ctime(sd_v2, inode_get_ctime_sec(inode)); 1372 set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE)); 1373 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) 1374 set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev)); 1375 else 1376 set_sd_v2_generation(sd_v2, inode->i_generation); 1377 set_sd_v2_attrs(sd_v2, REISERFS_I(inode)->i_attrs); 1378 } 1379 1380 /* used to copy inode's fields to old stat data */ 1381 static void inode2sd_v1(void *sd, struct inode *inode, loff_t size) 1382 { 1383 struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd; 1384 1385 set_sd_v1_mode(sd_v1, inode->i_mode); 1386 set_sd_v1_uid(sd_v1, i_uid_read(inode)); 1387 set_sd_v1_gid(sd_v1, i_gid_read(inode)); 1388 set_sd_v1_nlink(sd_v1, inode->i_nlink); 1389 
set_sd_v1_size(sd_v1, size); 1390 set_sd_v1_atime(sd_v1, inode_get_atime_sec(inode)); 1391 set_sd_v1_ctime(sd_v1, inode_get_ctime_sec(inode)); 1392 set_sd_v1_mtime(sd_v1, inode_get_mtime_sec(inode)); 1393 1394 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) 1395 set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev)); 1396 else 1397 set_sd_v1_blocks(sd_v1, to_fake_used_blocks(inode, SD_V1_SIZE)); 1398 1399 /* Sigh. i_first_direct_byte is back */ 1400 set_sd_v1_first_direct_byte(sd_v1, 1401 REISERFS_I(inode)->i_first_direct_byte); 1402 } 1403 1404 /* 1405 * NOTE, you must prepare the buffer head before sending it here, 1406 * and then log it after the call 1407 */ 1408 static void update_stat_data(struct treepath *path, struct inode *inode, 1409 loff_t size) 1410 { 1411 struct buffer_head *bh; 1412 struct item_head *ih; 1413 1414 bh = PATH_PLAST_BUFFER(path); 1415 ih = tp_item_head(path); 1416 1417 if (!is_statdata_le_ih(ih)) 1418 reiserfs_panic(inode->i_sb, "vs-13065", "key %k, found item %h", 1419 INODE_PKEY(inode), ih); 1420 1421 /* path points to old stat data */ 1422 if (stat_data_v1(ih)) { 1423 inode2sd_v1(ih_item_body(bh, ih), inode, size); 1424 } else { 1425 inode2sd(ih_item_body(bh, ih), inode, size); 1426 } 1427 1428 return; 1429 } 1430 1431 void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th, 1432 struct inode *inode, loff_t size) 1433 { 1434 struct cpu_key key; 1435 INITIALIZE_PATH(path); 1436 struct buffer_head *bh; 1437 int fs_gen; 1438 struct item_head *ih, tmp_ih; 1439 int retval; 1440 1441 BUG_ON(!th->t_trans_id); 1442 1443 /* key type is unimportant */ 1444 make_cpu_key(&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3); 1445 1446 for (;;) { 1447 int pos; 1448 /* look for the object's stat data */ 1449 retval = search_item(inode->i_sb, &key, &path); 1450 if (retval == IO_ERROR) { 1451 reiserfs_error(inode->i_sb, "vs-13050", 1452 "i/o failure occurred trying to " 1453 "update %K stat data", &key); 1454 return; 1455 } 1456 if (retval == ITEM_NOT_FOUND) { 1457 pos = PATH_LAST_POSITION(&path); 1458 pathrelse(&path); 1459 if (inode->i_nlink == 0) { 1460 /*reiserfs_warning (inode->i_sb, "vs-13050: reiserfs_update_sd: i_nlink == 0, stat data not found"); */ 1461 return; 1462 } 1463 reiserfs_warning(inode->i_sb, "vs-13060", 1464 "stat data of object %k (nlink == %d) " 1465 "not found (pos %d)", 1466 INODE_PKEY(inode), inode->i_nlink, 1467 pos); 1468 reiserfs_check_path(&path); 1469 return; 1470 } 1471 1472 /* 1473 * sigh, prepare_for_journal might schedule. When it 1474 * schedules the FS might change. We have to detect that, 1475 * and loop back to the search if the stat data item has moved 1476 */ 1477 bh = get_last_bh(&path); 1478 ih = tp_item_head(&path); 1479 copy_item_head(&tmp_ih, ih); 1480 fs_gen = get_generation(inode->i_sb); 1481 reiserfs_prepare_for_journal(inode->i_sb, bh, 1); 1482 1483 /* Stat_data item has been moved after scheduling. */ 1484 if (fs_changed(fs_gen, inode->i_sb) 1485 && item_moved(&tmp_ih, &path)) { 1486 reiserfs_restore_prepared_buffer(inode->i_sb, bh); 1487 continue; 1488 } 1489 break; 1490 } 1491 update_stat_data(&path, inode, size); 1492 journal_mark_dirty(th, bh); 1493 pathrelse(&path); 1494 return; 1495 } 1496 1497 /* 1498 * reiserfs_read_locked_inode is called to read the inode off disk, and it 1499 * does a make_bad_inode when things go wrong. 
But, we need to make sure 1500 * and clear the key in the private portion of the inode, otherwise a 1501 * corresponding iput might try to delete whatever object the inode last 1502 * represented. 1503 */ 1504 static void reiserfs_make_bad_inode(struct inode *inode) 1505 { 1506 memset(INODE_PKEY(inode), 0, KEY_SIZE); 1507 make_bad_inode(inode); 1508 } 1509 1510 /* 1511 * initially this function was derived from minix or ext2's analog and 1512 * evolved as the prototype did 1513 */ 1514 int reiserfs_init_locked_inode(struct inode *inode, void *p) 1515 { 1516 struct reiserfs_iget_args *args = (struct reiserfs_iget_args *)p; 1517 inode->i_ino = args->objectid; 1518 INODE_PKEY(inode)->k_dir_id = cpu_to_le32(args->dirid); 1519 return 0; 1520 } 1521 1522 /* 1523 * looks for stat data in the tree, and fills up the fields of in-core 1524 * inode stat data fields 1525 */ 1526 void reiserfs_read_locked_inode(struct inode *inode, 1527 struct reiserfs_iget_args *args) 1528 { 1529 INITIALIZE_PATH(path_to_sd); 1530 struct cpu_key key; 1531 unsigned long dirino; 1532 int retval; 1533 1534 dirino = args->dirid; 1535 1536 /* 1537 * set version 1, version 2 could be used too, because stat data 1538 * key is the same in both versions 1539 */ 1540 _make_cpu_key(&key, KEY_FORMAT_3_5, dirino, inode->i_ino, 0, 0, 3); 1541 1542 /* look for the object's stat data */ 1543 retval = search_item(inode->i_sb, &key, &path_to_sd); 1544 if (retval == IO_ERROR) { 1545 reiserfs_error(inode->i_sb, "vs-13070", 1546 "i/o failure occurred trying to find " 1547 "stat data of %K", &key); 1548 reiserfs_make_bad_inode(inode); 1549 return; 1550 } 1551 1552 /* a stale NFS handle can trigger this without it being an error */ 1553 if (retval != ITEM_FOUND) { 1554 pathrelse(&path_to_sd); 1555 reiserfs_make_bad_inode(inode); 1556 clear_nlink(inode); 1557 return; 1558 } 1559 1560 init_inode(inode, &path_to_sd); 1561 1562 /* 1563 * It is possible that knfsd is trying to access inode of a file 1564 * that is being removed from the disk by some other thread. As we 1565 * update sd on unlink all that is required is to check for nlink 1566 * here. This bug was first found by Sizif when debugging 1567 * SquidNG/Butterfly, forgotten, and found again after Philippe 1568 * Gramoulle <philippe.gramoulle@mmania.com> reproduced it. 1569 1570 * More logical fix would require changes in fs/inode.c:iput() to 1571 * remove inode from hash-table _after_ fs cleaned disk stuff up and 1572 * in iget() to return NULL if I_FREEING inode is found in 1573 * hash-table. 1574 */ 1575 1576 /* 1577 * Currently there is one place where it's ok to meet inode with 1578 * nlink==0: processing of open-unlinked and half-truncated files 1579 * during mount (fs/reiserfs/super.c:finish_unfinished()). 1580 */ 1581 if ((inode->i_nlink == 0) && 1582 !REISERFS_SB(inode->i_sb)->s_is_unlinked_ok) { 1583 reiserfs_warning(inode->i_sb, "vs-13075", 1584 "dead inode read from disk %K. " 1585 "This is likely to be race with knfsd. Ignore", 1586 &key); 1587 reiserfs_make_bad_inode(inode); 1588 } 1589 1590 /* init inode should be relsing */ 1591 reiserfs_check_path(&path_to_sd); 1592 1593 /* 1594 * Stat data v1 doesn't support ACLs. 1595 */ 1596 if (get_inode_sd_version(inode) == STAT_DATA_V1) 1597 cache_no_acl(inode); 1598 } 1599 1600 /* 1601 * reiserfs_find_actor() - "find actor" reiserfs supplies to iget5_locked(). 1602 * 1603 * @inode: inode from hash table to check 1604 * @opaque: "cookie" passed to iget5_locked(). This is &reiserfs_iget_args. 
 *
 * This function is called by iget5_locked() to distinguish reiserfs inodes
 * having the same inode numbers.  Such inodes can only exist due to some
 * error condition.  One of them should be bad.  Inodes with identical
 * inode numbers (objectids) are distinguished by parent directory ids.
 *
 */
int reiserfs_find_actor(struct inode *inode, void *opaque)
{
	struct reiserfs_iget_args *args;

	args = opaque;
	/* args is already in CPU order */
	return (inode->i_ino == args->objectid) &&
	    (le32_to_cpu(INODE_PKEY(inode)->k_dir_id) == args->dirid);
}

struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
{
	struct inode *inode;
	struct reiserfs_iget_args args;
	int depth;

	args.objectid = key->on_disk_key.k_objectid;
	args.dirid = key->on_disk_key.k_dir_id;
	depth = reiserfs_write_unlock_nested(s);
	inode = iget5_locked(s, key->on_disk_key.k_objectid,
			     reiserfs_find_actor, reiserfs_init_locked_inode,
			     (void *)(&args));
	reiserfs_write_lock_nested(s, depth);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		reiserfs_read_locked_inode(inode, &args);
		unlock_new_inode(inode);
	}

	if (comp_short_keys(INODE_PKEY(inode), key) || is_bad_inode(inode)) {
		/* either due to i/o error or a stale NFS handle */
		iput(inode);
		inode = NULL;
	}
	return inode;
}

static struct dentry *reiserfs_get_dentry(struct super_block *sb,
	u32 objectid, u32 dir_id, u32 generation)

{
	struct cpu_key key;
	struct inode *inode;

	key.on_disk_key.k_objectid = objectid;
	key.on_disk_key.k_dir_id = dir_id;
	reiserfs_write_lock(sb);
	inode = reiserfs_iget(sb, &key);
	if (inode && !IS_ERR(inode) && generation != 0 &&
	    generation != inode->i_generation) {
		iput(inode);
		inode = NULL;
	}
	reiserfs_write_unlock(sb);

	return d_obtain_alias(inode);
}

struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
				     int fh_len, int fh_type)
{
	/*
	 * fhtype happens to reflect the number of u32s encoded.
	 * due to a bug in earlier code, fhtype might indicate there
	 * are more u32s than actually fit.
	 * so if fhtype seems to be more than len, reduce fhtype.
	 * Valid types are:
	 *   2 - objectid + dir_id - legacy support
	 *   3 - objectid + dir_id + generation
	 *   4 - objectid + dir_id + objectid and dirid of parent - legacy
	 *   5 - objectid + dir_id + generation + objectid and dirid of parent
	 *   6 - as above plus generation of directory
	 * 6 does not fit in NFSv2 handles
	 */
	if (fh_type > fh_len) {
		if (fh_type != 6 || fh_len != 5)
			reiserfs_warning(sb, "reiserfs-13077",
				"nfsd/reiserfs, fhtype=%d, len=%d - odd",
				fh_type, fh_len);
		fh_type = fh_len;
	}
	if (fh_len < 2)
		return NULL;

	return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
		(fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
}

struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
				     int fh_len, int fh_type)
{
	if (fh_type > fh_len)
		fh_type = fh_len;
	if (fh_type < 4)
		return NULL;

	return reiserfs_get_dentry(sb,
		(fh_type >= 5) ? fid->raw[3] : fid->raw[2],
		(fh_type >= 5) ? fid->raw[4] : fid->raw[3],
		(fh_type == 6) ?
fid->raw[5] : 0); 1714 } 1715 1716 int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp, 1717 struct inode *parent) 1718 { 1719 int maxlen = *lenp; 1720 1721 if (parent && (maxlen < 5)) { 1722 *lenp = 5; 1723 return FILEID_INVALID; 1724 } else if (maxlen < 3) { 1725 *lenp = 3; 1726 return FILEID_INVALID; 1727 } 1728 1729 data[0] = inode->i_ino; 1730 data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id); 1731 data[2] = inode->i_generation; 1732 *lenp = 3; 1733 if (parent) { 1734 data[3] = parent->i_ino; 1735 data[4] = le32_to_cpu(INODE_PKEY(parent)->k_dir_id); 1736 *lenp = 5; 1737 if (maxlen >= 6) { 1738 data[5] = parent->i_generation; 1739 *lenp = 6; 1740 } 1741 } 1742 return *lenp; 1743 } 1744 1745 /* 1746 * looks for stat data, then copies fields to it, marks the buffer 1747 * containing stat data as dirty 1748 */ 1749 /* 1750 * reiserfs inodes are never really dirty, since the dirty inode call 1751 * always logs them. This call allows the VFS inode marking routines 1752 * to properly mark inodes for datasync and such, but only actually 1753 * does something when called for a synchronous update. 1754 */ 1755 int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc) 1756 { 1757 struct reiserfs_transaction_handle th; 1758 int jbegin_count = 1; 1759 1760 if (sb_rdonly(inode->i_sb)) 1761 return -EROFS; 1762 /* 1763 * memory pressure can sometimes initiate write_inode calls with 1764 * sync == 1, 1765 * these cases are just when the system needs ram, not when the 1766 * inode needs to reach disk for safety, and they can safely be 1767 * ignored because the altered inode has already been logged. 1768 */ 1769 if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) { 1770 reiserfs_write_lock(inode->i_sb); 1771 if (!journal_begin(&th, inode->i_sb, jbegin_count)) { 1772 reiserfs_update_sd(&th, inode); 1773 journal_end_sync(&th); 1774 } 1775 reiserfs_write_unlock(inode->i_sb); 1776 } 1777 return 0; 1778 } 1779 1780 /* 1781 * stat data of new object is inserted already, this inserts the item 1782 * containing "." and ".." entries 1783 */ 1784 static int reiserfs_new_directory(struct reiserfs_transaction_handle *th, 1785 struct inode *inode, 1786 struct item_head *ih, struct treepath *path, 1787 struct inode *dir) 1788 { 1789 struct super_block *sb = th->t_super; 1790 char empty_dir[EMPTY_DIR_SIZE]; 1791 char *body = empty_dir; 1792 struct cpu_key key; 1793 int retval; 1794 1795 BUG_ON(!th->t_trans_id); 1796 1797 _make_cpu_key(&key, KEY_FORMAT_3_5, le32_to_cpu(ih->ih_key.k_dir_id), 1798 le32_to_cpu(ih->ih_key.k_objectid), DOT_OFFSET, 1799 TYPE_DIRENTRY, 3 /*key length */ ); 1800 1801 /* 1802 * compose item head for new item. Directories consist of items of 1803 * old type (ITEM_VERSION_1). 
Do not set key (second arg is 0), it 1804 * is done by reiserfs_new_inode 1805 */ 1806 if (old_format_only(sb)) { 1807 make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET, 1808 TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2); 1809 1810 make_empty_dir_item_v1(body, ih->ih_key.k_dir_id, 1811 ih->ih_key.k_objectid, 1812 INODE_PKEY(dir)->k_dir_id, 1813 INODE_PKEY(dir)->k_objectid); 1814 } else { 1815 make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET, 1816 TYPE_DIRENTRY, EMPTY_DIR_SIZE, 2); 1817 1818 make_empty_dir_item(body, ih->ih_key.k_dir_id, 1819 ih->ih_key.k_objectid, 1820 INODE_PKEY(dir)->k_dir_id, 1821 INODE_PKEY(dir)->k_objectid); 1822 } 1823 1824 /* look for place in the tree for new item */ 1825 retval = search_item(sb, &key, path); 1826 if (retval == IO_ERROR) { 1827 reiserfs_error(sb, "vs-13080", 1828 "i/o failure occurred creating new directory"); 1829 return -EIO; 1830 } 1831 if (retval == ITEM_FOUND) { 1832 pathrelse(path); 1833 reiserfs_warning(sb, "vs-13070", 1834 "object with this key exists (%k)", 1835 &(ih->ih_key)); 1836 return -EEXIST; 1837 } 1838 1839 /* insert item, that is empty directory item */ 1840 return reiserfs_insert_item(th, path, &key, ih, inode, body); 1841 } 1842 1843 /* 1844 * stat data of object has been inserted, this inserts the item 1845 * containing the body of symlink 1846 */ 1847 static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, 1848 struct inode *inode, 1849 struct item_head *ih, 1850 struct treepath *path, const char *symname, 1851 int item_len) 1852 { 1853 struct super_block *sb = th->t_super; 1854 struct cpu_key key; 1855 int retval; 1856 1857 BUG_ON(!th->t_trans_id); 1858 1859 _make_cpu_key(&key, KEY_FORMAT_3_5, 1860 le32_to_cpu(ih->ih_key.k_dir_id), 1861 le32_to_cpu(ih->ih_key.k_objectid), 1862 1, TYPE_DIRECT, 3 /*key length */ ); 1863 1864 make_le_item_head(ih, NULL, KEY_FORMAT_3_5, 1, TYPE_DIRECT, item_len, 1865 0 /*free_space */ ); 1866 1867 /* look for place in the tree for new item */ 1868 retval = search_item(sb, &key, path); 1869 if (retval == IO_ERROR) { 1870 reiserfs_error(sb, "vs-13080", 1871 "i/o failure occurred creating new symlink"); 1872 return -EIO; 1873 } 1874 if (retval == ITEM_FOUND) { 1875 pathrelse(path); 1876 reiserfs_warning(sb, "vs-13080", 1877 "object with this key exists (%k)", 1878 &(ih->ih_key)); 1879 return -EEXIST; 1880 } 1881 1882 /* insert item, that is body of symlink */ 1883 return reiserfs_insert_item(th, path, &key, ih, inode, symname); 1884 } 1885 1886 /* 1887 * inserts the stat data into the tree, and then calls 1888 * reiserfs_new_directory (to insert ".", ".." item if new object is 1889 * directory) or reiserfs_new_symlink (to insert symlink body if new 1890 * object is symlink) or nothing (if new object is regular file) 1891 1892 * NOTE! uid and gid must already be set in the inode. If we return 1893 * non-zero due to an error, we have to drop the quota previously allocated 1894 * for the fresh inode. This can only be done outside a transaction, so 1895 * if we return non-zero, we also end the transaction. 
 *
 * @th: active transaction handle
 * @dir: parent directory for new inode
 * @mode: mode of new inode
 * @symname: symlink contents if inode is symlink
 * @i_size: 0 for regular file, EMPTY_DIR_SIZE for dirs, strlen(symname) for
 * symlinks
 * @inode: inode to be filled
 * @security: optional security context to associate with this inode
 */
int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
		       struct inode *dir, umode_t mode, const char *symname,
		       /* 0 for regular, EMPTY_DIR_SIZE for dirs,
		          strlen (symname) for symlinks */
		       loff_t i_size, struct dentry *dentry,
		       struct inode *inode,
		       struct reiserfs_security_handle *security)
{
	struct super_block *sb = dir->i_sb;
	struct reiserfs_iget_args args;
	INITIALIZE_PATH(path_to_key);
	struct cpu_key key;
	struct item_head ih;
	struct stat_data sd;
	int retval;
	int err;
	int depth;

	BUG_ON(!th->t_trans_id);

	depth = reiserfs_write_unlock_nested(sb);
	err = dquot_alloc_inode(inode);
	reiserfs_write_lock_nested(sb, depth);
	if (err)
		goto out_end_trans;
	if (!dir->i_nlink) {
		err = -EPERM;
		goto out_bad_inode;
	}

	/* item head of new item */
	ih.ih_key.k_dir_id = reiserfs_choose_packing(dir);
	ih.ih_key.k_objectid = cpu_to_le32(reiserfs_get_unused_objectid(th));
	if (!ih.ih_key.k_objectid) {
		err = -ENOMEM;
		goto out_bad_inode;
	}
	args.objectid = inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid);
	if (old_format_only(sb))
		make_le_item_head(&ih, NULL, KEY_FORMAT_3_5, SD_OFFSET,
				  TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
	else
		make_le_item_head(&ih, NULL, KEY_FORMAT_3_6, SD_OFFSET,
				  TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
	memcpy(INODE_PKEY(inode), &ih.ih_key, KEY_SIZE);
	args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);

	depth = reiserfs_write_unlock_nested(inode->i_sb);
	err = insert_inode_locked4(inode, args.objectid,
				   reiserfs_find_actor, &args);
	reiserfs_write_lock_nested(inode->i_sb, depth);
	if (err) {
		err = -EINVAL;
		goto out_bad_inode;
	}

	if (old_format_only(sb))
		/*
		 * not a perfect generation count, as object ids can be reused,
		 * but this is as good as reiserfs can do right now.
		 * note that the private part of inode isn't filled in yet,
		 * we have to use the directory.
		 */
		inode->i_generation = le32_to_cpu(INODE_PKEY(dir)->k_objectid);
	else
#if defined( USE_INODE_GENERATION_COUNTER )
		inode->i_generation =
		    le32_to_cpu(REISERFS_SB(sb)->s_rs->s_inode_generation);
#else
		inode->i_generation = ++event;
#endif

	/* fill stat data */
	set_nlink(inode, (S_ISDIR(mode) ? 2 : 1));

	/* uid and gid must already be set by the caller for quota init */

	simple_inode_init_ts(inode);
	inode->i_size = i_size;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	REISERFS_I(inode)->i_first_direct_byte = S_ISLNK(mode) ?
1 : 1988 U32_MAX /*NO_BYTES_IN_DIRECT_ITEM */ ; 1989 1990 INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list); 1991 REISERFS_I(inode)->i_flags = 0; 1992 REISERFS_I(inode)->i_prealloc_block = 0; 1993 REISERFS_I(inode)->i_prealloc_count = 0; 1994 REISERFS_I(inode)->i_trans_id = 0; 1995 REISERFS_I(inode)->i_jl = NULL; 1996 REISERFS_I(inode)->i_attrs = 1997 REISERFS_I(dir)->i_attrs & REISERFS_INHERIT_MASK; 1998 sd_attrs_to_i_attrs(REISERFS_I(inode)->i_attrs, inode); 1999 reiserfs_init_xattr_rwsem(inode); 2000 2001 /* key to search for correct place for new stat data */ 2002 _make_cpu_key(&key, KEY_FORMAT_3_6, le32_to_cpu(ih.ih_key.k_dir_id), 2003 le32_to_cpu(ih.ih_key.k_objectid), SD_OFFSET, 2004 TYPE_STAT_DATA, 3 /*key length */ ); 2005 2006 /* find proper place for inserting of stat data */ 2007 retval = search_item(sb, &key, &path_to_key); 2008 if (retval == IO_ERROR) { 2009 err = -EIO; 2010 goto out_bad_inode; 2011 } 2012 if (retval == ITEM_FOUND) { 2013 pathrelse(&path_to_key); 2014 err = -EEXIST; 2015 goto out_bad_inode; 2016 } 2017 if (old_format_only(sb)) { 2018 /* i_uid or i_gid is too big to be stored in stat data v3.5 */ 2019 if (i_uid_read(inode) & ~0xffff || i_gid_read(inode) & ~0xffff) { 2020 pathrelse(&path_to_key); 2021 err = -EINVAL; 2022 goto out_bad_inode; 2023 } 2024 inode2sd_v1(&sd, inode, inode->i_size); 2025 } else { 2026 inode2sd(&sd, inode, inode->i_size); 2027 } 2028 /* 2029 * store in in-core inode the key of stat data and version all 2030 * object items will have (directory items will have old offset 2031 * format, other new objects will consist of new items) 2032 */ 2033 if (old_format_only(sb) || S_ISDIR(mode) || S_ISLNK(mode)) 2034 set_inode_item_key_version(inode, KEY_FORMAT_3_5); 2035 else 2036 set_inode_item_key_version(inode, KEY_FORMAT_3_6); 2037 if (old_format_only(sb)) 2038 set_inode_sd_version(inode, STAT_DATA_V1); 2039 else 2040 set_inode_sd_version(inode, STAT_DATA_V2); 2041 2042 /* insert the stat data into the tree */ 2043 #ifdef DISPLACE_NEW_PACKING_LOCALITIES 2044 if (REISERFS_I(dir)->new_packing_locality) 2045 th->displace_new_blocks = 1; 2046 #endif 2047 retval = 2048 reiserfs_insert_item(th, &path_to_key, &key, &ih, inode, 2049 (char *)(&sd)); 2050 if (retval) { 2051 err = retval; 2052 reiserfs_check_path(&path_to_key); 2053 goto out_bad_inode; 2054 } 2055 #ifdef DISPLACE_NEW_PACKING_LOCALITIES 2056 if (!th->displace_new_blocks) 2057 REISERFS_I(dir)->new_packing_locality = 0; 2058 #endif 2059 if (S_ISDIR(mode)) { 2060 /* insert item with "." and ".." */ 2061 retval = 2062 reiserfs_new_directory(th, inode, &ih, &path_to_key, dir); 2063 } 2064 2065 if (S_ISLNK(mode)) { 2066 /* insert body of symlink */ 2067 if (!old_format_only(sb)) 2068 i_size = ROUND_UP(i_size); 2069 retval = 2070 reiserfs_new_symlink(th, inode, &ih, &path_to_key, symname, 2071 i_size); 2072 } 2073 if (retval) { 2074 err = retval; 2075 reiserfs_check_path(&path_to_key); 2076 journal_end(th); 2077 goto out_inserted_sd; 2078 } 2079 2080 /* 2081 * Mark it private if we're creating the privroot 2082 * or something under it. 
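	 * Private inodes hold the filesystem's own metadata (extended
	 * attributes live under the privroot), so the xattr and ACL code
	 * leaves them alone.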
2083 */ 2084 if (IS_PRIVATE(dir) || dentry == REISERFS_SB(sb)->priv_root) 2085 reiserfs_init_priv_inode(inode); 2086 2087 if (reiserfs_posixacl(inode->i_sb)) { 2088 reiserfs_write_unlock(inode->i_sb); 2089 retval = reiserfs_inherit_default_acl(th, dir, dentry, inode); 2090 reiserfs_write_lock(inode->i_sb); 2091 if (retval) { 2092 err = retval; 2093 reiserfs_check_path(&path_to_key); 2094 journal_end(th); 2095 goto out_inserted_sd; 2096 } 2097 } else if (inode->i_sb->s_flags & SB_POSIXACL) { 2098 reiserfs_warning(inode->i_sb, "jdm-13090", 2099 "ACLs aren't enabled in the fs, " 2100 "but vfs thinks they are!"); 2101 } 2102 2103 if (security->name) { 2104 reiserfs_write_unlock(inode->i_sb); 2105 retval = reiserfs_security_write(th, inode, security); 2106 reiserfs_write_lock(inode->i_sb); 2107 if (retval) { 2108 err = retval; 2109 reiserfs_check_path(&path_to_key); 2110 retval = journal_end(th); 2111 if (retval) 2112 err = retval; 2113 goto out_inserted_sd; 2114 } 2115 } 2116 2117 reiserfs_update_sd(th, inode); 2118 reiserfs_check_path(&path_to_key); 2119 2120 return 0; 2121 2122 out_bad_inode: 2123 /* Invalidate the object, nothing was inserted yet */ 2124 INODE_PKEY(inode)->k_objectid = 0; 2125 2126 /* Quota change must be inside a transaction for journaling */ 2127 depth = reiserfs_write_unlock_nested(inode->i_sb); 2128 dquot_free_inode(inode); 2129 reiserfs_write_lock_nested(inode->i_sb, depth); 2130 2131 out_end_trans: 2132 journal_end(th); 2133 /* 2134 * Drop can be outside and it needs more credits so it's better 2135 * to have it outside 2136 */ 2137 depth = reiserfs_write_unlock_nested(inode->i_sb); 2138 dquot_drop(inode); 2139 reiserfs_write_lock_nested(inode->i_sb, depth); 2140 inode->i_flags |= S_NOQUOTA; 2141 make_bad_inode(inode); 2142 2143 out_inserted_sd: 2144 clear_nlink(inode); 2145 th->t_trans_id = 0; /* so the caller can't use this handle later */ 2146 if (inode->i_state & I_NEW) 2147 unlock_new_inode(inode); 2148 iput(inode); 2149 return err; 2150 } 2151 2152 /* 2153 * finds the tail page in the page cache, 2154 * reads the last block in. 2155 * 2156 * On success, page_result is set to a locked, pinned page, and bh_result 2157 * is set to an up to date buffer for the last block in the file. returns 0. 2158 * 2159 * tail conversion is not done, so bh_result might not be valid for writing 2160 * check buffer_mapped(bh_result) and bh_result->b_blocknr != 0 before 2161 * trying to write the block. 2162 * 2163 * on failure, nonzero is returned, page_result and bh_result are untouched. 2164 */ 2165 static int grab_tail_page(struct inode *inode, 2166 struct page **page_result, 2167 struct buffer_head **bh_result) 2168 { 2169 2170 /* 2171 * we want the page with the last byte in the file, 2172 * not the page that will hold the next byte for appending 2173 */ 2174 unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT; 2175 unsigned long pos = 0; 2176 unsigned long start = 0; 2177 unsigned long blocksize = inode->i_sb->s_blocksize; 2178 unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1); 2179 struct buffer_head *bh; 2180 struct buffer_head *head; 2181 struct page *page; 2182 int error; 2183 2184 /* 2185 * we know that we are only called with inode->i_size > 0. 2186 * we also know that a file tail can never be as big as a block 2187 * If i_size % blocksize == 0, our file is currently block aligned 2188 * and it won't need converting or zeroing after a truncate. 
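	 * In that case there is no partial block for the caller to zero,
	 * so we return -ENOENT below rather than reading anything in.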
2189 */ 2190 if ((offset & (blocksize - 1)) == 0) { 2191 return -ENOENT; 2192 } 2193 page = grab_cache_page(inode->i_mapping, index); 2194 error = -ENOMEM; 2195 if (!page) { 2196 goto out; 2197 } 2198 /* start within the page of the last block in the file */ 2199 start = (offset / blocksize) * blocksize; 2200 2201 error = __block_write_begin(page, start, offset - start, 2202 reiserfs_get_block_create_0); 2203 if (error) 2204 goto unlock; 2205 2206 head = page_buffers(page); 2207 bh = head; 2208 do { 2209 if (pos >= start) { 2210 break; 2211 } 2212 bh = bh->b_this_page; 2213 pos += blocksize; 2214 } while (bh != head); 2215 2216 if (!buffer_uptodate(bh)) { 2217 /* 2218 * note, this should never happen, prepare_write should be 2219 * taking care of this for us. If the buffer isn't up to 2220 * date, I've screwed up the code to find the buffer, or the 2221 * code to call prepare_write 2222 */ 2223 reiserfs_error(inode->i_sb, "clm-6000", 2224 "error reading block %lu", bh->b_blocknr); 2225 error = -EIO; 2226 goto unlock; 2227 } 2228 *bh_result = bh; 2229 *page_result = page; 2230 2231 out: 2232 return error; 2233 2234 unlock: 2235 unlock_page(page); 2236 put_page(page); 2237 return error; 2238 } 2239 2240 /* 2241 * vfs version of truncate file. Must NOT be called with 2242 * a transaction already started. 2243 * 2244 * some code taken from block_truncate_page 2245 */ 2246 int reiserfs_truncate_file(struct inode *inode, int update_timestamps) 2247 { 2248 struct reiserfs_transaction_handle th; 2249 /* we want the offset for the first byte after the end of the file */ 2250 unsigned long offset = inode->i_size & (PAGE_SIZE - 1); 2251 unsigned blocksize = inode->i_sb->s_blocksize; 2252 unsigned length; 2253 struct page *page = NULL; 2254 int error; 2255 struct buffer_head *bh = NULL; 2256 int err2; 2257 2258 reiserfs_write_lock(inode->i_sb); 2259 2260 if (inode->i_size > 0) { 2261 error = grab_tail_page(inode, &page, &bh); 2262 if (error) { 2263 /* 2264 * -ENOENT means we truncated past the end of the 2265 * file, and get_block_create_0 could not find a 2266 * block to read in, which is ok. 2267 */ 2268 if (error != -ENOENT) 2269 reiserfs_error(inode->i_sb, "clm-6001", 2270 "grab_tail_page failed %d", 2271 error); 2272 page = NULL; 2273 bh = NULL; 2274 } 2275 } 2276 2277 /* 2278 * so, if page != NULL, we have a buffer head for the offset at 2279 * the end of the file. if the bh is mapped, and bh->b_blocknr != 0, 2280 * then we have an unformatted node. Otherwise, we have a direct item, 2281 * and no zeroing is required on disk. We zero after the truncate, 2282 * because the truncate might pack the item anyway 2283 * (it will unmap bh if it packs). 2284 * 2285 * it is enough to reserve space in transaction for 2 balancings: 2286 * one for "save" link adding and another for the first 2287 * cut_from_item. 
1 is for update_sd 2288 */ 2289 error = journal_begin(&th, inode->i_sb, 2290 JOURNAL_PER_BALANCE_CNT * 2 + 1); 2291 if (error) 2292 goto out; 2293 reiserfs_update_inode_transaction(inode); 2294 if (update_timestamps) 2295 /* 2296 * we are doing real truncate: if the system crashes 2297 * before the last transaction of truncating gets committed 2298 * - on reboot the file either appears truncated properly 2299 * or not truncated at all 2300 */ 2301 add_save_link(&th, inode, 1); 2302 err2 = reiserfs_do_truncate(&th, inode, page, update_timestamps); 2303 error = journal_end(&th); 2304 if (error) 2305 goto out; 2306 2307 /* check reiserfs_do_truncate after ending the transaction */ 2308 if (err2) { 2309 error = err2; 2310 goto out; 2311 } 2312 2313 if (update_timestamps) { 2314 error = remove_save_link(inode, 1 /* truncate */); 2315 if (error) 2316 goto out; 2317 } 2318 2319 if (page) { 2320 length = offset & (blocksize - 1); 2321 /* if we are not on a block boundary */ 2322 if (length) { 2323 length = blocksize - length; 2324 zero_user(page, offset, length); 2325 if (buffer_mapped(bh) && bh->b_blocknr != 0) { 2326 mark_buffer_dirty(bh); 2327 } 2328 } 2329 unlock_page(page); 2330 put_page(page); 2331 } 2332 2333 reiserfs_write_unlock(inode->i_sb); 2334 2335 return 0; 2336 out: 2337 if (page) { 2338 unlock_page(page); 2339 put_page(page); 2340 } 2341 2342 reiserfs_write_unlock(inode->i_sb); 2343 2344 return error; 2345 } 2346 2347 static int map_block_for_writepage(struct inode *inode, 2348 struct buffer_head *bh_result, 2349 unsigned long block) 2350 { 2351 struct reiserfs_transaction_handle th; 2352 int fs_gen; 2353 struct item_head tmp_ih; 2354 struct item_head *ih; 2355 struct buffer_head *bh; 2356 __le32 *item; 2357 struct cpu_key key; 2358 INITIALIZE_PATH(path); 2359 int pos_in_item; 2360 int jbegin_count = JOURNAL_PER_BALANCE_CNT; 2361 loff_t byte_offset = ((loff_t)block << inode->i_sb->s_blocksize_bits)+1; 2362 int retval; 2363 int use_get_block = 0; 2364 int bytes_copied = 0; 2365 int copy_size; 2366 int trans_running = 0; 2367 2368 /* 2369 * catch places below that try to log something without 2370 * starting a trans 2371 */ 2372 th.t_trans_id = 0; 2373 2374 if (!buffer_uptodate(bh_result)) { 2375 return -EIO; 2376 } 2377 2378 kmap(bh_result->b_page); 2379 start_over: 2380 reiserfs_write_lock(inode->i_sb); 2381 make_cpu_key(&key, inode, byte_offset, TYPE_ANY, 3); 2382 2383 research: 2384 retval = search_for_position_by_key(inode->i_sb, &key, &path); 2385 if (retval != POSITION_FOUND) { 2386 use_get_block = 1; 2387 goto out; 2388 } 2389 2390 bh = get_last_bh(&path); 2391 ih = tp_item_head(&path); 2392 item = tp_item_body(&path); 2393 pos_in_item = path.pos_in_item; 2394 2395 /* we've found an unformatted node */ 2396 if (indirect_item_found(retval, ih)) { 2397 if (bytes_copied > 0) { 2398 reiserfs_warning(inode->i_sb, "clm-6002", 2399 "bytes_copied %d", bytes_copied); 2400 } 2401 if (!get_block_num(item, pos_in_item)) { 2402 /* crap, we are writing to a hole */ 2403 use_get_block = 1; 2404 goto out; 2405 } 2406 set_block_dev_mapped(bh_result, 2407 get_block_num(item, pos_in_item), inode); 2408 } else if (is_direct_le_ih(ih)) { 2409 char *p; 2410 p = page_address(bh_result->b_page); 2411 p += (byte_offset - 1) & (PAGE_SIZE - 1); 2412 copy_size = ih_item_len(ih) - pos_in_item; 2413 2414 fs_gen = get_generation(inode->i_sb); 2415 copy_item_head(&tmp_ih, ih); 2416 2417 if (!trans_running) { 2418 /* vs-3050 is gone, no need to drop the path */ 2419 retval = journal_begin(&th, inode->i_sb, 
jbegin_count); 2420 if (retval) 2421 goto out; 2422 reiserfs_update_inode_transaction(inode); 2423 trans_running = 1; 2424 if (fs_changed(fs_gen, inode->i_sb) 2425 && item_moved(&tmp_ih, &path)) { 2426 reiserfs_restore_prepared_buffer(inode->i_sb, 2427 bh); 2428 goto research; 2429 } 2430 } 2431 2432 reiserfs_prepare_for_journal(inode->i_sb, bh, 1); 2433 2434 if (fs_changed(fs_gen, inode->i_sb) 2435 && item_moved(&tmp_ih, &path)) { 2436 reiserfs_restore_prepared_buffer(inode->i_sb, bh); 2437 goto research; 2438 } 2439 2440 memcpy(ih_item_body(bh, ih) + pos_in_item, p + bytes_copied, 2441 copy_size); 2442 2443 journal_mark_dirty(&th, bh); 2444 bytes_copied += copy_size; 2445 set_block_dev_mapped(bh_result, 0, inode); 2446 2447 /* are there still bytes left? */ 2448 if (bytes_copied < bh_result->b_size && 2449 (byte_offset + bytes_copied) < inode->i_size) { 2450 set_cpu_key_k_offset(&key, 2451 cpu_key_k_offset(&key) + 2452 copy_size); 2453 goto research; 2454 } 2455 } else { 2456 reiserfs_warning(inode->i_sb, "clm-6003", 2457 "bad item inode %lu", inode->i_ino); 2458 retval = -EIO; 2459 goto out; 2460 } 2461 retval = 0; 2462 2463 out: 2464 pathrelse(&path); 2465 if (trans_running) { 2466 int err = journal_end(&th); 2467 if (err) 2468 retval = err; 2469 trans_running = 0; 2470 } 2471 reiserfs_write_unlock(inode->i_sb); 2472 2473 /* this is where we fill in holes in the file. */ 2474 if (use_get_block) { 2475 retval = reiserfs_get_block(inode, block, bh_result, 2476 GET_BLOCK_CREATE | GET_BLOCK_NO_IMUX 2477 | GET_BLOCK_NO_DANGLE); 2478 if (!retval) { 2479 if (!buffer_mapped(bh_result) 2480 || bh_result->b_blocknr == 0) { 2481 /* get_block failed to find a mapped unformatted node. */ 2482 use_get_block = 0; 2483 goto start_over; 2484 } 2485 } 2486 } 2487 kunmap(bh_result->b_page); 2488 2489 if (!retval && buffer_mapped(bh_result) && bh_result->b_blocknr == 0) { 2490 /* 2491 * we've copied data from the page into the direct item, so the 2492 * buffer in the page is now clean, mark it to reflect that. 2493 */ 2494 lock_buffer(bh_result); 2495 clear_buffer_dirty(bh_result); 2496 unlock_buffer(bh_result); 2497 } 2498 return retval; 2499 } 2500 2501 /* 2502 * mason@suse.com: updated in 2.5.54 to follow the same general io 2503 * start/recovery path as __block_write_full_folio, along with special 2504 * code to handle reiserfs tails. 2505 */ 2506 static int reiserfs_write_full_folio(struct folio *folio, 2507 struct writeback_control *wbc) 2508 { 2509 struct inode *inode = folio->mapping->host; 2510 unsigned long end_index = inode->i_size >> PAGE_SHIFT; 2511 int error = 0; 2512 unsigned long block; 2513 sector_t last_block; 2514 struct buffer_head *head, *bh; 2515 int partial = 0; 2516 int nr = 0; 2517 int checked = folio_test_checked(folio); 2518 struct reiserfs_transaction_handle th; 2519 struct super_block *s = inode->i_sb; 2520 int bh_per_page = PAGE_SIZE / s->s_blocksize; 2521 th.t_trans_id = 0; 2522 2523 /* no logging allowed when nonblocking or from PF_MEMALLOC */ 2524 if (checked && (current->flags & PF_MEMALLOC)) { 2525 folio_redirty_for_writepage(wbc, folio); 2526 folio_unlock(folio); 2527 return 0; 2528 } 2529 2530 /* 2531 * The folio dirty bit is cleared before writepage is called, which 2532 * means we have to tell create_empty_buffers to make dirty buffers 2533 * The folio really should be up to date at this point, so tossing 2534 * in the BH_Uptodate is just a sanity check. 
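	 * The dirty bits matter because the mapping loop further down only
	 * calls map_block_for_writepage() for buffers that are dirty (or on
	 * a checked folio) and not yet mapped to a real block.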
2535 */ 2536 head = folio_buffers(folio); 2537 if (!head) 2538 head = create_empty_buffers(folio, s->s_blocksize, 2539 (1 << BH_Dirty) | (1 << BH_Uptodate)); 2540 2541 /* 2542 * last folio in the file, zero out any contents past the 2543 * last byte in the file 2544 */ 2545 if (folio->index >= end_index) { 2546 unsigned last_offset; 2547 2548 last_offset = inode->i_size & (PAGE_SIZE - 1); 2549 /* no file contents in this folio */ 2550 if (folio->index >= end_index + 1 || !last_offset) { 2551 folio_unlock(folio); 2552 return 0; 2553 } 2554 folio_zero_segment(folio, last_offset, folio_size(folio)); 2555 } 2556 bh = head; 2557 block = folio->index << (PAGE_SHIFT - s->s_blocksize_bits); 2558 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; 2559 /* first map all the buffers, logging any direct items we find */ 2560 do { 2561 if (block > last_block) { 2562 /* 2563 * This can happen when the block size is less than 2564 * the folio size. The corresponding bytes in the folio 2565 * were zero filled above 2566 */ 2567 clear_buffer_dirty(bh); 2568 set_buffer_uptodate(bh); 2569 } else if ((checked || buffer_dirty(bh)) && 2570 (!buffer_mapped(bh) || bh->b_blocknr == 0)) { 2571 /* 2572 * not mapped yet, or it points to a direct item, search 2573 * the btree for the mapping info, and log any direct 2574 * items found 2575 */ 2576 if ((error = map_block_for_writepage(inode, bh, block))) { 2577 goto fail; 2578 } 2579 } 2580 bh = bh->b_this_page; 2581 block++; 2582 } while (bh != head); 2583 2584 /* 2585 * we start the transaction after map_block_for_writepage, 2586 * because it can create holes in the file (an unbounded operation). 2587 * starting it here, we can make a reliable estimate for how many 2588 * blocks we're going to log 2589 */ 2590 if (checked) { 2591 folio_clear_checked(folio); 2592 reiserfs_write_lock(s); 2593 error = journal_begin(&th, s, bh_per_page + 1); 2594 if (error) { 2595 reiserfs_write_unlock(s); 2596 goto fail; 2597 } 2598 reiserfs_update_inode_transaction(inode); 2599 } 2600 /* now go through and lock any dirty buffers on the folio */ 2601 do { 2602 get_bh(bh); 2603 if (!buffer_mapped(bh)) 2604 continue; 2605 if (buffer_mapped(bh) && bh->b_blocknr == 0) 2606 continue; 2607 2608 if (checked) { 2609 reiserfs_prepare_for_journal(s, bh, 1); 2610 journal_mark_dirty(&th, bh); 2611 continue; 2612 } 2613 /* 2614 * from this point on, we know the buffer is mapped to a 2615 * real block and not a direct item 2616 */ 2617 if (wbc->sync_mode != WB_SYNC_NONE) { 2618 lock_buffer(bh); 2619 } else { 2620 if (!trylock_buffer(bh)) { 2621 folio_redirty_for_writepage(wbc, folio); 2622 continue; 2623 } 2624 } 2625 if (test_clear_buffer_dirty(bh)) { 2626 mark_buffer_async_write(bh); 2627 } else { 2628 unlock_buffer(bh); 2629 } 2630 } while ((bh = bh->b_this_page) != head); 2631 2632 if (checked) { 2633 error = journal_end(&th); 2634 reiserfs_write_unlock(s); 2635 if (error) 2636 goto fail; 2637 } 2638 BUG_ON(folio_test_writeback(folio)); 2639 folio_start_writeback(folio); 2640 folio_unlock(folio); 2641 2642 /* 2643 * since any buffer might be the only dirty buffer on the folio, 2644 * the first submit_bh can bring the folio out of writeback. 2645 * be careful with the buffers. 
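	 * That is why the loop saves bh->b_this_page before each submit_bh()
	 * call and only drops its buffer reference afterwards.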
2646 */ 2647 do { 2648 struct buffer_head *next = bh->b_this_page; 2649 if (buffer_async_write(bh)) { 2650 submit_bh(REQ_OP_WRITE, bh); 2651 nr++; 2652 } 2653 put_bh(bh); 2654 bh = next; 2655 } while (bh != head); 2656 2657 error = 0; 2658 done: 2659 if (nr == 0) { 2660 /* 2661 * if this folio only had a direct item, it is very possible for 2662 * no io to be required without there being an error. Or, 2663 * someone else could have locked them and sent them down the 2664 * pipe without locking the folio 2665 */ 2666 bh = head; 2667 do { 2668 if (!buffer_uptodate(bh)) { 2669 partial = 1; 2670 break; 2671 } 2672 bh = bh->b_this_page; 2673 } while (bh != head); 2674 if (!partial) 2675 folio_mark_uptodate(folio); 2676 folio_end_writeback(folio); 2677 } 2678 return error; 2679 2680 fail: 2681 /* 2682 * catches various errors, we need to make sure any valid dirty blocks 2683 * get to the media. The folio is currently locked and not marked for 2684 * writeback 2685 */ 2686 folio_clear_uptodate(folio); 2687 bh = head; 2688 do { 2689 get_bh(bh); 2690 if (buffer_mapped(bh) && buffer_dirty(bh) && bh->b_blocknr) { 2691 lock_buffer(bh); 2692 mark_buffer_async_write(bh); 2693 } else { 2694 /* 2695 * clear any dirty bits that might have come from 2696 * getting attached to a dirty folio 2697 */ 2698 clear_buffer_dirty(bh); 2699 } 2700 bh = bh->b_this_page; 2701 } while (bh != head); 2702 folio_set_error(folio); 2703 BUG_ON(folio_test_writeback(folio)); 2704 folio_start_writeback(folio); 2705 folio_unlock(folio); 2706 do { 2707 struct buffer_head *next = bh->b_this_page; 2708 if (buffer_async_write(bh)) { 2709 clear_buffer_dirty(bh); 2710 submit_bh(REQ_OP_WRITE, bh); 2711 nr++; 2712 } 2713 put_bh(bh); 2714 bh = next; 2715 } while (bh != head); 2716 goto done; 2717 } 2718 2719 static int reiserfs_read_folio(struct file *f, struct folio *folio) 2720 { 2721 return block_read_full_folio(folio, reiserfs_get_block); 2722 } 2723 2724 static int reiserfs_writepage(struct page *page, struct writeback_control *wbc) 2725 { 2726 struct folio *folio = page_folio(page); 2727 struct inode *inode = folio->mapping->host; 2728 reiserfs_wait_on_write_block(inode->i_sb); 2729 return reiserfs_write_full_folio(folio, wbc); 2730 } 2731 2732 static void reiserfs_truncate_failed_write(struct inode *inode) 2733 { 2734 truncate_inode_pages(inode->i_mapping, inode->i_size); 2735 reiserfs_truncate_file(inode, 0); 2736 } 2737 2738 static int reiserfs_write_begin(struct file *file, 2739 struct address_space *mapping, 2740 loff_t pos, unsigned len, 2741 struct page **pagep, void **fsdata) 2742 { 2743 struct inode *inode; 2744 struct page *page; 2745 pgoff_t index; 2746 int ret; 2747 int old_ref = 0; 2748 2749 inode = mapping->host; 2750 index = pos >> PAGE_SHIFT; 2751 page = grab_cache_page_write_begin(mapping, index); 2752 if (!page) 2753 return -ENOMEM; 2754 *pagep = page; 2755 2756 reiserfs_wait_on_write_block(inode->i_sb); 2757 fix_tail_page_for_writing(page); 2758 if (reiserfs_transaction_running(inode->i_sb)) { 2759 struct reiserfs_transaction_handle *th; 2760 th = (struct reiserfs_transaction_handle *)current-> 2761 journal_info; 2762 BUG_ON(!th->t_refcount); 2763 BUG_ON(!th->t_trans_id); 2764 old_ref = th->t_refcount; 2765 th->t_refcount++; 2766 } 2767 ret = __block_write_begin(page, pos, len, reiserfs_get_block); 2768 if (ret && reiserfs_transaction_running(inode->i_sb)) { 2769 struct reiserfs_transaction_handle *th = current->journal_info; 2770 /* 2771 * this gets a little ugly. 
If reiserfs_get_block returned an
		 * error and left a transaction running, we've got to close
		 * it, and we've got to free the handle if it was a persistent
		 * transaction.
		 *
		 * But, if we had nested into an existing transaction, we need
		 * to just drop the ref count on the handle.
		 *
		 * If old_ref == 0, the transaction is from reiserfs_get_block,
		 * and it was a persistent trans. Otherwise, it was nested
		 * above.
		 */
		if (th->t_refcount > old_ref) {
			if (old_ref)
				th->t_refcount--;
			else {
				int err;
				reiserfs_write_lock(inode->i_sb);
				err = reiserfs_end_persistent_transaction(th);
				reiserfs_write_unlock(inode->i_sb);
				if (err)
					ret = err;
			}
		}
	}
	if (ret) {
		unlock_page(page);
		put_page(page);
		/* Truncate allocated blocks */
		reiserfs_truncate_failed_write(inode);
	}
	return ret;
}

int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
{
	struct inode *inode = page->mapping->host;
	int ret;
	int old_ref = 0;
	int depth;

	depth = reiserfs_write_unlock_nested(inode->i_sb);
	reiserfs_wait_on_write_block(inode->i_sb);
	reiserfs_write_lock_nested(inode->i_sb, depth);

	fix_tail_page_for_writing(page);
	if (reiserfs_transaction_running(inode->i_sb)) {
		struct reiserfs_transaction_handle *th;
		th = (struct reiserfs_transaction_handle *)current->
		    journal_info;
		BUG_ON(!th->t_refcount);
		BUG_ON(!th->t_trans_id);
		old_ref = th->t_refcount;
		th->t_refcount++;
	}

	ret = __block_write_begin(page, from, len, reiserfs_get_block);
	if (ret && reiserfs_transaction_running(inode->i_sb)) {
		struct reiserfs_transaction_handle *th = current->journal_info;
		/*
		 * this gets a little ugly. If reiserfs_get_block returned an
		 * error and left a transaction running, we've got to close
		 * it, and we've got to free the handle if it was a persistent
		 * transaction.
		 *
		 * But, if we had nested into an existing transaction, we need
		 * to just drop the ref count on the handle.
		 *
		 * If old_ref == 0, the transaction is from reiserfs_get_block,
		 * and it was a persistent trans. Otherwise, it was nested
		 * above.
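		 *
		 * In the nested case we only drop the reference taken above;
		 * whoever started the outer transaction remains responsible
		 * for ending it.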
2842 */ 2843 if (th->t_refcount > old_ref) { 2844 if (old_ref) 2845 th->t_refcount--; 2846 else { 2847 int err; 2848 reiserfs_write_lock(inode->i_sb); 2849 err = reiserfs_end_persistent_transaction(th); 2850 reiserfs_write_unlock(inode->i_sb); 2851 if (err) 2852 ret = err; 2853 } 2854 } 2855 } 2856 return ret; 2857 2858 } 2859 2860 static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block) 2861 { 2862 return generic_block_bmap(as, block, reiserfs_bmap); 2863 } 2864 2865 static int reiserfs_write_end(struct file *file, struct address_space *mapping, 2866 loff_t pos, unsigned len, unsigned copied, 2867 struct page *page, void *fsdata) 2868 { 2869 struct folio *folio = page_folio(page); 2870 struct inode *inode = page->mapping->host; 2871 int ret = 0; 2872 int update_sd = 0; 2873 struct reiserfs_transaction_handle *th; 2874 unsigned start; 2875 bool locked = false; 2876 2877 reiserfs_wait_on_write_block(inode->i_sb); 2878 if (reiserfs_transaction_running(inode->i_sb)) 2879 th = current->journal_info; 2880 else 2881 th = NULL; 2882 2883 start = pos & (PAGE_SIZE - 1); 2884 if (unlikely(copied < len)) { 2885 if (!folio_test_uptodate(folio)) 2886 copied = 0; 2887 2888 folio_zero_new_buffers(folio, start + copied, start + len); 2889 } 2890 flush_dcache_folio(folio); 2891 2892 reiserfs_commit_page(inode, page, start, start + copied); 2893 2894 /* 2895 * generic_commit_write does this for us, but does not update the 2896 * transaction tracking stuff when the size changes. So, we have 2897 * to do the i_size updates here. 2898 */ 2899 if (pos + copied > inode->i_size) { 2900 struct reiserfs_transaction_handle myth; 2901 reiserfs_write_lock(inode->i_sb); 2902 locked = true; 2903 /* 2904 * If the file have grown beyond the border where it 2905 * can have a tail, unmark it as needing a tail 2906 * packing 2907 */ 2908 if ((have_large_tails(inode->i_sb) 2909 && inode->i_size > i_block_size(inode) * 4) 2910 || (have_small_tails(inode->i_sb) 2911 && inode->i_size > i_block_size(inode))) 2912 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask; 2913 2914 ret = journal_begin(&myth, inode->i_sb, 1); 2915 if (ret) 2916 goto journal_error; 2917 2918 reiserfs_update_inode_transaction(inode); 2919 inode->i_size = pos + copied; 2920 /* 2921 * this will just nest into our transaction. It's important 2922 * to use mark_inode_dirty so the inode gets pushed around on 2923 * the dirty lists, and so that O_SYNC works as expected 2924 */ 2925 mark_inode_dirty(inode); 2926 reiserfs_update_sd(&myth, inode); 2927 update_sd = 1; 2928 ret = journal_end(&myth); 2929 if (ret) 2930 goto journal_error; 2931 } 2932 if (th) { 2933 if (!locked) { 2934 reiserfs_write_lock(inode->i_sb); 2935 locked = true; 2936 } 2937 if (!update_sd) 2938 mark_inode_dirty(inode); 2939 ret = reiserfs_end_persistent_transaction(th); 2940 if (ret) 2941 goto out; 2942 } 2943 2944 out: 2945 if (locked) 2946 reiserfs_write_unlock(inode->i_sb); 2947 unlock_page(page); 2948 put_page(page); 2949 2950 if (pos + len > inode->i_size) 2951 reiserfs_truncate_failed_write(inode); 2952 2953 return ret == 0 ? 
copied : ret; 2954 2955 journal_error: 2956 reiserfs_write_unlock(inode->i_sb); 2957 locked = false; 2958 if (th) { 2959 if (!update_sd) 2960 reiserfs_update_sd(th, inode); 2961 ret = reiserfs_end_persistent_transaction(th); 2962 } 2963 goto out; 2964 } 2965 2966 int reiserfs_commit_write(struct file *f, struct page *page, 2967 unsigned from, unsigned to) 2968 { 2969 struct inode *inode = page->mapping->host; 2970 loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to; 2971 int ret = 0; 2972 int update_sd = 0; 2973 struct reiserfs_transaction_handle *th = NULL; 2974 int depth; 2975 2976 depth = reiserfs_write_unlock_nested(inode->i_sb); 2977 reiserfs_wait_on_write_block(inode->i_sb); 2978 reiserfs_write_lock_nested(inode->i_sb, depth); 2979 2980 if (reiserfs_transaction_running(inode->i_sb)) { 2981 th = current->journal_info; 2982 } 2983 reiserfs_commit_page(inode, page, from, to); 2984 2985 /* 2986 * generic_commit_write does this for us, but does not update the 2987 * transaction tracking stuff when the size changes. So, we have 2988 * to do the i_size updates here. 2989 */ 2990 if (pos > inode->i_size) { 2991 struct reiserfs_transaction_handle myth; 2992 /* 2993 * If the file have grown beyond the border where it 2994 * can have a tail, unmark it as needing a tail 2995 * packing 2996 */ 2997 if ((have_large_tails(inode->i_sb) 2998 && inode->i_size > i_block_size(inode) * 4) 2999 || (have_small_tails(inode->i_sb) 3000 && inode->i_size > i_block_size(inode))) 3001 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask; 3002 3003 ret = journal_begin(&myth, inode->i_sb, 1); 3004 if (ret) 3005 goto journal_error; 3006 3007 reiserfs_update_inode_transaction(inode); 3008 inode->i_size = pos; 3009 /* 3010 * this will just nest into our transaction. It's important 3011 * to use mark_inode_dirty so the inode gets pushed around 3012 * on the dirty lists, and so that O_SYNC works as expected 3013 */ 3014 mark_inode_dirty(inode); 3015 reiserfs_update_sd(&myth, inode); 3016 update_sd = 1; 3017 ret = journal_end(&myth); 3018 if (ret) 3019 goto journal_error; 3020 } 3021 if (th) { 3022 if (!update_sd) 3023 mark_inode_dirty(inode); 3024 ret = reiserfs_end_persistent_transaction(th); 3025 if (ret) 3026 goto out; 3027 } 3028 3029 out: 3030 return ret; 3031 3032 journal_error: 3033 if (th) { 3034 if (!update_sd) 3035 reiserfs_update_sd(th, inode); 3036 ret = reiserfs_end_persistent_transaction(th); 3037 } 3038 3039 return ret; 3040 } 3041 3042 void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode) 3043 { 3044 if (reiserfs_attrs(inode->i_sb)) { 3045 if (sd_attrs & REISERFS_SYNC_FL) 3046 inode->i_flags |= S_SYNC; 3047 else 3048 inode->i_flags &= ~S_SYNC; 3049 if (sd_attrs & REISERFS_IMMUTABLE_FL) 3050 inode->i_flags |= S_IMMUTABLE; 3051 else 3052 inode->i_flags &= ~S_IMMUTABLE; 3053 if (sd_attrs & REISERFS_APPEND_FL) 3054 inode->i_flags |= S_APPEND; 3055 else 3056 inode->i_flags &= ~S_APPEND; 3057 if (sd_attrs & REISERFS_NOATIME_FL) 3058 inode->i_flags |= S_NOATIME; 3059 else 3060 inode->i_flags &= ~S_NOATIME; 3061 if (sd_attrs & REISERFS_NOTAIL_FL) 3062 REISERFS_I(inode)->i_flags |= i_nopack_mask; 3063 else 3064 REISERFS_I(inode)->i_flags &= ~i_nopack_mask; 3065 } 3066 } 3067 3068 /* 3069 * decide if this buffer needs to stay around for data logging or ordered 3070 * write purposes 3071 */ 3072 static int invalidate_folio_can_drop(struct inode *inode, struct buffer_head *bh) 3073 { 3074 int ret = 1; 3075 struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb); 3076 3077 lock_buffer(bh); 3078 
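	/*
	 * hold the buffer lock and j_dirty_buffers_lock across the checks
	 * below so the journal head cannot change or be freed under us
	 */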
spin_lock(&j->j_dirty_buffers_lock); 3079 if (!buffer_mapped(bh)) { 3080 goto free_jh; 3081 } 3082 /* 3083 * the page is locked, and the only places that log a data buffer 3084 * also lock the page. 3085 */ 3086 if (reiserfs_file_data_log(inode)) { 3087 /* 3088 * very conservative, leave the buffer pinned if 3089 * anyone might need it. 3090 */ 3091 if (buffer_journaled(bh) || buffer_journal_dirty(bh)) { 3092 ret = 0; 3093 } 3094 } else if (buffer_dirty(bh)) { 3095 struct reiserfs_journal_list *jl; 3096 struct reiserfs_jh *jh = bh->b_private; 3097 3098 /* 3099 * why is this safe? 3100 * reiserfs_setattr updates i_size in the on disk 3101 * stat data before allowing vmtruncate to be called. 3102 * 3103 * If buffer was put onto the ordered list for this 3104 * transaction, we know for sure either this transaction 3105 * or an older one already has updated i_size on disk, 3106 * and this ordered data won't be referenced in the file 3107 * if we crash. 3108 * 3109 * if the buffer was put onto the ordered list for an older 3110 * transaction, we need to leave it around 3111 */ 3112 if (jh && (jl = jh->jl) 3113 && jl != SB_JOURNAL(inode->i_sb)->j_current_jl) 3114 ret = 0; 3115 } 3116 free_jh: 3117 if (ret && bh->b_private) { 3118 reiserfs_free_jh(bh); 3119 } 3120 spin_unlock(&j->j_dirty_buffers_lock); 3121 unlock_buffer(bh); 3122 return ret; 3123 } 3124 3125 /* clm -- taken from fs/buffer.c:block_invalidate_folio */ 3126 static void reiserfs_invalidate_folio(struct folio *folio, size_t offset, 3127 size_t length) 3128 { 3129 struct buffer_head *head, *bh, *next; 3130 struct inode *inode = folio->mapping->host; 3131 unsigned int curr_off = 0; 3132 unsigned int stop = offset + length; 3133 int partial_page = (offset || length < folio_size(folio)); 3134 int ret = 1; 3135 3136 BUG_ON(!folio_test_locked(folio)); 3137 3138 if (!partial_page) 3139 folio_clear_checked(folio); 3140 3141 head = folio_buffers(folio); 3142 if (!head) 3143 goto out; 3144 3145 bh = head; 3146 do { 3147 unsigned int next_off = curr_off + bh->b_size; 3148 next = bh->b_this_page; 3149 3150 if (next_off > stop) 3151 goto out; 3152 3153 /* 3154 * is this block fully invalidated? 3155 */ 3156 if (offset <= curr_off) { 3157 if (invalidate_folio_can_drop(inode, bh)) 3158 reiserfs_unmap_buffer(bh); 3159 else 3160 ret = 0; 3161 } 3162 curr_off = next_off; 3163 bh = next; 3164 } while (bh != head); 3165 3166 /* 3167 * We release buffers only if the entire page is being invalidated. 3168 * The get_block cached value has been unconditionally invalidated, 3169 * so real IO is not possible anymore. 3170 */ 3171 if (!partial_page && ret) { 3172 ret = filemap_release_folio(folio, 0); 3173 /* maybe should BUG_ON(!ret); - neilb */ 3174 } 3175 out: 3176 return; 3177 } 3178 3179 static bool reiserfs_dirty_folio(struct address_space *mapping, 3180 struct folio *folio) 3181 { 3182 if (reiserfs_file_data_log(mapping->host)) { 3183 folio_set_checked(folio); 3184 return filemap_dirty_folio(mapping, folio); 3185 } 3186 return block_dirty_folio(mapping, folio); 3187 } 3188 3189 /* 3190 * Returns true if the folio's buffers were dropped. The folio is locked. 3191 * 3192 * Takes j_dirty_buffers_lock to protect the b_assoc_buffers list_heads 3193 * in the buffers at folio_buffers(folio). 3194 * 3195 * even in -o notail mode, we can't be sure an old mount without -o notail 3196 * didn't create files with tails. 
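 * A buffer that still carries a journal head (bh->b_private) and is dirty
 * or locked keeps the folio from being released below.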
3197 */ 3198 static bool reiserfs_release_folio(struct folio *folio, gfp_t unused_gfp_flags) 3199 { 3200 struct inode *inode = folio->mapping->host; 3201 struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb); 3202 struct buffer_head *head; 3203 struct buffer_head *bh; 3204 bool ret = true; 3205 3206 WARN_ON(folio_test_checked(folio)); 3207 spin_lock(&j->j_dirty_buffers_lock); 3208 head = folio_buffers(folio); 3209 bh = head; 3210 do { 3211 if (bh->b_private) { 3212 if (!buffer_dirty(bh) && !buffer_locked(bh)) { 3213 reiserfs_free_jh(bh); 3214 } else { 3215 ret = false; 3216 break; 3217 } 3218 } 3219 bh = bh->b_this_page; 3220 } while (bh != head); 3221 if (ret) 3222 ret = try_to_free_buffers(folio); 3223 spin_unlock(&j->j_dirty_buffers_lock); 3224 return ret; 3225 } 3226 3227 /* 3228 * We thank Mingming Cao for helping us understand in great detail what 3229 * to do in this section of the code. 3230 */ 3231 static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 3232 { 3233 struct file *file = iocb->ki_filp; 3234 struct inode *inode = file->f_mapping->host; 3235 size_t count = iov_iter_count(iter); 3236 ssize_t ret; 3237 3238 ret = blockdev_direct_IO(iocb, inode, iter, 3239 reiserfs_get_blocks_direct_io); 3240 3241 /* 3242 * In case of error extending write may have instantiated a few 3243 * blocks outside i_size. Trim these off again. 3244 */ 3245 if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { 3246 loff_t isize = i_size_read(inode); 3247 loff_t end = iocb->ki_pos + count; 3248 3249 if ((end > isize) && inode_newsize_ok(inode, isize) == 0) { 3250 truncate_setsize(inode, isize); 3251 reiserfs_vfs_truncate_file(inode); 3252 } 3253 } 3254 3255 return ret; 3256 } 3257 3258 int reiserfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 3259 struct iattr *attr) 3260 { 3261 struct inode *inode = d_inode(dentry); 3262 unsigned int ia_valid; 3263 int error; 3264 3265 error = setattr_prepare(&nop_mnt_idmap, dentry, attr); 3266 if (error) 3267 return error; 3268 3269 /* must be turned off for recursive notify_change calls */ 3270 ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID); 3271 3272 if (is_quota_modification(&nop_mnt_idmap, inode, attr)) { 3273 error = dquot_initialize(inode); 3274 if (error) 3275 return error; 3276 } 3277 reiserfs_write_lock(inode->i_sb); 3278 if (attr->ia_valid & ATTR_SIZE) { 3279 /* 3280 * version 2 items will be caught by the s_maxbytes check 3281 * done for us in vmtruncate 3282 */ 3283 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 && 3284 attr->ia_size > MAX_NON_LFS) { 3285 reiserfs_write_unlock(inode->i_sb); 3286 error = -EFBIG; 3287 goto out; 3288 } 3289 3290 inode_dio_wait(inode); 3291 3292 /* fill in hole pointers in the expanding truncate case. 
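		 * generic_cont_expand_simple() below goes through the normal
		 * write_begin/write_end path, so reiserfs_get_block() gets a
		 * chance to map the new last block.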
*/ 3293 if (attr->ia_size > inode->i_size) { 3294 loff_t pos = attr->ia_size; 3295 3296 if ((pos & (inode->i_sb->s_blocksize - 1)) == 0) 3297 pos++; 3298 error = generic_cont_expand_simple(inode, pos); 3299 if (REISERFS_I(inode)->i_prealloc_count > 0) { 3300 int err; 3301 struct reiserfs_transaction_handle th; 3302 /* we're changing at most 2 bitmaps, inode + super */ 3303 err = journal_begin(&th, inode->i_sb, 4); 3304 if (!err) { 3305 reiserfs_discard_prealloc(&th, inode); 3306 err = journal_end(&th); 3307 } 3308 if (err) 3309 error = err; 3310 } 3311 if (error) { 3312 reiserfs_write_unlock(inode->i_sb); 3313 goto out; 3314 } 3315 /* 3316 * file size is changed, ctime and mtime are 3317 * to be updated 3318 */ 3319 attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME); 3320 } 3321 } 3322 reiserfs_write_unlock(inode->i_sb); 3323 3324 if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) || 3325 ((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) && 3326 (get_inode_sd_version(inode) == STAT_DATA_V1)) { 3327 /* stat data of format v3.5 has 16 bit uid and gid */ 3328 error = -EINVAL; 3329 goto out; 3330 } 3331 3332 if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || 3333 (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { 3334 struct reiserfs_transaction_handle th; 3335 int jbegin_count = 3336 2 * 3337 (REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb) + 3338 REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)) + 3339 2; 3340 3341 error = reiserfs_chown_xattrs(inode, attr); 3342 3343 if (error) 3344 return error; 3345 3346 /* 3347 * (user+group)*(old+new) structure - we count quota 3348 * info and , inode write (sb, inode) 3349 */ 3350 reiserfs_write_lock(inode->i_sb); 3351 error = journal_begin(&th, inode->i_sb, jbegin_count); 3352 reiserfs_write_unlock(inode->i_sb); 3353 if (error) 3354 goto out; 3355 error = dquot_transfer(&nop_mnt_idmap, inode, attr); 3356 reiserfs_write_lock(inode->i_sb); 3357 if (error) { 3358 journal_end(&th); 3359 reiserfs_write_unlock(inode->i_sb); 3360 goto out; 3361 } 3362 3363 /* 3364 * Update corresponding info in inode so that everything 3365 * is in one transaction 3366 */ 3367 if (attr->ia_valid & ATTR_UID) 3368 inode->i_uid = attr->ia_uid; 3369 if (attr->ia_valid & ATTR_GID) 3370 inode->i_gid = attr->ia_gid; 3371 mark_inode_dirty(inode); 3372 error = journal_end(&th); 3373 reiserfs_write_unlock(inode->i_sb); 3374 if (error) 3375 goto out; 3376 } 3377 3378 if ((attr->ia_valid & ATTR_SIZE) && 3379 attr->ia_size != i_size_read(inode)) { 3380 error = inode_newsize_ok(inode, attr->ia_size); 3381 if (!error) { 3382 /* 3383 * Could race against reiserfs_file_release 3384 * if called from NFS, so take tailpack mutex. 
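			 * (reiserfs_file_release may repack the tail when the
			 * last writer closes the file, so it must not run
			 * while the file is being shrunk here)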
3385 */ 3386 mutex_lock(&REISERFS_I(inode)->tailpack); 3387 truncate_setsize(inode, attr->ia_size); 3388 reiserfs_truncate_file(inode, 1); 3389 mutex_unlock(&REISERFS_I(inode)->tailpack); 3390 } 3391 } 3392 3393 if (!error) { 3394 setattr_copy(&nop_mnt_idmap, inode, attr); 3395 mark_inode_dirty(inode); 3396 } 3397 3398 if (!error && reiserfs_posixacl(inode->i_sb)) { 3399 if (attr->ia_valid & ATTR_MODE) 3400 error = reiserfs_acl_chmod(dentry); 3401 } 3402 3403 out: 3404 return error; 3405 } 3406 3407 const struct address_space_operations reiserfs_address_space_operations = { 3408 .writepage = reiserfs_writepage, 3409 .read_folio = reiserfs_read_folio, 3410 .readahead = reiserfs_readahead, 3411 .release_folio = reiserfs_release_folio, 3412 .invalidate_folio = reiserfs_invalidate_folio, 3413 .write_begin = reiserfs_write_begin, 3414 .write_end = reiserfs_write_end, 3415 .bmap = reiserfs_aop_bmap, 3416 .direct_IO = reiserfs_direct_IO, 3417 .dirty_folio = reiserfs_dirty_folio, 3418 }; 3419
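
/*
 * Illustrative sketch only, not built: a hypothetical view of the __u32
 * array that reiserfs_encode_fh() above hands to the NFS export code.
 * The struct and field names below are made up for documentation; the
 * real handle is just the bare array, and the parent fields are present
 * only when *lenp came back as 5 or 6.
 */
#if 0
struct reiserfs_fh_layout {
	__u32 objectid;		/* data[0]: inode->i_ino (the objectid) */
	__u32 dirid;		/* data[1]: k_dir_id of the inode */
	__u32 generation;	/* data[2]: inode->i_generation */
	__u32 parent_objectid;	/* data[3]: parent->i_ino */
	__u32 parent_dirid;	/* data[4]: k_dir_id of the parent */
	__u32 parent_generation;	/* data[5]: only when maxlen >= 6 */
};
#endif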