// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
 */

#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/time.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/random.h>
#include <linux/iversion.h>

#include "exfat_raw.h"
#include "exfat_fs.h"

int __exfat_write_inode(struct inode *inode, int sync)
{
	unsigned long long on_disk_size;
	struct exfat_dentry *ep, *ep2;
	struct exfat_entry_set_cache es;
	struct super_block *sb = inode->i_sb;
	struct exfat_sb_info *sbi = EXFAT_SB(sb);
	struct exfat_inode_info *ei = EXFAT_I(inode);
	bool is_dir = (ei->type == TYPE_DIR) ? true : false;
	struct timespec64 ts;

	if (inode->i_ino == EXFAT_ROOT_INO)
		return 0;

	/*
	 * If the inode is already unlinked, there is no need for updating it.
	 */
	if (ei->dir.dir == DIR_DELETED)
		return 0;

	if (is_dir && ei->dir.dir == sbi->root_dir && ei->entry == -1)
		return 0;

	exfat_set_volume_dirty(sb);

	/* get the directory entry of given file or directory */
	if (exfat_get_dentry_set(&es, sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES))
		return -EIO;
	ep = exfat_get_dentry_cached(&es, ES_IDX_FILE);
	ep2 = exfat_get_dentry_cached(&es, ES_IDX_STREAM);

	ep->dentry.file.attr = cpu_to_le16(exfat_make_attr(inode));

	/* set FILE_INFO structure using the acquired struct exfat_dentry */
	exfat_set_entry_time(sbi, &ei->i_crtime,
			&ep->dentry.file.create_tz,
			&ep->dentry.file.create_time,
			&ep->dentry.file.create_date,
			&ep->dentry.file.create_time_cs);
	ts = inode_get_mtime(inode);
	exfat_set_entry_time(sbi, &ts,
			&ep->dentry.file.modify_tz,
			&ep->dentry.file.modify_time,
			&ep->dentry.file.modify_date,
			&ep->dentry.file.modify_time_cs);
	ts = inode_get_atime(inode);
	exfat_set_entry_time(sbi, &ts,
			&ep->dentry.file.access_tz,
			&ep->dentry.file.access_time,
			&ep->dentry.file.access_date,
			NULL);

	/* File size should be zero if there is no cluster allocated */
	on_disk_size = i_size_read(inode);

	if (ei->start_clu == EXFAT_EOF_CLUSTER)
		on_disk_size = 0;

	ep2->dentry.stream.size = cpu_to_le64(on_disk_size);
	/*
	 * mmap write does not use exfat_write_end(), valid_size may be
	 * extended to the sector-aligned length in exfat_get_block().
	 * So we need to fix up valid_size to the written length.
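	 *
	 * The stream extension entry stores both size and valid_size, and
	 * valid_size must never exceed size, so clamp it to the on-disk
	 * size below.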
	 */
	if (on_disk_size < ei->valid_size)
		ep2->dentry.stream.valid_size = ep2->dentry.stream.size;
	else
		ep2->dentry.stream.valid_size = cpu_to_le64(ei->valid_size);

	if (on_disk_size) {
		ep2->dentry.stream.flags = ei->flags;
		ep2->dentry.stream.start_clu = cpu_to_le32(ei->start_clu);
	} else {
		ep2->dentry.stream.flags = ALLOC_FAT_CHAIN;
		ep2->dentry.stream.start_clu = EXFAT_FREE_CLUSTER;
	}

	exfat_update_dir_chksum(&es);
	return exfat_put_dentry_set(&es, sync);
}

int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (unlikely(exfat_forced_shutdown(inode->i_sb)))
		return -EIO;

	mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
	ret = __exfat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
	mutex_unlock(&EXFAT_SB(inode->i_sb)->s_lock);

	return ret;
}

void exfat_sync_inode(struct inode *inode)
{
	lockdep_assert_held(&EXFAT_SB(inode->i_sb)->s_lock);
	__exfat_write_inode(inode, 1);
}

/*
 * Input: inode, (logical) clu_offset, target allocation area
 * Output: errcode, cluster number
 * *clu = (~0), if it's unable to allocate a new cluster
 */
static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
		unsigned int *clu, int create)
{
	int ret;
	unsigned int last_clu;
	struct exfat_chain new_clu;
	struct super_block *sb = inode->i_sb;
	struct exfat_sb_info *sbi = EXFAT_SB(sb);
	struct exfat_inode_info *ei = EXFAT_I(inode);
	unsigned int local_clu_offset = clu_offset;
	unsigned int num_to_be_allocated = 0, num_clusters;

	num_clusters = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);

	if (clu_offset >= num_clusters)
		num_to_be_allocated = clu_offset - num_clusters + 1;

	if (!create && (num_to_be_allocated > 0)) {
		*clu = EXFAT_EOF_CLUSTER;
		return 0;
	}

	*clu = last_clu = ei->start_clu;

	if (ei->flags == ALLOC_NO_FAT_CHAIN) {
		if (clu_offset > 0 && *clu != EXFAT_EOF_CLUSTER) {
			last_clu += clu_offset - 1;

			if (clu_offset == num_clusters)
				*clu = EXFAT_EOF_CLUSTER;
			else
				*clu += clu_offset;
		}
	} else if (ei->type == TYPE_FILE) {
		unsigned int fclus = 0;
		int err = exfat_get_cluster(inode, clu_offset,
				&fclus, clu, &last_clu, 1);
		if (err)
			return -EIO;

		clu_offset -= fclus;
	} else {
		/* hint information */
		if (clu_offset > 0 && ei->hint_bmap.off != EXFAT_EOF_CLUSTER &&
		    ei->hint_bmap.off > 0 && clu_offset >= ei->hint_bmap.off) {
			clu_offset -= ei->hint_bmap.off;
			/* hint_bmap.clu should be valid */
			WARN_ON(ei->hint_bmap.clu < 2);
			*clu = ei->hint_bmap.clu;
		}

		while (clu_offset > 0 && *clu != EXFAT_EOF_CLUSTER) {
			last_clu = *clu;
			if (exfat_get_next_cluster(sb, clu))
				return -EIO;
			clu_offset--;
		}
	}

	if (*clu == EXFAT_EOF_CLUSTER) {
		exfat_set_volume_dirty(sb);

		new_clu.dir = (last_clu == EXFAT_EOF_CLUSTER) ?
				EXFAT_EOF_CLUSTER : last_clu + 1;
		new_clu.size = 0;
		new_clu.flags = ei->flags;

		/* allocate a cluster */
		if (num_to_be_allocated < 1) {
			/* Broken FAT (i_size > allocated FAT) */
			exfat_fs_error(sb, "broken FAT chain.");
			return -EIO;
		}

		ret = exfat_alloc_cluster(inode, num_to_be_allocated, &new_clu,
				inode_needs_sync(inode));
		if (ret)
			return ret;

		if (new_clu.dir == EXFAT_EOF_CLUSTER ||
		    new_clu.dir == EXFAT_FREE_CLUSTER) {
			exfat_fs_error(sb,
				"bogus cluster new allocated (last_clu : %u, new_clu : %u)",
				last_clu, new_clu.dir);
			return -EIO;
		}

		/* append to the FAT chain */
		if (last_clu == EXFAT_EOF_CLUSTER) {
			if (new_clu.flags == ALLOC_FAT_CHAIN)
				ei->flags = ALLOC_FAT_CHAIN;
			ei->start_clu = new_clu.dir;
		} else {
			if (new_clu.flags != ei->flags) {
				/*
				 * no-fat-chain bit is disabled,
				 * so fat-chain should be synced with
				 * alloc-bitmap
				 */
				exfat_chain_cont_cluster(sb, ei->start_clu,
						num_clusters);
				ei->flags = ALLOC_FAT_CHAIN;
			}
			if (new_clu.flags == ALLOC_FAT_CHAIN)
				if (exfat_ent_set(sb, last_clu, new_clu.dir))
					return -EIO;
		}

		num_clusters += num_to_be_allocated;
		*clu = new_clu.dir;

		inode->i_blocks += EXFAT_CLU_TO_B(num_to_be_allocated, sbi) >> 9;

		/*
		 * Move *clu pointer along FAT chains (hole care) because the
		 * caller of this function expects *clu to be the last cluster.
		 * This only works when num_to_be_allocated >= 2,
		 * *clu = (the first cluster of the allocated chain) =>
		 * (the last cluster of ...)
		 */
		if (ei->flags == ALLOC_NO_FAT_CHAIN) {
			*clu += num_to_be_allocated - 1;
		} else {
			while (num_to_be_allocated > 1) {
				if (exfat_get_next_cluster(sb, clu))
					return -EIO;
				num_to_be_allocated--;
			}
		}
	}

	/* hint information */
	ei->hint_bmap.off = local_clu_offset;
	ei->hint_bmap.clu = *clu;

	return 0;
}

static int exfat_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);
	struct super_block *sb = inode->i_sb;
	struct exfat_sb_info *sbi = EXFAT_SB(sb);
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
	int err = 0;
	unsigned long mapped_blocks = 0;
	unsigned int cluster, sec_offset;
	sector_t last_block;
	sector_t phys = 0;
	sector_t valid_blks;

	mutex_lock(&sbi->s_lock);
	last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size_read(inode), sb);
	if (iblock >= last_block && !create)
		goto done;

	/*
	 * Is this block already allocated?
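	 *
	 * exfat_map_cluster() walks the cluster chain (or extends it when
	 * @create is set) and returns the cluster backing this logical
	 * block, or EXFAT_EOF_CLUSTER if nothing is allocated there.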
	 */
	err = exfat_map_cluster(inode, iblock >> sbi->sect_per_clus_bits,
			&cluster, create);
	if (err) {
		if (err != -ENOSPC)
			exfat_fs_error_ratelimit(sb,
				"failed to bmap (inode : %p iblock : %llu, err : %d)",
				inode, (unsigned long long)iblock, err);
		goto unlock_ret;
	}

	if (cluster == EXFAT_EOF_CLUSTER)
		goto done;

	/* sector offset in cluster */
	sec_offset = iblock & (sbi->sect_per_clus - 1);

	phys = exfat_cluster_to_sector(sbi, cluster) + sec_offset;
	mapped_blocks = sbi->sect_per_clus - sec_offset;
	max_blocks = min(mapped_blocks, max_blocks);

	map_bh(bh_result, sb, phys);
	if (buffer_delay(bh_result))
		clear_buffer_delay(bh_result);

	if (create) {
		valid_blks = EXFAT_B_TO_BLK_ROUND_UP(ei->valid_size, sb);

		if (iblock + max_blocks < valid_blks) {
			/* The range has been written, map it */
			goto done;
		} else if (iblock < valid_blks) {
			/*
			 * The range has been partially written,
			 * map the written part.
			 */
			max_blocks = valid_blks - iblock;
			goto done;
		}

		/* The area has not been written, map and mark as new. */
		set_buffer_new(bh_result);

		ei->valid_size = EXFAT_BLK_TO_B(iblock + max_blocks, sb);
		mark_inode_dirty(inode);
	} else {
		valid_blks = EXFAT_B_TO_BLK(ei->valid_size, sb);

		if (iblock + max_blocks < valid_blks) {
			/* The range has been written, map it */
			goto done;
		} else if (iblock < valid_blks) {
			/*
			 * The area has been partially written,
			 * map the written part.
			 */
			max_blocks = valid_blks - iblock;
			goto done;
		} else if (iblock == valid_blks &&
			   (ei->valid_size & (sb->s_blocksize - 1))) {
			/*
			 * The block has been partially written,
			 * zero the unwritten part and map the block.
			 */
			loff_t size, off, pos;

			max_blocks = 1;

			/*
			 * For direct read, the unwritten part will be zeroed in
			 * exfat_direct_IO()
			 */
			if (!bh_result->b_folio)
				goto done;

			pos = EXFAT_BLK_TO_B(iblock, sb);
			size = ei->valid_size - pos;
			off = pos & (PAGE_SIZE - 1);

			folio_set_bh(bh_result, bh_result->b_folio, off);
			err = bh_read(bh_result, 0);
			if (err < 0)
				goto unlock_ret;

			folio_zero_segment(bh_result->b_folio, off + size,
					off + sb->s_blocksize);
		} else {
			/*
			 * The range has not been written, clear the mapped flag
			 * to only zero the cache and do not read from disk.
			 */
			clear_buffer_mapped(bh_result);
		}
	}
done:
	bh_result->b_size = EXFAT_BLK_TO_B(max_blocks, sb);
unlock_ret:
	mutex_unlock(&sbi->s_lock);
	return err;
}

static int exfat_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, exfat_get_block);
}

static void exfat_readahead(struct readahead_control *rac)
{
	struct address_space *mapping = rac->mapping;
	struct inode *inode = mapping->host;
	struct exfat_inode_info *ei = EXFAT_I(inode);
	loff_t pos = readahead_pos(rac);

	/*
	 * Range crosses valid_size; read it page by page.
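	 *
	 * Going through ->read_folio instead lets exfat_get_block() check
	 * valid_size for each block, so data beyond valid_size is zeroed
	 * rather than read from disk.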
	 */
	if (ei->valid_size < i_size_read(inode) &&
	    pos <= ei->valid_size &&
	    ei->valid_size < pos + readahead_length(rac))
		return;

	mpage_readahead(rac, exfat_get_block);
}

static int exfat_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	if (unlikely(exfat_forced_shutdown(mapping->host->i_sb)))
		return -EIO;

	return mpage_writepages(mapping, wbc, exfat_get_block);
}

static void exfat_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > i_size_read(inode)) {
		truncate_pagecache(inode, i_size_read(inode));
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		exfat_truncate(inode);
	}
}

static int exfat_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned int len,
		struct folio **foliop, void **fsdata)
{
	int ret;

	if (unlikely(exfat_forced_shutdown(mapping->host->i_sb)))
		return -EIO;

	ret = block_write_begin(mapping, pos, len, foliop, exfat_get_block);

	if (ret < 0)
		exfat_write_failed(mapping, pos+len);

	return ret;
}

static int exfat_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned int len, unsigned int copied,
		struct folio *folio, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct exfat_inode_info *ei = EXFAT_I(inode);
	int err;

	err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
	if (err < len)
		exfat_write_failed(mapping, pos+len);

	if (!(err < 0) && pos + err > ei->valid_size) {
		ei->valid_size = pos + err;
		mark_inode_dirty(inode);
	}

	if (!(err < 0) && !(ei->attr & EXFAT_ATTR_ARCHIVE)) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		ei->attr |= EXFAT_ATTR_ARCHIVE;
		mark_inode_dirty(inode);
	}

	return err;
}

static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct exfat_inode_info *ei = EXFAT_I(inode);
	loff_t pos = iocb->ki_pos;
	loff_t size = pos + iov_iter_count(iter);
	int rw = iov_iter_rw(iter);
	ssize_t ret;

	/*
	 * Need to use DIO_LOCKING to avoid the race condition between
	 * exfat_get_block() and ->truncate().
	 */
	ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block);
	if (ret < 0) {
		if (rw == WRITE && ret != -EIOCBQUEUED)
			exfat_write_failed(mapping, size);

		return ret;
	} else
		size = pos + ret;

	if (rw == WRITE) {
		/*
		 * If the block had been partially written before this write,
		 * ->valid_size will not be updated in exfat_get_block(),
		 * update it here.
		 */
		if (ei->valid_size < size) {
			ei->valid_size = size;
			mark_inode_dirty(inode);
		}
	} else if (pos < ei->valid_size && ei->valid_size < size) {
		/* zero the unwritten part in the partially written block */
		iov_iter_revert(iter, size - ei->valid_size);
		iov_iter_zero(size - ei->valid_size, iter);
	}

	return ret;
}

static sector_t exfat_aop_bmap(struct address_space *mapping, sector_t block)
{
	sector_t blocknr;

	/*
	 * exfat_get_cluster() assumes the requested blocknr isn't truncated.
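	 *
	 * Holding truncate_lock for read keeps a concurrent truncate from
	 * freeing the clusters while the block is being mapped.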
	 */
	down_read(&EXFAT_I(mapping->host)->truncate_lock);
	blocknr = generic_block_bmap(mapping, block, exfat_get_block);
	up_read(&EXFAT_I(mapping->host)->truncate_lock);
	return blocknr;
}

/*
 * exfat_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate to physically zero out the tail end
 * of that block so it doesn't yield old data if the file is later grown.
 * This also avoids failures from fsx for "data past EOF" cases.
 */
int exfat_block_truncate_page(struct inode *inode, loff_t from)
{
	return block_truncate_page(inode->i_mapping, from, exfat_get_block);
}

static const struct address_space_operations exfat_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= exfat_read_folio,
	.readahead	= exfat_readahead,
	.writepages	= exfat_writepages,
	.write_begin	= exfat_write_begin,
	.write_end	= exfat_write_end,
	.direct_IO	= exfat_direct_IO,
	.bmap		= exfat_aop_bmap,
	.migrate_folio	= buffer_migrate_folio,
};

static inline unsigned long exfat_hash(loff_t i_pos)
{
	return hash_32(i_pos, EXFAT_HASH_BITS);
}

void exfat_hash_inode(struct inode *inode, loff_t i_pos)
{
	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
	struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);

	spin_lock(&sbi->inode_hash_lock);
	EXFAT_I(inode)->i_pos = i_pos;
	hlist_add_head(&EXFAT_I(inode)->i_hash_fat, head);
	spin_unlock(&sbi->inode_hash_lock);
}

void exfat_unhash_inode(struct inode *inode)
{
	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);

	spin_lock(&sbi->inode_hash_lock);
	hlist_del_init(&EXFAT_I(inode)->i_hash_fat);
	EXFAT_I(inode)->i_pos = 0;
	spin_unlock(&sbi->inode_hash_lock);
}

struct inode *exfat_iget(struct super_block *sb, loff_t i_pos)
{
	struct exfat_sb_info *sbi = EXFAT_SB(sb);
	struct exfat_inode_info *info;
	struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
	struct inode *inode = NULL;

	spin_lock(&sbi->inode_hash_lock);
	hlist_for_each_entry(info, head, i_hash_fat) {
		WARN_ON(info->vfs_inode.i_sb != sb);

		if (i_pos != info->i_pos)
			continue;
		inode = igrab(&info->vfs_inode);
		if (inode)
			break;
	}
	spin_unlock(&sbi->inode_hash_lock);
	return inode;
}

/* doesn't deal with root inode */
static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
{
	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
	struct exfat_inode_info *ei = EXFAT_I(inode);
	loff_t size = info->size;

	ei->dir = info->dir;
	ei->entry = info->entry;
	ei->attr = info->attr;
	ei->start_clu = info->start_clu;
	ei->flags = info->flags;
	ei->type = info->type;
	ei->valid_size = info->valid_size;

	ei->version = 0;
	ei->hint_stat.eidx = 0;
	ei->hint_stat.clu = info->start_clu;
	ei->hint_femp.eidx = EXFAT_HINT_NONE;
	ei->hint_bmap.off = EXFAT_EOF_CLUSTER;
	ei->i_pos = 0;

	inode->i_uid = sbi->options.fs_uid;
	inode->i_gid = sbi->options.fs_gid;
	inode_inc_iversion(inode);
	inode->i_generation = get_random_u32();

	if (info->attr & EXFAT_ATTR_SUBDIR) { /* directory */
		inode->i_generation &= ~1;
		inode->i_mode = exfat_make_mode(sbi, info->attr, 0777);
		inode->i_op = &exfat_dir_inode_operations;
		inode->i_fop = &exfat_dir_operations;
		set_nlink(inode, info->num_subdirs);
	} else { /* regular file */
		inode->i_generation |= 1;
		inode->i_mode = exfat_make_mode(sbi, info->attr, 0777);
		inode->i_op = &exfat_file_inode_operations;
		inode->i_fop = &exfat_file_operations;
		inode->i_mapping->a_ops = &exfat_aops;
		inode->i_mapping->nrpages = 0;
	}

	i_size_write(inode, size);

	exfat_save_attr(inode, info->attr);

	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
	inode_set_mtime_to_ts(inode, info->mtime);
	inode_set_ctime_to_ts(inode, info->mtime);
	ei->i_crtime = info->crtime;
	inode_set_atime_to_ts(inode, info->atime);

	return 0;
}

struct inode *exfat_build_inode(struct super_block *sb,
		struct exfat_dir_entry *info, loff_t i_pos)
{
	struct inode *inode;
	int err;

	inode = exfat_iget(sb, i_pos);
	if (inode)
		goto out;
	inode = new_inode(sb);
	if (!inode) {
		inode = ERR_PTR(-ENOMEM);
		goto out;
	}
	inode->i_ino = iunique(sb, EXFAT_ROOT_INO);
	inode_set_iversion(inode, 1);
	err = exfat_fill_inode(inode, info);
	if (err) {
		iput(inode);
		inode = ERR_PTR(err);
		goto out;
	}
	exfat_hash_inode(inode, i_pos);
	insert_inode_hash(inode);
out:
	return inode;
}

void exfat_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);

	if (!inode->i_nlink) {
		i_size_write(inode, 0);
		mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
		__exfat_truncate(inode);
		mutex_unlock(&EXFAT_SB(inode->i_sb)->s_lock);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);
	exfat_cache_inval_inode(inode);
	exfat_unhash_inode(inode);
}