// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */

#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/filelock.h>
#include <linux/mpage.h>
#include "affs.h"

static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);

static int
affs_file_open(struct inode *inode, struct file *filp)
{
	pr_debug("open(%lu,%d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
	atomic_inc(&AFFS_I(inode)->i_opencnt);
	return 0;
}

static int
affs_file_release(struct inode *inode, struct file *filp)
{
	pr_debug("release(%lu, %d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));

	if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
		inode_lock(inode);
		if (inode->i_size != AFFS_I(inode)->mmu_private)
			affs_truncate(inode);
		affs_free_prealloc(inode);
		inode_unlock(inode);
	}

	return 0;
}
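/*
 * File data is reached through a chain of extension (T_LIST) blocks,
 * so locating extension block N would normally mean walking the whole
 * chain from the file header.  Each inode therefore keeps a two-level
 * cache in a single zeroed page: a linear cache (i_lc) holding every
 * 2^i_lc_shift'th extension key, and a small associative cache (i_ac)
 * of recently seen keys in between.  affs_grow_extcache() below fills
 * the linear cache up to lc_idx, widening the stride (i_lc_shift)
 * whenever the extension count outgrows the fixed-size table.
 */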
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 lc_max;
	int i, j, key;

	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
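/*
 * Allocate a fresh T_LIST extension block near @bh, initialize its
 * header and tail, and chain it in as @bh's ->extension, keeping both
 * checksums consistent.  Returns the new (dirty) buffer or an ERR_PTR.
 */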
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);

	mark_buffer_dirty_inode(new_bh, inode);

	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}

static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
	/* inline the simplest case: same extended block as last time */
	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
	if (ext == AFFS_I(inode)->i_ext_last)
		get_bh(bh);
	else
		/* we have to do more (not inlined) */
		bh = affs_get_extblock_slow(inode, ext);

	return bh;
}

static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
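/*
 * affs_get_block() maps a logical file block for the generic buffer
 * I/O paths: the block lives in extension block (block / s_hashsize)
 * at table slot (block % s_hashsize), with extension 0 being the file
 * header block itself.  With @create set, a block just past the end
 * is allocated and entered into the extension block's table.
 */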
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *ext_bh;
	u32 ext;

	pr_debug("%s(%lu, %llu)\n", __func__, inode->i_ino,
		 (unsigned long long)block);

	BUG_ON(block > (sector_t)0x7fffffffUL);

	if (block >= AFFS_I(inode)->i_blkcnt) {
		if (block > AFFS_I(inode)->i_blkcnt || !create)
			goto err_big;
	} else
		create = 0;

	//lock cache
	affs_lock_ext(inode);

	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
	block -= ext * AFFS_SB(sb)->s_hashsize;
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh))
		goto err_ext;
	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

	if (create) {
		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
		if (!blocknr)
			goto err_alloc;
		set_buffer_new(bh_result);
		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
		AFFS_I(inode)->i_blkcnt++;

		/* store new block */
		if (bh_result->b_blocknr)
			affs_warning(sb, "get_block",
				     "block already set (%llx)",
				     (unsigned long long)bh_result->b_blocknr);
		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
		bh_result->b_blocknr = blocknr;

		if (!block) {
			/* insert first block into header block */
			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
			if (tmp)
				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
			affs_adjust_checksum(ext_bh, blocknr - tmp);
		}
	}

	affs_brelse(ext_bh);
	//unlock cache
	affs_unlock_ext(inode);
	return 0;

err_big:
	affs_error(inode->i_sb, "get_block", "strange block request %llu",
		   (unsigned long long)block);
	return -EIO;
err_ext:
	// unlock cache
	affs_unlock_ext(inode);
	return PTR_ERR(ext_bh);
err_alloc:
	brelse(ext_bh);
	clear_buffer_mapped(bh_result);
	bh_result->b_bdev = NULL;
	// unlock cache
	affs_unlock_ext(inode);
	return -ENOSPC;
}

static int affs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, affs_get_block);
}

static int affs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, affs_get_block);
}

static void affs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		affs_truncate(inode);
	}
}
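/*
 * Direct I/O writes are only attempted inside the already-allocated
 * area (mmu_private); returning 0 for anything beyond it makes the
 * generic write path fall back to buffered I/O, which can extend the
 * file through affs_write_begin().
 */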
static ssize_t
affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (iov_iter_rw(iter) == WRITE) {
		loff_t size = offset + count;

		if (AFFS_I(inode)->mmu_private < size)
			return 0;
	}

	ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		affs_write_failed(mapping, offset + count);
	return ret;
}

static int affs_write_begin(const struct kiocb *iocb,
			    struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct folio **foliop, void **fsdata)
{
	int ret;

	ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
			       affs_get_block,
			       &AFFS_I(mapping->host)->mmu_private);
	if (unlikely(ret))
		affs_write_failed(mapping, pos + len);

	return ret;
}

static int affs_write_end(const struct kiocb *iocb,
			  struct address_space *mapping, loff_t pos,
			  unsigned int len, unsigned int copied,
			  struct folio *folio, void *fsdata)
{
	struct inode *inode = mapping->host;
	int ret;

	ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);

	/* Clear Archived bit on file writes, as AmigaOS would do */
	if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
		AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
		mark_inode_dirty(inode);
	}

	return ret;
}

static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, affs_get_block);
}

const struct address_space_operations affs_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= affs_read_folio,
	.writepages	= affs_writepages,
	.write_begin	= affs_write_begin,
	.write_end	= affs_write_end,
	.direct_IO	= affs_direct_IO,
	.migrate_folio	= buffer_migrate_folio,
	.bmap		= _affs_bmap
};

static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, create);
	if (!err) {
		bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}
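/*
 * On an OFS (old filesystem) volume each data block carries its own
 * header (type, key, sequence number, size and a pointer to the next
 * data block), so a block's payload is smaller than the block size and
 * offset by that header, and the generic block helpers cannot be used.
 * The _ofs variants below copy between folios and the data area of
 * each block (AFFS_DATA()) by hand.
 */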
static int affs_do_read_folio_ofs(struct folio *folio, size_t to, int create)
{
	struct inode *inode = folio->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	size_t pos = 0;
	size_t bidx, boff, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %ld, 0, %zu)\n", __func__, inode->i_ino,
		 folio->index, to);
	BUG_ON(to > folio_size(folio));
	bsize = AFFS_SB(sb)->s_data_blksize;
	tmp = folio_pos(folio);
	bidx = tmp / bsize;
	boff = tmp % bsize;

	while (pos < to) {
		bh = affs_bread_ino(inode, bidx, create);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - pos);
		BUG_ON(pos + tmp > to || tmp > bsize);
		memcpy_to_folio(folio, pos, AFFS_DATA(bh) + boff, tmp);
		affs_brelse(bh);
		bidx++;
		pos += tmp;
		boff = 0;
	}
	return 0;
}

static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %d)\n", __func__, inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		BUG_ON(tmp > bsize);
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

			if (tmp_next)
				affs_warning(sb, "extent_file_ofs",
					     "next block already set for %d (%d)",
					     bidx, tmp_next);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}

static int affs_read_folio_ofs(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	size_t to;
	int err;

	pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, folio->index);
	to = folio_size(folio);
	if (folio_pos(folio) + to > inode->i_size) {
		to = inode->i_size - folio_pos(folio);
		folio_zero_segment(folio, to, folio_size(folio));
	}

	err = affs_do_read_folio_ofs(folio, to, 0);
	if (!err)
		folio_mark_uptodate(folio);
	folio_unlock(folio);
	return err;
}
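/*
 * write_begin for OFS: if the write starts beyond the currently
 * allocated size, zero-extend the file first via affs_extent_file_ofs(),
 * then bring the folio fully uptodate, reading with create=1 so every
 * data block backing it exists and is chained in.
 */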
static int affs_write_begin_ofs(const struct kiocb *iocb,
				struct address_space *mapping,
				loff_t pos, unsigned len,
				struct folio **foliop, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct folio *folio;
	pgoff_t index;
	int err = 0;

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end time
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}

	index = pos >> PAGE_SHIFT;
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*foliop = folio;

	if (folio_test_uptodate(folio))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_read_folio_ofs(folio, folio_size(folio), 1);
	if (err) {
		folio_unlock(folio);
		folio_put(folio);
	}
	return err;
}

static int affs_write_end_ofs(const struct kiocb *iocb,
			      struct address_space *mapping,
			      loff_t pos, unsigned len, unsigned copied,
			      struct folio *folio, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	unsigned from, to;
	u32 tmp;
	int written;

	from = pos & (PAGE_SIZE - 1);
	to = from + len;
	/*
	 * XXX: not sure if this can handle short copies (len < copied), but
	 * we don't have to, because the folio should always be uptodate here,
	 * due to write_begin.
	 */

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = folio_address(folio);

	bh = NULL;
	written = 0;
	tmp = (folio->index << PAGE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
		tmp = min(bsize - boff, to - from);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(
			max(boff + tmp, be32_to_cpu(AFFS_DATA_HEAD(bh)->size)));
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
	}
	while (from + bsize <= to) {
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto err_bh;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}
	if (from < to) {
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto err_bh;
		tmp = min(bsize, to - from);
		BUG_ON(tmp > bsize);
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	folio_mark_uptodate(folio);

done:
	affs_brelse(bh);
	tmp = (folio->index << PAGE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;

	/* Clear Archived bit on file writes, as AmigaOS would do */
	if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
		AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
		mark_inode_dirty(inode);
	}

err_first_bh:
	folio_unlock(folio);
	folio_put(folio);

	return written;

err_bh:
	bh = prev_bh;
	if (!written)
		written = PTR_ERR(bh);
	goto done;
}

const struct address_space_operations affs_aops_ofs = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= affs_read_folio_ofs,
	//.writepages	= affs_writepages_ofs,
	.write_begin	= affs_write_begin_ofs,
	.write_end	= affs_write_end_ofs,
	.migrate_folio	= filemap_migrate_folio,
};

/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	pr_debug("free_prealloc(ino=%lu)\n", inode->i_ino);

	while (AFFS_I(inode)->i_pa_cnt) {
		AFFS_I(inode)->i_pa_cnt--;
		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
	}
}

/* Truncate (or enlarge) a file to the requested size. */
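/*
 * Enlarging is done through ->write_begin/->write_end at the new EOF,
 * so the normal allocation paths above do the work.  Shrinking drops
 * cached extension keys past the new last extension block, clears the
 * now-unused slots in that block, and then walks the rest of the
 * extension chain, freeing every data and extension block behind the
 * new size.
 */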
void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n",
		 inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size);

	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		struct address_space *mapping = inode->i_mapping;
		struct folio *folio;
		void *fsdata = NULL;
		loff_t isize = inode->i_size;
		int res;

		res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &folio, &fsdata);
		if (!res)
			res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, folio, fsdata);
		else
			inode->i_size = AFFS_I(inode)->mmu_private;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	// lock cache
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate",
			     "unexpected read error for ext block %u (%ld)",
			     ext, PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS)) {
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate",
					     "unexpected read error for last block %u (%ld)",
					     ext, PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;
	// unlock cache

	while (ext_key) {
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}
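/*
 * fsync: flush and wait on the data range, then write the inode itself
 * and sync the block device so the updated metadata reaches the media.
 */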
int
affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int ret, err;

	err = file_write_and_wait_range(filp, start, end);
	if (err)
		return err;

	inode_lock(inode);
	ret = write_inode_now(inode, 0);
	err = sync_blockdev(inode->i_sb->s_bdev);
	if (!ret)
		ret = err;
	inode_unlock(inode);
	return ret;
}

const struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap_prepare	= generic_file_mmap_prepare,
	.open		= affs_file_open,
	.release	= affs_file_release,
	.fsync		= affs_file_fsync,
	.splice_read	= filemap_splice_read,
	.setlease	= generic_setlease,
};

const struct inode_operations affs_file_inode_operations = {
	.setattr	= affs_notify_change,
};