// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * file.c
 */

/*
 * This file contains code for handling regular files.  A regular file
 * consists of a sequence of contiguous compressed blocks, and/or a
 * compressed fragment block (tail-end packed block).  The compressed size
 * of each datablock is stored in a block list contained within the
 * file inode (itself stored in one or more compressed metadata blocks).
 *
 * To speed up access to datablocks when reading 'large' files (256 Mbytes or
 * larger), the code implements an index cache that caches the mapping from
 * block index to datablock location on disk.
 *
 * The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
 * retaining a simple and space-efficient block list on disk.  The cache
 * is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
 * Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
 * The index cache is designed to be memory efficient, and by default uses
 * 16 KiB.
 */

#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "page_actor.h"

/*
 * Locate cache slot in range [offset, index] for specified inode.  If
 * there's more than one return the slot closest to index.
 */
static struct meta_index *locate_meta_index(struct inode *inode, int offset,
				int index)
{
	struct meta_index *meta = NULL;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int i;

	mutex_lock(&msblk->meta_index_mutex);

	TRACE("locate_meta_index: index %d, offset %d\n", index, offset);

	if (msblk->meta_index == NULL)
		goto not_allocated;

	for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
		if (msblk->meta_index[i].inode_number == inode->i_ino &&
				msblk->meta_index[i].offset >= offset &&
				msblk->meta_index[i].offset <= index &&
				msblk->meta_index[i].locked == 0) {
			TRACE("locate_meta_index: entry %d, offset %d\n", i,
					msblk->meta_index[i].offset);
			meta = &msblk->meta_index[i];
			offset = meta->offset;
		}
	}

	if (meta)
		meta->locked = 1;

not_allocated:
	mutex_unlock(&msblk->meta_index_mutex);

	return meta;
}
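
/*
 * Slots are claimed by setting 'locked' while holding meta_index_mutex,
 * and released again by release_meta_index().  This pins a slot while
 * fill_meta_index() grows it with (potentially slow) block list reads,
 * without the mutex being held across those reads.
 */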

/*
 * Find and initialise an empty cache slot for index offset.
 */
static struct meta_index *empty_meta_index(struct inode *inode, int offset,
				int skip)
{
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	struct meta_index *meta = NULL;
	int i;

	mutex_lock(&msblk->meta_index_mutex);

	TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);

	if (msblk->meta_index == NULL) {
		/*
		 * First time cache index has been used, allocate and
		 * initialise.  The cache index could be allocated at
		 * mount time but doing it here means it is allocated only
		 * if a 'large' file is read.
		 */
		msblk->meta_index = kcalloc(SQUASHFS_META_SLOTS,
			sizeof(*(msblk->meta_index)), GFP_KERNEL);
		if (msblk->meta_index == NULL) {
			ERROR("Failed to allocate meta_index\n");
			goto failed;
		}
		for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
			msblk->meta_index[i].inode_number = 0;
			msblk->meta_index[i].locked = 0;
		}
		msblk->next_meta_index = 0;
	}

	for (i = SQUASHFS_META_SLOTS; i &&
			msblk->meta_index[msblk->next_meta_index].locked; i--)
		msblk->next_meta_index = (msblk->next_meta_index + 1) %
			SQUASHFS_META_SLOTS;

	if (i == 0) {
		TRACE("empty_meta_index: failed!\n");
		goto failed;
	}

	TRACE("empty_meta_index: returned meta entry %d, %p\n",
			msblk->next_meta_index,
			&msblk->meta_index[msblk->next_meta_index]);

	meta = &msblk->meta_index[msblk->next_meta_index];
	msblk->next_meta_index = (msblk->next_meta_index + 1) %
			SQUASHFS_META_SLOTS;

	meta->inode_number = inode->i_ino;
	meta->offset = offset;
	meta->skip = skip;
	meta->entries = 0;
	meta->locked = 1;

failed:
	mutex_unlock(&msblk->meta_index_mutex);
	return meta;
}


static void release_meta_index(struct inode *inode, struct meta_index *meta)
{
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	mutex_lock(&msblk->meta_index_mutex);
	meta->locked = 0;
	mutex_unlock(&msblk->meta_index_mutex);
}


/*
 * Read the next n blocks from the block list, starting from
 * metadata block <start_block, offset>.
 */
static long long read_indexes(struct super_block *sb, int n,
				u64 *start_block, int *offset)
{
	int err, i;
	long long block = 0;
	__le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (blist == NULL) {
		ERROR("read_indexes: Failed to allocate block_list\n");
		return -ENOMEM;
	}

	while (n) {
		int blocks = min_t(int, n, PAGE_SIZE >> 2);

		err = squashfs_read_metadata(sb, blist, start_block,
				offset, blocks << 2);
		if (err < 0) {
			ERROR("read_indexes: reading block [%llx:%x]\n",
				*start_block, *offset);
			goto failure;
		}

		for (i = 0; i < blocks; i++) {
			int size = squashfs_block_size(blist[i]);
			if (size < 0) {
				err = size;
				goto failure;
			}
			block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
		}
		n -= blocks;
	}

	kfree(blist);
	return block;

failure:
	kfree(blist);
	return err;
}


/*
 * Each cache index slot has SQUASHFS_META_ENTRIES, each of which
 * can cache one index -> datablock/blocklist-block mapping.  We wish
 * to distribute these over the length of the file, entry[0] maps index x,
 * entry[1] maps index x + skip, entry[2] maps index x + 2 * skip, and so on.
 * The larger the file, the greater the skip factor.  The skip factor is
 * limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure
 * the number of metadata blocks that need to be read fits into the cache.
 * If the skip factor is limited in this way then the file will use multiple
 * slots.
 */
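
/*
 * As a worked example (assuming the default 128 KiB block size, 8 KiB
 * metadata blocks, SQUASHFS_META_ENTRIES = 127 and SQUASHFS_META_INDEXES =
 * 2048): files under 32 GiB get a skip factor of 1, and at the maximum
 * skip factor of 7 a single slot indexes roughly 224 GiB, which across
 * all 8 slots gives the 1.75 TiB limit described at the top of this file.
 */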
static inline int calculate_skip(u64 blocks)
{
	u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
		* SQUASHFS_META_INDEXES);
	return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
}


/*
 * Search and grow the index cache for the specified inode, returning the
 * on-disk locations of the datablock and block list metadata block
 * <index_block, index_offset> for index (scaled to nearest cache index).
 */
static int fill_meta_index(struct inode *inode, int index,
		u64 *index_block, int *index_offset, u64 *data_block)
{
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int skip = calculate_skip(i_size_read(inode) >> msblk->block_log);
	int offset = 0;
	struct meta_index *meta;
	struct meta_entry *meta_entry;
	u64 cur_index_block = squashfs_i(inode)->block_list_start;
	int cur_offset = squashfs_i(inode)->offset;
	u64 cur_data_block = squashfs_i(inode)->start;
	int err, i;

	/*
	 * Scale index to cache index (cache slot entry)
	 */
	index /= SQUASHFS_META_INDEXES * skip;

	while (offset < index) {
		meta = locate_meta_index(inode, offset + 1, index);

		if (meta == NULL) {
			meta = empty_meta_index(inode, offset + 1, skip);
			if (meta == NULL)
				goto all_done;
		} else {
			offset = index < meta->offset + meta->entries ? index :
				meta->offset + meta->entries - 1;
			meta_entry = &meta->meta_entry[offset - meta->offset];
			cur_index_block = meta_entry->index_block +
				msblk->inode_table;
			cur_offset = meta_entry->offset;
			cur_data_block = meta_entry->data_block;
			TRACE("get_meta_index: offset %d, meta->offset %d, "
				"meta->entries %d\n", offset, meta->offset,
				meta->entries);
			TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
				" data_block 0x%llx\n", cur_index_block,
				cur_offset, cur_data_block);
		}

		/*
		 * If necessary grow cache slot by reading block list.  Cache
		 * slot is extended up to index or to the end of the slot, in
		 * which case further slots will be used.
		 */
		for (i = meta->offset + meta->entries; i <= index &&
				i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
			int blocks = skip * SQUASHFS_META_INDEXES;
			long long res = read_indexes(inode->i_sb, blocks,
					&cur_index_block, &cur_offset);

			if (res < 0) {
				if (meta->entries == 0)
					/*
					 * Don't leave an empty slot on read
					 * error allocated to this inode...
					 */
					meta->inode_number = 0;
				err = res;
				goto failed;
			}

			cur_data_block += res;
			meta_entry = &meta->meta_entry[i - meta->offset];
			meta_entry->index_block = cur_index_block -
				msblk->inode_table;
			meta_entry->offset = cur_offset;
			meta_entry->data_block = cur_data_block;
			meta->entries++;
			offset++;
		}

		TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
				meta->offset, meta->entries);

		release_meta_index(inode, meta);
	}

all_done:
	*index_block = cur_index_block;
	*index_offset = cur_offset;
	if (data_block)
		*data_block = cur_data_block;

	/*
	 * Scale cache index (cache slot entry) to index
	 */
	return offset * SQUASHFS_META_INDEXES * skip;

failed:
	release_meta_index(inode, meta);
	return err;
}


/*
 * Get the on-disk location and compressed size of the datablock
 * specified by index.  Fill_meta_index() does most of the work.
 */
static int read_blocklist_ptrs(struct inode *inode, int index, u64 *start,
		int *offset, u64 *block)
{
	long long blks;
	__le32 size;
	int res = fill_meta_index(inode, index, start, offset, block);

	TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset 0x%x, block 0x%llx\n",
		res, index, *start, *offset, block ? *block : 0);

	if (res < 0)
		return res;

	/*
	 * res contains the index of the mapping returned by fill_meta_index(),
	 * this will likely be less than the desired index (because the
	 * meta_index cache works at a higher granularity).  Read any
	 * extra block indexes needed.
	 */
	if (res < index) {
		blks = read_indexes(inode->i_sb, index - res, start, offset);
		if (blks < 0)
			return (int) blks;
		if (block)
			*block += blks;
	}

	/*
	 * Read length of block specified by index.
	 */
	res = squashfs_read_metadata(inode->i_sb, &size, start, offset,
			sizeof(size));
	if (res < 0)
		return res;
	return squashfs_block_size(size);
}
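
/*
 * Convenience wrapper for callers that only need the datablock location
 * and compressed size, not the position in the block list.
 */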
static inline int read_blocklist(struct inode *inode, int index, u64 *block)
{
	u64 start;
	int offset;

	return read_blocklist_ptrs(inode, index, &start, &offset, block);
}
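
/*
 * Copy avail bytes of data from the cache entry buffer into the folio's
 * page, zero-filling the remainder of the page.  Returns true if all
 * avail bytes were copied (i.e. the page contents are valid).
 */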
static bool squashfs_fill_page(struct folio *folio,
		struct squashfs_cache_entry *buffer, size_t offset,
		size_t avail)
{
	size_t copied;
	void *pageaddr;

	pageaddr = kmap_local_folio(folio, 0);
	copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
	memset(pageaddr + copied, 0, PAGE_SIZE - copied);
	kunmap_local(pageaddr);

	flush_dcache_folio(folio);

	return copied == avail;
}

/* Copy data into page cache */
void squashfs_copy_cache(struct folio *folio,
		struct squashfs_cache_entry *buffer, size_t bytes,
		size_t offset)
{
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int start_index = folio->index & ~mask, end_index = start_index | mask;

	/*
	 * Loop copying datablock into pages.  As the datablock likely covers
	 * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
	 * grab the pages from the page cache, except for the page that we've
	 * been called to fill.
	 */
	for (i = start_index; i <= end_index && bytes > 0; i++,
			bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
		struct folio *push_folio;
		size_t avail = buffer ? min(bytes, PAGE_SIZE) : 0;
		bool updated = false;

		TRACE("bytes %zu, i %d, available_bytes %zu\n", bytes, i, avail);

		push_folio = (i == folio->index) ? folio :
			__filemap_get_folio(mapping, i,
					FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
					mapping_gfp_mask(mapping));

		if (IS_ERR(push_folio))
			continue;

		if (folio_test_uptodate(push_folio))
			goto skip_folio;

		updated = squashfs_fill_page(push_folio, buffer, offset, avail);
skip_folio:
		folio_end_read(push_folio, updated);
		if (i != folio->index)
			folio_put(push_folio);
	}
}

/* Read datablock stored packed inside a fragment (tail-end packed block) */
static int squashfs_readpage_fragment(struct folio *folio, int expected)
{
	struct inode *inode = folio->mapping->host;
	struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
		squashfs_i(inode)->fragment_block,
		squashfs_i(inode)->fragment_size);
	int res = buffer->error;

	if (res)
		ERROR("Unable to read page, block %llx, size %x\n",
			squashfs_i(inode)->fragment_block,
			squashfs_i(inode)->fragment_size);
	else
		squashfs_copy_cache(folio, buffer, expected,
			squashfs_i(inode)->fragment_offset);

	squashfs_cache_put(buffer);
	return res;
}

static int squashfs_readpage_sparse(struct folio *folio, int expected)
{
	squashfs_copy_cache(folio, NULL, expected, 0);
	return 0;
}

static int squashfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int index = folio->index >> (msblk->block_log - PAGE_SHIFT);
	int file_end = i_size_read(inode) >> msblk->block_log;
	int expected = index == file_end ?
			(i_size_read(inode) & (msblk->block_size - 1)) :
			msblk->block_size;
	int res = 0;

	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
		folio->index, squashfs_i(inode)->start);

	if (folio->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
					PAGE_SHIFT))
		goto out;

	if (index < file_end || squashfs_i(inode)->fragment_block ==
					SQUASHFS_INVALID_BLK) {
		u64 block = 0;

		res = read_blocklist(inode, index, &block);
		if (res < 0)
			goto out;

		if (res == 0)
			res = squashfs_readpage_sparse(folio, expected);
		else
			res = squashfs_readpage_block(folio, block, res, expected);
	} else
		res = squashfs_readpage_fragment(folio, expected);

	if (!res)
		return 0;

out:
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_end_read(folio, res == 0);

	return res;
}
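
/*
 * Readahead: datablocks are decompressed directly into the batch of pages
 * grabbed from the readahead window.  On any failure the pages are simply
 * unlocked without being marked uptodate, so the data will be fetched
 * again through ->read_folio() if it is actually needed.
 */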
static int squashfs_readahead_fragment(struct inode *inode, struct page **page,
	unsigned int pages, unsigned int expected, loff_t start)
{
	struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
		squashfs_i(inode)->fragment_block,
		squashfs_i(inode)->fragment_size);
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int i, bytes, copied;
	struct squashfs_page_actor *actor;
	unsigned int offset;
	void *addr;
	struct page *last_page;

	if (buffer->error)
		goto out;

	actor = squashfs_page_actor_init_special(msblk, page, pages,
			expected, start);
	if (!actor)
		goto out;

	squashfs_actor_nobuff(actor);
	addr = squashfs_first_page(actor);

	for (copied = offset = 0; offset < expected; offset += PAGE_SIZE) {
		int avail = min_t(int, expected - offset, PAGE_SIZE);

		if (!IS_ERR(addr)) {
			bytes = squashfs_copy_data(addr, buffer, offset +
					squashfs_i(inode)->fragment_offset, avail);

			if (bytes != avail)
				goto failed;
		}

		copied += avail;
		addr = squashfs_next_page(actor);
	}

	last_page = squashfs_page_actor_free(actor);

	if (copied == expected && !IS_ERR(last_page)) {
		/* Last page (if present) may have trailing bytes not filled */
		bytes = copied % PAGE_SIZE;
		if (bytes && last_page)
			memzero_page(last_page, bytes, PAGE_SIZE - bytes);

		for (i = 0; i < pages; i++) {
			flush_dcache_page(page[i]);
			SetPageUptodate(page[i]);
		}
	}

	for (i = 0; i < pages; i++) {
		unlock_page(page[i]);
		put_page(page[i]);
	}

	squashfs_cache_put(buffer);
	return 0;

failed:
	squashfs_page_actor_free(actor);

out:
	squashfs_cache_put(buffer);
	return 1;
}

static void squashfs_readahead(struct readahead_control *ractl)
{
	struct inode *inode = ractl->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	size_t mask = (1UL << msblk->block_log) - 1;
	unsigned short shift = msblk->block_log - PAGE_SHIFT;
	loff_t start = readahead_pos(ractl) & ~mask;
	size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;
	struct squashfs_page_actor *actor;
	unsigned int nr_pages = 0;
	struct page **pages;
	int i;
	loff_t file_end = i_size_read(inode) >> msblk->block_log;
	unsigned int max_pages = 1UL << shift;

	readahead_expand(ractl, start, (len | mask) + 1);

	pages = kmalloc_array(max_pages, sizeof(void *), GFP_KERNEL);
	if (!pages)
		return;

	for (;;) {
		int res, bsize;
		u64 block = 0;
		unsigned int expected;
		struct page *last_page;

		expected = start >> msblk->block_log == file_end ?
			(i_size_read(inode) & (msblk->block_size - 1)) :
			msblk->block_size;

		max_pages = (expected + PAGE_SIZE - 1) >> PAGE_SHIFT;

		nr_pages = __readahead_batch(ractl, pages, max_pages);
		if (!nr_pages)
			break;

		if (readahead_pos(ractl) >= i_size_read(inode))
			goto skip_pages;

		if (start >> msblk->block_log == file_end &&
				squashfs_i(inode)->fragment_block != SQUASHFS_INVALID_BLK) {
			res = squashfs_readahead_fragment(inode, pages,
					nr_pages, expected, start);
			if (res)
				goto skip_pages;
			continue;
		}

		bsize = read_blocklist(inode, start >> msblk->block_log, &block);
		if (bsize == 0)
			goto skip_pages;

		actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
				expected, start);
		if (!actor)
			goto skip_pages;

		res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);

		last_page = squashfs_page_actor_free(actor);

		if (res == expected && !IS_ERR(last_page)) {
			int bytes;

			/* Last page (if present) may have trailing bytes not filled */
			bytes = res % PAGE_SIZE;
			if (start >> msblk->block_log == file_end && bytes && last_page)
				memzero_page(last_page, bytes,
						PAGE_SIZE - bytes);

			for (i = 0; i < nr_pages; i++) {
				flush_dcache_page(pages[i]);
				SetPageUptodate(pages[i]);
			}
		}

		for (i = 0; i < nr_pages; i++) {
			unlock_page(pages[i]);
			put_page(pages[i]);
		}

		start += readahead_batch_length(ractl);
	}

	kfree(pages);
	return;

skip_pages:
	for (i = 0; i < nr_pages; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	kfree(pages);
}
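
/*
 * SEEK_DATA/SEEK_HOLE support.  Squashfs has no explicit hole encoding;
 * a sparse block is recorded as a zero-length entry in the block list,
 * so holes are found by scanning block list entries.  A tail-end packed
 * into a fragment always counts as data.
 */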
static loff_t seek_hole_data(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct super_block *sb = inode->i_sb;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	u64 start, index = offset >> msblk->block_log;
	u64 file_end = (i_size_read(inode) + msblk->block_size - 1) >> msblk->block_log;
	int s_offset, length;
	__le32 *blist = NULL;

	/* reject offset if negative or beyond file end */
	if ((unsigned long long)offset >= i_size_read(inode))
		return -ENXIO;

	/* is offset within tailend and is tailend packed into a fragment? */
	if (index + 1 == file_end &&
			squashfs_i(inode)->fragment_block != SQUASHFS_INVALID_BLK) {
		if (whence == SEEK_DATA)
			return offset;

		/* there is an implicit hole at the end of any file */
		return i_size_read(inode);
	}

	length = read_blocklist_ptrs(inode, index, &start, &s_offset, NULL);
	if (length < 0)
		return length;

	/* nothing more to do if offset matches desired whence value */
	if ((length == 0 && whence == SEEK_HOLE) ||
			(length && whence == SEEK_DATA))
		return offset;

	/* skip scanning forwards if we're at file end */
	if (++index == file_end)
		goto not_found;

	blist = kmalloc(SQUASHFS_SCAN_INDEXES << 2, GFP_KERNEL);
	if (blist == NULL) {
		ERROR("%s: Failed to allocate block_list\n", __func__);
		return -ENOMEM;
	}

	while (index < file_end) {
		int i, indexes = min(file_end - index, SQUASHFS_SCAN_INDEXES);

		offset = squashfs_read_metadata(sb, blist, &start, &s_offset, indexes << 2);
		if (offset < 0)
			goto finished;

		for (i = 0; i < indexes; i++) {
			length = squashfs_block_size(blist[i]);
			if (length < 0) {
				offset = length;
				goto finished;
			}

			/* does this block match desired whence value? */
			if ((length == 0 && whence == SEEK_HOLE) ||
					(length && whence == SEEK_DATA)) {
				offset = (index + i) << msblk->block_log;
				goto finished;
			}
		}

		index += indexes;
	}

not_found:
	/* whence value determines what happens */
	if (whence == SEEK_DATA)
		offset = -ENXIO;
	else
		/* there is an implicit hole at the end of any file */
		offset = i_size_read(inode);

finished:
	kfree(blist);
	return offset;
}

static loff_t squashfs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
	case SEEK_HOLE:
		offset = seek_hole_data(file, offset, whence);
		break;
	}

	if (offset < 0)
		return offset;

	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

const struct address_space_operations squashfs_aops = {
	.read_folio = squashfs_read_folio,
	.readahead = squashfs_readahead
};

const struct file_operations squashfs_file_operations = {
	.llseek = squashfs_llseek,
	.read_iter = generic_file_read_iter,
	.mmap_prepare = generic_file_readonly_mmap_prepare,
	.splice_read = filemap_splice_read,
	.setlease = generic_setlease,
};