// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * file.c
 */

/*
 * This file contains code for handling regular files.  A regular file
 * consists of a sequence of contiguous compressed blocks, and/or a
 * compressed fragment block (tail-end packed block).  The compressed size
 * of each datablock is stored in a block list contained within the
 * file inode (itself stored in one or more compressed metadata blocks).
 *
 * To speed up access to datablocks when reading 'large' files (256 Mbytes or
 * larger), the code implements an index cache that caches the mapping from
 * block index to datablock location on disk.
 *
 * The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
 * retaining a simple and space-efficient block list on disk.  The cache
 * is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
 * Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
 * The index cache is designed to be memory efficient, and by default uses
 * 16 KiB.
 */
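
/*
 * Rough arithmetic behind the sizes quoted above (a sketch; the exact
 * constants live in squashfs_fs.h): a slot holds SQUASHFS_META_ENTRIES
 * (127) entries, and consecutive entries are skip * SQUASHFS_META_INDEXES
 * blocks apart, where SQUASHFS_META_INDEXES is 2048 for 8 KiB metadata
 * blocks.  At the maximum skip of 7 one slot therefore spans
 * 127 * 7 * 2048 ~= 1.8 million blocks, or ~222 GiB of file data at the
 * default 128 KiB block size, and the 8 slots together give ~1.75 TiB.
 */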

#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "page_actor.h"

/*
 * Locate cache slot in range [offset, index] for specified inode.  If
 * there's more than one return the slot closest to index.
 */
static struct meta_index *locate_meta_index(struct inode *inode, int offset,
				int index)
{
	struct meta_index *meta = NULL;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int i;

	mutex_lock(&msblk->meta_index_mutex);

	TRACE("locate_meta_index: index %d, offset %d\n", index, offset);

	if (msblk->meta_index == NULL)
		goto not_allocated;

	for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
		if (msblk->meta_index[i].inode_number == inode->i_ino &&
				msblk->meta_index[i].offset >= offset &&
				msblk->meta_index[i].offset <= index &&
				msblk->meta_index[i].locked == 0) {
			TRACE("locate_meta_index: entry %d, offset %d\n", i,
					msblk->meta_index[i].offset);
			meta = &msblk->meta_index[i];
			offset = meta->offset;
		}
	}

	if (meta)
		meta->locked = 1;

not_allocated:
	mutex_unlock(&msblk->meta_index_mutex);

	return meta;
}


/*
 * Find and initialise an empty cache slot for index offset.
 */
static struct meta_index *empty_meta_index(struct inode *inode, int offset,
				int skip)
{
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	struct meta_index *meta = NULL;
	int i;

	mutex_lock(&msblk->meta_index_mutex);

	TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);

	if (msblk->meta_index == NULL) {
		/*
		 * First time cache index has been used, allocate and
		 * initialise.  The cache index could be allocated at
		 * mount time but doing it here means it is allocated only
		 * if a 'large' file is read.
		 */
		msblk->meta_index = kzalloc_objs(*(msblk->meta_index),
						 SQUASHFS_META_SLOTS,
						 GFP_KERNEL);
		if (msblk->meta_index == NULL) {
			ERROR("Failed to allocate meta_index\n");
			goto failed;
		}
		for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
			msblk->meta_index[i].inode_number = 0;
			msblk->meta_index[i].locked = 0;
		}
		msblk->next_meta_index = 0;
	}

	for (i = SQUASHFS_META_SLOTS; i &&
			msblk->meta_index[msblk->next_meta_index].locked; i--)
		msblk->next_meta_index = (msblk->next_meta_index + 1) %
			SQUASHFS_META_SLOTS;

	if (i == 0) {
		TRACE("empty_meta_index: failed!\n");
		goto failed;
	}

	TRACE("empty_meta_index: returned meta entry %d, %p\n",
			msblk->next_meta_index,
			&msblk->meta_index[msblk->next_meta_index]);

	meta = &msblk->meta_index[msblk->next_meta_index];
	msblk->next_meta_index = (msblk->next_meta_index + 1) %
			SQUASHFS_META_SLOTS;

	meta->inode_number = inode->i_ino;
	meta->offset = offset;
	meta->skip = skip;
	meta->entries = 0;
	meta->locked = 1;

failed:
	mutex_unlock(&msblk->meta_index_mutex);
	return meta;
}


static void release_meta_index(struct inode *inode, struct meta_index *meta)
{
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	mutex_lock(&msblk->meta_index_mutex);
	meta->locked = 0;
	mutex_unlock(&msblk->meta_index_mutex);
}


/*
 * Read the next n blocks from the block list, starting from
 * metadata block <start_block, offset>.
 */
static long long read_indexes(struct super_block *sb, int n,
				u64 *start_block, int *offset)
{
	int err, i;
	long long block = 0;
	__le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (blist == NULL) {
		ERROR("read_indexes: Failed to allocate block_list\n");
		return -ENOMEM;
	}

	while (n) {
		int blocks = min_t(int, n, PAGE_SIZE >> 2);

		err = squashfs_read_metadata(sb, blist, start_block,
						offset, blocks << 2);
		if (err < 0) {
			ERROR("read_indexes: reading block [%llx:%x]\n",
				*start_block, *offset);
			goto failure;
		}

		for (i = 0; i < blocks; i++) {
			int size = squashfs_block_size(blist[i]);

			if (size < 0) {
				err = size;
				goto failure;
			}
			block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
		}
		n -= blocks;
	}

	kfree(blist);
	return block;

failure:
	kfree(blist);
	return err;
}
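
/*
 * Note on the block list encoding summed above (see squashfs_fs.h): each
 * __le32 entry holds the on-disk length of one datablock, with
 * SQUASHFS_COMPRESSED_BIT_BLOCK (bit 24) set when the block is stored
 * uncompressed.  For example, 0x01020000 is an uncompressed 128 KiB block
 * and 0x00008000 is a block compressed down to 32 KiB; a zero entry is a
 * sparse block.  SQUASHFS_COMPRESSED_SIZE_BLOCK() masks the flag bit off,
 * so read_indexes() can simply sum the masked lengths to find where the
 * next datablock starts on disk.
 */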

/*
 * Each cache index slot has SQUASHFS_META_ENTRIES, each of which
 * can cache one index -> datablock/blocklist-block mapping.  We wish
 * to distribute these over the length of the file, entry[0] maps index x,
 * entry[1] maps index x + skip, entry[2] maps index x + 2 * skip, and so on.
 * The larger the file, the greater the skip factor.  The skip factor is
 * limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure
 * the number of metadata blocks that need to be read fits into the cache.
 * If the skip factor is limited in this way then the file will use multiple
 * slots.
 */
static inline int calculate_skip(u64 blocks)
{
	u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
		* SQUASHFS_META_INDEXES);
	return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
}
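
/*
 * Worked example, assuming the default constants (SQUASHFS_META_ENTRIES
 * 127, SQUASHFS_META_INDEXES 2048, SQUASHFS_CACHED_BLKS 8): a 1 GiB file
 * with 128 KiB blocks has 8192 blocks, so 8192 / (128 * 2048) = 0 and the
 * returned skip is 1, meaning a single slot covers the file with entries
 * to spare.  A 500 GiB file has ~4.1 million blocks, giving 15 + 1 = 16,
 * which is clamped to SQUASHFS_CACHED_BLKS - 1 = 7; such a file spills
 * over into multiple slots instead.
 */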

/*
 * Search and grow the index cache for the specified inode, returning the
 * on-disk locations of the datablock and block list metadata block
 * <index_block, index_offset> for index (scaled to nearest cache index).
 */
static int fill_meta_index(struct inode *inode, int index,
		u64 *index_block, int *index_offset, u64 *data_block)
{
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int skip = calculate_skip(i_size_read(inode) >> msblk->block_log);
	int offset = 0;
	struct meta_index *meta;
	struct meta_entry *meta_entry;
	u64 cur_index_block = squashfs_i(inode)->block_list_start;
	int cur_offset = squashfs_i(inode)->offset;
	u64 cur_data_block = squashfs_i(inode)->start;
	int err, i;

	/*
	 * Scale index to cache index (cache slot entry)
	 */
	index /= SQUASHFS_META_INDEXES * skip;

	while (offset < index) {
		meta = locate_meta_index(inode, offset + 1, index);

		if (meta == NULL) {
			meta = empty_meta_index(inode, offset + 1, skip);
			if (meta == NULL)
				goto all_done;
		} else {
			offset = index < meta->offset + meta->entries ? index :
				meta->offset + meta->entries - 1;
			meta_entry = &meta->meta_entry[offset - meta->offset];
			cur_index_block = meta_entry->index_block +
				msblk->inode_table;
			cur_offset = meta_entry->offset;
			cur_data_block = meta_entry->data_block;
			TRACE("get_meta_index: offset %d, meta->offset %d, "
				"meta->entries %d\n", offset, meta->offset,
				meta->entries);
			TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
				" data_block 0x%llx\n", cur_index_block,
				cur_offset, cur_data_block);
		}

		/*
		 * If necessary grow cache slot by reading block list.  Cache
		 * slot is extended up to index or to the end of the slot, in
		 * which case further slots will be used.
		 */
		for (i = meta->offset + meta->entries; i <= index &&
				i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
			int blocks = skip * SQUASHFS_META_INDEXES;
			long long res = read_indexes(inode->i_sb, blocks,
					&cur_index_block, &cur_offset);

			if (res < 0) {
				if (meta->entries == 0)
					/*
					 * Don't leave an empty slot on read
					 * error allocated to this inode...
					 */
					meta->inode_number = 0;
				err = res;
				goto failed;
			}

			cur_data_block += res;
			meta_entry = &meta->meta_entry[i - meta->offset];
			meta_entry->index_block = cur_index_block -
				msblk->inode_table;
			meta_entry->offset = cur_offset;
			meta_entry->data_block = cur_data_block;
			meta->entries++;
			offset++;
		}

		TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
				meta->offset, meta->entries);

		release_meta_index(inode, meta);
	}

all_done:
	*index_block = cur_index_block;
	*index_offset = cur_offset;
	if (data_block)
		*data_block = cur_data_block;

	/*
	 * Scale cache index (cache slot entry) to index
	 */
	return offset * SQUASHFS_META_INDEXES * skip;

failed:
	release_meta_index(inode, meta);
	return err;
}


/*
 * Get the on-disk location and compressed size of the datablock
 * specified by index.  Fill_meta_index() does most of the work.
 */
static int read_blocklist_ptrs(struct inode *inode, int index, u64 *start,
			       int *offset, u64 *block)
{
	long long blks;
	__le32 size;
	int res = fill_meta_index(inode, index, start, offset, block);

	TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset 0x%x, block 0x%llx\n",
	      res, index, *start, *offset, block ? *block : 0);

	if (res < 0)
		return res;

	/*
	 * res contains the index of the mapping returned by fill_meta_index(),
	 * this will likely be less than the desired index (because the
	 * meta_index cache works at a higher granularity).  Read any
	 * extra block indexes needed.
	 */
	if (res < index) {
		blks = read_indexes(inode->i_sb, index - res, start, offset);
		if (blks < 0)
			return (int) blks;
		if (block)
			*block += blks;
	}

	/*
	 * Read length of block specified by index.
	 */
	res = squashfs_read_metadata(inode->i_sb, &size, start, offset,
			sizeof(size));
	if (res < 0)
		return res;
	return squashfs_block_size(size);
}

static inline int read_blocklist(struct inode *inode, int index, u64 *block)
{
	u64 start;
	int offset;

	return read_blocklist_ptrs(inode, index, &start, &offset, block);
}
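
/*
 * Lookup cost note: fill_meta_index() only resolves to cache entry
 * granularity (skip * SQUASHFS_META_INDEXES block list entries), so a
 * single lookup may still need to scan up to skip * SQUASHFS_META_INDEXES
 * - 1 trailing indexes with read_indexes() before the wanted block length
 * can be read.  This bounds the worst-case work per lookup while keeping
 * each cache slot down to SQUASHFS_META_ENTRIES entries.
 */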

static bool squashfs_fill_page(struct folio *folio,
		struct squashfs_cache_entry *buffer, size_t offset,
		size_t avail)
{
	size_t copied;
	void *pageaddr;

	pageaddr = kmap_local_folio(folio, 0);
	copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
	memset(pageaddr + copied, 0, PAGE_SIZE - copied);
	kunmap_local(pageaddr);

	flush_dcache_folio(folio);

	return copied == avail;
}

/* Copy data into page cache */
void squashfs_copy_cache(struct folio *folio,
		struct squashfs_cache_entry *buffer, size_t bytes,
		size_t offset)
{
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int start_index = folio->index & ~mask, end_index = start_index | mask;

	/*
	 * Loop copying datablock into pages.  As the datablock likely covers
	 * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
	 * grab the pages from the page cache, except for the page that we've
	 * been called to fill.
	 */
	for (i = start_index; i <= end_index && bytes > 0; i++,
			bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
		struct folio *push_folio;
		size_t avail = buffer ? min(bytes, PAGE_SIZE) : 0;
		bool updated = false;

		TRACE("bytes %zu, i %d, available_bytes %zu\n", bytes, i, avail);

		push_folio = (i == folio->index) ? folio :
			__filemap_get_folio(mapping, i,
					FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
					mapping_gfp_mask(mapping));

		if (IS_ERR(push_folio))
			continue;

		if (folio_test_uptodate(push_folio))
			goto skip_folio;

		updated = squashfs_fill_page(push_folio, buffer, offset, avail);
skip_folio:
		folio_end_read(push_folio, updated);
		if (i != folio->index)
			folio_put(push_folio);
	}
}

/* Read datablock stored packed inside a fragment (tail-end packed block) */
static int squashfs_readpage_fragment(struct folio *folio, int expected)
{
	struct inode *inode = folio->mapping->host;
	struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
		squashfs_i(inode)->fragment_block,
		squashfs_i(inode)->fragment_size);
	int res = buffer->error;

	if (res)
		ERROR("Unable to read page, block %llx, size %x\n",
			squashfs_i(inode)->fragment_block,
			squashfs_i(inode)->fragment_size);
	else
		squashfs_copy_cache(folio, buffer, expected,
			squashfs_i(inode)->fragment_offset);

	squashfs_cache_put(buffer);
	return res;
}

static int squashfs_readpage_sparse(struct folio *folio, int expected)
{
	squashfs_copy_cache(folio, NULL, expected, 0);
	return 0;
}

static int squashfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int index = folio->index >> (msblk->block_log - PAGE_SHIFT);
	int file_end = i_size_read(inode) >> msblk->block_log;
	int expected = index == file_end ?
			(i_size_read(inode) & (msblk->block_size - 1)) :
			 msblk->block_size;
	int res = 0;

	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
		folio->index, squashfs_i(inode)->start);

	if (folio->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
					PAGE_SHIFT))
		goto out;

	if (index < file_end || squashfs_i(inode)->fragment_block ==
					SQUASHFS_INVALID_BLK) {
		u64 block = 0;

		res = read_blocklist(inode, index, &block);
		if (res < 0)
			goto out;

		if (res == 0)
			res = squashfs_readpage_sparse(folio, expected);
		else
			res = squashfs_readpage_block(folio, block, res, expected);
	} else
		res = squashfs_readpage_fragment(folio, expected);

	if (!res)
		return 0;

out:
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_end_read(folio, res == 0);

	return res;
}
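
/*
 * Sketch of the index arithmetic above, assuming the default 128 KiB
 * block size and 4 KiB pages (block_log 17, PAGE_SHIFT 12): mask is 31,
 * so a read_folio() call for page index 37 maps to datablock
 * 37 >> 5 = 1, and squashfs_copy_cache() pushes the decompressed block
 * into pages 32..63 (start_index = 37 & ~31 = 32, end_index = 63) in one
 * pass, populating the surrounding page cache as a side effect.
 */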

static int squashfs_readahead_fragment(struct inode *inode, struct page **page,
	unsigned int pages, unsigned int expected, loff_t start)
{
	struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
		squashfs_i(inode)->fragment_block,
		squashfs_i(inode)->fragment_size);
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int i, bytes, copied;
	struct squashfs_page_actor *actor;
	unsigned int offset;
	void *addr;
	struct page *last_page;

	if (buffer->error)
		goto out;

	actor = squashfs_page_actor_init_special(msblk, page, pages,
						 expected, start);
	if (!actor)
		goto out;

	squashfs_actor_nobuff(actor);
	addr = squashfs_first_page(actor);

	for (copied = offset = 0; offset < expected; offset += PAGE_SIZE) {
		int avail = min_t(int, expected - offset, PAGE_SIZE);

		if (!IS_ERR(addr)) {
			bytes = squashfs_copy_data(addr, buffer, offset +
					squashfs_i(inode)->fragment_offset, avail);

			if (bytes != avail)
				goto failed;
		}

		copied += avail;
		addr = squashfs_next_page(actor);
	}

	last_page = squashfs_page_actor_free(actor);

	if (copied == expected && !IS_ERR(last_page)) {
		/* Last page (if present) may have trailing bytes not filled */
		bytes = copied % PAGE_SIZE;
		if (bytes && last_page)
			memzero_page(last_page, bytes, PAGE_SIZE - bytes);

		for (i = 0; i < pages; i++) {
			flush_dcache_page(page[i]);
			SetPageUptodate(page[i]);
		}
	}

	for (i = 0; i < pages; i++) {
		unlock_page(page[i]);
		put_page(page[i]);
	}

	squashfs_cache_put(buffer);
	return 0;

failed:
	squashfs_page_actor_free(actor);

out:
	squashfs_cache_put(buffer);
	return 1;
}

static void squashfs_readahead(struct readahead_control *ractl)
{
	struct inode *inode = ractl->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	size_t mask = (1UL << msblk->block_log) - 1;
	unsigned short shift = msblk->block_log - PAGE_SHIFT;
	loff_t start = readahead_pos(ractl) & ~mask;
	size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;
	struct squashfs_page_actor *actor;
	unsigned int nr_pages = 0;
	struct page **pages;
	int i;
	loff_t file_end = i_size_read(inode) >> msblk->block_log;
	unsigned int max_pages = 1UL << shift;

	readahead_expand(ractl, start, (len | mask) + 1);

	pages = kmalloc_array(max_pages, sizeof(void *), GFP_KERNEL);
	if (!pages)
		return;

	for (;;) {
		int res, bsize;
		u64 block = 0;
		unsigned int expected;
		struct page *last_page;

		expected = start >> msblk->block_log == file_end ?
			   (i_size_read(inode) & (msblk->block_size - 1)) :
			    msblk->block_size;

		max_pages = (expected + PAGE_SIZE - 1) >> PAGE_SHIFT;

		nr_pages = __readahead_batch(ractl, pages, max_pages);
		if (!nr_pages)
			break;

		if (readahead_pos(ractl) >= i_size_read(inode))
			goto skip_pages;

		if (start >> msblk->block_log == file_end &&
				squashfs_i(inode)->fragment_block != SQUASHFS_INVALID_BLK) {
			res = squashfs_readahead_fragment(inode, pages,
					nr_pages, expected, start);
			if (res)
				goto skip_pages;
			continue;
		}

		bsize = read_blocklist(inode, start >> msblk->block_log, &block);
		if (bsize == 0)
			goto skip_pages;

		actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
							 expected, start);
		if (!actor)
			goto skip_pages;

		res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);

		last_page = squashfs_page_actor_free(actor);

		if (res == expected && !IS_ERR(last_page)) {
			int bytes;

			/* Last page (if present) may have trailing bytes not filled */
			bytes = res % PAGE_SIZE;
			if (start >> msblk->block_log == file_end && bytes && last_page)
				memzero_page(last_page, bytes,
					     PAGE_SIZE - bytes);

			for (i = 0; i < nr_pages; i++) {
				flush_dcache_page(pages[i]);
				SetPageUptodate(pages[i]);
			}
		}

		for (i = 0; i < nr_pages; i++) {
			unlock_page(pages[i]);
			put_page(pages[i]);
		}

		start += readahead_batch_length(ractl);
	}

	kfree(pages);
	return;

skip_pages:
	for (i = 0; i < nr_pages; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	kfree(pages);
}
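
/*
 * Example of the readahead alignment above, again assuming 128 KiB blocks
 * (block_log 17): a request at pos 0x25000 of length 0x10000 gives
 * mask = 0x1ffff, start = 0x20000 and len = 0x15000, and
 * readahead_expand() is asked for (len | mask) + 1 = 0x20000 bytes,
 * i.e. the request is widened to the one whole block that covers it, so
 * each loop iteration can decompress a complete block straight into the
 * page batch.
 */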

static loff_t seek_hole_data(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct super_block *sb = inode->i_sb;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	u64 start, index = offset >> msblk->block_log;
	u64 file_end = (i_size_read(inode) + msblk->block_size - 1) >> msblk->block_log;
	int s_offset, length;
	__le32 *blist = NULL;

	/* reject offset if negative or beyond file end */
	if ((unsigned long long)offset >= i_size_read(inode))
		return -ENXIO;

	/* is offset within tailend and is tailend packed into a fragment? */
	if (index + 1 == file_end &&
			squashfs_i(inode)->fragment_block != SQUASHFS_INVALID_BLK) {
		if (whence == SEEK_DATA)
			return offset;

		/* there is an implicit hole at the end of any file */
		return i_size_read(inode);
	}

	length = read_blocklist_ptrs(inode, index, &start, &s_offset, NULL);
	if (length < 0)
		return length;

	/* nothing more to do if offset matches desired whence value */
	if ((length == 0 && whence == SEEK_HOLE) ||
	    (length && whence == SEEK_DATA))
		return offset;

	/* skip scanning forwards if we're at file end */
	if (++index == file_end)
		goto not_found;

	blist = kmalloc(SQUASHFS_SCAN_INDEXES << 2, GFP_KERNEL);
	if (blist == NULL) {
		ERROR("%s: Failed to allocate block_list\n", __func__);
		return -ENOMEM;
	}

	while (index < file_end) {
		int i, indexes = min(file_end - index, SQUASHFS_SCAN_INDEXES);

		offset = squashfs_read_metadata(sb, blist, &start, &s_offset, indexes << 2);
		if (offset < 0)
			goto finished;

		for (i = 0; i < indexes; i++) {
			length = squashfs_block_size(blist[i]);
			if (length < 0) {
				offset = length;
				goto finished;
			}

			/* does this block match desired whence value? */
			if ((length == 0 && whence == SEEK_HOLE) ||
			    (length && whence == SEEK_DATA)) {
				offset = (index + i) << msblk->block_log;
				goto finished;
			}
		}

		index += indexes;
	}

not_found:
	/* whence value determines what happens */
	if (whence == SEEK_DATA)
		offset = -ENXIO;
	else
		/* there is an implicit hole at the end of any file */
		offset = i_size_read(inode);

finished:
	kfree(blist);
	return offset;
}

static loff_t squashfs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
	case SEEK_HOLE:
		offset = seek_hole_data(file, offset, whence);
		break;
	}

	if (offset < 0)
		return offset;

	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

const struct address_space_operations squashfs_aops = {
	.read_folio = squashfs_read_folio,
	.readahead = squashfs_readahead
};

const struct file_operations squashfs_file_operations = {
	.llseek = squashfs_llseek,
	.read_iter = generic_file_read_iter,
	.mmap_prepare = generic_file_readonly_mmap_prepare,
	.splice_read = filemap_splice_read,
	.setlease = generic_setlease,
};
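
/*
 * Userspace view of the SEEK_DATA/SEEK_HOLE support above (illustrative):
 * squashfs reports data and holes at block_size granularity, so on a
 * fully-populated file lseek(fd, 0, SEEK_DATA) returns 0 and
 * lseek(fd, 0, SEEK_HOLE) returns the file size (the implicit hole at
 * EOF), while seeking for data at or beyond EOF fails with ENXIO, as
 * lseek(2) requires.
 */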