/*
 * Compressed rom filesystem for Linux.
 *
 * Copyright (C) 1999 Linus Torvalds.
 *
 * This file is released under the GPL.
 */

/*
 * These are the VFS interfaces to the compressed rom filesystem.
 * The actual compression is based on zlib, see the other files.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/ramfs.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/super.h>
#include <linux/fs_context.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/mutex.h>
#include <uapi/linux/cramfs_fs.h>
#include <linux/uaccess.h>

#include "internal.h"

/*
 * cramfs super-block data in memory
 */
struct cramfs_sb_info {
	unsigned long magic;
	unsigned long size;
	unsigned long blocks;
	unsigned long files;
	unsigned long flags;
	void *linear_virt_addr;
	resource_size_t linear_phys_addr;
	size_t mtd_point_size;
};

static inline struct cramfs_sb_info *CRAMFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static const struct super_operations cramfs_ops;
static const struct inode_operations cramfs_dir_inode_operations;
static const struct file_operations cramfs_directory_operations;
static const struct file_operations cramfs_physmem_fops;
static const struct address_space_operations cramfs_aops;

static DEFINE_MUTEX(read_mutex);


/* These macros may change in future, to provide better st_ino semantics. */
#define OFFSET(x)	((x)->i_ino)

static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
{
	if (!cino->offset)
		return offset + 1;
	if (!cino->size)
		return offset + 1;

	/*
	 * The file mode test fixes buggy mkcramfs implementations where
	 * cramfs_inode->offset is set to a non zero value for entries
	 * which did not contain data, like device nodes and fifos.
	 */
	switch (cino->mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		return cino->offset << 2;
	default:
		break;
	}
	return offset + 1;
}
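
/*
 * Worked example (illustrative, not from the original source): a regular
 * file whose block pointer table starts at image byte 0x1230 has
 * cramfs_inode->offset == 0x48c, so cramino() yields an inode number of
 * 0x1230 with both low bits clear.  A device node or empty file whose
 * on-disk inode sits at image offset 0x1f4 gets 0x1f5 instead, which is
 * how get_cramfs_inode() below can tell the two cases apart by testing
 * (i_ino & 3).
 */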

static struct inode *get_cramfs_inode(struct super_block *sb,
	const struct cramfs_inode *cramfs_inode, unsigned int offset)
{
	struct inode *inode;
	static struct timespec64 zerotime;

	inode = iget_locked(sb, cramino(cramfs_inode, offset));
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	switch (cramfs_inode->mode & S_IFMT) {
	case S_IFREG:
		inode->i_fop = &generic_ro_fops;
		inode->i_data.a_ops = &cramfs_aops;
		if (IS_ENABLED(CONFIG_CRAMFS_MTD) &&
		    CRAMFS_SB(sb)->flags & CRAMFS_FLAG_EXT_BLOCK_POINTERS &&
		    CRAMFS_SB(sb)->linear_phys_addr)
			inode->i_fop = &cramfs_physmem_fops;
		break;
	case S_IFDIR:
		inode->i_op = &cramfs_dir_inode_operations;
		inode->i_fop = &cramfs_directory_operations;
		break;
	case S_IFLNK:
		inode->i_op = &page_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_data.a_ops = &cramfs_aops;
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		init_special_inode(inode, cramfs_inode->mode,
				   old_decode_dev(cramfs_inode->size));
		break;
	default:
		printk(KERN_DEBUG "CRAMFS: Invalid file type 0%04o for inode %lu.\n",
		       cramfs_inode->mode, inode->i_ino);
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}

	inode->i_mode = cramfs_inode->mode;
	i_uid_write(inode, cramfs_inode->uid);
	i_gid_write(inode, cramfs_inode->gid);

	/* if the lower 2 bits are zero, the inode contains data */
	if (!(inode->i_ino & 3)) {
		inode->i_size = cramfs_inode->size;
		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
	}

	/* Struct copy intentional */
	inode_set_mtime_to_ts(inode,
			      inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, zerotime)));
	/* inode->i_nlink is left 1 - arguably wrong for directories,
	   but it's the best we can do without reading the directory
	   contents.  1 yields the right result in GNU find, even
	   without -noleaf option. */

	unlock_new_inode(inode);

	return inode;
}

/*
 * We have our own block cache: don't fill up the buffer cache
 * with the rom-image, because the way the filesystem is set
 * up the accesses should be fairly regular and cached in the
 * page cache and dentry tree anyway..
 *
 * This also acts as a way to guarantee contiguous areas of up to
 * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
 * worry about end-of-buffer issues even when decompressing a full
 * page cache.
 *
 * Note: This is all optimized away at compile time when
 *       CONFIG_CRAMFS_BLOCKDEV=n.
 */
#define READ_BUFFERS (2)
/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
#define NEXT_BUFFER(_ix) ((_ix) ^ 1)

/*
 * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
 * data that takes up more space than the original and with unlucky
 * alignment.
 */
#define BLKS_PER_BUF_SHIFT	(2)
#define BLKS_PER_BUF		(1 << BLKS_PER_BUF_SHIFT)
#define BUFFER_SIZE		(BLKS_PER_BUF*PAGE_SIZE)

static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
static unsigned buffer_blocknr[READ_BUFFERS];
static struct super_block *buffer_dev[READ_BUFFERS];
static int next_buffer;
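
/*
 * Cache geometry sketch (added for clarity, assuming 4 KiB pages): each of
 * the two read_buffers[] spans BLKS_PER_BUF * PAGE_SIZE == 16 KiB of the
 * image, so even a block whose "compressed" form is slightly larger than
 * PAGE_SIZE and starts near the end of a device page is still handed back
 * to the caller as one contiguous chunk.
 */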

/*
 * Populate our block cache and return a pointer to it.
 */
static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
				unsigned int len)
{
	struct address_space *mapping = sb->s_bdev->bd_mapping;
	struct file_ra_state ra = {};
	struct page *pages[BLKS_PER_BUF];
	unsigned i, blocknr, buffer;
	unsigned long devsize;
	char *data;

	if (!len)
		return NULL;
	blocknr = offset >> PAGE_SHIFT;
	offset &= PAGE_SIZE - 1;

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
		blk_offset += offset;
		if (blk_offset > BUFFER_SIZE ||
		    blk_offset + len > BUFFER_SIZE)
			continue;
		return read_buffers[i] + blk_offset;
	}

	devsize = bdev_nr_bytes(sb->s_bdev) >> PAGE_SHIFT;

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	file_ra_state_init(&ra, mapping);
	page_cache_sync_readahead(mapping, &ra, NULL, blocknr, BLKS_PER_BUF);

	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = NULL;

		if (blocknr + i < devsize) {
			page = read_mapping_page(mapping, blocknr + i, NULL);
			/* synchronous error? */
			if (IS_ERR(page))
				page = NULL;
		}
		pages[i] = page;
	}

	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];

		if (page) {
			memcpy_from_page(data, page, 0, PAGE_SIZE);
			put_page(page);
		} else
			memset(data, 0, PAGE_SIZE);
		data += PAGE_SIZE;
	}
	return read_buffers[buffer] + offset;
}

/*
 * Return a pointer to the linearly addressed cramfs image in memory.
 */
static void *cramfs_direct_read(struct super_block *sb, unsigned int offset,
				unsigned int len)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);

	if (!len)
		return NULL;
	if (len > sbi->size || offset > sbi->size - len)
		return page_address(ZERO_PAGE(0));
	return sbi->linear_virt_addr + offset;
}

/*
 * Returns a pointer to a buffer containing at least LEN bytes of
 * filesystem starting at byte offset OFFSET into the filesystem.
 */
static void *cramfs_read(struct super_block *sb, unsigned int offset,
			 unsigned int len)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);

	if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sbi->linear_virt_addr)
		return cramfs_direct_read(sb, offset, len);
	else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
		return cramfs_blkdev_read(sb, offset, len);
	else
		return NULL;
}

/*
 * For a mapping to be possible, we need a range of uncompressed and
 * contiguous blocks. Return the offset for the first block and number of
 * valid blocks for which that is true, or zero otherwise.
 */
static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 *pages)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
	int i;
	u32 *blockptrs, first_block_addr;

	/*
	 * We can dereference memory directly here as this code may be
	 * reached only when there is a direct filesystem image mapping
	 * available in memory.
	 */
	blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode) + pgoff * 4);
	first_block_addr = blockptrs[0] & ~CRAMFS_BLK_FLAGS;
	i = 0;
	do {
		u32 block_off = i * (PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT);
		u32 expect = (first_block_addr + block_off) |
			     CRAMFS_BLK_FLAG_DIRECT_PTR |
			     CRAMFS_BLK_FLAG_UNCOMPRESSED;
		if (blockptrs[i] != expect) {
			pr_debug("range: block %d/%d got %#x expects %#x\n",
				 pgoff+i, pgoff + *pages - 1,
				 blockptrs[i], expect);
			if (i == 0)
				return 0;
			break;
		}
	} while (++i < *pages);

	*pages = i;
	return first_block_addr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
}
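
/*
 * Illustration (added for clarity, assuming 4 KiB pages): consecutive
 * uncompressed direct blocks sit PAGE_SIZE bytes apart in the image, so
 * their stored pointers differ by PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT
 * == 1024 and carry both the DIRECT_PTR and UNCOMPRESSED flag bits.  The
 * loop above simply verifies that every pointer in the range follows that
 * pattern before the file is considered directly mappable.
 */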

#ifdef CONFIG_MMU

/*
 * Return true if the last page of a file in the filesystem image contains
 * some other data that doesn't belong to that file. It is assumed that the
 * last block is CRAMFS_BLK_FLAG_DIRECT_PTR | CRAMFS_BLK_FLAG_UNCOMPRESSED
 * (verified by cramfs_get_block_range()) and directly accessible in memory.
 */
static bool cramfs_last_page_is_shared(struct inode *inode)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
	u32 partial, last_page, blockaddr, *blockptrs;
	char *tail_data;

	partial = offset_in_page(inode->i_size);
	if (!partial)
		return false;
	last_page = inode->i_size >> PAGE_SHIFT;
	blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode));
	blockaddr = blockptrs[last_page] & ~CRAMFS_BLK_FLAGS;
	blockaddr <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
	tail_data = sbi->linear_virt_addr + blockaddr + partial;
	return memchr_inv(tail_data, 0, PAGE_SIZE - partial) ? true : false;
}

static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
	unsigned int pages, max_pages, offset;
	unsigned long address, pgoff = vma->vm_pgoff;
	char *bailout_reason;
	int ret;

	ret = generic_file_readonly_mmap(file, vma);
	if (ret)
		return ret;

	/*
	 * Now try to pre-populate ptes for this vma with a direct
	 * mapping avoiding memory allocation when possible.
	 */

	/* Could COW work here? */
	bailout_reason = "vma is writable";
	if (vma->vm_flags & VM_WRITE)
		goto bailout;

	max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	bailout_reason = "beyond file limit";
	if (pgoff >= max_pages)
		goto bailout;
	pages = min(vma_pages(vma), max_pages - pgoff);

	offset = cramfs_get_block_range(inode, pgoff, &pages);
	bailout_reason = "unsuitable block layout";
	if (!offset)
		goto bailout;
	address = sbi->linear_phys_addr + offset;
	bailout_reason = "data is not page aligned";
	if (!PAGE_ALIGNED(address))
		goto bailout;

	/* Don't map the last page if it contains some other data */
	if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) {
		pr_debug("mmap: %pD: last page is shared\n", file);
		pages--;
	}

	if (!pages) {
		bailout_reason = "no suitable block remaining";
		goto bailout;
	}

	if (pages == vma_pages(vma)) {
		/*
		 * The entire vma is mappable. remap_pfn_range() will
		 * make it distinguishable from a non-direct mapping
		 * in /proc/<pid>/maps by substituting the file offset
		 * with the actual physical address.
		 */
		ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
				      pages * PAGE_SIZE, vma->vm_page_prot);
	} else {
		/*
		 * Let's create a mixed map if we can't map it all.
		 * The normal paging machinery will take care of the
		 * unpopulated ptes via cramfs_read_folio().
		 */
		int i;
		vm_flags_set(vma, VM_MIXEDMAP);
		for (i = 0; i < pages && !ret; i++) {
			vm_fault_t vmf;
			unsigned long off = i * PAGE_SIZE;
			vmf = vmf_insert_mixed(vma, vma->vm_start + off,
					       PHYS_PFN(address + off));
			if (vmf & VM_FAULT_ERROR)
				ret = vm_fault_to_errno(vmf, 0);
		}
	}

	if (!ret)
		pr_debug("mapped %pD[%lu] at 0x%08lx (%u/%lu pages) "
			 "to vma 0x%08lx, page_prot 0x%llx\n", file,
			 pgoff, address, pages, vma_pages(vma), vma->vm_start,
			 (unsigned long long)pgprot_val(vma->vm_page_prot));
	return ret;

bailout:
	pr_debug("%pD[%lu]: direct mmap impossible: %s\n",
		 file, pgoff, bailout_reason);
	/* Didn't manage any direct map, but normal paging is still possible */
	return 0;
}
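
/*
 * Illustration (added, not from the original source): with 4 KiB pages, a
 * 20 KiB file whose first three blocks are stored uncompressed and
 * contiguous gets those three ptes pre-populated through the VM_MIXEDMAP
 * path above, while accesses to the remaining two pages simply fault and
 * are served by cramfs_read_folio() as for any other file.
 */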

#else /* CONFIG_MMU */

static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -ENOSYS;
}

static unsigned long cramfs_physmem_get_unmapped_area(struct file *file,
			unsigned long addr, unsigned long len,
			unsigned long pgoff, unsigned long flags)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
	unsigned int pages, block_pages, max_pages, offset;

	pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= max_pages || pages > max_pages - pgoff)
		return -EINVAL;
	block_pages = pages;
	offset = cramfs_get_block_range(inode, pgoff, &block_pages);
	if (!offset || block_pages != pages)
		return -ENOSYS;
	addr = sbi->linear_phys_addr + offset;
	pr_debug("get_unmapped for %pD ofs %#lx siz %lu at 0x%08lx\n",
		 file, pgoff*PAGE_SIZE, len, addr);
	return addr;
}

static unsigned int cramfs_physmem_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT |
	       NOMMU_MAP_READ | NOMMU_MAP_EXEC;
}

#endif /* CONFIG_MMU */

static const struct file_operations cramfs_physmem_fops = {
	.llseek			= generic_file_llseek,
	.read_iter		= generic_file_read_iter,
	.splice_read		= filemap_splice_read,
	.mmap			= cramfs_physmem_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area	= cramfs_physmem_get_unmapped_area,
	.mmap_capabilities	= cramfs_physmem_mmap_capabilities,
#endif
};

static void cramfs_kill_sb(struct super_block *sb)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);

	generic_shutdown_super(sb);

	if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sb->s_mtd) {
		if (sbi && sbi->mtd_point_size)
			mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size);
		put_mtd_device(sb->s_mtd);
		sb->s_mtd = NULL;
	} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
		sync_blockdev(sb->s_bdev);
		bdev_fput(sb->s_bdev_file);
	}
	kfree(sbi);
}

static int cramfs_reconfigure(struct fs_context *fc)
{
	sync_filesystem(fc->root->d_sb);
	fc->sb_flags |= SB_RDONLY;
	return 0;
}
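
/*
 * Note (added for clarity): the superblock is normally at image offset 0,
 * but images generated with boot-block padding (for instance mkcramfs's
 * pad option) place it at offset 512, which is why cramfs_read_super()
 * below retries the magic check at that offset before giving up.
 */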

static int cramfs_read_super(struct super_block *sb, struct fs_context *fc,
			     struct cramfs_super *super)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
	unsigned long root_offset;
	bool silent = fc->sb_flags & SB_SILENT;

	/* We don't know the real size yet */
	sbi->size = PAGE_SIZE;

	/* Read the first block and get the superblock from it */
	mutex_lock(&read_mutex);
	memcpy(super, cramfs_read(sb, 0, sizeof(*super)), sizeof(*super));
	mutex_unlock(&read_mutex);

	/* Do sanity checks on the superblock */
	if (super->magic != CRAMFS_MAGIC) {
		/* check for wrong endianness */
		if (super->magic == CRAMFS_MAGIC_WEND) {
			if (!silent)
				errorfc(fc, "wrong endianness");
			return -EINVAL;
		}

		/* check at 512 byte offset */
		mutex_lock(&read_mutex);
		memcpy(super,
		       cramfs_read(sb, 512, sizeof(*super)),
		       sizeof(*super));
		mutex_unlock(&read_mutex);
		if (super->magic != CRAMFS_MAGIC) {
			if (super->magic == CRAMFS_MAGIC_WEND && !silent)
				errorfc(fc, "wrong endianness");
			else if (!silent)
				errorfc(fc, "wrong magic");
			return -EINVAL;
		}
	}

	/* get feature flags first */
	if (super->flags & ~CRAMFS_SUPPORTED_FLAGS) {
		errorfc(fc, "unsupported filesystem features");
		return -EINVAL;
	}

	/* Check that the root inode is in a sane state */
	if (!S_ISDIR(super->root.mode)) {
		errorfc(fc, "root is not a directory");
		return -EINVAL;
	}
	/* correct strange, hard-coded permissions of mkcramfs */
	super->root.mode |= 0555;

	root_offset = super->root.offset << 2;
	if (super->flags & CRAMFS_FLAG_FSID_VERSION_2) {
		sbi->size = super->size;
		sbi->blocks = super->fsid.blocks;
		sbi->files = super->fsid.files;
	} else {
		sbi->size = 1<<28;
		sbi->blocks = 0;
		sbi->files = 0;
	}
	sbi->magic = super->magic;
	sbi->flags = super->flags;
	if (root_offset == 0)
		infofc(fc, "empty filesystem");
	else if (!(super->flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
		 ((root_offset != sizeof(struct cramfs_super)) &&
		  (root_offset != 512 + sizeof(struct cramfs_super))))
	{
		errorfc(fc, "bad root offset %lu", root_offset);
		return -EINVAL;
	}

	return 0;
}

static int cramfs_finalize_super(struct super_block *sb,
				 struct cramfs_inode *cramfs_root)
{
	struct inode *root;

	/* Set it all up.. */
	sb->s_flags |= SB_RDONLY;
	sb->s_time_min = 0;
	sb->s_time_max = 0;
	sb->s_op = &cramfs_ops;
	root = get_cramfs_inode(sb, cramfs_root, 0);
	if (IS_ERR(root))
		return PTR_ERR(root);
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int cramfs_blkdev_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct cramfs_sb_info *sbi;
	struct cramfs_super super;
	int i, err;

	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;

	/* Invalidate the read buffers on mount: think disk change.. */
	for (i = 0; i < READ_BUFFERS; i++)
		buffer_blocknr[i] = -1;

	err = cramfs_read_super(sb, fc, &super);
	if (err)
		return err;
	return cramfs_finalize_super(sb, &super.root);
}

static int cramfs_mtd_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct cramfs_sb_info *sbi;
	struct cramfs_super super;
	int err;

	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;

	/* Map only one page for now. Will remap it when fs size is known. */
	err = mtd_point(sb->s_mtd, 0, PAGE_SIZE, &sbi->mtd_point_size,
			&sbi->linear_virt_addr, &sbi->linear_phys_addr);
	if (err || sbi->mtd_point_size != PAGE_SIZE) {
		pr_err("unable to get direct memory access to mtd:%s\n",
		       sb->s_mtd->name);
		return err ? : -ENODATA;
	}

	pr_info("checking physical address %pap for linear cramfs image\n",
		&sbi->linear_phys_addr);
	err = cramfs_read_super(sb, fc, &super);
	if (err)
		return err;

	/* Remap the whole filesystem now */
	pr_info("linear cramfs image on mtd:%s appears to be %lu KB in size\n",
		sb->s_mtd->name, sbi->size/1024);
	mtd_unpoint(sb->s_mtd, 0, PAGE_SIZE);
	err = mtd_point(sb->s_mtd, 0, sbi->size, &sbi->mtd_point_size,
			&sbi->linear_virt_addr, &sbi->linear_phys_addr);
	if (err || sbi->mtd_point_size != sbi->size) {
		pr_err("unable to get direct memory access to mtd:%s\n",
		       sb->s_mtd->name);
		return err ? : -ENODATA;
	}

	return cramfs_finalize_super(sb, &super.root);
}

static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	u64 id = 0;

	if (sb->s_bdev)
		id = huge_encode_dev(sb->s_bdev->bd_dev);
	else if (sb->s_dev)
		id = huge_encode_dev(sb->s_dev);

	buf->f_type = CRAMFS_MAGIC;
	buf->f_bsize = PAGE_SIZE;
	buf->f_blocks = CRAMFS_SB(sb)->blocks;
	buf->f_bfree = 0;
	buf->f_bavail = 0;
	buf->f_files = CRAMFS_SB(sb)->files;
	buf->f_ffree = 0;
	buf->f_fsid = u64_to_fsid(id);
	buf->f_namelen = CRAMFS_MAXPATHLEN;
	return 0;
}

/*
 * Read a cramfs directory entry.
 */
static int cramfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	char *buf;
	unsigned int offset;

	/* Offset within the thing. */
	if (ctx->pos >= inode->i_size)
		return 0;
	offset = ctx->pos;
	/* Directory entries are always 4-byte aligned */
	if (offset & 3)
		return -EINVAL;

	buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (offset < inode->i_size) {
		struct cramfs_inode *de;
		unsigned long nextoffset;
		char *name;
		ino_t ino;
		umode_t mode;
		int namelen;

		mutex_lock(&read_mutex);
		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/*
		 * Namelengths on disk are shifted by two
		 * and the name padded out to 4-byte boundaries
		 * with zeroes.
		 */
		namelen = de->namelen << 2;
		memcpy(buf, name, namelen);
		ino = cramino(de, OFFSET(inode) + offset);
		mode = de->mode;
		mutex_unlock(&read_mutex);
		nextoffset = offset + sizeof(*de) + namelen;
		for (;;) {
			if (!namelen) {
				kfree(buf);
				return -EIO;
			}
			if (buf[namelen-1])
				break;
			namelen--;
		}
		if (!dir_emit(ctx, buf, namelen, ino, mode >> 12))
			break;

		ctx->pos = offset = nextoffset;
	}
	kfree(buf);
	return 0;
}
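
/*
 * Directory entry layout, illustrated (added for clarity): each entry is a
 * 12-byte struct cramfs_inode followed by the name padded with NUL bytes
 * to a multiple of four; de->namelen stores that padded length divided by
 * four.  A name such as "ab" is therefore stored as "ab\0\0" with a
 * namelen field of 1, and the trailing-zero trimming loops above and below
 * recover the real two-character length.
 */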

/*
 * Lookup and fill in the inode data..
 */
static struct dentry *cramfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	unsigned int offset = 0;
	struct inode *inode = NULL;
	int sorted;

	mutex_lock(&read_mutex);
	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
	while (offset < dir->i_size) {
		struct cramfs_inode *de;
		char *name;
		int namelen, retval;
		int dir_off = OFFSET(dir) + offset;

		de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/* Try to take advantage of sorted directories */
		if (sorted && (dentry->d_name.name[0] < name[0]))
			break;

		namelen = de->namelen << 2;
		offset += sizeof(*de) + namelen;

		/* Quick check that the name is roughly the right length */
		if (((dentry->d_name.len + 3) & ~3) != namelen)
			continue;

		for (;;) {
			if (!namelen) {
				inode = ERR_PTR(-EIO);
				goto out;
			}
			if (name[namelen-1])
				break;
			namelen--;
		}
		if (namelen != dentry->d_name.len)
			continue;
		retval = memcmp(dentry->d_name.name, name, namelen);
		if (retval > 0)
			continue;
		if (!retval) {
			inode = get_cramfs_inode(dir->i_sb, de, dir_off);
			break;
		}
		/* else (retval < 0) */
		if (sorted)
			break;
	}
out:
	mutex_unlock(&read_mutex);
	return d_splice_alias(inode, dentry);
}
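
/*
 * Illustration of the classic (non direct-pointer) block layout handled by
 * cramfs_read_folio() below (added for clarity): a file spanning N pages
 * stores N 32-bit block pointers starting at its data offset, each holding
 * the image offset one past the end of that block's (usually compressed)
 * data.  The data for block 0 therefore starts right after the pointer
 * table, block i > 0 starts at pointer[i-1], and a block whose computed
 * length is zero is treated as a hole.
 */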

static int cramfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	u32 maxblock;
	int bytes_filled;
	void *pgdata;
	bool success = false;

	maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	bytes_filled = 0;
	pgdata = kmap_local_folio(folio, 0);

	if (folio->index < maxblock) {
		struct super_block *sb = inode->i_sb;
		u32 blkptr_offset = OFFSET(inode) + folio->index * 4;
		u32 block_ptr, block_start, block_len;
		bool uncompressed, direct;

		mutex_lock(&read_mutex);
		block_ptr = *(u32 *) cramfs_read(sb, blkptr_offset, 4);
		uncompressed = (block_ptr & CRAMFS_BLK_FLAG_UNCOMPRESSED);
		direct = (block_ptr & CRAMFS_BLK_FLAG_DIRECT_PTR);
		block_ptr &= ~CRAMFS_BLK_FLAGS;

		if (direct) {
			/*
			 * The block pointer is an absolute start pointer,
			 * shifted by 2 bits. The size is included in the
			 * first 2 bytes of the data block when compressed,
			 * or PAGE_SIZE otherwise.
			 */
			block_start = block_ptr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
			if (uncompressed) {
				block_len = PAGE_SIZE;
				/* if last block: cap to file length */
				if (folio->index == maxblock - 1)
					block_len =
						offset_in_page(inode->i_size);
			} else {
				block_len = *(u16 *)
					cramfs_read(sb, block_start, 2);
				block_start += 2;
			}
		} else {
			/*
			 * The block pointer indicates one past the end of
			 * the current block (start of next block). If this
			 * is the first block then it starts where the block
			 * pointer table ends, otherwise its start comes
			 * from the previous block's pointer.
			 */
			block_start = OFFSET(inode) + maxblock * 4;
			if (folio->index)
				block_start = *(u32 *)
					cramfs_read(sb, blkptr_offset - 4, 4);
			/* Beware... previous ptr might be a direct ptr */
			if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) {
				/* See comments on earlier code. */
				u32 prev_start = block_start;
				block_start = prev_start & ~CRAMFS_BLK_FLAGS;
				block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
				if (prev_start & CRAMFS_BLK_FLAG_UNCOMPRESSED) {
					block_start += PAGE_SIZE;
				} else {
					block_len = *(u16 *)
						cramfs_read(sb, block_start, 2);
					block_start += 2 + block_len;
				}
			}
			block_start &= ~CRAMFS_BLK_FLAGS;
			block_len = block_ptr - block_start;
		}

		if (block_len == 0)
			; /* hole */
		else if (unlikely(block_len > 2*PAGE_SIZE ||
				  (uncompressed && block_len > PAGE_SIZE))) {
			mutex_unlock(&read_mutex);
			pr_err("bad data blocksize %u\n", block_len);
			goto err;
		} else if (uncompressed) {
			memcpy(pgdata,
			       cramfs_read(sb, block_start, block_len),
			       block_len);
			bytes_filled = block_len;
		} else {
			bytes_filled = cramfs_uncompress_block(pgdata,
				 PAGE_SIZE,
				 cramfs_read(sb, block_start, block_len),
				 block_len);
		}
		mutex_unlock(&read_mutex);
		if (unlikely(bytes_filled < 0))
			goto err;
	}

	memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
	flush_dcache_folio(folio);

	success = true;
err:
	kunmap_local(pgdata);
	folio_end_read(folio, success);
	return 0;
}

static const struct address_space_operations cramfs_aops = {
	.read_folio = cramfs_read_folio
};

/*
 * Our operations:
 */

/*
 * A directory can only readdir
 */
static const struct file_operations cramfs_directory_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= cramfs_readdir,
};

static const struct inode_operations cramfs_dir_inode_operations = {
	.lookup		= cramfs_lookup,
};

static const struct super_operations cramfs_ops = {
	.statfs		= cramfs_statfs,
};

static int cramfs_get_tree(struct fs_context *fc)
{
	int ret = -ENOPROTOOPT;

	if (IS_ENABLED(CONFIG_CRAMFS_MTD)) {
		ret = get_tree_mtd(fc, cramfs_mtd_fill_super);
		if (!ret)
			return 0;
	}
	if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
		ret = get_tree_bdev(fc, cramfs_blkdev_fill_super);
	return ret;
}

static const struct fs_context_operations cramfs_context_ops = {
	.get_tree	= cramfs_get_tree,
	.reconfigure	= cramfs_reconfigure,
};

/*
 * Set up the filesystem mount context.
 */
static int cramfs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &cramfs_context_ops;
	return 0;
}

static struct file_system_type cramfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "cramfs",
	.init_fs_context = cramfs_init_fs_context,
	.kill_sb	= cramfs_kill_sb,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("cramfs");

static int __init init_cramfs_fs(void)
{
	int rv;

	rv = cramfs_uncompress_init();
	if (rv < 0)
		return rv;
	rv = register_filesystem(&cramfs_fs_type);
	if (rv < 0)
		cramfs_uncompress_exit();
	return rv;
}

static void __exit exit_cramfs_fs(void)
{
	cramfs_uncompress_exit();
	unregister_filesystem(&cramfs_fs_type);
}

module_init(init_cramfs_fs);
module_exit(exit_cramfs_fs);
MODULE_DESCRIPTION("Compressed ROM file system support");
MODULE_LICENSE("GPL");