/*
 * Compressed rom filesystem for Linux.
 *
 * Copyright (C) 1999 Linus Torvalds.
 *
 * This file is released under the GPL.
 */

/*
 * These are the VFS interfaces to the compressed rom filesystem.
 * The actual compression is based on zlib, see the other files.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/filelock.h>
#include <linux/pagemap.h>
#include <linux/ramfs.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/super.h>
#include <linux/fs_context.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/mutex.h>
#include <uapi/linux/cramfs_fs.h>
#include <linux/uaccess.h>

#include "internal.h"

/*
 * cramfs super-block data in memory
 */
struct cramfs_sb_info {
	unsigned long magic;
	unsigned long size;
	unsigned long blocks;
	unsigned long files;
	unsigned long flags;
	void *linear_virt_addr;
	resource_size_t linear_phys_addr;
	size_t mtd_point_size;
};

static inline struct cramfs_sb_info *CRAMFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static const struct super_operations cramfs_ops;
static const struct inode_operations cramfs_dir_inode_operations;
static const struct file_operations cramfs_directory_operations;
static const struct file_operations cramfs_physmem_fops;
static const struct address_space_operations cramfs_aops;

static DEFINE_MUTEX(read_mutex);


/* These macros may change in future, to provide better st_ino semantics. */
#define OFFSET(x)	((x)->i_ino)

static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
{
	if (!cino->offset)
		return offset + 1;
	if (!cino->size)
		return offset + 1;

	/*
	 * The file mode test fixes buggy mkcramfs implementations where
	 * cramfs_inode->offset is set to a non-zero value for entries
	 * which did not contain data, like device nodes and fifos.
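	 *
	 * For instance, a regular file stores (data offset >> 2) in
	 * cino->offset, so its inode number becomes that offset shifted
	 * back up by 2, while an empty fifo falls through to offset + 1,
	 * i.e. the byte position of its own cramfs_inode plus one.  Since
	 * directory entries are 4-byte aligned, the latter is never a
	 * multiple of 4 and so never mistaken for a data offset.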
	 */
	switch (cino->mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		return cino->offset << 2;
	default:
		break;
	}
	return offset + 1;
}

static struct inode *get_cramfs_inode(struct super_block *sb,
	const struct cramfs_inode *cramfs_inode, unsigned int offset)
{
	struct inode *inode;
	static struct timespec64 zerotime;

	inode = iget_locked(sb, cramino(cramfs_inode, offset));
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode_state_read_once(inode) & I_NEW))
		return inode;

	switch (cramfs_inode->mode & S_IFMT) {
	case S_IFREG:
		inode->i_fop = &generic_ro_fops;
		inode->i_data.a_ops = &cramfs_aops;
		if (IS_ENABLED(CONFIG_CRAMFS_MTD) &&
		    CRAMFS_SB(sb)->flags & CRAMFS_FLAG_EXT_BLOCK_POINTERS &&
		    CRAMFS_SB(sb)->linear_phys_addr)
			inode->i_fop = &cramfs_physmem_fops;
		break;
	case S_IFDIR:
		inode->i_op = &cramfs_dir_inode_operations;
		inode->i_fop = &cramfs_directory_operations;
		break;
	case S_IFLNK:
		inode->i_op = &page_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_data.a_ops = &cramfs_aops;
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		init_special_inode(inode, cramfs_inode->mode,
				   old_decode_dev(cramfs_inode->size));
		break;
	default:
		printk(KERN_DEBUG "CRAMFS: Invalid file type 0%04o for inode %lu.\n",
		       inode->i_mode, inode->i_ino);
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}

	inode->i_mode = cramfs_inode->mode;
	i_uid_write(inode, cramfs_inode->uid);
	i_gid_write(inode, cramfs_inode->gid);

	/* if the lower 2 bits are zero, the inode contains data */
	if (!(inode->i_ino & 3)) {
		inode->i_size = cramfs_inode->size;
		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
	}

	/* Struct copy intentional */
	inode_set_mtime_to_ts(inode,
		inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, zerotime)));
	/* inode->i_nlink is left 1 - arguably wrong for directories,
	   but it's the best we can do without reading the directory
	   contents. 1 yields the right result in GNU find, even
	   without -noleaf option. */

	unlock_new_inode(inode);

	return inode;
}

/*
 * We have our own block cache: don't fill up the buffer cache
 * with the rom-image, because the way the filesystem is set
 * up the accesses should be fairly regular and cached in the
 * page cache and dentry tree anyway..
 *
 * This also acts as a way to guarantee contiguous areas of up to
 * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
 * worry about end-of-buffer issues even when decompressing a full
 * page cache.
 *
 * Note: This is all optimized away at compile time when
 * CONFIG_CRAMFS_BLOCKDEV=n.
 */
#define READ_BUFFERS (2)
/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
#define NEXT_BUFFER(_ix) ((_ix) ^ 1)

/*
 * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
 * data that takes up more space than the original and with unlucky
 * alignment.
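 *
 * Concretely: cramfs_read_folio() rejects any block longer than
 * 2 * PAGE_SIZE, and a request may start anywhere within the first
 * page of the buffer, so a single read can touch up to three pages;
 * a shift of 2 (four pages) leaves headroom for that.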
 */
#define BLKS_PER_BUF_SHIFT	(2)
#define BLKS_PER_BUF		(1 << BLKS_PER_BUF_SHIFT)
#define BUFFER_SIZE		(BLKS_PER_BUF*PAGE_SIZE)

static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
static unsigned buffer_blocknr[READ_BUFFERS];
static struct super_block *buffer_dev[READ_BUFFERS];
static int next_buffer;

/*
 * Populate our block cache and return a pointer to it.
 */
static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
				unsigned int len)
{
	struct address_space *mapping = sb->s_bdev->bd_mapping;
	struct file_ra_state ra = {};
	struct page *pages[BLKS_PER_BUF];
	unsigned i, blocknr, buffer;
	unsigned long devsize;
	char *data;

	if (!len)
		return NULL;
	blocknr = offset >> PAGE_SHIFT;
	offset &= PAGE_SIZE - 1;

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
		blk_offset += offset;
		if (blk_offset > BUFFER_SIZE ||
		    blk_offset + len > BUFFER_SIZE)
			continue;
		return read_buffers[i] + blk_offset;
	}

	devsize = bdev_nr_bytes(sb->s_bdev) >> PAGE_SHIFT;

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	file_ra_state_init(&ra, mapping);
	page_cache_sync_readahead(mapping, &ra, NULL, blocknr, BLKS_PER_BUF);

	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = NULL;

		if (blocknr + i < devsize) {
			page = read_mapping_page(mapping, blocknr + i, NULL);
			/* synchronous error? */
			if (IS_ERR(page))
				page = NULL;
		}
		pages[i] = page;
	}

	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];

		if (page) {
			memcpy_from_page(data, page, 0, PAGE_SIZE);
			put_page(page);
		} else
			memset(data, 0, PAGE_SIZE);
		data += PAGE_SIZE;
	}
	return read_buffers[buffer] + offset;
}

/*
 * Return a pointer to the linearly addressed cramfs image in memory.
 */
static void *cramfs_direct_read(struct super_block *sb, unsigned int offset,
				unsigned int len)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);

	if (!len)
		return NULL;
	if (len > sbi->size || offset > sbi->size - len)
		return page_address(ZERO_PAGE(0));
	return sbi->linear_virt_addr + offset;
}

/*
 * Returns a pointer to a buffer containing at least LEN bytes of
 * filesystem starting at byte offset OFFSET into the filesystem.
 */
static void *cramfs_read(struct super_block *sb, unsigned int offset,
			 unsigned int len)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);

	if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sbi->linear_virt_addr)
		return cramfs_direct_read(sb, offset, len);
	else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
		return cramfs_blkdev_read(sb, offset, len);
	else
		return NULL;
}

/*
 * For a mapping to be possible, we need a range of uncompressed and
 * contiguous blocks. Return the offset for the first block and number of
 * valid blocks for which that is true, or zero otherwise.
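 *
 * In practice this means every block pointer in the range must be a
 * direct pointer with the uncompressed flag set, and each must point
 * exactly PAGE_SIZE past the previous one; the scan below stops at the
 * first pointer that breaks this pattern.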
 */
static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 *pages)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
	int i;
	u32 *blockptrs, first_block_addr;

	/*
	 * We can dereference memory directly here as this code may be
	 * reached only when there is a direct filesystem image mapping
	 * available in memory.
	 */
	blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode) + pgoff * 4);
	first_block_addr = blockptrs[0] & ~CRAMFS_BLK_FLAGS;
	i = 0;
	do {
		u32 block_off = i * (PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT);
		u32 expect = (first_block_addr + block_off) |
			     CRAMFS_BLK_FLAG_DIRECT_PTR |
			     CRAMFS_BLK_FLAG_UNCOMPRESSED;
		if (blockptrs[i] != expect) {
			pr_debug("range: block %d/%d got %#x expects %#x\n",
				 pgoff+i, pgoff + *pages - 1,
				 blockptrs[i], expect);
			if (i == 0)
				return 0;
			break;
		}
	} while (++i < *pages);

	*pages = i;
	return first_block_addr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
}

#ifdef CONFIG_MMU

/*
 * Return true if the last page of a file in the filesystem image contains
 * some other data that doesn't belong to that file. It is assumed that the
 * last block is CRAMFS_BLK_FLAG_DIRECT_PTR | CRAMFS_BLK_FLAG_UNCOMPRESSED
 * (verified by cramfs_get_block_range()) and directly accessible in memory.
 */
static bool cramfs_last_page_is_shared(struct inode *inode)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
	u32 partial, last_page, blockaddr, *blockptrs;
	char *tail_data;

	partial = offset_in_page(inode->i_size);
	if (!partial)
		return false;
	last_page = inode->i_size >> PAGE_SHIFT;
	blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode));
	blockaddr = blockptrs[last_page] & ~CRAMFS_BLK_FLAGS;
	blockaddr <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
	tail_data = sbi->linear_virt_addr + blockaddr + partial;
	return memchr_inv(tail_data, 0, PAGE_SIZE - partial) ? true : false;
}

static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
	unsigned int pages, max_pages, offset;
	unsigned long address, pgoff = vma->vm_pgoff;
	char *bailout_reason;
	int ret;

	ret = generic_file_readonly_mmap(file, vma);
	if (ret)
		return ret;

	/*
	 * Now try to pre-populate ptes for this vma with a direct
	 * mapping avoiding memory allocation when possible.
	 */

	/*
	 * Could COW work here?
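	 *
	 * For now the code simply bails out: the direct mapping below is
	 * only attempted for read-only vmas, and anything else falls back
	 * to normal paging via cramfs_read_folio().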
	 */
	bailout_reason = "vma is writable";
	if (vma->vm_flags & VM_WRITE)
		goto bailout;

	max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	bailout_reason = "beyond file limit";
	if (pgoff >= max_pages)
		goto bailout;
	pages = min(vma_pages(vma), max_pages - pgoff);

	offset = cramfs_get_block_range(inode, pgoff, &pages);
	bailout_reason = "unsuitable block layout";
	if (!offset)
		goto bailout;
	address = sbi->linear_phys_addr + offset;
	bailout_reason = "data is not page aligned";
	if (!PAGE_ALIGNED(address))
		goto bailout;

	/* Don't map the last page if it contains some other data */
	if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) {
		pr_debug("mmap: %pD: last page is shared\n", file);
		pages--;
	}

	if (!pages) {
		bailout_reason = "no suitable block remaining";
		goto bailout;
	}

	if (pages == vma_pages(vma)) {
		/*
		 * The entire vma is mappable. remap_pfn_range() will
		 * make it distinguishable from a non-direct mapping
		 * in /proc/<pid>/maps by substituting the file offset
		 * with the actual physical address.
		 */
		ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
				      pages * PAGE_SIZE, vma->vm_page_prot);
	} else {
		/*
		 * Let's create a mixed map if we can't map it all.
		 * The normal paging machinery will take care of the
		 * unpopulated ptes via cramfs_read_folio().
		 */
		int i;
		vm_flags_set(vma, VM_MIXEDMAP);
		for (i = 0; i < pages && !ret; i++) {
			vm_fault_t vmf;
			unsigned long off = i * PAGE_SIZE;
			vmf = vmf_insert_mixed(vma, vma->vm_start + off,
					       PHYS_PFN(address + off));
			if (vmf & VM_FAULT_ERROR)
				ret = vm_fault_to_errno(vmf, 0);
		}
	}

	if (!ret)
		pr_debug("mapped %pD[%lu] at 0x%08lx (%u/%lu pages) "
			 "to vma 0x%08lx, page_prot 0x%llx\n", file,
			 pgoff, address, pages, vma_pages(vma), vma->vm_start,
			 (unsigned long long)pgprot_val(vma->vm_page_prot));
	return ret;

bailout:
	pr_debug("%pD[%lu]: direct mmap impossible: %s\n",
		 file, pgoff, bailout_reason);
	/* Didn't manage any direct map, but normal paging is still possible */
	return 0;
}

#else /* CONFIG_MMU */

static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -ENOSYS;
}
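
/*
 * On no-MMU, a direct mapping means handing out the address of the
 * file's data inside the ROM image itself from get_unmapped_area().
 * That is only possible when the whole requested range is backed by
 * uncompressed, contiguous blocks, as checked by
 * cramfs_get_block_range() below.
 */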

static unsigned long cramfs_physmem_get_unmapped_area(struct file *file,
			unsigned long addr, unsigned long len,
			unsigned long pgoff, unsigned long flags)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
	unsigned int pages, block_pages, max_pages, offset;

	pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= max_pages || pages > max_pages - pgoff)
		return -EINVAL;
	block_pages = pages;
	offset = cramfs_get_block_range(inode, pgoff, &block_pages);
	if (!offset || block_pages != pages)
		return -ENOSYS;
	addr = sbi->linear_phys_addr + offset;
	pr_debug("get_unmapped for %pD ofs %#lx siz %lu at 0x%08lx\n",
		 file, pgoff*PAGE_SIZE, len, addr);
	return addr;
}

static unsigned int cramfs_physmem_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT |
	       NOMMU_MAP_READ | NOMMU_MAP_EXEC;
}

#endif /* CONFIG_MMU */

static const struct file_operations cramfs_physmem_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.splice_read	= filemap_splice_read,
	.mmap		= cramfs_physmem_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = cramfs_physmem_get_unmapped_area,
	.mmap_capabilities = cramfs_physmem_mmap_capabilities,
#endif
};

static void cramfs_kill_sb(struct super_block *sb)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);

	generic_shutdown_super(sb);

	if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sb->s_mtd) {
		if (sbi && sbi->mtd_point_size)
			mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size);
		put_mtd_device(sb->s_mtd);
		sb->s_mtd = NULL;
	} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
		sync_blockdev(sb->s_bdev);
		bdev_fput(sb->s_bdev_file);
	}
	kfree(sbi);
}

static int cramfs_reconfigure(struct fs_context *fc)
{
	sync_filesystem(fc->root->d_sb);
	fc->sb_flags |= SB_RDONLY;
	return 0;
}

static int cramfs_read_super(struct super_block *sb, struct fs_context *fc,
			     struct cramfs_super *super)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
	unsigned long root_offset;
	bool silent = fc->sb_flags & SB_SILENT;

	/* We don't know the real size yet */
	sbi->size = PAGE_SIZE;

	/* Read the first block and get the superblock from it */
	mutex_lock(&read_mutex);
	memcpy(super, cramfs_read(sb, 0, sizeof(*super)), sizeof(*super));
	mutex_unlock(&read_mutex);

	/* Do sanity checks on the superblock */
	if (super->magic != CRAMFS_MAGIC) {
		/* check for wrong endianness */
		if (super->magic == CRAMFS_MAGIC_WEND) {
			if (!silent)
				errorfc(fc, "wrong endianness");
			return -EINVAL;
		}

		/* check at 512 byte offset */
		mutex_lock(&read_mutex);
		memcpy(super,
		       cramfs_read(sb, 512, sizeof(*super)),
		       sizeof(*super));
		mutex_unlock(&read_mutex);
		if (super->magic != CRAMFS_MAGIC) {
			if (super->magic == CRAMFS_MAGIC_WEND && !silent)
				errorfc(fc, "wrong endianness");
			else if (!silent)
				errorfc(fc, "wrong magic");
			return -EINVAL;
		}
	}

	/* get feature flags first */
	if (super->flags & ~CRAMFS_SUPPORTED_FLAGS) {
		errorfc(fc, "unsupported filesystem features");
		return -EINVAL;
	}

	/* Check that the root inode is in a sane state */
	if (!S_ISDIR(super->root.mode)) {
		errorfc(fc, "root is not a directory");
		return -EINVAL;
	}
	/* correct strange, hard-coded permissions of mkcramfs */
	super->root.mode |= 0555;

	root_offset = super->root.offset << 2;
	if (super->flags & CRAMFS_FLAG_FSID_VERSION_2) {
		sbi->size = super->size;
		sbi->blocks = super->fsid.blocks;
		sbi->files = super->fsid.files;
	} else {
		sbi->size = 1<<28;
		sbi->blocks = 0;
		sbi->files = 0;
	}
	sbi->magic = super->magic;
	sbi->flags = super->flags;
	if (root_offset == 0)
		infofc(fc, "empty filesystem");
	else if (!(super->flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
		 ((root_offset != sizeof(struct cramfs_super)) &&
		  (root_offset != 512 + sizeof(struct cramfs_super))))
	{
		errorfc(fc, "bad root offset %lu", root_offset);
		return -EINVAL;
	}

	return 0;
}

static int cramfs_finalize_super(struct super_block *sb,
				 struct cramfs_inode *cramfs_root)
{
	struct inode *root;

	/* Set it all up.. */
	sb->s_flags |= SB_RDONLY;
	sb->s_time_min = 0;
	sb->s_time_max = 0;
	sb->s_op = &cramfs_ops;
	root = get_cramfs_inode(sb, cramfs_root, 0);
	if (IS_ERR(root))
		return PTR_ERR(root);
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int cramfs_blkdev_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct cramfs_sb_info *sbi;
	struct cramfs_super super;
	int i, err;

	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;

	/* Invalidate the read buffers on mount: think disk change.. */
	for (i = 0; i < READ_BUFFERS; i++)
		buffer_blocknr[i] = -1;

	err = cramfs_read_super(sb, fc, &super);
	if (err)
		return err;
	return cramfs_finalize_super(sb, &super.root);
}

static int cramfs_mtd_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct cramfs_sb_info *sbi;
	struct cramfs_super super;
	int err;

	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;

	/* Map only one page for now. Will remap it when fs size is known. */
	err = mtd_point(sb->s_mtd, 0, PAGE_SIZE, &sbi->mtd_point_size,
			&sbi->linear_virt_addr, &sbi->linear_phys_addr);
	if (err || sbi->mtd_point_size != PAGE_SIZE) {
		pr_err("unable to get direct memory access to mtd:%s\n",
		       sb->s_mtd->name);
		return err ? : -ENODATA;
	}

	pr_info("checking physical address %pap for linear cramfs image\n",
		&sbi->linear_phys_addr);
	err = cramfs_read_super(sb, fc, &super);
	if (err)
		return err;

	/* Remap the whole filesystem now */
	pr_info("linear cramfs image on mtd:%s appears to be %lu KB in size\n",
		sb->s_mtd->name, sbi->size/1024);
	mtd_unpoint(sb->s_mtd, 0, PAGE_SIZE);
	err = mtd_point(sb->s_mtd, 0, sbi->size, &sbi->mtd_point_size,
			&sbi->linear_virt_addr, &sbi->linear_phys_addr);
	if (err || sbi->mtd_point_size != sbi->size) {
		pr_err("unable to get direct memory access to mtd:%s\n",
		       sb->s_mtd->name);
		return err ? : -ENODATA;
	}

	return cramfs_finalize_super(sb, &super.root);
}

static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	u64 id = 0;

	if (sb->s_bdev)
		id = huge_encode_dev(sb->s_bdev->bd_dev);
	else if (sb->s_dev)
		id = huge_encode_dev(sb->s_dev);

	buf->f_type = CRAMFS_MAGIC;
	buf->f_bsize = PAGE_SIZE;
	buf->f_blocks = CRAMFS_SB(sb)->blocks;
	buf->f_bfree = 0;
	buf->f_bavail = 0;
	buf->f_files = CRAMFS_SB(sb)->files;
	buf->f_ffree = 0;
	buf->f_fsid = u64_to_fsid(id);
	buf->f_namelen = CRAMFS_MAXPATHLEN;
	return 0;
}

/*
 * Read a cramfs directory entry.
 */
static int cramfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	char *buf;
	unsigned int offset;

	/* Offset within the thing. */
	if (ctx->pos >= inode->i_size)
		return 0;
	offset = ctx->pos;
	/* Directory entries are always 4-byte aligned */
	if (offset & 3)
		return -EINVAL;

	buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (offset < inode->i_size) {
		struct cramfs_inode *de;
		unsigned long nextoffset;
		char *name;
		ino_t ino;
		umode_t mode;
		int namelen;

		mutex_lock(&read_mutex);
		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/*
		 * Namelengths on disk are shifted by two
		 * and the name padded out to 4-byte boundaries
		 * with zeroes.
		 */
		namelen = de->namelen << 2;
		memcpy(buf, name, namelen);
		ino = cramino(de, OFFSET(inode) + offset);
		mode = de->mode;
		mutex_unlock(&read_mutex);
		nextoffset = offset + sizeof(*de) + namelen;
		for (;;) {
			if (!namelen) {
				kfree(buf);
				return -EIO;
			}
			if (buf[namelen-1])
				break;
			namelen--;
		}
		if (!dir_emit(ctx, buf, namelen, ino, mode >> 12))
			break;

		ctx->pos = offset = nextoffset;
	}
	kfree(buf);
	return 0;
}

/*
 * Lookup and fill in the inode data..
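 *
 * When the image was built with sorted directories
 * (CRAMFS_FLAG_SORTED_DIRS), the scan below can stop early once the
 * on-disk names compare past the name being looked up; otherwise the
 * whole directory is walked.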
 */
static struct dentry *cramfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	unsigned int offset = 0;
	struct inode *inode = NULL;
	int sorted;

	mutex_lock(&read_mutex);
	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
	while (offset < dir->i_size) {
		struct cramfs_inode *de;
		char *name;
		int namelen, retval;
		int dir_off = OFFSET(dir) + offset;

		de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/* Try to take advantage of sorted directories */
		if (sorted && (dentry->d_name.name[0] < name[0]))
			break;

		namelen = de->namelen << 2;
		offset += sizeof(*de) + namelen;

		/* Quick check that the name is roughly the right length */
		if (((dentry->d_name.len + 3) & ~3) != namelen)
			continue;

		for (;;) {
			if (!namelen) {
				inode = ERR_PTR(-EIO);
				goto out;
			}
			if (name[namelen-1])
				break;
			namelen--;
		}
		if (namelen != dentry->d_name.len)
			continue;
		retval = memcmp(dentry->d_name.name, name, namelen);
		if (retval > 0)
			continue;
		if (!retval) {
			inode = get_cramfs_inode(dir->i_sb, de, dir_off);
			break;
		}
		/* else (retval < 0) */
		if (sorted)
			break;
	}
out:
	mutex_unlock(&read_mutex);
	return d_splice_alias(inode, dentry);
}

static int cramfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	u32 maxblock;
	int bytes_filled;
	void *pgdata;
	bool success = false;

	maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	bytes_filled = 0;
	pgdata = kmap_local_folio(folio, 0);

	if (folio->index < maxblock) {
		struct super_block *sb = inode->i_sb;
		u32 blkptr_offset = OFFSET(inode) + folio->index * 4;
		u32 block_ptr, block_start, block_len;
		bool uncompressed, direct;

		mutex_lock(&read_mutex);
		block_ptr = *(u32 *) cramfs_read(sb, blkptr_offset, 4);
		uncompressed = (block_ptr & CRAMFS_BLK_FLAG_UNCOMPRESSED);
		direct = (block_ptr & CRAMFS_BLK_FLAG_DIRECT_PTR);
		block_ptr &= ~CRAMFS_BLK_FLAGS;

		if (direct) {
			/*
			 * The block pointer is an absolute start pointer,
			 * shifted by 2 bits. The size is included in the
			 * first 2 bytes of the data block when compressed,
			 * or PAGE_SIZE otherwise.
			 */
			block_start = block_ptr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
			if (uncompressed) {
				block_len = PAGE_SIZE;
				/* if last block: cap to file length */
				if (folio->index == maxblock - 1)
					block_len =
						offset_in_page(inode->i_size);
			} else {
				block_len = *(u16 *)
					cramfs_read(sb, block_start, 2);
				block_start += 2;
			}
		} else {
			/*
			 * The block pointer indicates one past the end of
			 * the current block (start of next block). If this
			 * is the first block then it starts where the block
			 * pointer table ends, otherwise its start comes
			 * from the previous block's pointer.
			 */
			block_start = OFFSET(inode) + maxblock * 4;
			if (folio->index)
				block_start = *(u32 *)
					cramfs_read(sb, blkptr_offset - 4, 4);
			/* Beware... previous ptr might be a direct ptr */
			if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) {
				/*
				 * See comments on earlier code.
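				 *
				 * The previous pointer is itself a direct
				 * pointer, so the current block begins right
				 * after the block it describes: one full
				 * PAGE_SIZE further on if that block is
				 * stored uncompressed, or past its 2-byte
				 * length prefix plus the compressed data
				 * otherwise.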
				 */
				u32 prev_start = block_start;
				block_start = prev_start & ~CRAMFS_BLK_FLAGS;
				block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
				if (prev_start & CRAMFS_BLK_FLAG_UNCOMPRESSED) {
					block_start += PAGE_SIZE;
				} else {
					block_len = *(u16 *)
						cramfs_read(sb, block_start, 2);
					block_start += 2 + block_len;
				}
			}
			block_start &= ~CRAMFS_BLK_FLAGS;
			block_len = block_ptr - block_start;
		}

		if (block_len == 0)
			; /* hole */
		else if (unlikely(block_len > 2*PAGE_SIZE ||
				  (uncompressed && block_len > PAGE_SIZE))) {
			mutex_unlock(&read_mutex);
			pr_err("bad data blocksize %u\n", block_len);
			goto err;
		} else if (uncompressed) {
			memcpy(pgdata,
			       cramfs_read(sb, block_start, block_len),
			       block_len);
			bytes_filled = block_len;
		} else {
			bytes_filled = cramfs_uncompress_block(pgdata,
				 PAGE_SIZE,
				 cramfs_read(sb, block_start, block_len),
				 block_len);
		}
		mutex_unlock(&read_mutex);
		if (unlikely(bytes_filled < 0))
			goto err;
	}

	memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
	flush_dcache_folio(folio);

	success = true;
err:
	kunmap_local(pgdata);
	folio_end_read(folio, success);
	return 0;
}

static const struct address_space_operations cramfs_aops = {
	.read_folio = cramfs_read_folio
};

/*
 * Our operations:
 */

/*
 * A directory can only readdir
 */
static const struct file_operations cramfs_directory_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= cramfs_readdir,
	.setlease	= generic_setlease,
};

static const struct inode_operations cramfs_dir_inode_operations = {
	.lookup		= cramfs_lookup,
};

static const struct super_operations cramfs_ops = {
	.statfs		= cramfs_statfs,
};

static int cramfs_get_tree(struct fs_context *fc)
{
	int ret = -ENOPROTOOPT;

	if (IS_ENABLED(CONFIG_CRAMFS_MTD)) {
		ret = get_tree_mtd(fc, cramfs_mtd_fill_super);
		if (!ret)
			return 0;
	}
	if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
		ret = get_tree_bdev(fc, cramfs_blkdev_fill_super);
	return ret;
}

static const struct fs_context_operations cramfs_context_ops = {
	.get_tree	= cramfs_get_tree,
	.reconfigure	= cramfs_reconfigure,
};

/*
 * Set up the filesystem mount context.
 */
static int cramfs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &cramfs_context_ops;
	return 0;
}

static struct file_system_type cramfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "cramfs",
	.init_fs_context = cramfs_init_fs_context,
	.kill_sb	= cramfs_kill_sb,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("cramfs");

static int __init init_cramfs_fs(void)
{
	int rv;

	rv = cramfs_uncompress_init();
	if (rv < 0)
		return rv;
	rv = register_filesystem(&cramfs_fs_type);
	if (rv < 0)
		cramfs_uncompress_exit();
	return rv;
}

static void __exit exit_cramfs_fs(void)
{
	cramfs_uncompress_exit();
	unregister_filesystem(&cramfs_fs_type);
}

module_init(init_cramfs_fs)
module_exit(exit_cramfs_fs)
MODULE_DESCRIPTION("Compressed ROM file system support");
MODULE_LICENSE("GPL");