/*
 * Compressed rom filesystem for Linux.
 *
 * Copyright (C) 1999 Linus Torvalds.
 *
 * This file is released under the GPL.
 */

/*
 * These are the VFS interfaces to the compressed rom filesystem.
 * The actual compression is based on zlib, see the other files.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pfn_t.h>
#include <linux/ramfs.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/super.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/mutex.h>
#include <uapi/linux/cramfs_fs.h>
#include <linux/uaccess.h>

#include "internal.h"

/*
 * cramfs super-block data in memory
 */
struct cramfs_sb_info {
        unsigned long magic;
        unsigned long size;
        unsigned long blocks;
        unsigned long files;
        unsigned long flags;
        void *linear_virt_addr;
        resource_size_t linear_phys_addr;
        size_t mtd_point_size;
};

static inline struct cramfs_sb_info *CRAMFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

static const struct super_operations cramfs_ops;
static const struct inode_operations cramfs_dir_inode_operations;
static const struct file_operations cramfs_directory_operations;
static const struct file_operations cramfs_physmem_fops;
static const struct address_space_operations cramfs_aops;

static DEFINE_MUTEX(read_mutex);


/* These macros may change in future, to provide better st_ino semantics. */
#define OFFSET(x)       ((x)->i_ino)

static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
{
        if (!cino->offset)
                return offset + 1;
        if (!cino->size)
                return offset + 1;

        /*
         * The file mode test fixes buggy mkcramfs implementations where
         * cramfs_inode->offset is set to a non-zero value for entries
         * which did not contain data, like device nodes and fifos.
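         *
         * Added note (not part of the original comment): for entries that
         * carry data, cramino() returns the byte offset of that data in the
         * image (cino->offset << 2), which is always a multiple of four;
         * for entries without data it returns the directory-entry offset
         * plus one, whose low two bits are never both zero.
         * get_cramfs_inode() relies on this distinction when it tests
         * "!(inode->i_ino & 3)".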
         */
        switch (cino->mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
        case S_IFLNK:
                return cino->offset << 2;
        default:
                break;
        }
        return offset + 1;
}

static struct inode *get_cramfs_inode(struct super_block *sb,
        const struct cramfs_inode *cramfs_inode, unsigned int offset)
{
        struct inode *inode;
        static struct timespec zerotime;

        inode = iget_locked(sb, cramino(cramfs_inode, offset));
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        switch (cramfs_inode->mode & S_IFMT) {
        case S_IFREG:
                inode->i_fop = &generic_ro_fops;
                inode->i_data.a_ops = &cramfs_aops;
                if (IS_ENABLED(CONFIG_CRAMFS_MTD) &&
                    CRAMFS_SB(sb)->flags & CRAMFS_FLAG_EXT_BLOCK_POINTERS &&
                    CRAMFS_SB(sb)->linear_phys_addr)
                        inode->i_fop = &cramfs_physmem_fops;
                break;
        case S_IFDIR:
                inode->i_op = &cramfs_dir_inode_operations;
                inode->i_fop = &cramfs_directory_operations;
                break;
        case S_IFLNK:
                inode->i_op = &page_symlink_inode_operations;
                inode_nohighmem(inode);
                inode->i_data.a_ops = &cramfs_aops;
                break;
        default:
                init_special_inode(inode, cramfs_inode->mode,
                                   old_decode_dev(cramfs_inode->size));
        }

        inode->i_mode = cramfs_inode->mode;
        i_uid_write(inode, cramfs_inode->uid);
        i_gid_write(inode, cramfs_inode->gid);

        /* if the lower 2 bits are zero, the inode contains data */
        if (!(inode->i_ino & 3)) {
                inode->i_size = cramfs_inode->size;
                inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
        }

        /* Struct copy intentional */
        inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
        /* inode->i_nlink is left 1 - arguably wrong for directories,
           but it's the best we can do without reading the directory
           contents.  1 yields the right result in GNU find, even
           without -noleaf option. */

        unlock_new_inode(inode);

        return inode;
}

/*
 * We have our own block cache: don't fill up the buffer cache
 * with the rom-image, because the way the filesystem is set
 * up the accesses should be fairly regular and cached in the
 * page cache and dentry tree anyway..
 *
 * This also acts as a way to guarantee contiguous areas of up to
 * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
 * worry about end-of-buffer issues even when decompressing a full
 * page cache.
 *
 * Note: This is all optimized away at compile time when
 *       CONFIG_CRAMFS_BLOCKDEV=n.
 */
#define READ_BUFFERS (2)
/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
#define NEXT_BUFFER(_ix) ((_ix) ^ 1)

/*
 * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
 * data that takes up more space than the original and with unlucky
 * alignment.
 */
#define BLKS_PER_BUF_SHIFT      (2)
#define BLKS_PER_BUF            (1 << BLKS_PER_BUF_SHIFT)
#define BUFFER_SIZE             (BLKS_PER_BUF*PAGE_SIZE)

static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
static unsigned buffer_blocknr[READ_BUFFERS];
static struct super_block *buffer_dev[READ_BUFFERS];
static int next_buffer;

/*
 * Populate our block cache and return a pointer to it.
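 *
 * Added note: the returned pointer refers to one of the static
 * read_buffers[] above, so it is only safe to use while read_mutex is
 * held; a later call may recycle the same buffer.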
 */
static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
                                unsigned int len)
{
        struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
        struct page *pages[BLKS_PER_BUF];
        unsigned i, blocknr, buffer;
        unsigned long devsize;
        char *data;

        if (!len)
                return NULL;
        blocknr = offset >> PAGE_SHIFT;
        offset &= PAGE_SIZE - 1;

        /* Check if an existing buffer already has the data.. */
        for (i = 0; i < READ_BUFFERS; i++) {
                unsigned int blk_offset;

                if (buffer_dev[i] != sb)
                        continue;
                if (blocknr < buffer_blocknr[i])
                        continue;
                blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
                blk_offset += offset;
                if (blk_offset + len > BUFFER_SIZE)
                        continue;
                return read_buffers[i] + blk_offset;
        }

        devsize = mapping->host->i_size >> PAGE_SHIFT;

        /* Ok, read in BLKS_PER_BUF pages completely first. */
        for (i = 0; i < BLKS_PER_BUF; i++) {
                struct page *page = NULL;

                if (blocknr + i < devsize) {
                        page = read_mapping_page(mapping, blocknr + i, NULL);
                        /* synchronous error? */
                        if (IS_ERR(page))
                                page = NULL;
                }
                pages[i] = page;
        }

        for (i = 0; i < BLKS_PER_BUF; i++) {
                struct page *page = pages[i];

                if (page) {
                        wait_on_page_locked(page);
                        if (!PageUptodate(page)) {
                                /* asynchronous error */
                                put_page(page);
                                pages[i] = NULL;
                        }
                }
        }

        buffer = next_buffer;
        next_buffer = NEXT_BUFFER(buffer);
        buffer_blocknr[buffer] = blocknr;
        buffer_dev[buffer] = sb;

        data = read_buffers[buffer];
        for (i = 0; i < BLKS_PER_BUF; i++) {
                struct page *page = pages[i];

                if (page) {
                        memcpy(data, kmap(page), PAGE_SIZE);
                        kunmap(page);
                        put_page(page);
                } else
                        memset(data, 0, PAGE_SIZE);
                data += PAGE_SIZE;
        }
        return read_buffers[buffer] + offset;
}

/*
 * Return a pointer to the linearly addressed cramfs image in memory.
 */
static void *cramfs_direct_read(struct super_block *sb, unsigned int offset,
                                unsigned int len)
{
        struct cramfs_sb_info *sbi = CRAMFS_SB(sb);

        if (!len)
                return NULL;
        if (len > sbi->size || offset > sbi->size - len)
                return page_address(ZERO_PAGE(0));
        return sbi->linear_virt_addr + offset;
}

/*
 * Returns a pointer to a buffer containing at least LEN bytes of
 * filesystem starting at byte offset OFFSET into the filesystem.
 */
static void *cramfs_read(struct super_block *sb, unsigned int offset,
                         unsigned int len)
{
        struct cramfs_sb_info *sbi = CRAMFS_SB(sb);

        if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sbi->linear_virt_addr)
                return cramfs_direct_read(sb, offset, len);
        else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
                return cramfs_blkdev_read(sb, offset, len);
        else
                return NULL;
}

/*
 * For a mapping to be possible, we need a range of uncompressed and
 * contiguous blocks. Return the offset for the first block and number of
 * valid blocks for which that is true, or zero otherwise.
 */
static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 *pages)
{
        struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
        int i;
        u32 *blockptrs, first_block_addr;

        /*
         * We can dereference memory directly here as this code may be
         * reached only when there is a direct filesystem image mapping
         * available in memory.
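         *
         * Added, illustrative note on the extended block pointer layout:
         * each 32-bit pointer carries the block address shifted right by
         * CRAMFS_BLK_DIRECT_PTR_SHIFT (2) in its low bits plus the
         * CRAMFS_BLK_FLAG_* bits on top.  For example, an uncompressed
         * block starting at byte 0x10000 of the image is encoded as
         * (0x10000 >> 2) | CRAMFS_BLK_FLAG_DIRECT_PTR |
         * CRAMFS_BLK_FLAG_UNCOMPRESSED, which is exactly the "expect"
         * value computed below.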
         */
        blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode) + pgoff * 4);
        first_block_addr = blockptrs[0] & ~CRAMFS_BLK_FLAGS;
        i = 0;
        do {
                u32 block_off = i * (PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT);
                u32 expect = (first_block_addr + block_off) |
                             CRAMFS_BLK_FLAG_DIRECT_PTR |
                             CRAMFS_BLK_FLAG_UNCOMPRESSED;
                if (blockptrs[i] != expect) {
                        pr_debug("range: block %d/%d got %#x expects %#x\n",
                                 pgoff+i, pgoff + *pages - 1,
                                 blockptrs[i], expect);
                        if (i == 0)
                                return 0;
                        break;
                }
        } while (++i < *pages);

        *pages = i;
        return first_block_addr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
}

#ifdef CONFIG_MMU

/*
 * Return true if the last page of a file in the filesystem image contains
 * some other data that doesn't belong to that file. It is assumed that the
 * last block is CRAMFS_BLK_FLAG_DIRECT_PTR | CRAMFS_BLK_FLAG_UNCOMPRESSED
 * (verified by cramfs_get_block_range()) and directly accessible in memory.
 */
static bool cramfs_last_page_is_shared(struct inode *inode)
{
        struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
        u32 partial, last_page, blockaddr, *blockptrs;
        char *tail_data;

        partial = offset_in_page(inode->i_size);
        if (!partial)
                return false;
        last_page = inode->i_size >> PAGE_SHIFT;
        blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode));
        blockaddr = blockptrs[last_page] & ~CRAMFS_BLK_FLAGS;
        blockaddr <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
        tail_data = sbi->linear_virt_addr + blockaddr + partial;
        return memchr_inv(tail_data, 0, PAGE_SIZE - partial) ? true : false;
}

static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
        unsigned int pages, max_pages, offset;
        unsigned long address, pgoff = vma->vm_pgoff;
        char *bailout_reason;
        int ret;

        ret = generic_file_readonly_mmap(file, vma);
        if (ret)
                return ret;

        /*
         * Now try to pre-populate ptes for this vma with a direct
         * mapping avoiding memory allocation when possible.
         */

        /* Could COW work here? */
        bailout_reason = "vma is writable";
        if (vma->vm_flags & VM_WRITE)
                goto bailout;

        max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        bailout_reason = "beyond file limit";
        if (pgoff >= max_pages)
                goto bailout;
        pages = min(vma_pages(vma), max_pages - pgoff);

        offset = cramfs_get_block_range(inode, pgoff, &pages);
        bailout_reason = "unsuitable block layout";
        if (!offset)
                goto bailout;
        address = sbi->linear_phys_addr + offset;
        bailout_reason = "data is not page aligned";
        if (!PAGE_ALIGNED(address))
                goto bailout;

        /* Don't map the last page if it contains some other data */
        if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) {
                pr_debug("mmap: %s: last page is shared\n",
                         file_dentry(file)->d_name.name);
                pages--;
        }

        if (!pages) {
                bailout_reason = "no suitable block remaining";
                goto bailout;
        }

        if (pages == vma_pages(vma)) {
                /*
                 * The entire vma is mappable. remap_pfn_range() will
                 * make it distinguishable from a non-direct mapping
                 * in /proc/<pid>/maps by substituting the file offset
                 * with the actual physical address.
                 */
                ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
                                      pages * PAGE_SIZE, vma->vm_page_prot);
        } else {
                /*
                 * Let's create a mixed map if we can't map it all.
                 * The normal paging machinery will take care of the
                 * unpopulated ptes via cramfs_readpage().
                 */
                int i;
                vma->vm_flags |= VM_MIXEDMAP;
                for (i = 0; i < pages && !ret; i++) {
                        unsigned long off = i * PAGE_SIZE;
                        pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
                        ret = vm_insert_mixed(vma, vma->vm_start + off, pfn);
                }
        }

        if (!ret)
                pr_debug("mapped %s[%lu] at 0x%08lx (%u/%lu pages) "
                         "to vma 0x%08lx, page_prot 0x%llx\n",
                         file_dentry(file)->d_name.name, pgoff,
                         address, pages, vma_pages(vma), vma->vm_start,
                         (unsigned long long)pgprot_val(vma->vm_page_prot));
        return ret;

bailout:
        pr_debug("%s[%lu]: direct mmap impossible: %s\n",
                 file_dentry(file)->d_name.name, pgoff, bailout_reason);
        /* Didn't manage any direct map, but normal paging is still possible */
        return 0;
}

#else /* CONFIG_MMU */

static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS;
}

static unsigned long cramfs_physmem_get_unmapped_area(struct file *file,
                        unsigned long addr, unsigned long len,
                        unsigned long pgoff, unsigned long flags)
{
        struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
        unsigned int pages, block_pages, max_pages, offset;

        pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (pgoff >= max_pages || pages > max_pages - pgoff)
                return -EINVAL;
        block_pages = pages;
        offset = cramfs_get_block_range(inode, pgoff, &block_pages);
        if (!offset || block_pages != pages)
                return -ENOSYS;
        addr = sbi->linear_phys_addr + offset;
        pr_debug("get_unmapped for %s ofs %#lx siz %lu at 0x%08lx\n",
                 file_dentry(file)->d_name.name, pgoff*PAGE_SIZE, len, addr);
        return addr;
}

static unsigned int cramfs_physmem_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT |
               NOMMU_MAP_READ | NOMMU_MAP_EXEC;
}

#endif /* CONFIG_MMU */

static const struct file_operations cramfs_physmem_fops = {
        .llseek                 = generic_file_llseek,
        .read_iter              = generic_file_read_iter,
        .splice_read            = generic_file_splice_read,
        .mmap                   = cramfs_physmem_mmap,
#ifndef CONFIG_MMU
        .get_unmapped_area      = cramfs_physmem_get_unmapped_area,
        .mmap_capabilities      = cramfs_physmem_mmap_capabilities,
#endif
};

static void cramfs_kill_sb(struct super_block *sb)
{
        struct cramfs_sb_info *sbi = CRAMFS_SB(sb);

        if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sb->s_mtd) {
                if (sbi && sbi->mtd_point_size)
                        mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size);
                kill_mtd_super(sb);
        } else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
                kill_block_super(sb);
        }
        kfree(sbi);
}

static int cramfs_remount(struct super_block *sb, int *flags, char *data)
{
        sync_filesystem(sb);
        *flags |= SB_RDONLY;
        return 0;
}

static int cramfs_read_super(struct super_block *sb,
                             struct cramfs_super *super, int silent)
{
        struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
        unsigned long root_offset;

        /* We don't know the real size yet
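         *
         * Added note: sbi->size is set to a single page below only so that
         * the bounds check in cramfs_direct_read() lets the superblock be
         * read; the real image size is filled in once the superblock has
         * been validated.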
         */
        sbi->size = PAGE_SIZE;

        /* Read the first block and get the superblock from it */
        mutex_lock(&read_mutex);
        memcpy(super, cramfs_read(sb, 0, sizeof(*super)), sizeof(*super));
        mutex_unlock(&read_mutex);

        /* Do sanity checks on the superblock */
        if (super->magic != CRAMFS_MAGIC) {
                /* check for wrong endianness */
                if (super->magic == CRAMFS_MAGIC_WEND) {
                        if (!silent)
                                pr_err("wrong endianness\n");
                        return -EINVAL;
                }

                /* check at 512 byte offset */
                mutex_lock(&read_mutex);
                memcpy(super,
                       cramfs_read(sb, 512, sizeof(*super)),
                       sizeof(*super));
                mutex_unlock(&read_mutex);
                if (super->magic != CRAMFS_MAGIC) {
                        if (super->magic == CRAMFS_MAGIC_WEND && !silent)
                                pr_err("wrong endianness\n");
                        else if (!silent)
                                pr_err("wrong magic\n");
                        return -EINVAL;
                }
        }

        /* get feature flags first */
        if (super->flags & ~CRAMFS_SUPPORTED_FLAGS) {
                pr_err("unsupported filesystem features\n");
                return -EINVAL;
        }

        /* Check that the root inode is in a sane state */
        if (!S_ISDIR(super->root.mode)) {
                pr_err("root is not a directory\n");
                return -EINVAL;
        }
        /* correct strange, hard-coded permissions of mkcramfs */
        super->root.mode |= 0555;

        root_offset = super->root.offset << 2;
        if (super->flags & CRAMFS_FLAG_FSID_VERSION_2) {
                sbi->size = super->size;
                sbi->blocks = super->fsid.blocks;
                sbi->files = super->fsid.files;
        } else {
                sbi->size = 1<<28;
                sbi->blocks = 0;
                sbi->files = 0;
        }
        sbi->magic = super->magic;
        sbi->flags = super->flags;
        if (root_offset == 0)
                pr_info("empty filesystem");
        else if (!(super->flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
                 ((root_offset != sizeof(struct cramfs_super)) &&
                  (root_offset != 512 + sizeof(struct cramfs_super))))
        {
                pr_err("bad root offset %lu\n", root_offset);
                return -EINVAL;
        }

        return 0;
}

static int cramfs_finalize_super(struct super_block *sb,
                                 struct cramfs_inode *cramfs_root)
{
        struct inode *root;

        /* Set it all up.. */
        sb->s_flags |= SB_RDONLY;
        sb->s_op = &cramfs_ops;
        root = get_cramfs_inode(sb, cramfs_root, 0);
        if (IS_ERR(root))
                return PTR_ERR(root);
        sb->s_root = d_make_root(root);
        if (!sb->s_root)
                return -ENOMEM;
        return 0;
}

static int cramfs_blkdev_fill_super(struct super_block *sb, void *data,
                                    int silent)
{
        struct cramfs_sb_info *sbi;
        struct cramfs_super super;
        int i, err;

        sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;
        sb->s_fs_info = sbi;

        /* Invalidate the read buffers on mount: think disk change.. */
        for (i = 0; i < READ_BUFFERS; i++)
                buffer_blocknr[i] = -1;

        err = cramfs_read_super(sb, &super, silent);
        if (err)
                return err;
        return cramfs_finalize_super(sb, &super.root);
}

static int cramfs_mtd_fill_super(struct super_block *sb, void *data,
                                 int silent)
{
        struct cramfs_sb_info *sbi;
        struct cramfs_super super;
        int err;

        sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;
        sb->s_fs_info = sbi;

        /* Map only one page for now. Will remap it when fs size is known.
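         *
         * Added note: mtd_point() requests a direct, linearly addressable
         * view of the flash from the MTD driver.  Not every driver can
         * provide one, in which case the error path below gives up on the
         * MTD mount rather than attempting buffered access.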
         */
        err = mtd_point(sb->s_mtd, 0, PAGE_SIZE, &sbi->mtd_point_size,
                        &sbi->linear_virt_addr, &sbi->linear_phys_addr);
        if (err || sbi->mtd_point_size != PAGE_SIZE) {
                pr_err("unable to get direct memory access to mtd:%s\n",
                       sb->s_mtd->name);
                return err ? : -ENODATA;
        }

        pr_info("checking physical address %pap for linear cramfs image\n",
                &sbi->linear_phys_addr);
        err = cramfs_read_super(sb, &super, silent);
        if (err)
                return err;

        /* Remap the whole filesystem now */
        pr_info("linear cramfs image on mtd:%s appears to be %lu KB in size\n",
                sb->s_mtd->name, sbi->size/1024);
        mtd_unpoint(sb->s_mtd, 0, PAGE_SIZE);
        err = mtd_point(sb->s_mtd, 0, sbi->size, &sbi->mtd_point_size,
                        &sbi->linear_virt_addr, &sbi->linear_phys_addr);
        if (err || sbi->mtd_point_size != sbi->size) {
                pr_err("unable to get direct memory access to mtd:%s\n",
                       sb->s_mtd->name);
                return err ? : -ENODATA;
        }

        return cramfs_finalize_super(sb, &super.root);
}

static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        u64 id = 0;

        if (sb->s_bdev)
                id = huge_encode_dev(sb->s_bdev->bd_dev);
        else if (sb->s_dev)
                id = huge_encode_dev(sb->s_dev);

        buf->f_type = CRAMFS_MAGIC;
        buf->f_bsize = PAGE_SIZE;
        buf->f_blocks = CRAMFS_SB(sb)->blocks;
        buf->f_bfree = 0;
        buf->f_bavail = 0;
        buf->f_files = CRAMFS_SB(sb)->files;
        buf->f_ffree = 0;
        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);
        buf->f_namelen = CRAMFS_MAXPATHLEN;
        return 0;
}

/*
 * Read a cramfs directory entry.
 */
static int cramfs_readdir(struct file *file, struct dir_context *ctx)
{
        struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        char *buf;
        unsigned int offset;

        /* Offset within the thing. */
        if (ctx->pos >= inode->i_size)
                return 0;
        offset = ctx->pos;
        /* Directory entries are always 4-byte aligned */
        if (offset & 3)
                return -EINVAL;

        buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        while (offset < inode->i_size) {
                struct cramfs_inode *de;
                unsigned long nextoffset;
                char *name;
                ino_t ino;
                umode_t mode;
                int namelen;

                mutex_lock(&read_mutex);
                de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
                name = (char *)(de+1);

                /*
                 * Namelengths on disk are shifted by two
                 * and the name padded out to 4-byte boundaries
                 * with zeroes.
                 */
                namelen = de->namelen << 2;
                memcpy(buf, name, namelen);
                ino = cramino(de, OFFSET(inode) + offset);
                mode = de->mode;
                mutex_unlock(&read_mutex);
                nextoffset = offset + sizeof(*de) + namelen;
                for (;;) {
                        if (!namelen) {
                                kfree(buf);
                                return -EIO;
                        }
                        if (buf[namelen-1])
                                break;
                        namelen--;
                }
                if (!dir_emit(ctx, buf, namelen, ino, mode >> 12))
                        break;

                ctx->pos = offset = nextoffset;
        }
        kfree(buf);
        return 0;
}

/*
 * Lookup and fill in the inode data..
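 *
 * Added note: a cramfs directory is a packed sequence of entries, each a
 * struct cramfs_inode immediately followed by the name, zero-padded to a
 * 4-byte boundary, with namelen stored as the padded length divided by
 * four.  When CRAMFS_FLAG_SORTED_DIRS is set the entries are sorted by
 * name, which lets the scan below stop early.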
 */
static struct dentry *cramfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
        unsigned int offset = 0;
        struct inode *inode = NULL;
        int sorted;

        mutex_lock(&read_mutex);
        sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
        while (offset < dir->i_size) {
                struct cramfs_inode *de;
                char *name;
                int namelen, retval;
                int dir_off = OFFSET(dir) + offset;

                de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
                name = (char *)(de+1);

                /* Try to take advantage of sorted directories */
                if (sorted && (dentry->d_name.name[0] < name[0]))
                        break;

                namelen = de->namelen << 2;
                offset += sizeof(*de) + namelen;

                /* Quick check that the name is roughly the right length */
                if (((dentry->d_name.len + 3) & ~3) != namelen)
                        continue;

                for (;;) {
                        if (!namelen) {
                                inode = ERR_PTR(-EIO);
                                goto out;
                        }
                        if (name[namelen-1])
                                break;
                        namelen--;
                }
                if (namelen != dentry->d_name.len)
                        continue;
                retval = memcmp(dentry->d_name.name, name, namelen);
                if (retval > 0)
                        continue;
                if (!retval) {
                        inode = get_cramfs_inode(dir->i_sb, de, dir_off);
                        break;
                }
                /* else (retval < 0) */
                if (sorted)
                        break;
        }
out:
        mutex_unlock(&read_mutex);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
        d_add(dentry, inode);
        return NULL;
}

static int cramfs_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        u32 maxblock;
        int bytes_filled;
        void *pgdata;

        maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        bytes_filled = 0;
        pgdata = kmap(page);

        if (page->index < maxblock) {
                struct super_block *sb = inode->i_sb;
                u32 blkptr_offset = OFFSET(inode) + page->index * 4;
                u32 block_ptr, block_start, block_len;
                bool uncompressed, direct;

                mutex_lock(&read_mutex);
                block_ptr = *(u32 *) cramfs_read(sb, blkptr_offset, 4);
                uncompressed = (block_ptr & CRAMFS_BLK_FLAG_UNCOMPRESSED);
                direct = (block_ptr & CRAMFS_BLK_FLAG_DIRECT_PTR);
                block_ptr &= ~CRAMFS_BLK_FLAGS;

                if (direct) {
                        /*
                         * The block pointer is an absolute start pointer,
                         * shifted by 2 bits. The size is included in the
                         * first 2 bytes of the data block when compressed,
                         * or PAGE_SIZE otherwise.
                         */
                        block_start = block_ptr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
                        if (uncompressed) {
                                block_len = PAGE_SIZE;
                                /* if last block: cap to file length */
                                if (page->index == maxblock - 1)
                                        block_len =
                                                offset_in_page(inode->i_size);
                        } else {
                                block_len = *(u16 *)
                                        cramfs_read(sb, block_start, 2);
                                block_start += 2;
                        }
                } else {
                        /*
                         * The block pointer indicates one past the end of
                         * the current block (start of next block). If this
                         * is the first block then it starts where the block
                         * pointer table ends, otherwise its start comes
                         * from the previous block's pointer.
                         */
                        block_start = OFFSET(inode) + maxblock * 4;
                        if (page->index)
                                block_start = *(u32 *)
                                        cramfs_read(sb, blkptr_offset - 4, 4);
                        /* Beware... previous ptr might be a direct ptr */
                        if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) {
                                /* See comments on earlier code.
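                                 *
                                 * Added note: a direct pointer holds that
                                 * block's start address (shifted), not its
                                 * end, so the end of the previous block is
                                 * recomputed here: a full PAGE_SIZE for an
                                 * uncompressed block, or the 2-byte length
                                 * prefix plus the compressed payload
                                 * otherwise.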
                                 */
                                u32 prev_start = block_start;
                                block_start = prev_start & ~CRAMFS_BLK_FLAGS;
                                block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
                                if (prev_start & CRAMFS_BLK_FLAG_UNCOMPRESSED) {
                                        block_start += PAGE_SIZE;
                                } else {
                                        block_len = *(u16 *)
                                                cramfs_read(sb, block_start, 2);
                                        block_start += 2 + block_len;
                                }
                        }
                        block_start &= ~CRAMFS_BLK_FLAGS;
                        block_len = block_ptr - block_start;
                }

                if (block_len == 0)
                        ; /* hole */
                else if (unlikely(block_len > 2*PAGE_SIZE ||
                                  (uncompressed && block_len > PAGE_SIZE))) {
                        mutex_unlock(&read_mutex);
                        pr_err("bad data blocksize %u\n", block_len);
                        goto err;
                } else if (uncompressed) {
                        memcpy(pgdata,
                               cramfs_read(sb, block_start, block_len),
                               block_len);
                        bytes_filled = block_len;
                } else {
                        bytes_filled = cramfs_uncompress_block(pgdata,
                                 PAGE_SIZE,
                                 cramfs_read(sb, block_start, block_len),
                                 block_len);
                }
                mutex_unlock(&read_mutex);
                if (unlikely(bytes_filled < 0))
                        goto err;
        }

        memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
        flush_dcache_page(page);
        kunmap(page);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;

err:
        kunmap(page);
        ClearPageUptodate(page);
        SetPageError(page);
        unlock_page(page);
        return 0;
}

static const struct address_space_operations cramfs_aops = {
        .readpage = cramfs_readpage
};

/*
 * Our operations:
 */

/*
 * A directory can only readdir
 */
static const struct file_operations cramfs_directory_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .iterate_shared = cramfs_readdir,
};

static const struct inode_operations cramfs_dir_inode_operations = {
        .lookup         = cramfs_lookup,
};

static const struct super_operations cramfs_ops = {
        .remount_fs     = cramfs_remount,
        .statfs         = cramfs_statfs,
};

static struct dentry *cramfs_mount(struct file_system_type *fs_type, int flags,
                                   const char *dev_name, void *data)
{
        struct dentry *ret = ERR_PTR(-ENOPROTOOPT);

        if (IS_ENABLED(CONFIG_CRAMFS_MTD)) {
                ret = mount_mtd(fs_type, flags, dev_name, data,
                                cramfs_mtd_fill_super);
                if (!IS_ERR(ret))
                        return ret;
        }
        if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV)) {
                ret = mount_bdev(fs_type, flags, dev_name, data,
                                 cramfs_blkdev_fill_super);
        }
        return ret;
}

static struct file_system_type cramfs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "cramfs",
        .mount          = cramfs_mount,
        .kill_sb        = cramfs_kill_sb,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("cramfs");

static int __init init_cramfs_fs(void)
{
        int rv;

        rv = cramfs_uncompress_init();
        if (rv < 0)
                return rv;
        rv = register_filesystem(&cramfs_fs_type);
        if (rv < 0)
                cramfs_uncompress_exit();
        return rv;
}

static void __exit exit_cramfs_fs(void)
{
        cramfs_uncompress_exit();
        unregister_filesystem(&cramfs_fs_type);
}

module_init(init_cramfs_fs)
module_exit(exit_cramfs_fs)
MODULE_LICENSE("GPL");