1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (C) International Business Machines Corp., 2000-2004 4 * Portions Copyright (C) Christoph Hellwig, 2001-2002 5 */ 6 7 #include <linux/fs.h> 8 #include <linux/module.h> 9 #include <linux/completion.h> 10 #include <linux/vfs.h> 11 #include <linux/quotaops.h> 12 #include <linux/fs_context.h> 13 #include <linux/fs_parser.h> 14 #include <linux/moduleparam.h> 15 #include <linux/kthread.h> 16 #include <linux/posix_acl.h> 17 #include <linux/buffer_head.h> 18 #include <linux/exportfs.h> 19 #include <linux/crc32.h> 20 #include <linux/slab.h> 21 #include <linux/uaccess.h> 22 #include <linux/seq_file.h> 23 #include <linux/blkdev.h> 24 25 #include "jfs_incore.h" 26 #include "jfs_filsys.h" 27 #include "jfs_inode.h" 28 #include "jfs_metapage.h" 29 #include "jfs_superblock.h" 30 #include "jfs_dmap.h" 31 #include "jfs_imap.h" 32 #include "jfs_acl.h" 33 #include "jfs_debug.h" 34 #include "jfs_xattr.h" 35 #include "jfs_dinode.h" 36 37 MODULE_DESCRIPTION("The Journaled Filesystem (JFS)"); 38 MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM"); 39 MODULE_LICENSE("GPL"); 40 41 static struct kmem_cache *jfs_inode_cachep; 42 43 static const struct super_operations jfs_super_operations; 44 static const struct export_operations jfs_export_operations; 45 static struct file_system_type jfs_fs_type; 46 47 #define MAX_COMMIT_THREADS 64 48 static int commit_threads; 49 module_param(commit_threads, int, 0); 50 MODULE_PARM_DESC(commit_threads, "Number of commit threads"); 51 52 static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS]; 53 struct task_struct *jfsIOthread; 54 struct task_struct *jfsSyncThread; 55 56 #ifdef CONFIG_JFS_DEBUG 57 int jfsloglevel = JFS_LOGLEVEL_WARN; 58 module_param(jfsloglevel, int, 0644); 59 MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)"); 60 #endif 61 62 static void jfs_handle_error(struct super_block *sb) 63 { 64 struct jfs_sb_info *sbi = JFS_SBI(sb); 65 66 if (sb_rdonly(sb)) 67 return; 68 69 updateSuper(sb, FM_DIRTY); 70 71 if (sbi->flag & JFS_ERR_PANIC) 72 panic("JFS (device %s): panic forced after error\n", 73 sb->s_id); 74 else if (sbi->flag & JFS_ERR_REMOUNT_RO) { 75 jfs_err("ERROR: (device %s): remounting filesystem as read-only", 76 sb->s_id); 77 sb->s_flags |= SB_RDONLY; 78 } 79 80 /* nothing is done for continue beyond marking the superblock dirty */ 81 } 82 83 void jfs_error(struct super_block *sb, const char *fmt, ...) 84 { 85 struct va_format vaf; 86 va_list args; 87 88 va_start(args, fmt); 89 90 vaf.fmt = fmt; 91 vaf.va = &args; 92 93 pr_err("ERROR: (device %s): %ps: %pV\n", 94 sb->s_id, __builtin_return_address(0), &vaf); 95 96 va_end(args); 97 98 jfs_handle_error(sb); 99 } 100 101 static struct inode *jfs_alloc_inode(struct super_block *sb) 102 { 103 struct jfs_inode_info *jfs_inode; 104 105 jfs_inode = alloc_inode_sb(sb, jfs_inode_cachep, GFP_NOFS); 106 if (!jfs_inode) 107 return NULL; 108 #ifdef CONFIG_QUOTA 109 memset(&jfs_inode->i_dquot, 0, sizeof(jfs_inode->i_dquot)); 110 #endif 111 return &jfs_inode->vfs_inode; 112 } 113 114 static void jfs_free_inode(struct inode *inode) 115 { 116 kmem_cache_free(jfs_inode_cachep, JFS_IP(inode)); 117 } 118 119 static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf) 120 { 121 struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb); 122 s64 maxinodes; 123 struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap; 124 125 jfs_info("In jfs_statfs"); 126 buf->f_type = JFS_SUPER_MAGIC; 127 buf->f_bsize = sbi->bsize; 128 buf->f_blocks = sbi->bmap->db_mapsize; 129 buf->f_bfree = sbi->bmap->db_nfree; 130 buf->f_bavail = sbi->bmap->db_nfree; 131 /* 132 * If we really return the number of allocated & free inodes, some 133 * applications will fail because they won't see enough free inodes. 134 * We'll try to calculate some guess as to how many inodes we can 135 * really allocate 136 * 137 * buf->f_files = atomic_read(&imap->im_numinos); 138 * buf->f_ffree = atomic_read(&imap->im_numfree); 139 */ 140 maxinodes = min((s64) atomic_read(&imap->im_numinos) + 141 ((sbi->bmap->db_nfree >> imap->im_l2nbperiext) 142 << L2INOSPEREXT), (s64) 0xffffffffLL); 143 buf->f_files = maxinodes; 144 buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) - 145 atomic_read(&imap->im_numfree)); 146 buf->f_fsid.val[0] = crc32_le(0, (char *)&sbi->uuid, 147 sizeof(sbi->uuid)/2); 148 buf->f_fsid.val[1] = crc32_le(0, 149 (char *)&sbi->uuid + sizeof(sbi->uuid)/2, 150 sizeof(sbi->uuid)/2); 151 152 buf->f_namelen = JFS_NAME_MAX; 153 return 0; 154 } 155 156 #ifdef CONFIG_QUOTA 157 static int jfs_quota_off(struct super_block *sb, int type); 158 static int jfs_quota_on(struct super_block *sb, int type, int format_id, 159 const struct path *path); 160 161 static void jfs_quota_off_umount(struct super_block *sb) 162 { 163 int type; 164 165 for (type = 0; type < MAXQUOTAS; type++) 166 jfs_quota_off(sb, type); 167 } 168 169 static const struct quotactl_ops jfs_quotactl_ops = { 170 .quota_on = jfs_quota_on, 171 .quota_off = jfs_quota_off, 172 .quota_sync = dquot_quota_sync, 173 .get_state = dquot_get_state, 174 .set_info = dquot_set_dqinfo, 175 .get_dqblk = dquot_get_dqblk, 176 .set_dqblk = dquot_set_dqblk, 177 .get_nextdqblk = dquot_get_next_dqblk, 178 }; 179 #else 180 static inline void jfs_quota_off_umount(struct super_block *sb) 181 { 182 } 183 #endif 184 185 static void jfs_put_super(struct super_block *sb) 186 { 187 struct jfs_sb_info *sbi = JFS_SBI(sb); 188 int rc; 189 190 jfs_info("In jfs_put_super"); 191 192 jfs_quota_off_umount(sb); 193 194 rc = jfs_umount(sb); 195 if (rc) 196 jfs_err("jfs_umount failed with return code %d", rc); 197 198 unload_nls(sbi->nls_tab); 199 200 truncate_inode_pages(sbi->direct_inode->i_mapping, 0); 201 iput(sbi->direct_inode); 202 203 kfree(sbi); 204 } 205 206 enum { 207 Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize, 208 Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota, 209 Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask, 210 Opt_discard, Opt_nodiscard, Opt_discard_minblk 211 }; 212 213 static const struct constant_table jfs_param_errors[] = { 214 {"continue", JFS_ERR_CONTINUE}, 215 {"remount-ro", JFS_ERR_REMOUNT_RO}, 216 {"panic", JFS_ERR_PANIC}, 217 {} 218 }; 219 220 static const struct fs_parameter_spec jfs_param_spec[] = { 221 fsparam_flag_no ("integrity", Opt_integrity), 222 fsparam_string ("iocharset", Opt_iocharset), 223 fsparam_u64 ("resize", Opt_resize), 224 fsparam_flag ("resize", Opt_resize_nosize), 225 fsparam_enum ("errors", Opt_errors, jfs_param_errors), 226 fsparam_flag ("quota", Opt_quota), 227 fsparam_flag ("noquota", Opt_ignore), 228 fsparam_flag ("usrquota", Opt_usrquota), 229 fsparam_flag ("grpquota", Opt_grpquota), 230 fsparam_uid ("uid", Opt_uid), 231 fsparam_gid ("gid", Opt_gid), 232 fsparam_u32oct ("umask", Opt_umask), 233 fsparam_flag ("discard", Opt_discard), 234 fsparam_u32 ("discard", Opt_discard_minblk), 235 fsparam_flag ("nodiscard", Opt_nodiscard), 236 {} 237 }; 238 239 struct jfs_context { 240 int flag; 241 kuid_t uid; 242 kgid_t gid; 243 uint umask; 244 uint minblks_trim; 245 void *nls_map; 246 bool resize; 247 s64 newLVSize; 248 }; 249 250 static int jfs_parse_param(struct fs_context *fc, struct fs_parameter *param) 251 { 252 struct jfs_context *ctx = fc->fs_private; 253 int reconfigure = (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE); 254 struct fs_parse_result result; 255 struct nls_table *nls_map; 256 int opt; 257 258 opt = fs_parse(fc, jfs_param_spec, param, &result); 259 if (opt < 0) 260 return opt; 261 262 switch (opt) { 263 case Opt_integrity: 264 if (result.negated) 265 ctx->flag |= JFS_NOINTEGRITY; 266 else 267 ctx->flag &= ~JFS_NOINTEGRITY; 268 break; 269 case Opt_ignore: 270 /* Silently ignore the quota options */ 271 /* Don't do anything ;-) */ 272 break; 273 case Opt_iocharset: 274 if (ctx->nls_map && ctx->nls_map != (void *) -1) { 275 unload_nls(ctx->nls_map); 276 ctx->nls_map = NULL; 277 } 278 if (!strcmp(param->string, "none")) 279 ctx->nls_map = NULL; 280 else { 281 nls_map = load_nls(param->string); 282 if (!nls_map) { 283 pr_err("JFS: charset not found\n"); 284 return -EINVAL; 285 } 286 ctx->nls_map = nls_map; 287 } 288 break; 289 case Opt_resize: 290 if (!reconfigure) 291 return -EINVAL; 292 ctx->resize = true; 293 ctx->newLVSize = result.uint_64; 294 break; 295 case Opt_resize_nosize: 296 if (!reconfigure) 297 return -EINVAL; 298 ctx->resize = true; 299 break; 300 case Opt_errors: 301 ctx->flag &= ~JFS_ERR_MASK; 302 ctx->flag |= result.uint_32; 303 break; 304 305 #ifdef CONFIG_QUOTA 306 case Opt_quota: 307 case Opt_usrquota: 308 ctx->flag |= JFS_USRQUOTA; 309 break; 310 case Opt_grpquota: 311 ctx->flag |= JFS_GRPQUOTA; 312 break; 313 #else 314 case Opt_usrquota: 315 case Opt_grpquota: 316 case Opt_quota: 317 pr_err("JFS: quota operations not supported\n"); 318 break; 319 #endif 320 case Opt_uid: 321 ctx->uid = result.uid; 322 break; 323 324 case Opt_gid: 325 ctx->gid = result.gid; 326 break; 327 328 case Opt_umask: 329 if (result.uint_32 & ~0777) { 330 pr_err("JFS: Invalid value of umask\n"); 331 return -EINVAL; 332 } 333 ctx->umask = result.uint_32; 334 break; 335 336 case Opt_discard: 337 /* if set to 1, even copying files will cause 338 * trimming :O 339 * -> user has more control over the online trimming 340 */ 341 ctx->minblks_trim = 64; 342 ctx->flag |= JFS_DISCARD; 343 break; 344 345 case Opt_nodiscard: 346 ctx->flag &= ~JFS_DISCARD; 347 break; 348 349 case Opt_discard_minblk: 350 ctx->minblks_trim = result.uint_32; 351 ctx->flag |= JFS_DISCARD; 352 break; 353 354 default: 355 return -EINVAL; 356 } 357 358 return 0; 359 } 360 361 static int jfs_reconfigure(struct fs_context *fc) 362 { 363 struct jfs_context *ctx = fc->fs_private; 364 struct super_block *sb = fc->root->d_sb; 365 int readonly = fc->sb_flags & SB_RDONLY; 366 int rc = 0; 367 int flag = ctx->flag; 368 int ret; 369 370 sync_filesystem(sb); 371 372 /* Transfer results of parsing to the sbi */ 373 JFS_SBI(sb)->flag = ctx->flag; 374 JFS_SBI(sb)->uid = ctx->uid; 375 JFS_SBI(sb)->gid = ctx->gid; 376 JFS_SBI(sb)->umask = ctx->umask; 377 JFS_SBI(sb)->minblks_trim = ctx->minblks_trim; 378 if (ctx->nls_map != (void *) -1) { 379 unload_nls(JFS_SBI(sb)->nls_tab); 380 JFS_SBI(sb)->nls_tab = ctx->nls_map; 381 } 382 ctx->nls_map = NULL; 383 384 if (ctx->resize) { 385 if (sb_rdonly(sb)) { 386 pr_err("JFS: resize requires volume to be mounted read-write\n"); 387 return -EROFS; 388 } 389 390 if (!ctx->newLVSize) { 391 ctx->newLVSize = sb_bdev_nr_blocks(sb); 392 if (ctx->newLVSize == 0) 393 pr_err("JFS: Cannot determine volume size\n"); 394 } 395 396 rc = jfs_extendfs(sb, ctx->newLVSize, 0); 397 if (rc) 398 return rc; 399 } 400 401 if (sb_rdonly(sb) && !readonly) { 402 /* 403 * Invalidate any previously read metadata. fsck may have 404 * changed the on-disk data since we mounted r/o 405 */ 406 truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0); 407 408 JFS_SBI(sb)->flag = flag; 409 ret = jfs_mount_rw(sb, 1); 410 411 /* mark the fs r/w for quota activity */ 412 sb->s_flags &= ~SB_RDONLY; 413 414 dquot_resume(sb, -1); 415 return ret; 416 } 417 if (!sb_rdonly(sb) && readonly) { 418 rc = dquot_suspend(sb, -1); 419 if (rc < 0) 420 return rc; 421 rc = jfs_umount_rw(sb); 422 JFS_SBI(sb)->flag = flag; 423 return rc; 424 } 425 if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY)) { 426 if (!sb_rdonly(sb)) { 427 rc = jfs_umount_rw(sb); 428 if (rc) 429 return rc; 430 431 JFS_SBI(sb)->flag = flag; 432 ret = jfs_mount_rw(sb, 1); 433 return ret; 434 } 435 } 436 JFS_SBI(sb)->flag = flag; 437 438 return 0; 439 } 440 441 static int jfs_fill_super(struct super_block *sb, struct fs_context *fc) 442 { 443 struct jfs_context *ctx = fc->fs_private; 444 int silent = fc->sb_flags & SB_SILENT; 445 struct jfs_sb_info *sbi; 446 struct inode *inode; 447 int rc; 448 int ret = -EINVAL; 449 450 jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags); 451 452 sbi = kzalloc(sizeof(struct jfs_sb_info), GFP_KERNEL); 453 if (!sbi) 454 return -ENOMEM; 455 456 sb->s_fs_info = sbi; 457 sb->s_max_links = JFS_LINK_MAX; 458 sb->s_time_min = 0; 459 sb->s_time_max = U32_MAX; 460 sbi->sb = sb; 461 462 /* Transfer results of parsing to the sbi */ 463 sbi->flag = ctx->flag; 464 sbi->uid = ctx->uid; 465 sbi->gid = ctx->gid; 466 sbi->umask = ctx->umask; 467 if (ctx->nls_map != (void *) -1) { 468 unload_nls(sbi->nls_tab); 469 sbi->nls_tab = ctx->nls_map; 470 } 471 ctx->nls_map = NULL; 472 473 if (sbi->flag & JFS_DISCARD) { 474 if (!bdev_max_discard_sectors(sb->s_bdev)) { 475 pr_err("JFS: discard option not supported on device\n"); 476 sbi->flag &= ~JFS_DISCARD; 477 } else { 478 sbi->minblks_trim = ctx->minblks_trim; 479 } 480 } 481 482 #ifdef CONFIG_JFS_POSIX_ACL 483 sb->s_flags |= SB_POSIXACL; 484 #endif 485 486 if (ctx->resize) { 487 pr_err("resize option for remount only\n"); 488 goto out_unload; 489 } 490 491 /* 492 * Initialize blocksize to 4K. 493 */ 494 sb_set_blocksize(sb, PSIZE); 495 496 /* 497 * Set method vectors. 498 */ 499 sb->s_op = &jfs_super_operations; 500 sb->s_export_op = &jfs_export_operations; 501 sb->s_xattr = jfs_xattr_handlers; 502 #ifdef CONFIG_QUOTA 503 sb->dq_op = &dquot_operations; 504 sb->s_qcop = &jfs_quotactl_ops; 505 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP; 506 #endif 507 508 /* 509 * Initialize direct-mapping inode/address-space 510 */ 511 inode = new_inode(sb); 512 if (inode == NULL) { 513 ret = -ENOMEM; 514 goto out_unload; 515 } 516 inode->i_size = bdev_nr_bytes(sb->s_bdev); 517 inode->i_mapping->a_ops = &jfs_metapage_aops; 518 inode_fake_hash(inode); 519 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); 520 521 sbi->direct_inode = inode; 522 523 rc = jfs_mount(sb); 524 if (rc) { 525 if (!silent) 526 jfs_err("jfs_mount failed w/return code = %d", rc); 527 goto out_mount_failed; 528 } 529 if (sb_rdonly(sb)) 530 sbi->log = NULL; 531 else { 532 rc = jfs_mount_rw(sb, 0); 533 if (rc) { 534 if (!silent) { 535 jfs_err("jfs_mount_rw failed, return code = %d", 536 rc); 537 } 538 goto out_no_rw; 539 } 540 } 541 542 sb->s_magic = JFS_SUPER_MAGIC; 543 544 if (sbi->mntflag & JFS_OS2) 545 sb->s_d_op = &jfs_ci_dentry_operations; 546 547 inode = jfs_iget(sb, ROOT_I); 548 if (IS_ERR(inode)) { 549 ret = PTR_ERR(inode); 550 goto out_no_rw; 551 } 552 sb->s_root = d_make_root(inode); 553 if (!sb->s_root) 554 goto out_no_root; 555 556 /* logical blocks are represented by 40 bits in pxd_t, etc. 557 * and page cache is indexed by long 558 */ 559 sb->s_maxbytes = min(((loff_t)sb->s_blocksize) << 40, MAX_LFS_FILESIZE); 560 sb->s_time_gran = 1; 561 return 0; 562 563 out_no_root: 564 jfs_err("jfs_read_super: get root dentry failed"); 565 566 out_no_rw: 567 rc = jfs_umount(sb); 568 if (rc) 569 jfs_err("jfs_umount failed with return code %d", rc); 570 out_mount_failed: 571 filemap_write_and_wait(sbi->direct_inode->i_mapping); 572 truncate_inode_pages(sbi->direct_inode->i_mapping, 0); 573 make_bad_inode(sbi->direct_inode); 574 iput(sbi->direct_inode); 575 sbi->direct_inode = NULL; 576 out_unload: 577 unload_nls(sbi->nls_tab); 578 kfree(sbi); 579 return ret; 580 } 581 582 static int jfs_freeze(struct super_block *sb) 583 { 584 struct jfs_sb_info *sbi = JFS_SBI(sb); 585 struct jfs_log *log = sbi->log; 586 int rc = 0; 587 588 if (!sb_rdonly(sb)) { 589 txQuiesce(sb); 590 rc = lmLogShutdown(log); 591 if (rc) { 592 jfs_error(sb, "lmLogShutdown failed\n"); 593 594 /* let operations fail rather than hang */ 595 txResume(sb); 596 597 return rc; 598 } 599 rc = updateSuper(sb, FM_CLEAN); 600 if (rc) { 601 jfs_err("jfs_freeze: updateSuper failed"); 602 /* 603 * Don't fail here. Everything succeeded except 604 * marking the superblock clean, so there's really 605 * no harm in leaving it frozen for now. 606 */ 607 } 608 } 609 return 0; 610 } 611 612 static int jfs_unfreeze(struct super_block *sb) 613 { 614 struct jfs_sb_info *sbi = JFS_SBI(sb); 615 struct jfs_log *log = sbi->log; 616 int rc = 0; 617 618 if (!sb_rdonly(sb)) { 619 rc = updateSuper(sb, FM_MOUNT); 620 if (rc) { 621 jfs_error(sb, "updateSuper failed\n"); 622 goto out; 623 } 624 rc = lmLogInit(log); 625 if (rc) 626 jfs_error(sb, "lmLogInit failed\n"); 627 out: 628 txResume(sb); 629 } 630 return rc; 631 } 632 633 static int jfs_get_tree(struct fs_context *fc) 634 { 635 return get_tree_bdev(fc, jfs_fill_super); 636 } 637 638 static int jfs_sync_fs(struct super_block *sb, int wait) 639 { 640 struct jfs_log *log = JFS_SBI(sb)->log; 641 642 /* log == NULL indicates read-only mount */ 643 if (log) { 644 /* 645 * Write quota structures to quota file, sync_blockdev() will 646 * write them to disk later 647 */ 648 dquot_writeback_dquots(sb, -1); 649 jfs_flush_journal(log, wait); 650 jfs_syncpt(log, 0); 651 } 652 653 return 0; 654 } 655 656 static int jfs_show_options(struct seq_file *seq, struct dentry *root) 657 { 658 struct jfs_sb_info *sbi = JFS_SBI(root->d_sb); 659 660 if (uid_valid(sbi->uid)) 661 seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid)); 662 if (gid_valid(sbi->gid)) 663 seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid)); 664 if (sbi->umask != -1) 665 seq_printf(seq, ",umask=%03o", sbi->umask); 666 if (sbi->flag & JFS_NOINTEGRITY) 667 seq_puts(seq, ",nointegrity"); 668 if (sbi->flag & JFS_DISCARD) 669 seq_printf(seq, ",discard=%u", sbi->minblks_trim); 670 if (sbi->nls_tab) 671 seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset); 672 if (sbi->flag & JFS_ERR_CONTINUE) 673 seq_printf(seq, ",errors=continue"); 674 if (sbi->flag & JFS_ERR_PANIC) 675 seq_printf(seq, ",errors=panic"); 676 677 #ifdef CONFIG_QUOTA 678 if (sbi->flag & JFS_USRQUOTA) 679 seq_puts(seq, ",usrquota"); 680 681 if (sbi->flag & JFS_GRPQUOTA) 682 seq_puts(seq, ",grpquota"); 683 #endif 684 685 return 0; 686 } 687 688 #ifdef CONFIG_QUOTA 689 690 /* Read data from quotafile - avoid pagecache and such because we cannot afford 691 * acquiring the locks... As quota files are never truncated and quota code 692 * itself serializes the operations (and no one else should touch the files) 693 * we don't have to be afraid of races */ 694 static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data, 695 size_t len, loff_t off) 696 { 697 struct inode *inode = sb_dqopt(sb)->files[type]; 698 sector_t blk = off >> sb->s_blocksize_bits; 699 int err = 0; 700 int offset = off & (sb->s_blocksize - 1); 701 int tocopy; 702 size_t toread; 703 struct buffer_head tmp_bh; 704 struct buffer_head *bh; 705 loff_t i_size = i_size_read(inode); 706 707 if (off > i_size) 708 return 0; 709 if (off+len > i_size) 710 len = i_size-off; 711 toread = len; 712 while (toread > 0) { 713 tocopy = min_t(size_t, sb->s_blocksize - offset, toread); 714 715 tmp_bh.b_state = 0; 716 tmp_bh.b_size = i_blocksize(inode); 717 err = jfs_get_block(inode, blk, &tmp_bh, 0); 718 if (err) 719 return err; 720 if (!buffer_mapped(&tmp_bh)) /* A hole? */ 721 memset(data, 0, tocopy); 722 else { 723 bh = sb_bread(sb, tmp_bh.b_blocknr); 724 if (!bh) 725 return -EIO; 726 memcpy(data, bh->b_data+offset, tocopy); 727 brelse(bh); 728 } 729 offset = 0; 730 toread -= tocopy; 731 data += tocopy; 732 blk++; 733 } 734 return len; 735 } 736 737 /* Write to quotafile */ 738 static ssize_t jfs_quota_write(struct super_block *sb, int type, 739 const char *data, size_t len, loff_t off) 740 { 741 struct inode *inode = sb_dqopt(sb)->files[type]; 742 sector_t blk = off >> sb->s_blocksize_bits; 743 int err = 0; 744 int offset = off & (sb->s_blocksize - 1); 745 int tocopy; 746 size_t towrite = len; 747 struct buffer_head tmp_bh; 748 struct buffer_head *bh; 749 750 inode_lock(inode); 751 while (towrite > 0) { 752 tocopy = min_t(size_t, sb->s_blocksize - offset, towrite); 753 754 tmp_bh.b_state = 0; 755 tmp_bh.b_size = i_blocksize(inode); 756 err = jfs_get_block(inode, blk, &tmp_bh, 1); 757 if (err) 758 goto out; 759 if (offset || tocopy != sb->s_blocksize) 760 bh = sb_bread(sb, tmp_bh.b_blocknr); 761 else 762 bh = sb_getblk(sb, tmp_bh.b_blocknr); 763 if (!bh) { 764 err = -EIO; 765 goto out; 766 } 767 lock_buffer(bh); 768 memcpy(bh->b_data+offset, data, tocopy); 769 flush_dcache_page(bh->b_page); 770 set_buffer_uptodate(bh); 771 mark_buffer_dirty(bh); 772 unlock_buffer(bh); 773 brelse(bh); 774 offset = 0; 775 towrite -= tocopy; 776 data += tocopy; 777 blk++; 778 } 779 out: 780 if (len == towrite) { 781 inode_unlock(inode); 782 return err; 783 } 784 if (inode->i_size < off+len-towrite) 785 i_size_write(inode, off+len-towrite); 786 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 787 mark_inode_dirty(inode); 788 inode_unlock(inode); 789 return len - towrite; 790 } 791 792 static struct dquot __rcu **jfs_get_dquots(struct inode *inode) 793 { 794 return JFS_IP(inode)->i_dquot; 795 } 796 797 static int jfs_quota_on(struct super_block *sb, int type, int format_id, 798 const struct path *path) 799 { 800 int err; 801 struct inode *inode; 802 803 err = dquot_quota_on(sb, type, format_id, path); 804 if (err) 805 return err; 806 807 inode = d_inode(path->dentry); 808 inode_lock(inode); 809 JFS_IP(inode)->mode2 |= JFS_NOATIME_FL | JFS_IMMUTABLE_FL; 810 inode_set_flags(inode, S_NOATIME | S_IMMUTABLE, 811 S_NOATIME | S_IMMUTABLE); 812 inode_unlock(inode); 813 mark_inode_dirty(inode); 814 815 return 0; 816 } 817 818 static int jfs_quota_off(struct super_block *sb, int type) 819 { 820 struct inode *inode = sb_dqopt(sb)->files[type]; 821 int err; 822 823 if (!inode || !igrab(inode)) 824 goto out; 825 826 err = dquot_quota_off(sb, type); 827 if (err) 828 goto out_put; 829 830 inode_lock(inode); 831 JFS_IP(inode)->mode2 &= ~(JFS_NOATIME_FL | JFS_IMMUTABLE_FL); 832 inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE); 833 inode_unlock(inode); 834 mark_inode_dirty(inode); 835 out_put: 836 iput(inode); 837 return err; 838 out: 839 return dquot_quota_off(sb, type); 840 } 841 #endif 842 843 static const struct super_operations jfs_super_operations = { 844 .alloc_inode = jfs_alloc_inode, 845 .free_inode = jfs_free_inode, 846 .dirty_inode = jfs_dirty_inode, 847 .write_inode = jfs_write_inode, 848 .evict_inode = jfs_evict_inode, 849 .put_super = jfs_put_super, 850 .sync_fs = jfs_sync_fs, 851 .freeze_fs = jfs_freeze, 852 .unfreeze_fs = jfs_unfreeze, 853 .statfs = jfs_statfs, 854 .show_options = jfs_show_options, 855 #ifdef CONFIG_QUOTA 856 .quota_read = jfs_quota_read, 857 .quota_write = jfs_quota_write, 858 .get_dquots = jfs_get_dquots, 859 #endif 860 }; 861 862 static const struct export_operations jfs_export_operations = { 863 .encode_fh = generic_encode_ino32_fh, 864 .fh_to_dentry = jfs_fh_to_dentry, 865 .fh_to_parent = jfs_fh_to_parent, 866 .get_parent = jfs_get_parent, 867 }; 868 869 static void jfs_init_options(struct fs_context *fc, struct jfs_context *ctx) 870 { 871 if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 872 struct super_block *sb = fc->root->d_sb; 873 874 /* Copy over current option values and mount flags */ 875 ctx->uid = JFS_SBI(sb)->uid; 876 ctx->gid = JFS_SBI(sb)->gid; 877 ctx->umask = JFS_SBI(sb)->umask; 878 ctx->nls_map = (void *)-1; 879 ctx->minblks_trim = JFS_SBI(sb)->minblks_trim; 880 ctx->flag = JFS_SBI(sb)->flag; 881 882 } else { 883 /* 884 * Initialize the mount flag and determine the default 885 * error handler 886 */ 887 ctx->flag = JFS_ERR_REMOUNT_RO; 888 ctx->uid = INVALID_UID; 889 ctx->gid = INVALID_GID; 890 ctx->umask = -1; 891 ctx->nls_map = (void *)-1; 892 } 893 } 894 895 static void jfs_free_fc(struct fs_context *fc) 896 { 897 struct jfs_context *ctx = fc->fs_private; 898 899 if (ctx->nls_map != (void *) -1) 900 unload_nls(ctx->nls_map); 901 kfree(ctx); 902 } 903 904 static const struct fs_context_operations jfs_context_ops = { 905 .parse_param = jfs_parse_param, 906 .get_tree = jfs_get_tree, 907 .reconfigure = jfs_reconfigure, 908 .free = jfs_free_fc, 909 }; 910 911 static int jfs_init_fs_context(struct fs_context *fc) 912 { 913 struct jfs_context *ctx; 914 915 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 916 if (!ctx) 917 return -ENOMEM; 918 919 jfs_init_options(fc, ctx); 920 921 fc->fs_private = ctx; 922 fc->ops = &jfs_context_ops; 923 924 return 0; 925 } 926 927 static struct file_system_type jfs_fs_type = { 928 .owner = THIS_MODULE, 929 .name = "jfs", 930 .kill_sb = kill_block_super, 931 .fs_flags = FS_REQUIRES_DEV, 932 .init_fs_context = jfs_init_fs_context, 933 .parameters = jfs_param_spec, 934 }; 935 MODULE_ALIAS_FS("jfs"); 936 937 static void init_once(void *foo) 938 { 939 struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; 940 941 memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); 942 INIT_LIST_HEAD(&jfs_ip->anon_inode_list); 943 init_rwsem(&jfs_ip->rdwrlock); 944 mutex_init(&jfs_ip->commit_mutex); 945 init_rwsem(&jfs_ip->xattr_sem); 946 spin_lock_init(&jfs_ip->ag_lock); 947 jfs_ip->active_ag = -1; 948 inode_init_once(&jfs_ip->vfs_inode); 949 } 950 951 static int __init init_jfs_fs(void) 952 { 953 int i; 954 int rc; 955 956 jfs_inode_cachep = 957 kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info), 958 0, SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, 959 offsetof(struct jfs_inode_info, i_inline_all), 960 sizeof_field(struct jfs_inode_info, i_inline_all), 961 init_once); 962 if (jfs_inode_cachep == NULL) 963 return -ENOMEM; 964 965 /* 966 * Metapage initialization 967 */ 968 rc = metapage_init(); 969 if (rc) { 970 jfs_err("metapage_init failed w/rc = %d", rc); 971 goto free_slab; 972 } 973 974 /* 975 * Transaction Manager initialization 976 */ 977 rc = txInit(); 978 if (rc) { 979 jfs_err("txInit failed w/rc = %d", rc); 980 goto free_metapage; 981 } 982 983 /* 984 * I/O completion thread (endio) 985 */ 986 jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO"); 987 if (IS_ERR(jfsIOthread)) { 988 rc = PTR_ERR(jfsIOthread); 989 jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); 990 goto end_txmngr; 991 } 992 993 if (commit_threads < 1) 994 commit_threads = num_online_cpus(); 995 if (commit_threads > MAX_COMMIT_THREADS) 996 commit_threads = MAX_COMMIT_THREADS; 997 998 for (i = 0; i < commit_threads; i++) { 999 jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL, 1000 "jfsCommit"); 1001 if (IS_ERR(jfsCommitThread[i])) { 1002 rc = PTR_ERR(jfsCommitThread[i]); 1003 jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); 1004 commit_threads = i; 1005 goto kill_committask; 1006 } 1007 } 1008 1009 jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync"); 1010 if (IS_ERR(jfsSyncThread)) { 1011 rc = PTR_ERR(jfsSyncThread); 1012 jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); 1013 goto kill_committask; 1014 } 1015 1016 #ifdef PROC_FS_JFS 1017 jfs_proc_init(); 1018 #endif 1019 1020 rc = register_filesystem(&jfs_fs_type); 1021 if (!rc) 1022 return 0; 1023 1024 #ifdef PROC_FS_JFS 1025 jfs_proc_clean(); 1026 #endif 1027 kthread_stop(jfsSyncThread); 1028 kill_committask: 1029 for (i = 0; i < commit_threads; i++) 1030 kthread_stop(jfsCommitThread[i]); 1031 kthread_stop(jfsIOthread); 1032 end_txmngr: 1033 txExit(); 1034 free_metapage: 1035 metapage_exit(); 1036 free_slab: 1037 kmem_cache_destroy(jfs_inode_cachep); 1038 return rc; 1039 } 1040 1041 static void __exit exit_jfs_fs(void) 1042 { 1043 int i; 1044 1045 jfs_info("exit_jfs_fs called"); 1046 1047 txExit(); 1048 metapage_exit(); 1049 1050 kthread_stop(jfsIOthread); 1051 for (i = 0; i < commit_threads; i++) 1052 kthread_stop(jfsCommitThread[i]); 1053 kthread_stop(jfsSyncThread); 1054 #ifdef PROC_FS_JFS 1055 jfs_proc_clean(); 1056 #endif 1057 unregister_filesystem(&jfs_fs_type); 1058 1059 /* 1060 * Make sure all delayed rcu free inodes are flushed before we 1061 * destroy cache. 1062 */ 1063 rcu_barrier(); 1064 kmem_cache_destroy(jfs_inode_cachep); 1065 } 1066 1067 module_init(init_jfs_fs) 1068 module_exit(exit_jfs_fs) 1069