// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/ioctl.c
 *
 * Copyright (C) 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 */

#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/time.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/iversion.h>
#include <linux/fileattr.h>
#include <linux/uuid.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include <linux/fsmap.h>
#include "fsmap.h"
#include <trace/events/ext4.h>
#include <linux/fserror.h>

/*
 * Callback type used to apply one modification to an in-memory
 * superblock image (primary or backup).  'arg' carries the new value;
 * its meaning is defined by the individual callback.
 */
typedef void ext4_update_sb_callback(struct ext4_sb_info *sbi,
				     struct ext4_super_block *es,
				     const void *arg);

/*
 * Superblock modification callback function for changing file system
 * label
 */
static void ext4_sb_setlabel(struct ext4_sb_info *sbi,
			     struct ext4_super_block *es, const void *arg)
{
	/* Sanity check, this should never happen */
	BUILD_BUG_ON(sizeof(es->s_volume_name) < EXT4_LABEL_MAX);

	/* Caller provides a buffer of exactly EXT4_LABEL_MAX bytes. */
	memcpy(es->s_volume_name, (char *)arg, EXT4_LABEL_MAX);
}

/*
 * Superblock modification callback function for changing file system
 * UUID.
 */
static void ext4_sb_setuuid(struct ext4_sb_info *sbi,
			    struct ext4_super_block *es, const void *arg)
{
	memcpy(es->s_uuid, (__u8 *)arg, UUID_SIZE);
}

/*
 * Apply 'func' to the primary superblock under the journal handle
 * 'handle', recompute the superblock checksum and write the buffer out.
 *
 * The modification and checksum update happen under lock_buffer() so the
 * on-disk image is never seen half-updated.  Returns 0 on success or a
 * negative errno (also reported via ext4_std_error()).
 */
static
int ext4_update_primary_sb(struct super_block *sb, handle_t *handle,
			   ext4_update_sb_callback func,
			   const void *arg)
{
	int err = 0;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh = sbi->s_sbh;
	struct ext4_super_block *es = sbi->s_es;

	trace_ext4_update_sb(sb, bh->b_blocknr, 1);

	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb,
					    bh,
					    EXT4_JTR_NONE);
	if (err)
		goto out_err;

	lock_buffer(bh);
	func(sbi, es, arg);
	ext4_superblock_csum_set(sb);
	unlock_buffer(bh);

	/* Clear a stale write error so the fresh contents can be written. */
	if (buffer_write_io_error(bh) || !buffer_uptodate(bh)) {
		ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
			 "superblock detected");
		clear_buffer_write_io_error(bh);
		set_buffer_uptodate(bh);
	}

	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (err)
		goto out_err;
	err = sync_dirty_buffer(bh);
out_err:
	ext4_std_error(sb, err);
	return err;
}

/*
 * Update one backup superblock in the group 'grp' using the callback
 * function 'func' and argument 'arg'. If the handle is NULL the
 * modification is not journalled.
102 * 103 * Returns: 0 when no modification was done (no superblock in the group) 104 * 1 when the modification was successful 105 * <0 on error 106 */ 107 static int ext4_update_backup_sb(struct super_block *sb, 108 handle_t *handle, ext4_group_t grp, 109 ext4_update_sb_callback func, const void *arg) 110 { 111 int err = 0; 112 ext4_fsblk_t sb_block; 113 struct buffer_head *bh; 114 unsigned long offset = 0; 115 struct ext4_super_block *es; 116 117 if (!ext4_bg_has_super(sb, grp)) 118 return 0; 119 120 /* 121 * For the group 0 there is always 1k padding, so we have 122 * either adjust offset, or sb_block depending on blocksize 123 */ 124 if (grp == 0) { 125 sb_block = 1 * EXT4_MIN_BLOCK_SIZE; 126 offset = do_div(sb_block, sb->s_blocksize); 127 } else { 128 sb_block = ext4_group_first_block_no(sb, grp); 129 offset = 0; 130 } 131 132 trace_ext4_update_sb(sb, sb_block, handle ? 1 : 0); 133 134 bh = ext4_sb_bread(sb, sb_block, 0); 135 if (IS_ERR(bh)) 136 return PTR_ERR(bh); 137 138 if (handle) { 139 BUFFER_TRACE(bh, "get_write_access"); 140 err = ext4_journal_get_write_access(handle, sb, 141 bh, 142 EXT4_JTR_NONE); 143 if (err) 144 goto out_bh; 145 } 146 147 es = (struct ext4_super_block *) (bh->b_data + offset); 148 lock_buffer(bh); 149 if (ext4_has_feature_metadata_csum(sb) && 150 es->s_checksum != ext4_superblock_csum(es)) { 151 ext4_msg(sb, KERN_ERR, "Invalid checksum for backup " 152 "superblock %llu", sb_block); 153 unlock_buffer(bh); 154 goto out_bh; 155 } 156 func(EXT4_SB(sb), es, arg); 157 if (ext4_has_feature_metadata_csum(sb)) 158 es->s_checksum = ext4_superblock_csum(es); 159 set_buffer_uptodate(bh); 160 unlock_buffer(bh); 161 162 if (handle) { 163 err = ext4_handle_dirty_metadata(handle, NULL, bh); 164 if (err) 165 goto out_bh; 166 } else { 167 BUFFER_TRACE(bh, "marking dirty"); 168 mark_buffer_dirty(bh); 169 } 170 err = sync_dirty_buffer(bh); 171 172 out_bh: 173 brelse(bh); 174 ext4_std_error(sb, err); 175 return (err) ? 
err : 1; 176 } 177 178 /* 179 * Update primary and backup superblocks using the provided function 180 * func and argument arg. 181 * 182 * Only the primary superblock and at most two backup superblock 183 * modifications are journalled; the rest is modified without journal. 184 * This is safe because e2fsck will re-write them if there is a problem, 185 * and we're very unlikely to ever need more than two backups. 186 */ 187 static 188 int ext4_update_superblocks_fn(struct super_block *sb, 189 ext4_update_sb_callback func, 190 const void *arg) 191 { 192 handle_t *handle; 193 ext4_group_t ngroups; 194 unsigned int three = 1; 195 unsigned int five = 5; 196 unsigned int seven = 7; 197 int err = 0, ret, i; 198 ext4_group_t grp, primary_grp; 199 struct ext4_sb_info *sbi = EXT4_SB(sb); 200 201 /* 202 * We can't update superblocks while the online resize is running 203 */ 204 if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING, 205 &sbi->s_ext4_flags)) { 206 ext4_msg(sb, KERN_ERR, "Can't modify superblock while" 207 "performing online resize"); 208 return -EBUSY; 209 } 210 211 /* 212 * We're only going to update primary superblock and two 213 * backup superblocks in this transaction. 214 */ 215 handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 3); 216 if (IS_ERR(handle)) { 217 err = PTR_ERR(handle); 218 goto out; 219 } 220 221 /* Update primary superblock */ 222 err = ext4_update_primary_sb(sb, handle, func, arg); 223 if (err) { 224 ext4_msg(sb, KERN_ERR, "Failed to update primary " 225 "superblock"); 226 goto out_journal; 227 } 228 229 primary_grp = ext4_get_group_number(sb, sbi->s_sbh->b_blocknr); 230 ngroups = ext4_get_groups_count(sb); 231 232 /* 233 * Update backup superblocks. 
We have to start from group 0 234 * because it might not be where the primary superblock is 235 * if the fs is mounted with -o sb=<backup_sb_block> 236 */ 237 i = 0; 238 grp = 0; 239 while (grp < ngroups) { 240 /* Skip primary superblock */ 241 if (grp == primary_grp) 242 goto next_grp; 243 244 ret = ext4_update_backup_sb(sb, handle, grp, func, arg); 245 if (ret < 0) { 246 /* Ignore bad checksum; try to update next sb */ 247 if (ret == -EFSBADCRC) 248 goto next_grp; 249 err = ret; 250 goto out_journal; 251 } 252 253 i += ret; 254 if (handle && i > 1) { 255 /* 256 * We're only journalling primary superblock and 257 * two backup superblocks; the rest is not 258 * journalled. 259 */ 260 err = ext4_journal_stop(handle); 261 if (err) 262 goto out; 263 handle = NULL; 264 } 265 next_grp: 266 grp = ext4_list_backups(sb, &three, &five, &seven); 267 } 268 269 out_journal: 270 if (handle) { 271 ret = ext4_journal_stop(handle); 272 if (ret && !err) 273 err = ret; 274 } 275 out: 276 clear_bit_unlock(EXT4_FLAGS_RESIZING, &sbi->s_ext4_flags); 277 smp_mb__after_atomic(); 278 return err ? err : 0; 279 } 280 281 /* 282 * Swap memory between @a and @b for @len bytes. 283 * 284 * @a: pointer to first memory area 285 * @b: pointer to second memory area 286 * @len: number of bytes to swap 287 * 288 */ 289 static void memswap(void *a, void *b, size_t len) 290 { 291 unsigned char *ap, *bp; 292 293 ap = (unsigned char *)a; 294 bp = (unsigned char *)b; 295 while (len-- > 0) { 296 swap(*ap, *bp); 297 ap++; 298 bp++; 299 } 300 } 301 302 /* 303 * Swap i_data and associated attributes between @inode1 and @inode2. 304 * This function is used for the primary swap between inode1 and inode2 305 * and also to revert this primary swap in case of errors. 306 * 307 * Therefore you have to make sure, that calling this method twice 308 * will revert all changes. 
 *
 * @inode1: pointer to first inode
 * @inode2: pointer to second inode
 */
static void swap_inode_data(struct inode *inode1, struct inode *inode2)
{
	loff_t isize;
	struct ext4_inode_info *ei1;
	struct ext4_inode_info *ei2;
	unsigned long tmp;
	struct timespec64 ts1, ts2;

	ei1 = EXT4_I(inode1);
	ei2 = EXT4_I(inode2);

	swap(inode1->i_version, inode2->i_version);

	/* Exchange atime and mtime via the VFS accessors. */
	ts1 = inode_get_atime(inode1);
	ts2 = inode_get_atime(inode2);
	inode_set_atime_to_ts(inode1, ts2);
	inode_set_atime_to_ts(inode2, ts1);

	ts1 = inode_get_mtime(inode1);
	ts2 = inode_get_mtime(inode2);
	inode_set_mtime_to_ts(inode1, ts2);
	inode_set_mtime_to_ts(inode2, ts1);

	memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
	/* Only the EXT4_FL_SHOULD_SWAP subset of i_flags is exchanged. */
	tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
	ei1->i_flags = (ei2->i_flags & EXT4_FL_SHOULD_SWAP) |
		(ei1->i_flags & ~EXT4_FL_SHOULD_SWAP);
	ei2->i_flags = tmp | (ei2->i_flags & ~EXT4_FL_SHOULD_SWAP);
	swap(ei1->i_disksize, ei2->i_disksize);
	/* Drop cached extent status; it describes the pre-swap mappings. */
	ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
	ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);

	isize = i_size_read(inode1);
	i_size_write(inode1, i_size_read(inode2));
	i_size_write(inode2, isize);
}

/*
 * Recompute the inode checksum seed from inode number and generation.
 * No-op unless the metadata_csum feature is enabled.
 */
void ext4_reset_inode_seed(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__le32 inum = cpu_to_le32(inode->i_ino);
	__le32 gen = cpu_to_le32(inode->i_generation);
	__u32 csum;

	if (!ext4_has_feature_metadata_csum(inode->i_sb))
		return;

	csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
	ei->i_csum_seed = ext4_chksum(csum, (__u8 *)&gen, sizeof(gen));
}

/*
 * Swap the information from the given @inode and the inode
 * EXT4_BOOT_LOADER_INO. It will basically swap i_data and all other
 * important fields of the inodes.
 *
 * @sb: the super block of the filesystem
 * @idmap: idmap of the mount the inode was found from
 * @inode: the inode to swap with EXT4_BOOT_LOADER_INO
 *
 */
static long swap_inode_boot_loader(struct super_block *sb,
				   struct mnt_idmap *idmap,
				   struct inode *inode)
{
	handle_t *handle;
	int err;
	struct inode *inode_bl;
	struct ext4_inode_info *ei_bl;
	qsize_t size, size_bl, diff;
	blkcnt_t blocks;
	unsigned short bytes;

	inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO,
			     EXT4_IGET_SPECIAL | EXT4_IGET_BAD);
	if (IS_ERR(inode_bl))
		return PTR_ERR(inode_bl);
	ei_bl = EXT4_I(inode_bl);

	/* Protect orig inodes against a truncate and make sure,
	 * that only 1 swap_inode_boot_loader is running. */
	lock_two_nondirectories(inode, inode_bl);

	/* Only a plain, unencrypted, non-journal-data regular file with a
	 * single link may be swapped with the boot loader inode. */
	if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
	    IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
	    (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) ||
	    ext4_has_inline_data(inode)) {
		err = -EINVAL;
		goto journal_err_out;
	}

	if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
	    !inode_owner_or_capable(idmap, inode) ||
	    !capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto journal_err_out;
	}

	filemap_invalidate_lock(inode->i_mapping);
	err = filemap_write_and_wait(inode->i_mapping);
	if (err)
		goto err_out;

	err = filemap_write_and_wait(inode_bl->i_mapping);
	if (err)
		goto err_out;

	/* Wait for all existing dio workers */
	inode_dio_wait(inode);
	inode_dio_wait(inode_bl);

	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(&inode_bl->i_data, 0);

	handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
	if (IS_ERR(handle)) {
		err = -EINVAL;
		goto err_out;
	}
	/* The swap cannot be replayed by fast commit. */
	ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT, handle);

	/* Protect extent tree against block allocations via delalloc */
	ext4_double_down_write_data_sem(inode, inode_bl);

	if (is_bad_inode(inode_bl) || !S_ISREG(inode_bl->i_mode)) {
		/* this inode has never been used as a BOOT_LOADER */
		set_nlink(inode_bl, 1);
		i_uid_write(inode_bl, 0);
		i_gid_write(inode_bl, 0);
		inode_bl->i_flags = 0;
		ei_bl->i_flags = 0;
		inode_set_iversion(inode_bl, 1);
		i_size_write(inode_bl, 0);
		EXT4_I(inode_bl)->i_disksize = inode_bl->i_size;
		inode_bl->i_mode = S_IFREG;
		if (ext4_has_feature_extents(sb)) {
			ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode_bl);
		} else
			memset(ei_bl->i_data, 0, sizeof(ei_bl->i_data));
	}

	err = dquot_initialize(inode);
	if (err)
		goto err_out1;

	/* Space usage in 512-byte units plus the partial-block remainder. */
	size = (qsize_t)(inode->i_blocks) * (1 << 9) + inode->i_bytes;
	size_bl = (qsize_t)(inode_bl->i_blocks) * (1 << 9) + inode_bl->i_bytes;
	diff = size - size_bl;
	swap_inode_data(inode, inode_bl);

	inode_set_ctime_current(inode);
	inode_set_ctime_current(inode_bl);
	inode_inc_iversion(inode);

	/* New data contents require new generations and checksum seeds. */
	inode->i_generation = get_random_u32();
	inode_bl->i_generation = get_random_u32();
	ext4_reset_inode_seed(inode);
	ext4_reset_inode_seed(inode_bl);

	ext4_discard_preallocations(inode);

	err = ext4_mark_inode_dirty(handle, inode);
	if (err < 0) {
		/* No need to update quota information. */
		ext4_warning(inode->i_sb,
			"couldn't mark inode #%lu dirty (err %d)",
			inode->i_ino, err);
		/* Revert all changes: */
		swap_inode_data(inode, inode_bl);
		ext4_mark_inode_dirty(handle, inode);
		goto err_out1;
	}

	blocks = inode_bl->i_blocks;
	bytes = inode_bl->i_bytes;
	inode_bl->i_blocks = inode->i_blocks;
	inode_bl->i_bytes = inode->i_bytes;
	err = ext4_mark_inode_dirty(handle, inode_bl);
	if (err < 0) {
		/* No need to update quota information. */
		ext4_warning(inode_bl->i_sb,
			"couldn't mark inode #%lu dirty (err %d)",
			inode_bl->i_ino, err);
		goto revert;
	}

	/* Bootloader inode should not be counted into quota information. */
	if (diff > 0)
		dquot_free_space(inode, diff);
	else
		err = dquot_alloc_space(inode, -1 * diff);

	if (err < 0) {
revert:
		/* Revert all changes: */
		inode_bl->i_blocks = blocks;
		inode_bl->i_bytes = bytes;
		swap_inode_data(inode, inode_bl);
		ext4_mark_inode_dirty(handle, inode);
		ext4_mark_inode_dirty(handle, inode_bl);
	}

err_out1:
	ext4_journal_stop(handle);
	ext4_double_up_write_data_sem(inode, inode_bl);

err_out:
	filemap_invalidate_unlock(inode->i_mapping);
journal_err_out:
	unlock_two_nondirectories(inode, inode_bl);
	iput(inode_bl);
	return err;
}

/*
 * If immutable is set and we are not clearing it, we're not allowed to change
 * anything else in the inode. Don't error out if we're only trying to set
 * immutable on an immutable file.
 */
static int ext4_ioctl_check_immutable(struct inode *inode, __u32 new_projid,
				      unsigned int flags)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int oldflags = ei->i_flags;

	/* Not immutable, or immutable is being cleared: nothing to check. */
	if (!(oldflags & EXT4_IMMUTABLE_FL) || !(flags & EXT4_IMMUTABLE_FL))
		return 0;

	if ((oldflags & ~EXT4_IMMUTABLE_FL) != (flags & ~EXT4_IMMUTABLE_FL))
		return -EPERM;
	if (ext4_has_feature_project(inode->i_sb) &&
	    __kprojid_val(ei->i_projid) != new_projid)
		return -EPERM;

	return 0;
}

/*
 * If the DAX flag is toggling on a regular file (and the mount does not
 * force DAX on or off), mark the dentry dontcache so the inode is
 * re-instantiated with the new S_DAX state on next lookup.
 */
static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	/* Mount options override the per-inode flag; nothing changes. */
	if (test_opt2(inode->i_sb, DAX_NEVER) ||
	    test_opt(inode->i_sb, DAX_ALWAYS))
		return;

	if ((ei->i_flags ^ flags) & EXT4_DAX_FL)
		d_mark_dontcache(inode);
}

/*
 * Check that the requested flags do not combine DAX with any of the
 * flags it is mutually exclusive with (EXT4_DAX_MUT_EXCL), in either
 * direction, and that no verity build is in progress.
 */
static bool dax_compatible(struct inode *inode, unsigned int oldflags,
			   unsigned int flags)
{
	/* Allow the DAX flag to be changed on inline directories */
	if (S_ISDIR(inode->i_mode)) {
		flags &= ~EXT4_INLINE_DATA_FL;
		oldflags &= ~EXT4_INLINE_DATA_FL;
	}

	if (flags & EXT4_DAX_FL) {
		if ((oldflags & EXT4_DAX_MUT_EXCL) ||
		    ext4_test_inode_state(inode,
					  EXT4_STATE_VERITY_IN_PROGRESS)) {
			return false;
		}
	}

	if ((flags & EXT4_DAX_MUT_EXCL) && (oldflags & EXT4_DAX_FL))
		return false;

	return true;
}

/*
 * Apply a new set of user-modifiable inode flags.  JOURNAL_DATA and
 * EXTENTS changes are handled specially after the journalled flag
 * update: the first via ext4_change_inode_journal_flag(), the second
 * via an extent <-> indirect migration.
 */
static int ext4_ioctl_setflags(struct inode *inode,
			       unsigned int flags)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle = NULL;
	int err = -EPERM, migrate = 0;
	struct ext4_iloc iloc;
	unsigned int oldflags, mask, i;
	struct super_block *sb = inode->i_sb;

	/* Is it quota file? Do not allow user to mess with it */
	if (ext4_is_quota_file(inode))
		goto flags_out;

	oldflags = ei->i_flags;
	/*
	 * The JOURNAL_DATA flag can only be changed by
	 * the relevant capability.
	 */
	if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
		if (!capable(CAP_SYS_RESOURCE))
			goto flags_out;
	}

	if (!dax_compatible(inode, oldflags, flags)) {
		err = -EOPNOTSUPP;
		goto flags_out;
	}

	if ((flags ^ oldflags) & EXT4_EXTENTS_FL)
		migrate = 1;

	/* CASEFOLD may only be toggled on empty directories. */
	if ((flags ^ oldflags) & EXT4_CASEFOLD_FL) {
		if (!ext4_has_feature_casefold(sb)) {
			err = -EOPNOTSUPP;
			goto flags_out;
		}

		if (!S_ISDIR(inode->i_mode)) {
			err = -ENOTDIR;
			goto flags_out;
		}

		if (!ext4_empty_dir(inode)) {
			err = -ENOTEMPTY;
			goto flags_out;
		}
	}

	/*
	 * Wait for all pending directio and then flush all the dirty pages
	 * for this file. The flush marks all the pages readonly, so any
	 * subsequent attempt to write to the file (particularly mmap pages)
	 * will come through the filesystem and fail.
	 */
	if (S_ISREG(inode->i_mode) && !IS_IMMUTABLE(inode) &&
	    (flags & EXT4_IMMUTABLE_FL)) {
		inode_dio_wait(inode);
		err = filemap_write_and_wait(inode->i_mapping);
		if (err)
			goto flags_out;
	}

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto flags_out;
	}
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto flags_err;

	ext4_dax_dontcache(inode, flags);

	for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
		if (!(mask & EXT4_FL_USER_MODIFIABLE))
			continue;
		/* These flags get special treatment later */
		if (mask == EXT4_JOURNAL_DATA_FL || mask == EXT4_EXTENTS_FL)
			continue;
		if (mask & flags)
			ext4_set_inode_flag(inode, i);
		else
			ext4_clear_inode_flag(inode, i);
	}

	/* Propagate the new ext4 flags to the generic inode->i_flags. */
	ext4_set_inode_flags(inode, false);

	inode_set_ctime_current(inode);
	inode_inc_iversion(inode);

	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
	ext4_journal_stop(handle);
	if (err)
		goto flags_out;

	if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
		/*
		 * Changes to the journaling mode can cause unsafe changes to
		 * S_DAX if the inode is DAX
		 */
		if (IS_DAX(inode)) {
			err = -EBUSY;
			goto flags_out;
		}

		err = ext4_change_inode_journal_flag(inode,
						     flags & EXT4_JOURNAL_DATA_FL);
		if (err)
			goto flags_out;
	}
	if (migrate) {
		if (flags & EXT4_EXTENTS_FL)
			err = ext4_ext_migrate(inode);
		else
			err = ext4_ind_migrate(inode);
	}

flags_out:
	return err;
}

#ifdef CONFIG_QUOTA
/*
 * Change the project ID of an inode, transferring its quota usage to
 * the new project via __dquot_transfer().  Expands the inode's extra
 * isize first if i_projid does not yet fit on disk.
 */
static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, rc;
	handle_t *handle;
	kprojid_t kprojid;
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct dquot *transfer_to[MAXQUOTAS] = { };

	if (!ext4_has_feature_project(sb)) {
		if (projid != EXT4_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (EXT4_INODE_SIZE(sb) <= EXT4_GOOD_OLD_INODE_SIZE)
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, EXT4_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (ext4_is_quota_file(inode))
		return err;

	err = dquot_initialize(inode);
	if (err)
		return err;

	err = ext4_get_inode_loc(inode, &iloc);
	if (err)
		return err;

	raw_inode = ext4_raw_inode(&iloc);
	if (!EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) {
		err = ext4_expand_extra_isize(inode,
					      EXT4_SB(sb)->s_want_extra_isize,
					      &iloc);
		if (err)
			return err;
	} else {
		brelse(iloc.bh);
	}

	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(sb) +
				    EXT4_QUOTA_DEL_BLOCKS(sb) + 3);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out_stop;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {

		/* __dquot_transfer() calls back ext4_get_inode_usage() which
		 * counts xattr inode references.
		 */
		down_read(&EXT4_I(inode)->xattr_sem);
		err = __dquot_transfer(inode, transfer_to);
		up_read(&EXT4_I(inode)->xattr_sem);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	EXT4_I(inode)->i_projid = kprojid;
	inode_set_ctime_current(inode);
	inode_inc_iversion(inode);
out_dirty:
	rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
	if (!err)
		err = rc;
out_stop:
	ext4_journal_stop(handle);
	return err;
}
#else
/* Without CONFIG_QUOTA only the default project ID is accepted. */
static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
{
	if (projid != EXT4_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/*
 * Force-shutdown the filesystem according to one of the
 * EXT4_GOING_FLAGS_* policies: freeze/thaw the device, flush the
 * journal first, or abort immediately without flushing.
 */
int ext4_force_shutdown(struct super_block *sb, u32 flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int ret;

	if (flags > EXT4_GOING_FLAGS_NOLOGFLUSH)
		return -EINVAL;

	/* Already shut down: nothing more to do. */
	if (ext4_forced_shutdown(sb))
		return 0;

	ext4_msg(sb, KERN_ALERT, "shut down requested (%d)", flags);
	trace_ext4_shutdown(sb, flags);

	switch (flags) {
	case EXT4_GOING_FLAGS_DEFAULT:
		ret = bdev_freeze(sb->s_bdev);
		if (ret)
			return ret;
		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
		bdev_thaw(sb->s_bdev);
		break;
	case EXT4_GOING_FLAGS_LOGFLUSH:
		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
		if (sbi->s_journal && !is_journal_aborted(sbi->s_journal)) {
			(void) ext4_force_commit(sb);
			jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
		}
		break;
	case EXT4_GOING_FLAGS_NOLOGFLUSH:
		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
		if (sbi->s_journal && !is_journal_aborted(sbi->s_journal))
			jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
		break;
	default:
		return -EINVAL;
	}
	clear_opt(sb, DISCARD);
	fserror_report_shutdown(sb, GFP_KERNEL);
	return 0;
}

/*
 * EXT4_IOC_SHUTDOWN handler: copy the policy flags from userspace and
 * delegate to ext4_force_shutdown().  Requires CAP_SYS_ADMIN.
 */
static int ext4_ioctl_shutdown(struct super_block *sb, unsigned long arg)
{
	u32 flags;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (get_user(flags, (__u32 __user *)arg))
		return -EFAULT;

	return ext4_force_shutdown(sb, flags);
}

/* Per-call state threaded through the GETFSMAP formatter callback. */
struct getfsmap_info {
	struct super_block	*gi_sb;		/* filesystem being queried */
	struct fsmap_head __user *gi_data;	/* user destination buffer */
	unsigned int		gi_idx;		/* next record slot to fill */
	__u32			gi_last_flags;	/* flags of last emitted rec */
};

/*
 * ext4_getfsmap() callback: convert one internal mapping record to the
 * user ABI format and copy it into the next slot of the user buffer.
 */
static int ext4_getfsmap_format(struct ext4_fsmap *xfm, void *priv)
{
	struct getfsmap_info *info = priv;
	struct fsmap fm;

	trace_ext4_getfsmap_mapping(info->gi_sb, xfm);

	info->gi_last_flags = xfm->fmr_flags;
	ext4_fsmap_from_internal(info->gi_sb, &fm, xfm);
	if (copy_to_user(&info->gi_data->fmh_recs[info->gi_idx++], &fm,
			 sizeof(struct fsmap)))
		return -EFAULT;

	return 0;
}

/*
 * EXT4_IOC_GETFSMAP handler: validate the user-supplied header, run the
 * mapping query, then mark the final record with FMR_OF_LAST and copy
 * the updated header back.
 */
static int ext4_ioc_getfsmap(struct super_block *sb,
			     struct fsmap_head __user *arg)
{
	struct getfsmap_info info = { NULL };
	struct ext4_fsmap_head xhead = {0};
	struct fsmap_head head;
	bool aborted = false;
	int error;

	if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
		return -EFAULT;
	/* Reserved fields must be zero. */
	if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
	    memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
		       sizeof(head.fmh_keys[0].fmr_reserved)) ||
	    memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
		       sizeof(head.fmh_keys[1].fmr_reserved)))
		return -EINVAL;
	/*
	 * ext4 doesn't report file extents at all, so the only valid
	 * file offsets are the magic ones (all zeroes or all ones).
	 */
	if (head.fmh_keys[0].fmr_offset ||
	    (head.fmh_keys[1].fmr_offset != 0 &&
	     head.fmh_keys[1].fmr_offset != -1ULL))
		return -EINVAL;

	xhead.fmh_iflags = head.fmh_iflags;
	xhead.fmh_count = head.fmh_count;
	ext4_fsmap_to_internal(sb, &xhead.fmh_keys[0], &head.fmh_keys[0]);
	ext4_fsmap_to_internal(sb, &xhead.fmh_keys[1], &head.fmh_keys[1]);

	trace_ext4_getfsmap_low_key(sb, &xhead.fmh_keys[0]);
	trace_ext4_getfsmap_high_key(sb, &xhead.fmh_keys[1]);

	info.gi_sb = sb;
	info.gi_data = arg;
	error = ext4_getfsmap(sb, &xhead, ext4_getfsmap_format, &info);
	/* Positive EXT4_QUERY_RANGE_ABORT means "buffer full", not error. */
	if (error == EXT4_QUERY_RANGE_ABORT)
		aborted = true;
	else if (error)
		return error;

	/* If we didn't abort, set the "last" flag in the last fmx */
	if (!aborted && info.gi_idx) {
		info.gi_last_flags |= FMR_OF_LAST;
		if (copy_to_user(&info.gi_data->fmh_recs[info.gi_idx - 1].fmr_flags,
				 &info.gi_last_flags,
				 sizeof(info.gi_last_flags)))
			return -EFAULT;
	}

	/* copy back header */
	head.fmh_entries = xhead.fmh_entries;
	head.fmh_oflags = xhead.fmh_oflags;
	if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
		return -EFAULT;

	return 0;
}

/*
 * EXT4_IOC_GROUP_ADD handler: add one block group via online resize,
 * then flush the journal so the new group descriptors are on disk.
 */
static long ext4_ioctl_group_add(struct file *file,
				 struct ext4_new_group_data *input)
{
	struct super_block *sb = file_inode(file)->i_sb;
	int err, err2=0;

	err = ext4_resize_begin(sb);
	if (err)
		return err;

	if (ext4_has_feature_bigalloc(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Online resizing not supported with bigalloc");
		err = -EOPNOTSUPP;
		goto group_add_out;
	}

	err = mnt_want_write_file(file);
	if (err)
		goto group_add_out;

	err = ext4_group_add(sb, input);
	if (EXT4_SB(sb)->s_journal) {
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
	}
	if (err == 0)
		err = err2;
	mnt_drop_write_file(file);
	/* Queue lazy init of the new group's inode table if enabled. */
	if (!err && ext4_has_group_desc_csum(sb) &&
	    test_opt(sb, INIT_INODE_TABLE))
		err = ext4_register_li_request(sb, input->group);
group_add_out:
	err2 = ext4_resize_end(sb, false);
	if (err == 0)
		err = err2;
	return err;
}

/*
 * Report the user-visible inode flags (and project ID when the project
 * feature is enabled) for the FS_IOC_GETFLAGS/fileattr interface.
 */
int ext4_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct ext4_inode_info *ei = EXT4_I(inode);
	u32 flags = ei->i_flags & EXT4_FL_USER_VISIBLE;

	/* PROJINHERIT is meaningful only on directories. */
	if (S_ISREG(inode->i_mode))
		flags &= ~FS_PROJINHERIT_FL;

	fileattr_fill_flags(fa, flags);
	if (ext4_has_feature_project(inode->i_sb))
		fa->fsx_projid = from_kprojid(&init_user_ns, ei->i_projid);

	return 0;
}

/*
 * Apply new inode flags and project ID from the fileattr interface,
 * after masking to the modifiable set and running the immutable check.
 */
int ext4_fileattr_set(struct mnt_idmap *idmap,
		      struct dentry *dentry, struct file_kattr *fa)
{
	struct inode *inode = d_inode(dentry);
	u32 flags = fa->flags;
	int err = -EOPNOTSUPP;

	if (flags & ~EXT4_FL_USER_VISIBLE)
		goto out;

	/*
	 * chattr(1) grabs flags via GETFLAGS, modifies the result and
	 * passes that to SETFLAGS. So we cannot easily make SETFLAGS
	 * more restrictive than just silently masking off visible but
	 * not settable flags as we always did.
	 */
	flags &= EXT4_FL_USER_MODIFIABLE;
	if (ext4_mask_flags(inode->i_mode, flags) != flags)
		goto out;
	err = ext4_ioctl_check_immutable(inode, fa->fsx_projid, flags);
	if (err)
		goto out;
	err = ext4_ioctl_setflags(inode, flags);
	if (err)
		goto out;
	err = ext4_ioctl_setproject(inode, fa->fsx_projid);
out:
	return err;
}

/* So that the fiemap access checks can't overflow on 32 bit machines.
 */
#define FIEMAP_MAX_EXTENTS	(UINT_MAX / sizeof(struct fiemap_extent))

/*
 * EXT4_IOC_GET_ES_CACHE handler: report the inode's cached extent
 * status entries using the fiemap user ABI structures.
 */
static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg)
{
	struct fiemap fiemap;
	struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
	struct fiemap_extent_info fieinfo = { 0, };
	struct inode *inode = file_inode(filp);
	int error;

	if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
		return -EFAULT;

	if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
		return -EINVAL;

	fieinfo.fi_flags = fiemap.fm_flags;
	fieinfo.fi_extents_max = fiemap.fm_extent_count;
	/* Address-of a user-space member: no dereference happens here. */
	fieinfo.fi_extents_start = ufiemap->fm_extents;

	error = ext4_get_es_cache(inode, &fieinfo, fiemap.fm_start,
				  fiemap.fm_length);
	fiemap.fm_flags = fieinfo.fi_flags;
	fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
	if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
		error = -EFAULT;

	return error;
}

/*
 * EXT4_IOC_CHECKPOINT handler: checkpoint the journal, optionally
 * discarding or zeroing out the journal blocks afterwards.  Requires
 * CAP_SYS_ADMIN; DISCARD and ZEROOUT are mutually exclusive.
 */
static int ext4_ioctl_checkpoint(struct file *filp, unsigned long arg)
{
	int err = 0;
	__u32 flags = 0;
	unsigned int flush_flags = 0;
	struct super_block *sb = file_inode(filp)->i_sb;

	if (copy_from_user(&flags, (__u32 __user *)arg,
			   sizeof(__u32)))
		return -EFAULT;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* check for invalid bits set */
	if ((flags & ~EXT4_IOC_CHECKPOINT_FLAG_VALID) ||
	    ((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
	     (flags & JBD2_JOURNAL_FLUSH_ZEROOUT)))
		return -EINVAL;

	if (!EXT4_SB(sb)->s_journal)
		return -ENODEV;

	if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
	    !bdev_max_discard_sectors(EXT4_SB(sb)->s_journal->j_dev))
		return -EOPNOTSUPP;

	/* Dry run: all validation passed, do nothing. */
	if (flags & EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN)
		return 0;

	if (flags & EXT4_IOC_CHECKPOINT_FLAG_DISCARD)
		flush_flags |= JBD2_JOURNAL_FLUSH_DISCARD;

	if (flags & EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT) {
		flush_flags |= JBD2_JOURNAL_FLUSH_ZEROOUT;
		pr_info_ratelimited("warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow");
	}

	jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
	err = jbd2_journal_flush(EXT4_SB(sb)->s_journal, flush_flags);
	jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);

	return err;
}

/*
 * FS_IOC_SETFSLABEL handler: validate the new label length and write it
 * to the primary and all backup superblocks.  Requires CAP_SYS_ADMIN.
 */
static int ext4_ioctl_setlabel(struct file *filp, const char __user *user_label)
{
	size_t len;
	int ret = 0;
	char new_label[EXT4_LABEL_MAX + 1];
	struct super_block *sb = file_inode(filp)->i_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Copy the maximum length allowed for ext4 label with one more to
	 * find the required terminating null byte in order to test the
	 * label length. The on disk label doesn't need to be null terminated.
	 */
	if (copy_from_user(new_label, user_label, EXT4_LABEL_MAX + 1))
		return -EFAULT;

	len = strnlen(new_label, EXT4_LABEL_MAX + 1);
	if (len > EXT4_LABEL_MAX)
		return -EINVAL;

	/*
	 * Clear the buffer after the new label
	 */
	memset(new_label + len, 0, EXT4_LABEL_MAX - len);

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = ext4_update_superblocks_fn(sb, ext4_sb_setlabel, new_label);

	mnt_drop_write_file(filp);
	return ret;
}

/*
 * FS_IOC_GETFSLABEL handler: copy the (NUL-padded) volume label from
 * the primary superblock to userspace.
 */
static int ext4_ioctl_getlabel(struct ext4_sb_info *sbi, char __user *user_label)
{
	char label[EXT4_LABEL_MAX + 1];

	/*
	 * EXT4_LABEL_MAX must always be smaller than FSLABEL_MAX because
	 * FSLABEL_MAX must include terminating null byte, while s_volume_name
	 * does not have to.
	 */
	BUILD_BUG_ON(EXT4_LABEL_MAX >= FSLABEL_MAX);

	/* Snapshot the label while the superblock buffer is stable. */
	lock_buffer(sbi->s_sbh);
	memtostr_pad(label, sbi->s_es->s_volume_name);
	unlock_buffer(sbi->s_sbh);

	if (copy_to_user(user_label, label, sizeof(label)))
		return -EFAULT;
	return 0;
}

/*
 * EXT4_IOC_GETFSUUID handler.  With fsu_len == 0 the caller is asking
 * for the required buffer size; otherwise the filesystem UUID is copied
 * out and fsu_len is set to UUID_SIZE.
 */
static int ext4_ioctl_getuuid(struct ext4_sb_info *sbi,
			      struct fsuuid __user *ufsuuid)
{
	struct fsuuid fsuuid;
	__u8 uuid[UUID_SIZE];

	if (copy_from_user(&fsuuid, ufsuuid, sizeof(fsuuid)))
		return -EFAULT;

	if (fsuuid.fsu_len == 0) {
		/* Size query: report the needed length and return. */
		fsuuid.fsu_len = UUID_SIZE;
		if (copy_to_user(&ufsuuid->fsu_len, &fsuuid.fsu_len,
				 sizeof(fsuuid.fsu_len)))
			return -EFAULT;
		return 0;
	}

	if (fsuuid.fsu_len < UUID_SIZE || fsuuid.fsu_flags != 0)
		return -EINVAL;

	lock_buffer(sbi->s_sbh);
	memcpy(uuid, sbi->s_es->s_uuid, UUID_SIZE);
	unlock_buffer(sbi->s_sbh);

	fsuuid.fsu_len = UUID_SIZE;
	if (copy_to_user(ufsuuid, &fsuuid, sizeof(fsuuid)) ||
	    copy_to_user(&ufsuuid->fsu_uuid[0], uuid, UUID_SIZE))
		return -EFAULT;
	return 0;
}

/*
 * EXT4_IOC_SETFSUUID handler: write a new filesystem UUID to the
 * primary and all backup superblocks.  Requires CAP_SYS_ADMIN.
 */
static int ext4_ioctl_setuuid(struct file *filp,
			      const struct fsuuid __user *ufsuuid)
{
	int ret = 0;
	struct super_block *sb = file_inode(filp)->i_sb;
	struct fsuuid fsuuid;
	__u8 uuid[UUID_SIZE];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * If any checksums (group descriptors or metadata) are being used
	 * then the checksum seed feature is required to change the UUID.
	 */
	if (((ext4_has_feature_gdt_csum(sb) ||
	      ext4_has_feature_metadata_csum(sb))
	     && !ext4_has_feature_csum_seed(sb))
	    || ext4_has_feature_stable_inodes(sb))
		return -EOPNOTSUPP;

	if (copy_from_user(&fsuuid, ufsuuid, sizeof(fsuuid)))
		return -EFAULT;

	if (fsuuid.fsu_len != UUID_SIZE || fsuuid.fsu_flags != 0)
		return -EINVAL;

	if (copy_from_user(uuid, &ufsuuid->fsu_uuid[0], UUID_SIZE))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = ext4_update_superblocks_fn(sb, ext4_sb_setuuid, &uuid);
	mnt_drop_write_file(filp);

	return ret;
}


/* Tunable fields the EXT4_IOC_SET_TUNE_SB interface may modify. */
#define TUNE_OPS_SUPPORTED (EXT4_TUNE_FL_ERRORS_BEHAVIOR | \
	EXT4_TUNE_FL_MNT_COUNT | EXT4_TUNE_FL_MAX_MNT_COUNT | \
	EXT4_TUNE_FL_CHECKINTRVAL | EXT4_TUNE_FL_LAST_CHECK_TIME | \
	EXT4_TUNE_FL_RESERVED_BLOCKS | EXT4_TUNE_FL_RESERVED_UID | \
	EXT4_TUNE_FL_RESERVED_GID | EXT4_TUNE_FL_DEFAULT_MNT_OPTS | \
	EXT4_TUNE_FL_DEF_HASH_ALG | EXT4_TUNE_FL_RAID_STRIDE | \
	EXT4_TUNE_FL_RAID_STRIPE_WIDTH | EXT4_TUNE_FL_MOUNT_OPTS | \
	EXT4_TUNE_FL_FEATURES | EXT4_TUNE_FL_EDIT_FEATURES | \
	EXT4_TUNE_FL_FORCE_FSCK | EXT4_TUNE_FL_ENCODING | \
	EXT4_TUNE_FL_ENCODING_FLAGS)

/* Feature bits that may be set through the tune interface. */
#define EXT4_TUNE_SET_COMPAT_SUPP \
	(EXT4_FEATURE_COMPAT_DIR_INDEX | \
	 EXT4_FEATURE_COMPAT_STABLE_INODES)
#define EXT4_TUNE_SET_INCOMPAT_SUPP \
	(EXT4_FEATURE_INCOMPAT_EXTENTS | \
	 EXT4_FEATURE_INCOMPAT_EA_INODE | \
	 EXT4_FEATURE_INCOMPAT_ENCRYPT | \
	 EXT4_FEATURE_INCOMPAT_CSUM_SEED | \
	 EXT4_FEATURE_INCOMPAT_LARGEDIR | \
	 EXT4_FEATURE_INCOMPAT_CASEFOLD)
#define EXT4_TUNE_SET_RO_COMPAT_SUPP \
	(EXT4_FEATURE_RO_COMPAT_LARGE_FILE | \
	 EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \
	 EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
	 EXT4_FEATURE_RO_COMPAT_PROJECT | \
	 EXT4_FEATURE_RO_COMPAT_VERITY)

/* Feature bits that may be cleared through the tune interface. */
#define EXT4_TUNE_CLEAR_COMPAT_SUPP	(0)
1268 #define EXT4_TUNE_CLEAR_INCOMPAT_SUPP (0) 1269 #define EXT4_TUNE_CLEAR_RO_COMPAT_SUPP (0) 1270 1271 #define SB_ENC_SUPP_MASK (SB_ENC_STRICT_MODE_FL | \ 1272 SB_ENC_NO_COMPAT_FALLBACK_FL) 1273 1274 static int ext4_ioctl_get_tune_sb(struct ext4_sb_info *sbi, 1275 struct ext4_tune_sb_params __user *params) 1276 { 1277 struct ext4_tune_sb_params ret; 1278 struct ext4_super_block *es = sbi->s_es; 1279 1280 memset(&ret, 0, sizeof(ret)); 1281 ret.set_flags = TUNE_OPS_SUPPORTED; 1282 ret.errors_behavior = le16_to_cpu(es->s_errors); 1283 ret.mnt_count = le16_to_cpu(es->s_mnt_count); 1284 ret.max_mnt_count = le16_to_cpu(es->s_max_mnt_count); 1285 ret.checkinterval = le32_to_cpu(es->s_checkinterval); 1286 ret.last_check_time = le32_to_cpu(es->s_lastcheck); 1287 ret.reserved_blocks = ext4_r_blocks_count(es); 1288 ret.blocks_count = ext4_blocks_count(es); 1289 ret.reserved_uid = ext4_get_resuid(es); 1290 ret.reserved_gid = ext4_get_resgid(es); 1291 ret.default_mnt_opts = le32_to_cpu(es->s_default_mount_opts); 1292 ret.def_hash_alg = es->s_def_hash_version; 1293 ret.raid_stride = le16_to_cpu(es->s_raid_stride); 1294 ret.raid_stripe_width = le32_to_cpu(es->s_raid_stripe_width); 1295 ret.encoding = le16_to_cpu(es->s_encoding); 1296 ret.encoding_flags = le16_to_cpu(es->s_encoding_flags); 1297 strscpy_pad(ret.mount_opts, es->s_mount_opts); 1298 ret.feature_compat = le32_to_cpu(es->s_feature_compat); 1299 ret.feature_incompat = le32_to_cpu(es->s_feature_incompat); 1300 ret.feature_ro_compat = le32_to_cpu(es->s_feature_ro_compat); 1301 ret.set_feature_compat_mask = EXT4_TUNE_SET_COMPAT_SUPP; 1302 ret.set_feature_incompat_mask = EXT4_TUNE_SET_INCOMPAT_SUPP; 1303 ret.set_feature_ro_compat_mask = EXT4_TUNE_SET_RO_COMPAT_SUPP; 1304 ret.clear_feature_compat_mask = EXT4_TUNE_CLEAR_COMPAT_SUPP; 1305 ret.clear_feature_incompat_mask = EXT4_TUNE_CLEAR_INCOMPAT_SUPP; 1306 ret.clear_feature_ro_compat_mask = EXT4_TUNE_CLEAR_RO_COMPAT_SUPP; 1307 if (copy_to_user(params, &ret, sizeof(ret))) 
1308 return -EFAULT; 1309 return 0; 1310 } 1311 1312 static void ext4_sb_setparams(struct ext4_sb_info *sbi, 1313 struct ext4_super_block *es, const void *arg) 1314 { 1315 const struct ext4_tune_sb_params *params = arg; 1316 1317 if (params->set_flags & EXT4_TUNE_FL_ERRORS_BEHAVIOR) 1318 es->s_errors = cpu_to_le16(params->errors_behavior); 1319 if (params->set_flags & EXT4_TUNE_FL_MNT_COUNT) 1320 es->s_mnt_count = cpu_to_le16(params->mnt_count); 1321 if (params->set_flags & EXT4_TUNE_FL_MAX_MNT_COUNT) 1322 es->s_max_mnt_count = cpu_to_le16(params->max_mnt_count); 1323 if (params->set_flags & EXT4_TUNE_FL_CHECKINTRVAL) 1324 es->s_checkinterval = cpu_to_le32(params->checkinterval); 1325 if (params->set_flags & EXT4_TUNE_FL_LAST_CHECK_TIME) 1326 es->s_lastcheck = cpu_to_le32(params->last_check_time); 1327 if (params->set_flags & EXT4_TUNE_FL_RESERVED_BLOCKS) { 1328 ext4_fsblk_t blk = params->reserved_blocks; 1329 1330 es->s_r_blocks_count_lo = cpu_to_le32((u32)blk); 1331 es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32); 1332 } 1333 if (params->set_flags & EXT4_TUNE_FL_RESERVED_UID) { 1334 int uid = params->reserved_uid; 1335 1336 es->s_def_resuid = cpu_to_le16(uid & 0xFFFF); 1337 es->s_def_resuid_hi = cpu_to_le16(uid >> 16); 1338 } 1339 if (params->set_flags & EXT4_TUNE_FL_RESERVED_GID) { 1340 int gid = params->reserved_gid; 1341 1342 es->s_def_resgid = cpu_to_le16(gid & 0xFFFF); 1343 es->s_def_resgid_hi = cpu_to_le16(gid >> 16); 1344 } 1345 if (params->set_flags & EXT4_TUNE_FL_DEFAULT_MNT_OPTS) 1346 es->s_default_mount_opts = cpu_to_le32(params->default_mnt_opts); 1347 if (params->set_flags & EXT4_TUNE_FL_DEF_HASH_ALG) 1348 es->s_def_hash_version = params->def_hash_alg; 1349 if (params->set_flags & EXT4_TUNE_FL_RAID_STRIDE) 1350 es->s_raid_stride = cpu_to_le16(params->raid_stride); 1351 if (params->set_flags & EXT4_TUNE_FL_RAID_STRIPE_WIDTH) 1352 es->s_raid_stripe_width = 1353 cpu_to_le32(params->raid_stripe_width); 1354 if (params->set_flags & EXT4_TUNE_FL_ENCODING) 
1355 es->s_encoding = cpu_to_le16(params->encoding); 1356 if (params->set_flags & EXT4_TUNE_FL_ENCODING_FLAGS) 1357 es->s_encoding_flags = cpu_to_le16(params->encoding_flags); 1358 strscpy_pad(es->s_mount_opts, params->mount_opts); 1359 if (params->set_flags & EXT4_TUNE_FL_EDIT_FEATURES) { 1360 es->s_feature_compat |= 1361 cpu_to_le32(params->set_feature_compat_mask); 1362 es->s_feature_incompat |= 1363 cpu_to_le32(params->set_feature_incompat_mask); 1364 es->s_feature_ro_compat |= 1365 cpu_to_le32(params->set_feature_ro_compat_mask); 1366 es->s_feature_compat &= 1367 ~cpu_to_le32(params->clear_feature_compat_mask); 1368 es->s_feature_incompat &= 1369 ~cpu_to_le32(params->clear_feature_incompat_mask); 1370 es->s_feature_ro_compat &= 1371 ~cpu_to_le32(params->clear_feature_ro_compat_mask); 1372 if (params->set_feature_compat_mask & 1373 EXT4_FEATURE_COMPAT_DIR_INDEX) 1374 es->s_def_hash_version = sbi->s_def_hash_version; 1375 if (params->set_feature_incompat_mask & 1376 EXT4_FEATURE_INCOMPAT_CSUM_SEED) 1377 es->s_checksum_seed = cpu_to_le32(sbi->s_csum_seed); 1378 } 1379 if (params->set_flags & EXT4_TUNE_FL_FORCE_FSCK) 1380 es->s_state |= cpu_to_le16(EXT4_ERROR_FS); 1381 } 1382 1383 static int ext4_ioctl_set_tune_sb(struct file *filp, 1384 struct ext4_tune_sb_params __user *in) 1385 { 1386 struct ext4_tune_sb_params params; 1387 struct super_block *sb = file_inode(filp)->i_sb; 1388 struct ext4_sb_info *sbi = EXT4_SB(sb); 1389 struct ext4_super_block *es = sbi->s_es; 1390 int enabling_casefold = 0; 1391 int ret; 1392 1393 if (!capable(CAP_SYS_ADMIN)) 1394 return -EPERM; 1395 1396 if (copy_from_user(¶ms, in, sizeof(params))) 1397 return -EFAULT; 1398 1399 if (strnlen(params.mount_opts, sizeof(params.mount_opts)) == 1400 sizeof(params.mount_opts)) 1401 return -E2BIG; 1402 1403 if ((params.set_flags & ~TUNE_OPS_SUPPORTED) != 0) 1404 return -EOPNOTSUPP; 1405 1406 if ((params.set_flags & EXT4_TUNE_FL_ERRORS_BEHAVIOR) && 1407 (params.errors_behavior > EXT4_ERRORS_PANIC)) 
1408 return -EINVAL; 1409 1410 if ((params.set_flags & EXT4_TUNE_FL_RESERVED_BLOCKS) && 1411 (params.reserved_blocks > ext4_blocks_count(sbi->s_es) / 2)) 1412 return -EINVAL; 1413 if ((params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG) && 1414 ((params.def_hash_alg > DX_HASH_LAST) || 1415 (params.def_hash_alg == DX_HASH_SIPHASH))) 1416 return -EINVAL; 1417 if ((params.set_flags & EXT4_TUNE_FL_FEATURES) && 1418 (params.set_flags & EXT4_TUNE_FL_EDIT_FEATURES)) 1419 return -EINVAL; 1420 1421 if (params.set_flags & EXT4_TUNE_FL_FEATURES) { 1422 params.set_feature_compat_mask = 1423 params.feature_compat & 1424 ~le32_to_cpu(es->s_feature_compat); 1425 params.set_feature_incompat_mask = 1426 params.feature_incompat & 1427 ~le32_to_cpu(es->s_feature_incompat); 1428 params.set_feature_ro_compat_mask = 1429 params.feature_ro_compat & 1430 ~le32_to_cpu(es->s_feature_ro_compat); 1431 params.clear_feature_compat_mask = 1432 ~params.feature_compat & 1433 le32_to_cpu(es->s_feature_compat); 1434 params.clear_feature_incompat_mask = 1435 ~params.feature_incompat & 1436 le32_to_cpu(es->s_feature_incompat); 1437 params.clear_feature_ro_compat_mask = 1438 ~params.feature_ro_compat & 1439 le32_to_cpu(es->s_feature_ro_compat); 1440 params.set_flags |= EXT4_TUNE_FL_EDIT_FEATURES; 1441 } 1442 if (params.set_flags & EXT4_TUNE_FL_EDIT_FEATURES) { 1443 if ((params.set_feature_compat_mask & 1444 ~EXT4_TUNE_SET_COMPAT_SUPP) || 1445 (params.set_feature_incompat_mask & 1446 ~EXT4_TUNE_SET_INCOMPAT_SUPP) || 1447 (params.set_feature_ro_compat_mask & 1448 ~EXT4_TUNE_SET_RO_COMPAT_SUPP) || 1449 (params.clear_feature_compat_mask & 1450 ~EXT4_TUNE_CLEAR_COMPAT_SUPP) || 1451 (params.clear_feature_incompat_mask & 1452 ~EXT4_TUNE_CLEAR_INCOMPAT_SUPP) || 1453 (params.clear_feature_ro_compat_mask & 1454 ~EXT4_TUNE_CLEAR_RO_COMPAT_SUPP)) 1455 return -EOPNOTSUPP; 1456 1457 /* 1458 * Filter out the features that are already set from 1459 * the set_mask. 
1460 */ 1461 params.set_feature_compat_mask &= 1462 ~le32_to_cpu(es->s_feature_compat); 1463 params.set_feature_incompat_mask &= 1464 ~le32_to_cpu(es->s_feature_incompat); 1465 params.set_feature_ro_compat_mask &= 1466 ~le32_to_cpu(es->s_feature_ro_compat); 1467 if ((params.set_feature_incompat_mask & 1468 EXT4_FEATURE_INCOMPAT_CASEFOLD)) { 1469 enabling_casefold = 1; 1470 if (!(params.set_flags & EXT4_TUNE_FL_ENCODING)) { 1471 params.encoding = EXT4_ENC_UTF8_12_1; 1472 params.set_flags |= EXT4_TUNE_FL_ENCODING; 1473 } 1474 if (!(params.set_flags & EXT4_TUNE_FL_ENCODING_FLAGS)) { 1475 params.encoding_flags = 0; 1476 params.set_flags |= EXT4_TUNE_FL_ENCODING_FLAGS; 1477 } 1478 } 1479 if ((params.set_feature_compat_mask & 1480 EXT4_FEATURE_COMPAT_DIR_INDEX)) { 1481 uuid_t uu; 1482 1483 memcpy(&uu, sbi->s_hash_seed, UUID_SIZE); 1484 if (uuid_is_null(&uu)) 1485 generate_random_uuid((char *) 1486 &sbi->s_hash_seed); 1487 if (params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG) 1488 sbi->s_def_hash_version = params.def_hash_alg; 1489 else if (sbi->s_def_hash_version == 0) 1490 sbi->s_def_hash_version = DX_HASH_HALF_MD4; 1491 if (!(es->s_flags & 1492 cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH)) && 1493 !(es->s_flags & 1494 cpu_to_le32(EXT2_FLAGS_SIGNED_HASH))) { 1495 #ifdef __CHAR_UNSIGNED__ 1496 sbi->s_hash_unsigned = 3; 1497 #else 1498 sbi->s_hash_unsigned = 0; 1499 #endif 1500 } 1501 } 1502 } 1503 if (params.set_flags & EXT4_TUNE_FL_ENCODING) { 1504 if (!enabling_casefold) 1505 return -EINVAL; 1506 if (params.encoding == 0) 1507 params.encoding = EXT4_ENC_UTF8_12_1; 1508 else if (params.encoding != EXT4_ENC_UTF8_12_1) 1509 return -EINVAL; 1510 } 1511 if (params.set_flags & EXT4_TUNE_FL_ENCODING_FLAGS) { 1512 if (!enabling_casefold) 1513 return -EINVAL; 1514 if (params.encoding_flags & ~SB_ENC_SUPP_MASK) 1515 return -EINVAL; 1516 } 1517 1518 ret = mnt_want_write_file(filp); 1519 if (ret) 1520 return ret; 1521 1522 ret = ext4_update_superblocks_fn(sb, ext4_sb_setparams, ¶ms); 1523 
mnt_drop_write_file(filp); 1524 1525 if (params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG) 1526 sbi->s_def_hash_version = params.def_hash_alg; 1527 1528 return ret; 1529 } 1530 1531 static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 1532 { 1533 struct inode *inode = file_inode(filp); 1534 struct super_block *sb = inode->i_sb; 1535 struct mnt_idmap *idmap = file_mnt_idmap(filp); 1536 1537 ext4_debug("cmd = %u, arg = %lu\n", cmd, arg); 1538 1539 switch (cmd) { 1540 case FS_IOC_GETFSMAP: 1541 return ext4_ioc_getfsmap(sb, (void __user *)arg); 1542 case EXT4_IOC_GETVERSION: 1543 case EXT4_IOC_GETVERSION_OLD: 1544 return put_user(inode->i_generation, (int __user *) arg); 1545 case EXT4_IOC_SETVERSION: 1546 case EXT4_IOC_SETVERSION_OLD: { 1547 handle_t *handle; 1548 struct ext4_iloc iloc; 1549 __u32 generation; 1550 int err; 1551 1552 if (!inode_owner_or_capable(idmap, inode)) 1553 return -EPERM; 1554 1555 if (ext4_has_feature_metadata_csum(inode->i_sb)) { 1556 ext4_warning(sb, "Setting inode version is not " 1557 "supported with metadata_csum enabled."); 1558 return -ENOTTY; 1559 } 1560 1561 err = mnt_want_write_file(filp); 1562 if (err) 1563 return err; 1564 if (get_user(generation, (int __user *) arg)) { 1565 err = -EFAULT; 1566 goto setversion_out; 1567 } 1568 1569 inode_lock(inode); 1570 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 1571 if (IS_ERR(handle)) { 1572 err = PTR_ERR(handle); 1573 goto unlock_out; 1574 } 1575 err = ext4_reserve_inode_write(handle, inode, &iloc); 1576 if (err == 0) { 1577 inode_set_ctime_current(inode); 1578 inode_inc_iversion(inode); 1579 inode->i_generation = generation; 1580 err = ext4_mark_iloc_dirty(handle, inode, &iloc); 1581 } 1582 ext4_journal_stop(handle); 1583 1584 unlock_out: 1585 inode_unlock(inode); 1586 setversion_out: 1587 mnt_drop_write_file(filp); 1588 return err; 1589 } 1590 case EXT4_IOC_GROUP_EXTEND: { 1591 ext4_fsblk_t n_blocks_count; 1592 int err, err2=0; 1593 1594 err = 
ext4_resize_begin(sb); 1595 if (err) 1596 return err; 1597 1598 if (get_user(n_blocks_count, (__u32 __user *)arg)) { 1599 err = -EFAULT; 1600 goto group_extend_out; 1601 } 1602 1603 if (ext4_has_feature_bigalloc(sb)) { 1604 ext4_msg(sb, KERN_ERR, 1605 "Online resizing not supported with bigalloc"); 1606 err = -EOPNOTSUPP; 1607 goto group_extend_out; 1608 } 1609 1610 err = mnt_want_write_file(filp); 1611 if (err) 1612 goto group_extend_out; 1613 1614 err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count); 1615 if (EXT4_SB(sb)->s_journal) { 1616 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); 1617 err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0); 1618 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); 1619 } 1620 if (err == 0) 1621 err = err2; 1622 mnt_drop_write_file(filp); 1623 group_extend_out: 1624 err2 = ext4_resize_end(sb, false); 1625 if (err == 0) 1626 err = err2; 1627 return err; 1628 } 1629 1630 case EXT4_IOC_MOVE_EXT: { 1631 struct move_extent me; 1632 int err; 1633 1634 if (!(filp->f_mode & FMODE_READ) || 1635 !(filp->f_mode & FMODE_WRITE)) 1636 return -EBADF; 1637 1638 if (copy_from_user(&me, 1639 (struct move_extent __user *)arg, sizeof(me))) 1640 return -EFAULT; 1641 me.moved_len = 0; 1642 1643 CLASS(fd, donor)(me.donor_fd); 1644 if (fd_empty(donor)) 1645 return -EBADF; 1646 1647 if (!(fd_file(donor)->f_mode & FMODE_WRITE)) 1648 return -EBADF; 1649 1650 err = mnt_want_write_file(filp); 1651 if (err) 1652 return err; 1653 1654 err = ext4_move_extents(filp, fd_file(donor), me.orig_start, 1655 me.donor_start, me.len, &me.moved_len); 1656 mnt_drop_write_file(filp); 1657 1658 if (copy_to_user((struct move_extent __user *)arg, 1659 &me, sizeof(me))) 1660 err = -EFAULT; 1661 return err; 1662 } 1663 1664 case EXT4_IOC_GROUP_ADD: { 1665 struct ext4_new_group_data input; 1666 1667 if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg, 1668 sizeof(input))) 1669 return -EFAULT; 1670 1671 return ext4_ioctl_group_add(filp, &input); 
1672 } 1673 1674 case EXT4_IOC_MIGRATE: 1675 { 1676 int err; 1677 if (!inode_owner_or_capable(idmap, inode)) 1678 return -EACCES; 1679 1680 err = mnt_want_write_file(filp); 1681 if (err) 1682 return err; 1683 /* 1684 * inode_mutex prevent write and truncate on the file. 1685 * Read still goes through. We take i_data_sem in 1686 * ext4_ext_swap_inode_data before we switch the 1687 * inode format to prevent read. 1688 */ 1689 inode_lock((inode)); 1690 err = ext4_ext_migrate(inode); 1691 inode_unlock((inode)); 1692 mnt_drop_write_file(filp); 1693 return err; 1694 } 1695 1696 case EXT4_IOC_ALLOC_DA_BLKS: 1697 { 1698 int err; 1699 if (!inode_owner_or_capable(idmap, inode)) 1700 return -EACCES; 1701 1702 err = mnt_want_write_file(filp); 1703 if (err) 1704 return err; 1705 err = ext4_alloc_da_blocks(inode); 1706 mnt_drop_write_file(filp); 1707 return err; 1708 } 1709 1710 case EXT4_IOC_SWAP_BOOT: 1711 { 1712 int err; 1713 if (!(filp->f_mode & FMODE_WRITE)) 1714 return -EBADF; 1715 err = mnt_want_write_file(filp); 1716 if (err) 1717 return err; 1718 err = swap_inode_boot_loader(sb, idmap, inode); 1719 mnt_drop_write_file(filp); 1720 return err; 1721 } 1722 1723 case EXT4_IOC_RESIZE_FS: { 1724 ext4_fsblk_t n_blocks_count; 1725 int err = 0, err2 = 0; 1726 ext4_group_t o_group = EXT4_SB(sb)->s_groups_count; 1727 1728 if (copy_from_user(&n_blocks_count, (__u64 __user *)arg, 1729 sizeof(__u64))) { 1730 return -EFAULT; 1731 } 1732 1733 err = ext4_resize_begin(sb); 1734 if (err) 1735 return err; 1736 1737 err = mnt_want_write_file(filp); 1738 if (err) 1739 goto resizefs_out; 1740 1741 err = ext4_resize_fs(sb, n_blocks_count); 1742 if (EXT4_SB(sb)->s_journal) { 1743 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE, NULL); 1744 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); 1745 err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0); 1746 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); 1747 } 1748 if (err == 0) 1749 err = err2; 1750 mnt_drop_write_file(filp); 1751 if (!err 
&& (o_group < EXT4_SB(sb)->s_groups_count) && 1752 ext4_has_group_desc_csum(sb) && 1753 test_opt(sb, INIT_INODE_TABLE)) 1754 err = ext4_register_li_request(sb, o_group); 1755 1756 resizefs_out: 1757 err2 = ext4_resize_end(sb, true); 1758 if (err == 0) 1759 err = err2; 1760 return err; 1761 } 1762 1763 case FITRIM: 1764 { 1765 struct fstrim_range range; 1766 int ret = 0; 1767 1768 if (!capable(CAP_SYS_ADMIN)) 1769 return -EPERM; 1770 1771 if (!bdev_max_discard_sectors(sb->s_bdev)) 1772 return -EOPNOTSUPP; 1773 1774 /* 1775 * We haven't replayed the journal, so we cannot use our 1776 * block-bitmap-guided storage zapping commands. 1777 */ 1778 if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) 1779 return -EROFS; 1780 1781 if (copy_from_user(&range, (struct fstrim_range __user *)arg, 1782 sizeof(range))) 1783 return -EFAULT; 1784 1785 ret = ext4_trim_fs(sb, &range); 1786 if (ret < 0) 1787 return ret; 1788 1789 if (copy_to_user((struct fstrim_range __user *)arg, &range, 1790 sizeof(range))) 1791 return -EFAULT; 1792 1793 return 0; 1794 } 1795 case EXT4_IOC_PRECACHE_EXTENTS: 1796 { 1797 int ret; 1798 1799 inode_lock_shared(inode); 1800 ret = ext4_ext_precache(inode); 1801 inode_unlock_shared(inode); 1802 return ret; 1803 } 1804 case FS_IOC_SET_ENCRYPTION_POLICY: 1805 if (!ext4_has_feature_encrypt(sb)) 1806 return -EOPNOTSUPP; 1807 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg); 1808 1809 case FS_IOC_GET_ENCRYPTION_PWSALT: 1810 return ext4_ioctl_get_encryption_pwsalt(filp, (void __user *)arg); 1811 1812 case FS_IOC_GET_ENCRYPTION_POLICY: 1813 if (!ext4_has_feature_encrypt(sb)) 1814 return -EOPNOTSUPP; 1815 return fscrypt_ioctl_get_policy(filp, (void __user *)arg); 1816 1817 case FS_IOC_GET_ENCRYPTION_POLICY_EX: 1818 if (!ext4_has_feature_encrypt(sb)) 1819 return -EOPNOTSUPP; 1820 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg); 1821 1822 case FS_IOC_ADD_ENCRYPTION_KEY: 1823 if (!ext4_has_feature_encrypt(sb)) 1824 return -EOPNOTSUPP; 
1825 return fscrypt_ioctl_add_key(filp, (void __user *)arg); 1826 1827 case FS_IOC_REMOVE_ENCRYPTION_KEY: 1828 if (!ext4_has_feature_encrypt(sb)) 1829 return -EOPNOTSUPP; 1830 return fscrypt_ioctl_remove_key(filp, (void __user *)arg); 1831 1832 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: 1833 if (!ext4_has_feature_encrypt(sb)) 1834 return -EOPNOTSUPP; 1835 return fscrypt_ioctl_remove_key_all_users(filp, 1836 (void __user *)arg); 1837 case FS_IOC_GET_ENCRYPTION_KEY_STATUS: 1838 if (!ext4_has_feature_encrypt(sb)) 1839 return -EOPNOTSUPP; 1840 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg); 1841 1842 case FS_IOC_GET_ENCRYPTION_NONCE: 1843 if (!ext4_has_feature_encrypt(sb)) 1844 return -EOPNOTSUPP; 1845 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg); 1846 1847 case EXT4_IOC_CLEAR_ES_CACHE: 1848 { 1849 if (!inode_owner_or_capable(idmap, inode)) 1850 return -EACCES; 1851 ext4_clear_inode_es(inode); 1852 return 0; 1853 } 1854 1855 case EXT4_IOC_GETSTATE: 1856 { 1857 __u32 state = 0; 1858 1859 if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED)) 1860 state |= EXT4_STATE_FLAG_EXT_PRECACHED; 1861 if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 1862 state |= EXT4_STATE_FLAG_NEW; 1863 if (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) 1864 state |= EXT4_STATE_FLAG_NEWENTRY; 1865 if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) 1866 state |= EXT4_STATE_FLAG_DA_ALLOC_CLOSE; 1867 1868 return put_user(state, (__u32 __user *) arg); 1869 } 1870 1871 case EXT4_IOC_GET_ES_CACHE: 1872 return ext4_ioctl_get_es_cache(filp, arg); 1873 1874 case EXT4_IOC_SHUTDOWN: 1875 return ext4_ioctl_shutdown(sb, arg); 1876 1877 case FS_IOC_ENABLE_VERITY: 1878 if (!ext4_has_feature_verity(sb)) 1879 return -EOPNOTSUPP; 1880 return fsverity_ioctl_enable(filp, (const void __user *)arg); 1881 1882 case FS_IOC_MEASURE_VERITY: 1883 if (!ext4_has_feature_verity(sb)) 1884 return -EOPNOTSUPP; 1885 return fsverity_ioctl_measure(filp, (void __user *)arg); 1886 
1887 case FS_IOC_READ_VERITY_METADATA: 1888 if (!ext4_has_feature_verity(sb)) 1889 return -EOPNOTSUPP; 1890 return fsverity_ioctl_read_metadata(filp, 1891 (const void __user *)arg); 1892 1893 case EXT4_IOC_CHECKPOINT: 1894 return ext4_ioctl_checkpoint(filp, arg); 1895 1896 case FS_IOC_GETFSLABEL: 1897 return ext4_ioctl_getlabel(EXT4_SB(sb), (void __user *)arg); 1898 1899 case FS_IOC_SETFSLABEL: 1900 return ext4_ioctl_setlabel(filp, 1901 (const void __user *)arg); 1902 1903 case EXT4_IOC_GETFSUUID: 1904 return ext4_ioctl_getuuid(EXT4_SB(sb), (void __user *)arg); 1905 case EXT4_IOC_SETFSUUID: 1906 return ext4_ioctl_setuuid(filp, (const void __user *)arg); 1907 case EXT4_IOC_GET_TUNE_SB_PARAM: 1908 return ext4_ioctl_get_tune_sb(EXT4_SB(sb), 1909 (void __user *)arg); 1910 case EXT4_IOC_SET_TUNE_SB_PARAM: 1911 return ext4_ioctl_set_tune_sb(filp, (void __user *)arg); 1912 default: 1913 return -ENOTTY; 1914 } 1915 } 1916 1917 long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 1918 { 1919 return __ext4_ioctl(filp, cmd, arg); 1920 } 1921 1922 #ifdef CONFIG_COMPAT 1923 long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1924 { 1925 /* These are just misnamed, they actually get/put from/to user an int */ 1926 switch (cmd) { 1927 case EXT4_IOC32_GETVERSION: 1928 cmd = EXT4_IOC_GETVERSION; 1929 break; 1930 case EXT4_IOC32_SETVERSION: 1931 cmd = EXT4_IOC_SETVERSION; 1932 break; 1933 case EXT4_IOC32_GROUP_EXTEND: 1934 cmd = EXT4_IOC_GROUP_EXTEND; 1935 break; 1936 case EXT4_IOC32_GETVERSION_OLD: 1937 cmd = EXT4_IOC_GETVERSION_OLD; 1938 break; 1939 case EXT4_IOC32_SETVERSION_OLD: 1940 cmd = EXT4_IOC_SETVERSION_OLD; 1941 break; 1942 case EXT4_IOC32_GETRSVSZ: 1943 cmd = EXT4_IOC_GETRSVSZ; 1944 break; 1945 case EXT4_IOC32_SETRSVSZ: 1946 cmd = EXT4_IOC_SETRSVSZ; 1947 break; 1948 case EXT4_IOC32_GROUP_ADD: { 1949 struct compat_ext4_new_group_input __user *uinput; 1950 struct ext4_new_group_data input; 1951 int err; 1952 1953 uinput = 
compat_ptr(arg); 1954 err = get_user(input.group, &uinput->group); 1955 err |= get_user(input.block_bitmap, &uinput->block_bitmap); 1956 err |= get_user(input.inode_bitmap, &uinput->inode_bitmap); 1957 err |= get_user(input.inode_table, &uinput->inode_table); 1958 err |= get_user(input.blocks_count, &uinput->blocks_count); 1959 err |= get_user(input.reserved_blocks, 1960 &uinput->reserved_blocks); 1961 if (err) 1962 return -EFAULT; 1963 return ext4_ioctl_group_add(file, &input); 1964 } 1965 case EXT4_IOC_MOVE_EXT: 1966 case EXT4_IOC_RESIZE_FS: 1967 case FITRIM: 1968 case EXT4_IOC_PRECACHE_EXTENTS: 1969 case FS_IOC_SET_ENCRYPTION_POLICY: 1970 case FS_IOC_GET_ENCRYPTION_PWSALT: 1971 case FS_IOC_GET_ENCRYPTION_POLICY: 1972 case FS_IOC_GET_ENCRYPTION_POLICY_EX: 1973 case FS_IOC_ADD_ENCRYPTION_KEY: 1974 case FS_IOC_REMOVE_ENCRYPTION_KEY: 1975 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: 1976 case FS_IOC_GET_ENCRYPTION_KEY_STATUS: 1977 case FS_IOC_GET_ENCRYPTION_NONCE: 1978 case EXT4_IOC_SHUTDOWN: 1979 case FS_IOC_GETFSMAP: 1980 case FS_IOC_ENABLE_VERITY: 1981 case FS_IOC_MEASURE_VERITY: 1982 case FS_IOC_READ_VERITY_METADATA: 1983 case EXT4_IOC_CLEAR_ES_CACHE: 1984 case EXT4_IOC_GETSTATE: 1985 case EXT4_IOC_GET_ES_CACHE: 1986 case EXT4_IOC_CHECKPOINT: 1987 case FS_IOC_GETFSLABEL: 1988 case FS_IOC_SETFSLABEL: 1989 case EXT4_IOC_GETFSUUID: 1990 case EXT4_IOC_SETFSUUID: 1991 break; 1992 default: 1993 return -ENOIOCTLCMD; 1994 } 1995 return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); 1996 } 1997 #endif 1998 1999 static void set_overhead(struct ext4_sb_info *sbi, 2000 struct ext4_super_block *es, const void *arg) 2001 { 2002 es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg)); 2003 } 2004 2005 int ext4_update_overhead(struct super_block *sb, bool force) 2006 { 2007 struct ext4_sb_info *sbi = EXT4_SB(sb); 2008 2009 if (ext4_emergency_state(sb) || sb_rdonly(sb)) 2010 return 0; 2011 if (!force && 2012 (sbi->s_overhead == 0 || 2013 sbi->s_overhead == 
le32_to_cpu(sbi->s_es->s_overhead_clusters))) 2014 return 0; 2015 return ext4_update_superblocks_fn(sb, set_overhead, &sbi->s_overhead); 2016 } 2017