/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"

/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}

/*
 * Export inode flags to the format expected by the FS_IOC_GETFLAGS ioctl.
 */
static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
{
	unsigned int iflags = 0;

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;

	return iflags;
}

/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
void btrfs_update_iflags(struct inode *inode)
{
	struct btrfs_inode *ip = BTRFS_I(inode);

	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);

	if (ip->flags & BTRFS_INODE_SYNC)
		inode->i_flags |= S_SYNC;
	if (ip->flags & BTRFS_INODE_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	if (ip->flags & BTRFS_INODE_APPEND)
		inode->i_flags |= S_APPEND;
	if (ip->flags & BTRFS_INODE_NOATIME)
		inode->i_flags |= S_NOATIME;
	if (ip->flags & BTRFS_INODE_DIRSYNC)
		inode->i_flags |= S_DIRSYNC;
}

/*
 * Inherit flags from the parent inode.
 *
 * Unlike extN we don't have any flags we don't want to inherit currently.
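 *
 * The mask applied below mirrors btrfs_mask_flags(): regular files drop
 * DIRSYNC, and other non-directories keep only NODUMP and NOATIME.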
 */
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
{
	unsigned int flags;

	if (!dir)
		return;

	flags = BTRFS_I(dir)->flags;

	if (S_ISREG(inode->i_mode))
		flags &= ~BTRFS_INODE_DIRSYNC;
	else if (!S_ISDIR(inode->i_mode))
		flags &= (BTRFS_INODE_NODUMP | BTRFS_INODE_NOATIME);

	BTRFS_I(inode)->flags = flags;
	btrfs_update_iflags(inode);
}

static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
	struct btrfs_inode *ip = BTRFS_I(file->f_path.dentry->d_inode);
	unsigned int flags = btrfs_flags_to_ioctl(ip->flags);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}

static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct btrfs_inode *ip = BTRFS_I(inode);
	struct btrfs_root *root = ip->root;
	struct btrfs_trans_handle *trans;
	unsigned int flags, oldflags;
	int ret;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL))
		return -EOPNOTSUPP;

	if (!is_owner_or_cap(inode))
		return -EACCES;

	mutex_lock(&inode->i_mutex);

	flags = btrfs_mask_flags(inode->i_mode, flags);
	oldflags = btrfs_flags_to_ioctl(ip->flags);
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			ret = -EPERM;
			goto out_unlock;
		}
	}

	ret = mnt_want_write(file->f_path.mnt);
	if (ret)
		goto out_unlock;

	if (flags & FS_SYNC_FL)
		ip->flags |= BTRFS_INODE_SYNC;
	else
		ip->flags &= ~BTRFS_INODE_SYNC;
	if (flags & FS_IMMUTABLE_FL)
		ip->flags |= BTRFS_INODE_IMMUTABLE;
	else
		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		ip->flags |= BTRFS_INODE_APPEND;
	else
		ip->flags &= ~BTRFS_INODE_APPEND;
	if (flags & FS_NODUMP_FL)
		ip->flags |= BTRFS_INODE_NODUMP;
	else
		ip->flags &= ~BTRFS_INODE_NODUMP;
	if (flags & FS_NOATIME_FL)
		ip->flags |= BTRFS_INODE_NOATIME;
	else
		ip->flags &= ~BTRFS_INODE_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		ip->flags |= BTRFS_INODE_DIRSYNC;
	else
		ip->flags &= ~BTRFS_INODE_DIRSYNC;


	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	ret = btrfs_update_inode(trans, root, inode);
	BUG_ON(ret);

	btrfs_update_iflags(inode);
	inode->i_ctime = CURRENT_TIME;
	btrfs_end_transaction(trans, root);

	mnt_drop_write(file->f_path.mnt);
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;

	return put_user(inode->i_generation, arg);
}

static noinline int create_subvol(struct btrfs_root *root,
				  struct dentry *dentry,
				  char *name, int namelen)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_root_item root_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct btrfs_root *new_root;
	struct inode *dir = dentry->d_parent->d_inode;
	int ret;
	int err;
	u64 objectid;
	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
	u64 index = 0;

	ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root,
				       0, &objectid);
	if (ret)
		return ret;
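	/*
	 * The per-item counts in the comment below add up to the 6 units
	 * reserved via btrfs_start_transaction() just after it.
	 */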
	/*
	 * 1 - inode item
	 * 2 - refs
	 * 1 - root item
	 * 2 - dir items
	 */
	trans = btrfs_start_transaction(root, 6);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
				      0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);

	write_extent_buffer(leaf, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(leaf),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
			    BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	inode_item = &root_item.inode;
	memset(inode_item, 0, sizeof(*inode_item));
	inode_item->generation = cpu_to_le64(1);
	inode_item->size = cpu_to_le64(3);
	inode_item->nlink = cpu_to_le32(1);
	inode_item->nbytes = cpu_to_le64(root->leafsize);
	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

	btrfs_set_root_bytenr(&root_item, leaf->start);
	btrfs_set_root_generation(&root_item, trans->transid);
	btrfs_set_root_level(&root_item, 0);
	btrfs_set_root_refs(&root_item, 1);
	btrfs_set_root_used(&root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root_item, 0);

	memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
	root_item.drop_level = 0;

	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	leaf = NULL;

	btrfs_set_root_dirid(&root_item, new_dirid);

	key.objectid = objectid;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
				&root_item);
	if (ret)
		goto fail;

	key.offset = (u64)-1;
	new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(new_root));

	btrfs_record_root_in_trans(trans, new_root);

	ret = btrfs_create_subvol_root(trans, new_root, new_dirid,
				       BTRFS_I(dir)->block_group);
	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(dir, &index);
	BUG_ON(ret);

	ret = btrfs_insert_dir_item(trans, root,
				    name, namelen, dir->i_ino, &key,
				    BTRFS_FT_DIR, index);
	if (ret)
		goto fail;

	btrfs_i_size_write(dir, dir->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, root, dir);
	BUG_ON(ret);

	ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 dir->i_ino, index, name, namelen);

	BUG_ON(ret);

	d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
fail:
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

static int create_snapshot(struct btrfs_root *root, struct dentry *dentry)
{
	struct inode *inode;
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
	int ret;

	if (!root->ref_cows)
		return -EINVAL;

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
	if (!pending_snapshot)
		return -ENOMEM;

	btrfs_init_block_rsv(&pending_snapshot->block_rsv);
	pending_snapshot->dentry = dentry;
	pending_snapshot->root = root;

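	/*
	 * The snapshot itself is created when this transaction commits and
	 * the pending snapshot list is processed.
	 */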
	trans = btrfs_start_transaction(root->fs_info->extent_root, 5);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

	ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
	BUG_ON(ret);

	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
	ret = btrfs_commit_transaction(trans, root->fs_info->extent_root);
	BUG_ON(ret);

	ret = pending_snapshot->error;
	if (ret)
		goto fail;

	btrfs_orphan_cleanup(pending_snapshot->snap);

	inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}
	BUG_ON(!inode);
	d_instantiate(dentry, inode);
	ret = 0;
fail:
	kfree(pending_snapshot);
	return ret;
}

/* copy of may_create in fs/namei.c() */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
	if (child->d_inode)
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * Create a new subvolume below @parent. This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
static noinline int btrfs_mksubvol(struct path *parent,
				   char *name, int namelen,
				   struct btrfs_root *snap_src)
{
	struct inode *dir = parent->dentry->d_inode;
	struct dentry *dentry;
	int error;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);

	dentry = lookup_one_len(name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = -EEXIST;
	if (dentry->d_inode)
		goto out_dput;

	error = mnt_want_write(parent->mnt);
	if (error)
		goto out_dput;

	error = btrfs_may_create(dir, dentry);
	if (error)
		goto out_drop_write;

	down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);

	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

	if (snap_src) {
		error = create_snapshot(snap_src, dentry);
	} else {
		error = create_subvol(BTRFS_I(dir)->root, dentry,
				      name, namelen);
	}
	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
	up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write(parent->mnt);
out_dput:
	dput(dentry);
out_unlock:
	mutex_unlock(&dir->i_mutex);
	return error;
}

static int should_defrag_range(struct inode *inode, u64 start, u64 len,
			       int thresh, u64 *last_len, u64 *skip,
			       u64 *defrag_end)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 1;


	if (thresh == 0)
		thresh = 256 * 1024;

	/*
	 * make sure that once we start defragging an extent, we keep on
	 * defragging it
	 */
	if (start < *defrag_end)
		return 1;

	*skip = 0;

	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (!em) {
		/* get the big lock and read metadata off disk */
		lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
		unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);

		if (IS_ERR(em))
			return 0;
	}

	/* this will cover holes, and inline extents */
	if (em->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = 0;

	/*
	 * we hit a real extent, if it is big don't bother defragging it again
	 */
	if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
		ret = 0;

	/*
	 * last_len ends up being a counter of how many bytes we've defragged.
	 * every time we choose not to defrag an extent, we reset *last_len
	 * so that the next tiny extent will force a defrag.
	 *
	 * The end result of this is that tiny extents before a single big
	 * extent will force at least part of that big extent to be defragged.
	 */
	if (ret) {
		*last_len += len;
		*defrag_end = extent_map_end(em);
	} else {
		*last_len = 0;
		*skip = extent_map_end(em);
		*defrag_end = 0;
	}

	free_extent_map(em);
	return ret;
}

static int btrfs_defrag_file(struct file *file,
			     struct btrfs_ioctl_defrag_range_args *range)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct page *page;
	unsigned long last_index;
	unsigned long ra_pages = root->fs_info->bdi.ra_pages;
	unsigned long total_read = 0;
	u64 page_start;
	u64 page_end;
	u64 last_len = 0;
	u64 skip = 0;
	u64 defrag_end = 0;
	unsigned long i;
	int ret;

	if (inode->i_size == 0)
		return 0;

	if (range->start + range->len > range->start) {
		last_index = min_t(u64, inode->i_size - 1,
			 range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
	} else {
		last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
	}

	i = range->start >> PAGE_CACHE_SHIFT;
	while (i <= last_index) {
		if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
					PAGE_CACHE_SIZE,
					range->extent_thresh,
					&last_len, &skip,
					&defrag_end)) {
			unsigned long next;
			/*
			 * the should_defrag function tells us how much to
			 * skip; bump our counter by the suggested amount
			 */
			next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
			i = max(i + 1, next);
			continue;
		}

		if (total_read % ra_pages == 0) {
			btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i,
				       min(last_index, i + ra_pages - 1));
		}
		total_read++;
		mutex_lock(&inode->i_mutex);
		if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
			BTRFS_I(inode)->force_compress = 1;

		ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
		if (ret)
			goto err_unlock;
again:
		if (inode->i_size == 0 ||
		    i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) {
			ret = 0;
			goto err_reservations;
		}

		page = grab_cache_page(inode->i_mapping, i);
		if (!page) {
			ret = -ENOMEM;
			goto err_reservations;
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				ret = -EIO;
				goto err_reservations;
			}
		}

		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}

		wait_on_page_writeback(page);

		if (PageDirty(page)) {
			btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
			goto loop_unlock;
		}

		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
		page_end = page_start + PAGE_CACHE_SIZE - 1;
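		/*
		 * Lock the extent range and wait out any ordered extent that
		 * is still in flight for this page before redirtying it.
		 */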
		lock_extent(io_tree, page_start, page_end, GFP_NOFS);

		ordered = btrfs_lookup_ordered_extent(inode, page_start);
		if (ordered) {
			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			goto again;
		}
		set_page_extent_mapped(page);

		/*
		 * this makes sure page_mkwrite is called on the
		 * page if it is dirtied again later
		 */
		clear_page_dirty_for_io(page);
		clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start,
				  page_end, EXTENT_DIRTY | EXTENT_DELALLOC |
				  EXTENT_DO_ACCOUNTING, GFP_NOFS);

		btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
		ClearPageChecked(page);
		set_page_dirty(page);
		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);

loop_unlock:
		unlock_page(page);
		page_cache_release(page);
		mutex_unlock(&inode->i_mutex);

		balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
		i++;
	}

	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO))
		filemap_flush(inode->i_mapping);

	if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
		/* the filemap_flush will queue IO into the worker threads, but
		 * we have to make sure the IO is actually started and that
		 * ordered extents get created before we return
		 */
		atomic_inc(&root->fs_info->async_submit_draining);
		while (atomic_read(&root->fs_info->nr_async_submits) ||
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
			    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
		}
		atomic_dec(&root->fs_info->async_submit_draining);

		mutex_lock(&inode->i_mutex);
		BTRFS_I(inode)->force_compress = 0;
		mutex_unlock(&inode->i_mutex);
	}

	return 0;

err_reservations:
	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
err_unlock:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
					void __user *arg)
{
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
	char *devstr = NULL;
	int ret = 0;
	int namelen;
	int mod = 0;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	namelen = strlen(vol_args->name);

	mutex_lock(&root->fs_info->volume_mutex);
	sizestr = vol_args->name;
	devstr = strchr(sizestr, ':');
	if (devstr) {
		char *end;
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
		devid = simple_strtoull(devstr, &end, 10);
		printk(KERN_INFO "resizing devid %llu\n",
		       (unsigned long long)devid);
	}
	device = btrfs_find_device(root, devid, NULL, NULL);
	if (!device) {
		printk(KERN_INFO "resizer unable to find device %llu\n",
		       (unsigned long long)devid);
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!strcmp(sizestr, "max"))
		new_size = device->bdev->bd_inode->i_size;
	else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
		new_size = memparse(sizestr, NULL);
		if (new_size == 0) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	old_size = device->total_bytes;

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
			goto out_unlock;
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
		new_size = old_size + new_size;
	}

	if (new_size < 256 * 1024 * 1024) {
		ret = -EINVAL;
		goto out_unlock;
	}
	if (new_size > device->bdev->bd_inode->i_size) {
		ret = -EFBIG;
		goto out_unlock;
	}

	do_div(new_size, root->sectorsize);
	new_size *= root->sectorsize;

	printk(KERN_INFO "new size for %s is %llu\n",
	       device->name, (unsigned long long)new_size);

	if (new_size > old_size) {
		trans = btrfs_start_transaction(root, 0);
		ret = btrfs_grow_device(trans, device, new_size);
		btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_shrink_device(device, new_size);
	}

out_unlock:
	mutex_unlock(&root->fs_info->volume_mutex);
	kfree(vol_args);
	return ret;
}

static noinline int btrfs_ioctl_snap_create(struct file *file,
					    void __user *arg, int subvol)
{
	struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
	struct btrfs_ioctl_vol_args *vol_args;
	struct file *src_file;
	int namelen;
	int ret = 0;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	namelen = strlen(vol_args->name);
	if (strchr(vol_args->name, '/')) {
		ret = -EINVAL;
		goto out;
	}

	if (subvol) {
		ret = btrfs_mksubvol(&file->f_path, vol_args->name, namelen,
				     NULL);
	} else {
		struct inode *src_inode;
		src_file = fget(vol_args->fd);
		if (!src_file) {
			ret = -EINVAL;
			goto out;
		}

		src_inode = src_file->f_path.dentry->d_inode;
		if (src_inode->i_sb != file->f_path.dentry->d_inode->i_sb) {
			printk(KERN_INFO "btrfs: Snapshot src from "
			       "another FS\n");
			ret = -EINVAL;
			fput(src_file);
			goto out;
		}
		ret = btrfs_mksubvol(&file->f_path, vol_args->name, namelen,
				     BTRFS_I(src_inode)->root);
		fput(src_file);
	}
out:
	kfree(vol_args);
	return ret;
}

/*
 * helper to check if the subvolume references other subvolumes
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == root->root_key.objectid &&
		    key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}

static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

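	/* reject keys that sort below the search range's minimum key */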
	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
		return 0;

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
		return 0;
	return 1;
}

static noinline int copy_to_sk(struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
			       char *buf,
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int found = 0;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size_nr(leaf, i);

		if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
			item_len = 0;

		if (sizeof(sh) + item_len + *sk_offset >
		    BTRFS_SEARCH_ARGS_BUFSIZE) {
			ret = 1;
			goto overflow;
		}

		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

		/* copy search result header */
		memcpy(buf + *sk_offset, &sh, sizeof(sh));
		*sk_offset += sizeof(sh);

		if (item_len) {
			char *p = buf + *sk_offset;
			/* copy the item */
			read_extent_buffer(leaf, p,
					   item_off, item_len);
			*sk_offset += item_len;
		}
		found++;

		if (*num_found >= sk->nr_items)
			break;
	}
advance_key:
	ret = 0;
	if (key->offset < (u64)-1 && key->offset < sk->max_offset)
		key->offset++;
	else if (key->type < (u8)-1 && key->type < sk->max_type) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1 && key->objectid < sk->max_objectid) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	} else
		ret = 1;
overflow:
	*num_found += found;
	return ret;
}

static noinline int search_ioctl(struct inode *inode,
				 struct btrfs_ioctl_search_args *args)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_key max_key;
	struct btrfs_path *path;
	struct btrfs_ioctl_search_key *sk = &args->key;
	struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* search the root of the inode that was passed */
		root = BTRFS_I(inode)->root;
	} else {
		key.objectid = sk->tree_id;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;
		root = btrfs_read_fs_root_no_name(info, &key);
		if (IS_ERR(root)) {
			printk(KERN_ERR "could not find root %llu\n",
			       sk->tree_id);
			btrfs_free_path(path);
			return -ENOENT;
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
	key.offset = sk->min_offset;

	max_key.objectid = sk->max_objectid;
	max_key.type = sk->max_type;
	max_key.offset = sk->max_offset;

	path->keep_locks = 1;

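	/*
	 * Walk forward from the minimum key, copying items into the user
	 * buffer until it fills up or the key range is exhausted.
	 */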
	while (1) {
		ret = btrfs_search_forward(root, &key, &max_key, path, 0,
					   sk->min_transid);
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			goto err;
		}
		ret = copy_to_sk(root, path, &key, sk, args->buf,
				 &sk_offset, &num_found);
		btrfs_release_path(root, path);
		if (ret || num_found >= sk->nr_items)
			break;

	}
	ret = 0;
err:
	sk->nr_items = num_found;
	btrfs_free_path(path);
	return ret;
}

static noinline int btrfs_ioctl_tree_search(struct file *file,
					    void __user *argp)
{
	struct btrfs_ioctl_search_args *args;
	struct inode *inode;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	args = kmalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(args, argp, sizeof(*args))) {
		kfree(args);
		return -EFAULT;
	}
	inode = fdentry(file)->d_inode;
	ret = search_ioctl(inode, args);
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;
	kfree(args);
	return ret;
}

/*
 * Search INODE_REFs to identify the path name of the 'dirid' directory
 * in a 'tree_id' tree and set the path name in 'name'.
 */
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
				u64 tree_id, u64 dirid, char *name)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	char *ptr;
	int ret = -1;
	int slot;
	int len;
	int total_len = 0;
	struct btrfs_inode_ref *iref;
	struct extent_buffer *l;
	struct btrfs_path *path;

	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
		name[0] = '\0';
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];

	key.objectid = tree_id;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(info, &key);
	if (IS_ERR(root)) {
		printk(KERN_ERR "could not find root %llu\n", tree_id);
		ret = -ENOENT;
		goto out;
	}

	key.objectid = dirid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		l = path->nodes[0];
		slot = path->slots[0];
		if (ret > 0 && slot > 0)
			slot--;
		btrfs_item_key_to_cpu(l, &key, slot);

		if (ret > 0 && (key.objectid != dirid ||
				key.type != BTRFS_INODE_REF_KEY)) {
			ret = -ENOENT;
			goto out;
		}

		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(l, iref);
		ptr -= len + 1;
		total_len += len + 1;
		if (ptr < name)
			goto out;

		*(ptr + len) = '/';
		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);

		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
			break;

		btrfs_release_path(root, path);
		key.objectid = key.offset;
		key.offset = (u64)-1;
		dirid = key.objectid;

	}
	if (ptr < name)
		goto out;
	memcpy(name, ptr, total_len);
	name[total_len] = '\0';
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static noinline int btrfs_ioctl_ino_lookup(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_args *args;
	struct inode *inode;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	args = kmalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(args, argp, sizeof(*args))) {
		kfree(args);
		return -EFAULT;
	}
	inode = fdentry(file)->d_inode;

	if (args->treeid == 0)
		args->treeid = BTRFS_I(inode)->root->root_key.objectid;

	ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
					args->treeid, args->objectid,
					args->name);

	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}

static noinline int btrfs_ioctl_snap_destroy(struct file *file,
					     void __user *arg)
{
	struct dentry *parent = fdentry(file);
	struct dentry *dentry;
	struct inode *dir = parent->d_inode;
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *dest = NULL;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	int namelen;
	int ret;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	namelen = strlen(vol_args->name);
	if (strchr(vol_args->name, '/') ||
	    strncmp(vol_args->name, "..", namelen) == 0) {
		err = -EINVAL;
		goto out;
	}

	err = mnt_want_write(file->f_path.mnt);
	if (err)
		goto out;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(vol_args->name, parent, namelen);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock_dir;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_dput;
	}

	inode = dentry->d_inode;
	if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
		err = -EINVAL;
		goto out_dput;
	}

	dest = BTRFS_I(inode)->root;

	mutex_lock(&inode->i_mutex);
	err = d_invalidate(dentry);
	if (err)
		goto out_unlock;

	down_write(&root->fs_info->subvol_sem);

	err = may_destroy_subvol(dest);
	if (err)
		goto out_up_write;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_up_write;
	}
	trans->block_rsv = &root->fs_info->global_block_rsv;

	ret = btrfs_unlink_subvol(trans, root, dir,
				  dest->root_key.objectid,
				  dentry->d_name.name,
				  dentry->d_name.len);
	BUG_ON(ret);

	btrfs_record_root_in_trans(trans, dest);

	memset(&dest->root_item.drop_progress, 0,
	       sizeof(dest->root_item.drop_progress));
	dest->root_item.drop_level = 0;
	btrfs_set_root_refs(&dest->root_item, 0);

	if (!xchg(&dest->orphan_item_inserted, 1)) {
		ret = btrfs_insert_orphan_item(trans,
					root->fs_info->tree_root,
					dest->root_key.objectid);
		BUG_ON(ret);
	}

	ret = btrfs_commit_transaction(trans, root);
	BUG_ON(ret);
	inode->i_flags |= S_DEAD;
out_up_write:
	up_write(&root->fs_info->subvol_sem);
out_unlock:
	mutex_unlock(&inode->i_mutex);
	if (!err) {
		shrink_dcache_sb(root->fs_info->sb);
		btrfs_invalidate_inodes(dest);
		d_delete(dentry);
	}
out_dput:
	dput(dentry);
out_unlock_dir:
	mutex_unlock(&dir->i_mutex);
	mnt_drop_write(file->f_path.mnt);
out:
	kfree(vol_args);
	return err;
}

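/*
 * Defragment an entire tree (directories, admin only) or a regular
 * file's data, optionally rewriting the data compressed.
 */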
static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_defrag_range_args *range;
	int ret;

	ret = mnt_want_write(file->f_path.mnt);
	if (ret)
		return ret;

	switch (inode->i_mode & S_IFMT) {
	case S_IFDIR:
		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		ret = btrfs_defrag_root(root, 0);
		if (ret)
			goto out;
		ret = btrfs_defrag_root(root->fs_info->extent_root, 0);
		break;
	case S_IFREG:
		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EINVAL;
			goto out;
		}

		range = kzalloc(sizeof(*range), GFP_KERNEL);
		if (!range) {
			ret = -ENOMEM;
			goto out;
		}

		if (argp) {
			if (copy_from_user(range, argp,
					   sizeof(*range))) {
				ret = -EFAULT;
				kfree(range);
				goto out;
			}
			/* compression requires us to start the IO */
			if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
				range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
				range->extent_thresh = (u32)-1;
			}
		} else {
			/* the rest are all set to zero by kzalloc */
			range->len = (u64)-1;
		}
		ret = btrfs_defrag_file(file, range);
		kfree(range);
		break;
	default:
		ret = -EINVAL;
	}
out:
	mnt_drop_write(file->f_path.mnt);
	return ret;
}

static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	ret = btrfs_init_new_device(root, vol_args->name);

	kfree(vol_args);
	return ret;
}

static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	ret = btrfs_rm_device(root, vol_args->name);

	kfree(vol_args);
	return ret;
}

static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
				       u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct file *src_file;
	struct inode *src;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	char *buf;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	u64 len = olen;
	u64 bs = root->fs_info->sb->s_blocksize;
	u64 hint_byte;

	/*
	 * TODO:
	 * - split compressed inline extents. annoying: we need to
	 *   decompress into destination's address_space (the file offset
	 *   may change, so source mapping won't do), then recompress (or
	 *   otherwise reinsert) a subrange.
	 * - allow ranges within the same file to be cloned (provided
	 *   they don't overlap)?
	 */

	/* the destination must be opened for writing */
	if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND))
		return -EINVAL;

	ret = mnt_want_write(file->f_path.mnt);
	if (ret)
		return ret;

	src_file = fget(srcfd);
	if (!src_file) {
		ret = -EBADF;
		goto out_drop_write;
	}

	src = src_file->f_dentry->d_inode;

	ret = -EINVAL;
	if (src == inode)
		goto out_fput;

	/* the src must be open for reading */
	if (!(src_file->f_mode & FMODE_READ))
		goto out_fput;

	ret = -EISDIR;
	if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
		goto out_fput;

	ret = -EXDEV;
	if (src->i_sb != inode->i_sb || BTRFS_I(src)->root != root)
		goto out_fput;

	ret = -ENOMEM;
	buf = vmalloc(btrfs_level_size(root, 0));
	if (!buf)
		goto out_fput;

	path = btrfs_alloc_path();
	if (!path) {
		vfree(buf);
		goto out_fput;
	}
	path->reada = 2;

	if (inode < src) {
		mutex_lock(&inode->i_mutex);
		mutex_lock(&src->i_mutex);
	} else {
		mutex_lock(&src->i_mutex);
		mutex_lock(&inode->i_mutex);
	}

	/* determine range to clone */
	ret = -EINVAL;
	if (off + len > src->i_size || off + len < off)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - off;
	/* if we extend to eof, continue to block boundary */
	if (off + len == src->i_size)
		len = ((src->i_size + bs-1) & ~(bs-1))
			- off;

	/* verify the end result is block aligned */
	if ((off & (bs-1)) ||
	    ((off + len) & (bs-1)))
		goto out_unlock;

	/* do any pending delalloc/csum calc on src, one way or
	   another, and lock file content */
	while (1) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode, off+len);
		if (BTRFS_I(src)->delalloc_bytes == 0 && !ordered)
			break;
		unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		btrfs_wait_ordered_range(src, off, off+len);
	}

	/* clone data */
	key.objectid = src->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;

	while (1) {
		/*
		 * note the key will change type as we walk through the
		 * tree.
		 */
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != src->i_ino)
			break;

		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			struct btrfs_file_extent_item *extent;
			int type;
			u32 size;
			struct btrfs_key new_key;
			u64 disko = 0, diskl = 0;
			u64 datao = 0, datal = 0;
			u8 comp;
			u64 endoff;

			size = btrfs_item_size_nr(leaf, slot);
			read_extent_buffer(leaf, buf,
					   btrfs_item_ptr_offset(leaf, slot),
					   size);

			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			comp = btrfs_file_extent_compression(leaf, extent);
			type = btrfs_file_extent_type(leaf, extent);
			if (type == BTRFS_FILE_EXTENT_REG ||
			    type == BTRFS_FILE_EXTENT_PREALLOC) {
				disko = btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				diskl = btrfs_file_extent_disk_num_bytes(leaf,
								 extent);
				datao = btrfs_file_extent_offset(leaf, extent);
				datal = btrfs_file_extent_num_bytes(leaf,
								    extent);
			} else if (type == BTRFS_FILE_EXTENT_INLINE) {
				/* take upper bound, may be compressed */
				datal = btrfs_file_extent_ram_bytes(leaf,
								    extent);
			}
			btrfs_release_path(root, path);

			if (key.offset + datal < off ||
			    key.offset >= off+len)
				goto next;

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.objectid = inode->i_ino;
			new_key.offset = key.offset + destoff - off;

			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}

			if (type == BTRFS_FILE_EXTENT_REG ||
			    type == BTRFS_FILE_EXTENT_PREALLOC) {
				if (off > key.offset) {
					datao += off - key.offset;
					datal -= off - key.offset;
				}

				if (key.offset + datal > off + len)
					datal = off + len - key.offset;

				ret = btrfs_drop_extents(trans, inode,
							 new_key.offset,
							 new_key.offset + datal,
							 &hint_byte, 1);
				BUG_ON(ret);

				ret = btrfs_insert_empty_item(trans, root, path,
							      &new_key, size);
				BUG_ON(ret);

				leaf = path->nodes[0];
				slot = path->slots[0];
				write_extent_buffer(leaf, buf,
					    btrfs_item_ptr_offset(leaf, slot),
					    size);

				extent = btrfs_item_ptr(leaf, slot,
						 struct btrfs_file_extent_item);

				/* disko == 0 means it's a hole */
				if (!disko)
					datao = 0;

				btrfs_set_file_extent_offset(leaf, extent,
							     datao);
				btrfs_set_file_extent_num_bytes(leaf, extent,
								datal);
				if (disko) {
					inode_add_bytes(inode, datal);
					ret = btrfs_inc_extent_ref(trans, root,
						disko, diskl, 0,
						root->root_key.objectid,
						inode->i_ino,
						new_key.offset - datao);
					BUG_ON(ret);
				}
			} else if (type == BTRFS_FILE_EXTENT_INLINE) {
				u64 skip = 0;
				u64 trim = 0;
				if (off > key.offset) {
					skip = off - key.offset;
					new_key.offset += skip;
				}

				if (key.offset + datal > off+len)
					trim = key.offset + datal - (off+len);

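				/*
				 * Partially cloning a compressed inline
				 * extent would require decompressing it
				 * first (see the TODO above), so reject it.
				 */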
				if (comp && (skip || trim)) {
					ret = -EINVAL;
					btrfs_end_transaction(trans, root);
					goto out;
				}
				size -= skip + trim;
				datal -= skip + trim;

				ret = btrfs_drop_extents(trans, inode,
							 new_key.offset,
							 new_key.offset + datal,
							 &hint_byte, 1);
				BUG_ON(ret);

				ret = btrfs_insert_empty_item(trans, root, path,
							      &new_key, size);
				BUG_ON(ret);

				if (skip) {
					u32 start =
					  btrfs_file_extent_calc_inline_size(0);
					memmove(buf+start, buf+start+skip,
						datal);
				}

				leaf = path->nodes[0];
				slot = path->slots[0];
				write_extent_buffer(leaf, buf,
					    btrfs_item_ptr_offset(leaf, slot),
					    size);
				inode_add_bytes(inode, datal);
			}

			btrfs_mark_buffer_dirty(leaf);
			btrfs_release_path(root, path);

			inode->i_mtime = inode->i_ctime = CURRENT_TIME;

			/*
			 * we round up to the block size at eof when
			 * determining which extents to clone above,
			 * but shouldn't round up the file size
			 */
			endoff = new_key.offset + datal;
			if (endoff > off+olen)
				endoff = off+olen;
			if (endoff > inode->i_size)
				btrfs_i_size_write(inode, endoff);

			BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
			ret = btrfs_update_inode(trans, root, inode);
			BUG_ON(ret);
			btrfs_end_transaction(trans, root);
		}
next:
		btrfs_release_path(root, path);
		key.offset++;
	}
	ret = 0;
out:
	btrfs_release_path(root, path);
	unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
out_unlock:
	mutex_unlock(&src->i_mutex);
	mutex_unlock(&inode->i_mutex);
	vfree(buf);
	btrfs_free_path(path);
out_fput:
	fput(src_file);
out_drop_write:
	mnt_drop_write(file->f_path.mnt);
	return ret;
}

static long btrfs_ioctl_clone_range(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_clone_range_args args;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;
	return btrfs_ioctl_clone(file, args.src_fd, args.src_offset,
				 args.src_length, args.dest_offset);
}

/*
 * there are many ways the trans_start and trans_end ioctls can lead
 * to deadlocks. They should only be used by applications that
 * basically own the machine, and have a very in depth understanding
 * of all the possible deadlocks and enospc problems.
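 *
 * A transaction opened here is only closed by a matching
 * BTRFS_IOC_TRANS_END ioctl or when the file is finally released.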
 */
static long btrfs_ioctl_trans_start(struct file *file)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret;

	ret = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto out;

	ret = -EINPROGRESS;
	if (file->private_data)
		goto out;

	ret = mnt_want_write(file->f_path.mnt);
	if (ret)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	root->fs_info->open_ioctl_trans++;
	mutex_unlock(&root->fs_info->trans_mutex);

	ret = -ENOMEM;
	trans = btrfs_start_ioctl_transaction(root, 0);
	if (!trans)
		goto out_drop;

	file->private_data = trans;
	return 0;

out_drop:
	mutex_lock(&root->fs_info->trans_mutex);
	root->fs_info->open_ioctl_trans--;
	mutex_unlock(&root->fs_info->trans_mutex);
	mnt_drop_write(file->f_path.mnt);
out:
	return ret;
}

static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root *new_root;
	struct btrfs_dir_item *di;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 objectid = 0;
	u64 dir_id;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&objectid, argp, sizeof(objectid)))
		return -EFAULT;

	if (!objectid)
		objectid = root->root_key.objectid;

	location.objectid = objectid;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = (u64)-1;

	new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
	if (IS_ERR(new_root))
		return PTR_ERR(new_root);

	if (btrfs_root_refs(&new_root->root_item) == 0)
		return -ENOENT;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
	di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
				   dir_id, "default", 7, 1);
	if (IS_ERR_OR_NULL(di)) {
		btrfs_free_path(path);
		btrfs_end_transaction(trans, root);
		printk(KERN_ERR "Umm, you don't have the default dir item, "
		       "this isn't going to work\n");
		return -ENOENT;
	}

	btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
	btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	disk_super = &root->fs_info->super_copy;
	features = btrfs_super_incompat_flags(disk_super);
	if (!(features & BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)) {
		features |= BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL;
		btrfs_set_super_incompat_flags(disk_super, features);
	}
	btrfs_end_transaction(trans, root);

	return 0;
}

long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
{
	struct btrfs_ioctl_space_args space_args;
	struct btrfs_ioctl_space_info space;
	struct btrfs_ioctl_space_info *dest;
	struct btrfs_ioctl_space_info *dest_orig;
	struct btrfs_ioctl_space_info *user_dest;
	struct btrfs_space_info *info;
	int alloc_size;
	int ret = 0;
	int slot_count = 0;

	if (copy_from_user(&space_args,
			   (struct btrfs_ioctl_space_args __user *)arg,
			   sizeof(space_args)))
		return -EFAULT;

	/* first we count slots */
	rcu_read_lock();
	list_for_each_entry_rcu(info, &root->fs_info->space_info, list)
		slot_count++;
	rcu_read_unlock();

	/* space_slots == 0 means they are asking for a count */
	if (space_args.space_slots == 0) {
		space_args.total_spaces = slot_count;
		goto out;
	}
	alloc_size = sizeof(*dest) * slot_count;
	/* we generally have at most 6 or so space infos, one for each raid
	 * level. So, a whole page should be more than enough for everyone
	 */
	if (alloc_size > PAGE_CACHE_SIZE)
		return -ENOMEM;

	space_args.total_spaces = 0;
	dest = kmalloc(alloc_size, GFP_NOFS);
	if (!dest)
		return -ENOMEM;
	dest_orig = dest;

	/* now we have a buffer to copy into */
	rcu_read_lock();
	list_for_each_entry_rcu(info, &root->fs_info->space_info, list) {
		/* make sure we don't copy more than we allocated
		 * in our buffer
		 */
		if (slot_count == 0)
			break;
		slot_count--;

		/* make sure userland has enough room in their buffer */
		if (space_args.total_spaces >= space_args.space_slots)
			break;

		space.flags = info->flags;
		space.total_bytes = info->total_bytes;
		space.used_bytes = info->bytes_used;
		memcpy(dest, &space, sizeof(space));
		dest++;
		space_args.total_spaces++;
	}
	rcu_read_unlock();

	user_dest = (struct btrfs_ioctl_space_info *)
		(arg + sizeof(struct btrfs_ioctl_space_args));

	if (copy_to_user(user_dest, dest_orig, alloc_size))
		ret = -EFAULT;

	kfree(dest_orig);
out:
	if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
		ret = -EFAULT;

	return ret;
}

/*
 * there are many ways the trans_start and trans_end ioctls can lead
 * to deadlocks. They should only be used by applications that
 * basically own the machine, and have a very in depth understanding
 * of all the possible deadlocks and enospc problems.
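 *
 * This must be paired with a successful BTRFS_IOC_TRANS_START on the
 * same file; otherwise there is no transaction to end and -EINVAL is
 * returned.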
 */
long btrfs_ioctl_trans_end(struct file *file)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;

	trans = file->private_data;
	if (!trans)
		return -EINVAL;
	file->private_data = NULL;

	btrfs_end_transaction(trans, root);

	mutex_lock(&root->fs_info->trans_mutex);
	root->fs_info->open_ioctl_trans--;
	mutex_unlock(&root->fs_info->trans_mutex);

	mnt_drop_write(file->f_path.mnt);
	return 0;
}

long btrfs_ioctl(struct file *file, unsigned int
		cmd, unsigned long arg)
{
	struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return btrfs_ioctl_getflags(file, argp);
	case FS_IOC_SETFLAGS:
		return btrfs_ioctl_setflags(file, argp);
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(file, argp);
	case BTRFS_IOC_SNAP_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 0);
	case BTRFS_IOC_SUBVOL_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 1);
	case BTRFS_IOC_SNAP_DESTROY:
		return btrfs_ioctl_snap_destroy(file, argp);
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
	case BTRFS_IOC_DEFRAG:
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
	case BTRFS_IOC_RESIZE:
		return btrfs_ioctl_resize(root, argp);
	case BTRFS_IOC_ADD_DEV:
		return btrfs_ioctl_add_dev(root, argp);
	case BTRFS_IOC_RM_DEV:
		return btrfs_ioctl_rm_dev(root, argp);
	case BTRFS_IOC_BALANCE:
		return btrfs_balance(root->fs_info->dev_root);
	case BTRFS_IOC_CLONE:
		return btrfs_ioctl_clone(file, arg, 0, 0, 0);
	case BTRFS_IOC_CLONE_RANGE:
		return btrfs_ioctl_clone_range(file, argp);
	case BTRFS_IOC_TRANS_START:
		return btrfs_ioctl_trans_start(file);
	case BTRFS_IOC_TRANS_END:
		return btrfs_ioctl_trans_end(file);
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(file, argp);
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(file, argp);
	case BTRFS_IOC_SPACE_INFO:
		return btrfs_ioctl_space_info(root, argp);
	case BTRFS_IOC_SYNC:
		btrfs_sync_fs(file->f_dentry->d_sb, 1);
		return 0;
	}

	return -ENOTTY;
}