/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_VOLUMES_H
#define BTRFS_VOLUMES_H

#include <linux/bio.h>
#include <linux/sort.h>
#include <linux/btrfs.h>
#include "async-thread.h"

#define BTRFS_MAX_DATA_CHUNK_SIZE	(10ULL * SZ_1G)

extern struct mutex uuid_mutex;

#define BTRFS_STRIPE_LEN	SZ_64K

/* Used by sanity check for btrfs_raid_types. */
#define const_ffs(n) (__builtin_ctzll(n) + 1)

/*
 * The conversion from BTRFS_BLOCK_GROUP_* bits to btrfs_raid_type requires
 * RAID0 always to be the lowest profile bit.
 * Although it's part of the on-disk format and should never change, do extra
 * compile-time sanity checks.
 */
static_assert(const_ffs(BTRFS_BLOCK_GROUP_RAID0) <
	      const_ffs(BTRFS_BLOCK_GROUP_PROFILE_MASK & ~BTRFS_BLOCK_GROUP_RAID0));
static_assert(const_ilog2(BTRFS_BLOCK_GROUP_RAID0) >
	      ilog2(BTRFS_BLOCK_GROUP_TYPE_MASK));

/* ilog2() can handle both constants and variables */
#define BTRFS_BG_FLAG_TO_INDEX(profile)					\
	ilog2((profile) >> (ilog2(BTRFS_BLOCK_GROUP_RAID0) - 1))

enum btrfs_raid_types {
	/* SINGLE is the special one as it doesn't have an on-disk bit. */
	BTRFS_RAID_SINGLE = 0,

	BTRFS_RAID_RAID0 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID0),
	BTRFS_RAID_RAID1 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1),
	BTRFS_RAID_DUP = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_DUP),
	BTRFS_RAID_RAID10 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID10),
	BTRFS_RAID_RAID5 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID5),
	BTRFS_RAID_RAID6 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID6),
	BTRFS_RAID_RAID1C3 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1C3),
	BTRFS_RAID_RAID1C4 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1C4),

	BTRFS_NR_RAID_TYPES
};
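/*
 * Worked example of the flag -> index mapping above, as illustrative
 * compile-time spot checks (not part of the original header):
 * BTRFS_BLOCK_GROUP_RAID0 is 1ULL << 3, so a profile bit is shifted right
 * by ilog2(1ULL << 3) - 1 == 2 before taking ilog2(). RAID0 (bit 3) thus
 * maps to index 1 and RAID1 (bit 4) to index 2, leaving 0 for SINGLE.
 */
static_assert(BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID0) == 1);
static_assert(BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1) == 2);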
struct btrfs_io_geometry {
	/* remaining bytes before crossing a stripe */
	u64 len;
	/* offset of logical address in chunk */
	u64 offset;
	/* length of single IO stripe */
	u32 stripe_len;
	/* offset of address in stripe */
	u32 stripe_offset;
	/* number of the stripe where the address falls */
	u64 stripe_nr;
	/* offset of raid56 stripe into the chunk */
	u64 raid56_stripe_offset;
};

/*
 * Use sequence counter to get consistent device stat data on
 * 32-bit processors.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
#define btrfs_device_data_ordered_init(device)	\
	seqcount_init(&device->data_seqcount)
#else
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif

#define BTRFS_DEV_STATE_WRITEABLE	(0)
#define BTRFS_DEV_STATE_IN_FS_METADATA	(1)
#define BTRFS_DEV_STATE_MISSING		(2)
#define BTRFS_DEV_STATE_REPLACE_TGT	(3)
#define BTRFS_DEV_STATE_FLUSH_SENT	(4)
#define BTRFS_DEV_STATE_NO_READA	(5)

struct btrfs_zoned_device_info;

struct btrfs_device {
	struct list_head dev_list; /* device_list_mutex */
	struct list_head dev_alloc_list; /* chunk mutex */
	struct list_head post_commit_list; /* chunk mutex */
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_fs_info *fs_info;

	struct rcu_string __rcu *name;

	u64 generation;

	struct block_device *bdev;

	struct btrfs_zoned_device_info *zone_info;

	/* the mode sent to blkdev_get */
	fmode_t mode;

	/*
	 * Device's major-minor number. Must be set even if the device is not
	 * opened (bdev == NULL), unless the device is missing.
	 */
	dev_t devt;
	unsigned long dev_state;
	blk_status_t last_flush_error;

#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
	seqcount_t data_seqcount;
#endif

	/* the internal btrfs device id */
	u64 devid;

	/* size of the device in memory */
	u64 total_bytes;

	/* size of the device on disk */
	u64 disk_total_bytes;

	/* bytes used */
	u64 bytes_used;

	/* optimal io alignment for this device */
	u32 io_align;

	/* optimal io width for this device */
	u32 io_width;
	/* type and info about this device */
	u64 type;

	/* minimal io size for this device */
	u32 sector_size;

	/* physical drive uuid (or lvm uuid) */
	u8 uuid[BTRFS_UUID_SIZE];

	/*
	 * Size of the device on the current transaction.
	 *
	 * This variant is updated when committing the transaction, and is
	 * protected by the chunk mutex.
	 */
	u64 commit_total_bytes;

	/* bytes used on the current transaction */
	u64 commit_bytes_used;

	/* Bio used for flushing device barriers */
	struct bio flush_bio;
	struct completion flush_wait;

	/* per-device scrub information */
	struct scrub_ctx *scrub_ctx;

	/*
	 * Disk I/O failure stats. For a detailed description refer to
	 * enum btrfs_dev_stat_values in ioctl.h.
	 */
	int dev_stats_valid;

	/* Counter to record the change of device stats */
	atomic_t dev_stats_ccnt;
	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];

	struct extent_io_tree alloc_state;

	struct completion kobj_unregister;
	/* For sysfs/FSID/devinfo/devid/ */
	struct kobject devid_kobj;

	/* Bandwidth limit for scrub, in bytes */
	u64 scrub_speed_max;
};

/*
 * Block group or device which contains an active swapfile. Used for preventing
 * unsafe operations while a swapfile is active.
 *
 * These are sorted on (ptr, inode) (note that a block group or device can
 * contain more than one swapfile). We compare the pointer values because we
 * don't actually care what the object is, we just need a quick check whether
 * the object exists in the rbtree.
 */
struct btrfs_swapfile_pin {
	struct rb_node node;
	void *ptr;
	struct inode *inode;
	/*
	 * If true, ptr points to a struct btrfs_block_group. Otherwise, ptr
	 * points to a struct btrfs_device.
	 */
	bool is_block_group;
	/*
	 * Only used when 'is_block_group' is true and it is the number of
	 * extents used by a swapfile for this block group ('ptr' field).
	 */
	int bg_extent_count;
};
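/*
 * Illustrative sketch of the (ptr, inode) ordering described above. The
 * real rbtree comparator is not part of this header; the hypothetical
 * helper below only spells out the comparison rule:
 */
static inline int btrfs_swapfile_pin_cmp(const struct btrfs_swapfile_pin *a,
					 const struct btrfs_swapfile_pin *b)
{
	/* Compare raw pointer values first, then the swapfile's inode. */
	if (a->ptr != b->ptr)
		return a->ptr < b->ptr ? -1 : 1;
	if (a->inode != b->inode)
		return a->inode < b->inode ? -1 : 1;
	return 0;
}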
/*
 * If we read these members while already holding the lock that protects
 * them, we don't need the following helpers; reading them directly is safe.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)			\
{									\
	u64 size;							\
	unsigned int seq;						\
									\
	do {								\
		seq = read_seqcount_begin(&dev->data_seqcount);		\
		size = dev->name;					\
	} while (read_seqcount_retry(&dev->data_seqcount, seq));	\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	write_seqcount_begin(&dev->data_seqcount);			\
	dev->name = size;						\
	write_seqcount_end(&dev->data_seqcount);			\
	preempt_enable();						\
}
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)			\
{									\
	u64 size;							\
									\
	preempt_disable();						\
	size = dev->name;						\
	preempt_enable();						\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	dev->name = size;						\
	preempt_enable();						\
}
#else
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)			\
{									\
	return dev->name;						\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	dev->name = size;						\
}
#endif

BTRFS_DEVICE_GETSET_FUNCS(total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(bytes_used);
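/*
 * Usage sketch for the generated accessors (illustrative only). The
 * instantiations above expand to btrfs_device_get_total_bytes(),
 * btrfs_device_set_total_bytes(), and friends, so a reader that does not
 * hold the chunk mutex still sees an untorn u64 on 32-bit kernels:
 *
 *	u64 total = btrfs_device_get_total_bytes(device);
 *
 *	btrfs_device_set_total_bytes(device, new_size);
 */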
enum btrfs_chunk_allocation_policy {
	BTRFS_CHUNK_ALLOC_REGULAR,
	BTRFS_CHUNK_ALLOC_ZONED,
};

/*
 * Read policies for mirrored block group profiles; a read picks the stripe
 * based on these policies.
 */
enum btrfs_read_policy {
	/* Use process PID to choose the stripe */
	BTRFS_READ_POLICY_PID,
	BTRFS_NR_READ_POLICY,
};

struct btrfs_fs_devices {
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	u8 metadata_uuid[BTRFS_FSID_SIZE];
	bool fsid_change;
	struct list_head fs_list;

	/*
	 * Number of devices under this fsid, including missing and
	 * replace-target devices and excluding seed devices.
	 */
	u64 num_devices;

	/*
	 * The number of devices that were successfully opened, including the
	 * replace-target and excluding seed devices.
	 */
	u64 open_devices;

	/* The number of devices that are on the chunk allocation list. */
	u64 rw_devices;

	/* Count of missing devices under this fsid excluding seed devices. */
	u64 missing_devices;
	u64 total_rw_bytes;

	/*
	 * Count of devices from btrfs_super_block::num_devices for this fsid,
	 * which includes the seed device and excludes the transient
	 * replace-target device.
	 */
	u64 total_devices;

	/* Highest generation number of seen devices */
	u64 latest_generation;

	/*
	 * The mount device or the device with the highest generation after
	 * removal or replace.
	 */
	struct btrfs_device *latest_dev;

	/*
	 * All of the devices in the FS, protected by a mutex so we can safely
	 * walk it to write out the supers without worrying about adds/removes
	 * by the multi-device code. Scrubbing the superblock can kick off
	 * superblock writes while holding this mutex lock.
	 */
	struct mutex device_list_mutex;

	/* List of all devices, protected by device_list_mutex */
	struct list_head devices;

	/*
	 * Devices which can satisfy space allocation. Protected by
	 * chunk_mutex.
	 */
	struct list_head alloc_list;

	struct list_head seed_list;
	bool seeding;

	int opened;

	/*
	 * Set when we find or add a device that doesn't have the nonrot flag
	 * set.
	 */
	bool rotating;

	struct btrfs_fs_info *fs_info;
	/* sysfs kobjects */
	struct kobject fsid_kobj;
	struct kobject *devices_kobj;
	struct kobject *devinfo_kobj;
	struct completion kobj_unregister;

	enum btrfs_chunk_allocation_policy chunk_alloc_policy;

	/* Policy used to read the mirrored stripes */
	enum btrfs_read_policy read_policy;
};

#define BTRFS_BIO_INLINE_CSUM_SIZE 64

#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)	\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

/*
 * Maximum number of sectors for a single bio to limit the size of the
 * checksum array. This matches the number of bio_vecs per bio and thus the
 * I/O size for buffered I/O.
 */
#define BTRFS_MAX_BIO_SECTORS	(256)

typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio);

/*
 * Additional info to pass along with the bio.
 *
 * Mostly for btrfs specific features like csum and mirror_num.
 */
struct btrfs_bio {
	unsigned int mirror_num;

	/* for direct I/O */
	u64 file_offset;

	/* @device is for stripe IO submission. */
	struct btrfs_device *device;
	u8 *csum;
	u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
	struct bvec_iter iter;

	/* End I/O information supplied to btrfs_bio_alloc */
	btrfs_bio_end_io_t end_io;
	void *private;

	/* For read end I/O handling */
	struct work_struct end_io_work;

	/*
	 * This member must come last, bio_alloc_bioset will allocate enough
	 * bytes for the entire btrfs_bio but relies on bio being last.
	 */
	struct bio bio;
};

static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
{
	return container_of(bio, struct btrfs_bio, bio);
}

int __init btrfs_bioset_init(void);
void __cold btrfs_bioset_exit(void);

struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
			    btrfs_bio_end_io_t end_io, void *private);
struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size,
				    btrfs_bio_end_io_t end_io, void *private);

static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
{
	bbio->bio.bi_status = status;
	bbio->end_io(bbio);
}

static inline void btrfs_bio_free_csum(struct btrfs_bio *bbio)
{
	if (bbio->csum != bbio->csum_inline) {
		kfree(bbio->csum);
		bbio->csum = NULL;
	}
}
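/*
 * Minimal usage sketch for the bio helpers above (illustrative only;
 * my_end_io and ctx are hypothetical caller-side names). The end_io
 * callback inspects bbio->bio.bi_status and then completes or frees the
 * bio:
 *
 *	static void my_end_io(struct btrfs_bio *bbio);
 *
 *	bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, my_end_io, ctx);
 *	...
 *	btrfs_bio_end_io(btrfs_bio(bio), BLK_STS_OK);
 */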
/*
 * Iterate through a btrfs_bio (@bbio) on a per-sector basis.
 *
 * bvl        - struct bio_vec
 * bbio       - struct btrfs_bio
 * iter       - struct bvec_iter
 * bio_offset - unsigned int
 */
#define btrfs_bio_for_each_sector(fs_info, bvl, bbio, iter, bio_offset)	\
	for ((iter) = (bbio)->iter, (bio_offset) = 0;			\
	     (iter).bi_size &&						\
	     (((bvl) = bio_iter_iovec((&(bbio)->bio), (iter))), 1);	\
	     (bio_offset) += (fs_info)->sectorsize,			\
	     bio_advance_iter_single(&(bbio)->bio, &(iter),		\
				     (fs_info)->sectorsize))

struct btrfs_io_stripe {
	struct btrfs_device *dev;
	union {
		/* Block mapping */
		u64 physical;
		/* For the endio handler */
		struct btrfs_io_context *bioc;
	};
};

struct btrfs_discard_stripe {
	struct btrfs_device *dev;
	u64 physical;
	u64 length;
};

/*
 * Context for IO submission for a device stripe.
 *
 * - Track the unfinished mirrors for mirror based profiles
 *   Mirror based profiles are SINGLE/DUP/RAID1/RAID10.
 *
 * - Contain the logical -> physical mapping info
 *   Used by submit_stripe_bio() for mapping a logical bio
 *   to a physical device address.
 *
 * - Contain device replace info
 *   Used by handle_ops_on_dev_replace() to copy logical bios
 *   into the new device.
 *
 * - Contain RAID56 full stripe logical bytenrs
 */
struct btrfs_io_context {
	refcount_t refs;
	struct btrfs_fs_info *fs_info;
	u64 map_type; /* get from map_lookup->type */
	struct bio *orig_bio;
	atomic_t error;
	int max_errors;
	int num_stripes;
	int mirror_num;
	int num_tgtdevs;
	int *tgtdev_map;
	/*
	 * Logical block numbers for the start of each stripe.
	 * The last one or two are p/q. These are sorted,
	 * so raid_map[0] is the start of our full stripe.
	 */
	u64 *raid_map;
	struct btrfs_io_stripe stripes[];
};

struct btrfs_device_info {
	struct btrfs_device *dev;
	u64 dev_offset;
	u64 max_avail;
	u64 total_avail;
};

struct btrfs_raid_attr {
	u8 sub_stripes;		/* sub_stripes info for map */
	u8 dev_stripes;		/* stripes per dev */
	u8 devs_max;		/* max devs to use */
	u8 devs_min;		/* min devs needed */
	u8 tolerated_failures;	/* max tolerated fail devs */
	u8 devs_increment;	/* ndevs has to be a multiple of this */
	u8 ncopies;		/* how many copies the data has */
	u8 nparity;		/* number of stripes worth of bytes to store
				 * parity information */
	u8 mindev_error;	/* error code if min devs requisite is unmet */
	const char raid_name[8]; /* name of the raid */
	u64 bg_flag;		/* block group flag of the raid */
};

extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];
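/*
 * Usage sketch (illustrative): per-profile attributes are looked up by
 * raid index. For example, the number of copies kept by RAID1 (the table
 * itself lives in volumes.c, where ncopies is 2 for RAID1):
 *
 *	int ncopies = btrfs_raid_array[BTRFS_RAID_RAID1].ncopies;
 */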
struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	u32 stripe_len;
	int num_stripes;
	int sub_stripes;
	int verified_stripes; /* For mount time dev extent verification */
	struct btrfs_io_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_io_stripe) * (n)))

struct btrfs_balance_args;
struct btrfs_balance_progress;
struct btrfs_balance_control {
	struct btrfs_balance_args data;
	struct btrfs_balance_args meta;
	struct btrfs_balance_args sys;

	u64 flags;

	struct btrfs_balance_progress stat;
};

/*
 * Search for a given device by the set parameters
 */
struct btrfs_dev_lookup_args {
	u64 devid;
	u8 *uuid;
	u8 *fsid;
	bool missing;
};

/* We have to initialize to -1 because BTRFS_DEV_REPLACE_DEVID is 0 */
#define BTRFS_DEV_LOOKUP_ARGS_INIT { .devid = (u64)-1 }

#define BTRFS_DEV_LOOKUP_ARGS(name) \
	struct btrfs_dev_lookup_args name = BTRFS_DEV_LOOKUP_ARGS_INIT
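/*
 * Usage sketch (illustrative): look up a device by devid only, leaving
 * uuid and fsid as wildcards. The -1 initializer matters because devid 0
 * is a valid value (BTRFS_DEV_REPLACE_DEVID):
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	args.devid = devid;
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 */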
enum btrfs_map_op {
	BTRFS_MAP_READ,
	BTRFS_MAP_WRITE,
	BTRFS_MAP_DISCARD,
	BTRFS_MAP_GET_READ_MIRRORS,
};

static inline enum btrfs_map_op btrfs_op(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		return BTRFS_MAP_DISCARD;
	case REQ_OP_WRITE:
	case REQ_OP_ZONE_APPEND:
		return BTRFS_MAP_WRITE;
	default:
		WARN_ON_ONCE(1);
		fallthrough;
	case REQ_OP_READ:
		return BTRFS_MAP_READ;
	}
}

void btrfs_get_bioc(struct btrfs_io_context *bioc);
void btrfs_put_bioc(struct btrfs_io_context *bioc);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_io_context **bioc_ret, int mirror_num);
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_io_context **bioc_ret);
struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
					       u64 logical, u64 *length_ret,
					       u32 *num_stripes);
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
			  enum btrfs_map_op op, u64 logical,
			  struct btrfs_io_geometry *io_geom);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
					     u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
					   fmode_t flags, void *holder);
int btrfs_forget_devices(dev_t devt);
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices);
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev);
struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
						  u64 devid,
						  const char *devpath);
int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
				 struct btrfs_dev_lookup_args *args,
				 const char *path);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid);
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args);
void btrfs_free_device(struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    struct btrfs_dev_lookup_args *args,
		    struct block_device **bdev, fmode_t *mode);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
				       const struct btrfs_dev_lookup_args *args);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs);
void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_uuid_scan_kthread(void *data);
bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
			   u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical);
u64 btrfs_calc_stripe_length(const struct extent_map *em);
int btrfs_nr_parity_stripes(u64 type);
int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
				     struct btrfs_block_group *bg);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length);
void btrfs_release_disk_super(struct btrfs_super_block *super);

static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
				      int index)
{
	atomic_inc(dev->dev_stat_values + index);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
				      int index)
{
	return atomic_read(dev->dev_stat_values + index);
}

static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
						int index)
{
	int ret;

	ret = atomic_xchg(dev->dev_stat_values + index, 0);
	/*
	 * atomic_xchg() implies a full memory barrier as per atomic_t.txt:
	 * - RMW operations that have a return value are fully ordered;
	 *
	 * This implicit memory barrier is paired with the smp_rmb() in
	 * btrfs_run_dev_stats().
	 */
	atomic_inc(&dev->dev_stats_ccnt);
	return ret;
}

static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
				      int index, unsigned long val)
{
	atomic_set(dev->dev_stat_values + index, val);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}
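/*
 * Illustrative pairing of the stat helpers above (sketch): writers bump a
 * stat value and then dev_stats_ccnt, so btrfs_run_dev_stats() can sample
 * the ccnt to detect changes since the last transaction commit:
 *
 *	btrfs_dev_stat_inc(dev, BTRFS_DEV_STAT_READ_ERRS);
 *	...
 *	errs = btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS);
 */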
void btrfs_commit_device_sizes(struct btrfs_transaction *trans);

struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev);
void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
			       struct block_device *bdev,
			       const char *device_path);

enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags);
int btrfs_bg_type_to_factor(u64 flags);
const char *btrfs_bg_type_to_raid_name(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);

bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);

#endif