// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/atomic.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
#include "rcu-string.h"
#include "disk-io.h"
#include "block-group.h"
#include "dev-replace.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "bio.h"

/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES	4096
/* Invalid allocation pointer value for missing devices */
#define WP_MISSING_DEV ((u64)-1)
/* Pseudo write pointer value for conventional zone */
#define WP_CONVENTIONAL ((u64)-2)

/*
 * Location of the first zone of superblock logging zone pairs.
 *
 * - primary superblock: 0B (zone 0)
 * - first copy: 512G (zone starting at that offset)
 * - second copy: 4T (zone starting at that offset)
 */
#define BTRFS_SB_LOG_PRIMARY_OFFSET	(0ULL)
#define BTRFS_SB_LOG_FIRST_OFFSET	(512ULL * SZ_1G)
#define BTRFS_SB_LOG_SECOND_OFFSET	(4096ULL * SZ_1G)

#define BTRFS_SB_LOG_FIRST_SHIFT	const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
#define BTRFS_SB_LOG_SECOND_SHIFT	const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)

/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES	2

/*
 * Minimum number of active zones we need:
 *
 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
 * - 1 zone for tree-log dedicated block group
 * - 1 zone for relocation
 */
#define BTRFS_MIN_ACTIVE_ZONES		(BTRFS_SUPER_MIRROR_MAX + 5)

/*
 * Minimum / maximum supported zone size. Currently, SMR disks have a zone
 * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
 * We do not expect the zone size to become larger than 8GiB or smaller than
 * 4MiB in the near future.
 */
#define BTRFS_MAX_ZONE_SIZE		SZ_8G
#define BTRFS_MIN_ZONE_SIZE		SZ_4M

#define SUPER_INFO_SECTORS	((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)

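/*
 * Worked example of the superblock log layout (illustrative numbers only):
 * on a device with 256MiB zones the zone size shift is 28, so mirror 1
 * starts at zone 1 << (BTRFS_SB_LOG_FIRST_SHIFT - 28) = 1 << (39 - 28) =
 * zone 2048 (byte offset 512GiB), and mirror 2 at zone 1 << (42 - 28) =
 * zone 16384 (byte offset 4TiB). Each mirror occupies a pair of
 * BTRFS_NR_SB_LOG_ZONES consecutive zones used as a log.
 */
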
static void wait_eb_writebacks(struct btrfs_block_group *block_group);
static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written);

static inline bool sb_zone_is_full(const struct blk_zone *zone)
{
	return (zone->cond == BLK_ZONE_COND_FULL) ||
		(zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
}

static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct blk_zone *zones = data;

	memcpy(&zones[idx], zone, sizeof(*zone));

	return 0;
}

static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
			    u64 *wp_ret)
{
	bool empty[BTRFS_NR_SB_LOG_ZONES];
	bool full[BTRFS_NR_SB_LOG_ZONES];
	sector_t sector;

	for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
		empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
		full[i] = sb_zone_is_full(&zones[i]);
	}

	/*
	 * Possible states of log buffer zones
	 *
	 *           Empty[0]  In use[0]  Full[0]
	 * Empty[1]         *          0        1
	 * In use[1]        x          x        1
	 * Full[1]          0          0        C
	 *
	 * Log position:
	 *   *: Special case, no superblock is written
	 *   0: Use write pointer of zones[0]
	 *   1: Use write pointer of zones[1]
	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
	 *      one determined by generation
	 *   x: Invalid state
	 */

	if (empty[0] && empty[1]) {
		/* Special case to distinguish no superblock to read */
		*wp_ret = zones[0].start << SECTOR_SHIFT;
		return -ENOENT;
	} else if (full[0] && full[1]) {
		/* Compare two super blocks */
		struct address_space *mapping = bdev->bd_mapping;
		struct page *page[BTRFS_NR_SB_LOG_ZONES];
		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];

		for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
			u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
			u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
				     BTRFS_SUPER_INFO_SIZE;

			page[i] = read_cache_page_gfp(mapping,
						      bytenr >> PAGE_SHIFT, GFP_NOFS);
			if (IS_ERR(page[i])) {
				if (i == 1)
					btrfs_release_disk_super(super[0]);
				return PTR_ERR(page[i]);
			}
			super[i] = page_address(page[i]);
		}

		if (btrfs_super_generation(super[0]) >
		    btrfs_super_generation(super[1]))
			sector = zones[1].start;
		else
			sector = zones[0].start;

		for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
			btrfs_release_disk_super(super[i]);
	} else if (!full[0] && (empty[1] || full[1])) {
		sector = zones[0].wp;
	} else if (full[0]) {
		sector = zones[1].wp;
	} else {
		return -EUCLEAN;
	}
	*wp_ret = sector << SECTOR_SHIFT;
	return 0;
}

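/*
 * Example decoding of the table above: if zones[0] is FULL and zones[1] is
 * partially written (in use), the next superblock goes at zones[1].wp
 * (case "1"). Once both zones fill up (case "C"), the generations of the
 * last superblock in each zone decide which copy is newer; the zone holding
 * the older copy is the one that gets reset and rewritten next.
 */
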
/*
 * Get the first zone number of the superblock mirror
 */
static inline u32 sb_zone_number(int shift, int mirror)
{
	u64 zone = U64_MAX;

	ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
	switch (mirror) {
	case 0: zone = 0; break;
	case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
	case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
	}

	ASSERT(zone <= U32_MAX);

	return (u32)zone;
}

static inline sector_t zone_start_sector(u32 zone_number,
					 struct block_device *bdev)
{
	return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
}

static inline u64 zone_start_physical(u32 zone_number,
				      struct btrfs_zoned_device_info *zone_info)
{
	return (u64)zone_number << zone_info->zone_size_shift;
}

/*
 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
 * device into static sized chunks and fakes a conventional zone on each of
 * them.
 */
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
				struct blk_zone *zones, unsigned int nr_zones)
{
	const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
	sector_t bdev_size = bdev_nr_sectors(device->bdev);
	unsigned int i;

	pos >>= SECTOR_SHIFT;
	for (i = 0; i < nr_zones; i++) {
		zones[i].start = i * zone_sectors + pos;
		zones[i].len = zone_sectors;
		zones[i].capacity = zone_sectors;
		zones[i].wp = zones[i].start + zone_sectors;
		zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
		zones[i].cond = BLK_ZONE_COND_NOT_WP;

		if (zones[i].wp >= bdev_size) {
			i++;
			break;
		}
	}

	return i;
}

static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	int ret;

	if (!*nr_zones)
		return 0;

	if (!bdev_is_zoned(device->bdev)) {
		ret = emulate_report_zones(device, pos, zones, *nr_zones);
		*nr_zones = ret;
		return 0;
	}

	/* Check cache */
	if (zinfo->zone_cache) {
		unsigned int i;
		u32 zno;

		ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
		zno = pos >> zinfo->zone_size_shift;
		/*
		 * We cannot report zones beyond the zone end. So, it is OK to
		 * cap *nr_zones at the end.
		 */
		*nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);

		for (i = 0; i < *nr_zones; i++) {
			struct blk_zone *zone_info;

			zone_info = &zinfo->zone_cache[zno + i];
			if (!zone_info->len)
				break;
		}

		if (i == *nr_zones) {
			/* Cache hit on all the zones */
			memcpy(zones, zinfo->zone_cache + zno,
			       sizeof(*zinfo->zone_cache) * *nr_zones);
			return 0;
		}
	}

	ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
				  copy_zone_info_cb, zones);
	if (ret < 0) {
		btrfs_err_in_rcu(device->fs_info,
				 "zoned: failed to read zone %llu on %s (devid %llu)",
				 pos, rcu_str_deref(device->name),
				 device->devid);
		return ret;
	}
	*nr_zones = ret;
	if (!ret)
		return -EIO;

	/* Populate cache */
	if (zinfo->zone_cache) {
		u32 zno = pos >> zinfo->zone_size_shift;

		memcpy(zinfo->zone_cache + zno, zones,
		       sizeof(*zinfo->zone_cache) * *nr_zones);
	}

	return 0;
}

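/*
 * Note on the cache above: a zeroed entry (len == 0) means "not yet
 * reported". For example, the first report for a range round-trips to the
 * device and fills zone_cache; a later call for the same range is served
 * entirely from the cache as long as every entry in it has a non-zero len.
 */
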
/* The emulated zone size is determined from the size of device extent */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_dev_extent *dext;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}

	leaf = path->nodes[0];
	dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
	fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
	ret = 0;

out:
	btrfs_free_path(path);

	return ret;
}

int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* We can skip reading zone info for missing devices */
		if (!device->bdev)
			continue;

		ret = btrfs_get_dev_zone_info(device, true);
		if (ret)
			break;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_zoned_device_info *zone_info = NULL;
	struct block_device *bdev = device->bdev;
	unsigned int max_active_zones;
	unsigned int nactive;
	sector_t nr_sectors;
	sector_t sector = 0;
	struct blk_zone *zones = NULL;
	unsigned int i, nreported = 0, nr_zones;
	sector_t zone_sectors;
	char *model, *emulated;
	int ret;

	/*
	 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
	 * yet be set.
	 */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	if (device->zone_info)
		return 0;

	zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
	if (!zone_info)
		return -ENOMEM;

	device->zone_info = zone_info;

	if (!bdev_is_zoned(bdev)) {
		if (!fs_info->zone_size) {
			ret = calculate_emulated_zone_size(fs_info);
			if (ret)
				goto out;
		}

		ASSERT(fs_info->zone_size);
		zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
	} else {
		zone_sectors = bdev_zone_sectors(bdev);
	}

	ASSERT(is_power_of_two_u64(zone_sectors));
	zone_info->zone_size = zone_sectors << SECTOR_SHIFT;

	/* We reject devices with a zone size larger than 8GB */
	if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
		btrfs_err_in_rcu(fs_info,
		"zoned: %s: zone size %llu larger than supported maximum %llu",
				 rcu_str_deref(device->name),
				 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
		ret = -EINVAL;
		goto out;
	} else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
		btrfs_err_in_rcu(fs_info,
		"zoned: %s: zone size %llu smaller than supported minimum %u",
				 rcu_str_deref(device->name),
				 zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	nr_sectors = bdev_nr_sectors(bdev);
	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
	if (!IS_ALIGNED(nr_sectors, zone_sectors))
		zone_info->nr_zones++;

	max_active_zones = bdev_max_active_zones(bdev);
	if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
		btrfs_err_in_rcu(fs_info,
"zoned: %s: max active zones %u is too small, need at least %u active zones",
				 rcu_str_deref(device->name), max_active_zones,
				 BTRFS_MIN_ACTIVE_ZONES);
		ret = -EINVAL;
		goto out;
	}
	zone_info->max_active_zones = max_active_zones;

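	/*
	 * For scale: BTRFS_SUPER_MIRROR_MAX is 3, so BTRFS_MIN_ACTIVE_ZONES
	 * evaluates to 8. A ZNS drive advertising e.g. max_active_zones = 14
	 * passes the check above; a drive reporting 0 imposes no limit and
	 * skips active zone tracking entirely.
	 */
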
	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->active_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Enable zone cache only for a zoned device. On a non-zoned device, we
	 * fill the zone info with emulated CONVENTIONAL zones, so no need to
	 * use the cache.
	 */
	if (populate_cache && bdev_is_zoned(device->bdev)) {
		zone_info->zone_cache = vcalloc(zone_info->nr_zones,
						sizeof(struct blk_zone));
		if (!zone_info->zone_cache) {
			btrfs_err_in_rcu(device->fs_info,
				"zoned: failed to allocate zone cache for %s",
				rcu_str_deref(device->name));
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Get zone types */
	nactive = 0;
	while (sector < nr_sectors) {
		nr_zones = BTRFS_REPORT_NR_ZONES;
		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
					  &nr_zones);
		if (ret)
			goto out;

		for (i = 0; i < nr_zones; i++) {
			if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
				__set_bit(nreported, zone_info->seq_zones);
			switch (zones[i].cond) {
			case BLK_ZONE_COND_EMPTY:
				__set_bit(nreported, zone_info->empty_zones);
				break;
			case BLK_ZONE_COND_IMP_OPEN:
			case BLK_ZONE_COND_EXP_OPEN:
			case BLK_ZONE_COND_CLOSED:
				__set_bit(nreported, zone_info->active_zones);
				nactive++;
				break;
			}
			nreported++;
		}
		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
	}

	if (nreported != zone_info->nr_zones) {
		btrfs_err_in_rcu(device->fs_info,
				 "inconsistent number of zones on %s (%u/%u)",
				 rcu_str_deref(device->name), nreported,
				 zone_info->nr_zones);
		ret = -EIO;
		goto out;
	}

	if (max_active_zones) {
		if (nactive > max_active_zones) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: %u active zones on %s exceeds max_active_zones %u",
					 nactive, rcu_str_deref(device->name),
					 max_active_zones);
			ret = -EIO;
			goto out;
		}
		atomic_set(&zone_info->active_zones_left,
			   max_active_zones - nactive);
		set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
	}

	/* Validate superblock log */
	nr_zones = BTRFS_NR_SB_LOG_ZONES;
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		u32 sb_zone;
		u64 sb_wp;
		int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;

		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
		if (sb_zone + 1 >= zone_info->nr_zones)
			continue;

		ret = btrfs_get_dev_zones(device,
					  zone_start_physical(sb_zone, zone_info),
					  &zone_info->sb_zones[sb_pos],
					  &nr_zones);
		if (ret)
			goto out;

		if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
			btrfs_err_in_rcu(device->fs_info,
	"zoned: failed to read super block log zone info at devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}

		/*
		 * If zones[0] is conventional, always use the beginning of the
		 * zone to record the superblock. No need to validate in that
		 * case.
		 */
		if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
		    BLK_ZONE_TYPE_CONVENTIONAL)
			continue;

		ret = sb_write_pointer(device->bdev,
				       &zone_info->sb_zones[sb_pos], &sb_wp);
		if (ret != -ENOENT && ret) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: super block log zone corrupted devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}
	}

	kvfree(zones);

	if (bdev_is_zoned(bdev)) {
		model = "host-managed zoned";
		emulated = "";
	} else {
		model = "regular";
		emulated = "emulated ";
	}

	btrfs_info_in_rcu(fs_info,
			  "%s block device %s, %u %szones of %llu bytes",
			  model, rcu_str_deref(device->name), zone_info->nr_zones,
			  emulated, zone_info->zone_size);

	return 0;

out:
	kvfree(zones);
	btrfs_destroy_dev_zone_info(device);
	return ret;
}

void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return;

	bitmap_free(zone_info->active_zones);
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	vfree(zone_info->zone_cache);
	kfree(zone_info);
	device->zone_info = NULL;
}

struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
{
	struct btrfs_zoned_device_info *zone_info;

	zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
	if (!zone_info)
		return NULL;

	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones)
		goto out;

	bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
		    zone_info->nr_zones);

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones)
		goto out;

	bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
		    zone_info->nr_zones);

	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->active_zones)
		goto out;

	bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
		    zone_info->nr_zones);
	zone_info->zone_cache = NULL;

	return zone_info;

out:
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	bitmap_free(zone_info->active_zones);
	kfree(zone_info);
	return NULL;
}

static int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos, struct blk_zone *zone)
{
	unsigned int nr_zones = 1;
	int ret;

	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
	if (ret != 0 || !nr_zones)
		return ret ? ret : -EIO;

	return 0;
}

static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;

	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
		if (device->bdev && bdev_is_zoned(device->bdev)) {
			btrfs_err(fs_info,
				  "zoned: mode not enabled but zoned device found: %pg",
				  device->bdev);
			return -EINVAL;
		}
	}

	return 0;
}

int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{
	struct queue_limits *lim = &fs_info->limits;
	struct btrfs_device *device;
	u64 zone_size = 0;
	int ret;

	/*
	 * Host-Managed devices can't be used without the ZONED flag. With the
	 * ZONED flag, all devices can be used, using zone emulation if
	 * required.
	 */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return btrfs_check_for_zoned_device(fs_info);

	blk_set_stacking_limits(lim);

	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
		struct btrfs_zoned_device_info *zone_info = device->zone_info;

		if (!device->bdev)
			continue;

		if (!zone_size) {
			zone_size = zone_info->zone_size;
		} else if (zone_info->zone_size != zone_size) {
			btrfs_err(fs_info,
		"zoned: unequal block device zone sizes: have %llu found %llu",
				  zone_info->zone_size, zone_size);
			return -EINVAL;
		}

		/*
		 * With the zoned emulation, we can have a non-zoned device on
		 * the zoned mode. In this case, we don't have a valid max zone
		 * append size.
		 */
		if (bdev_is_zoned(device->bdev)) {
			blk_stack_limits(lim,
					 &bdev_get_queue(device->bdev)->limits,
					 0);
		}
	}

	/*
	 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
	 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
	 * check the alignment here.
	 */
	if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
		btrfs_err(fs_info,
			  "zoned: zone size %llu not aligned to stripe %u",
			  zone_size, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		btrfs_err(fs_info, "zoned: mixed block groups not supported");
		return -EINVAL;
	}

	fs_info->zone_size = zone_size;
	/*
	 * Also limit max_zone_append_size by max_segments * PAGE_SIZE.
	 * Technically, we can have multiple pages per segment. But, since
	 * we add the pages one by one to a bio, and cannot increase the
	 * metadata reservation even if it increases the number of extents, it
	 * is safe to stick with the limit.
	 */
	fs_info->max_zone_append_size = ALIGN_DOWN(
		min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT,
		     (u64)lim->max_sectors << SECTOR_SHIFT,
		     (u64)lim->max_segments << PAGE_SHIFT),
		fs_info->sectorsize);
	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
	if (fs_info->max_zone_append_size < fs_info->max_extent_size)
		fs_info->max_extent_size = fs_info->max_zone_append_size;

	/*
	 * Check mount options here, because we might change fs_info->zoned
	 * from fs_info->zone_size.
	 */
	ret = btrfs_check_mountopts_zoned(fs_info, &fs_info->mount_opt);
	if (ret)
		return ret;

	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
	return 0;
}

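/*
 * Worked example for the clamping above (illustrative limits): a device
 * reporting max_zone_append_sectors = 1024 (512KiB) and max_segments = 64
 * with 4KiB pages yields min(512KiB, max_sectors, 64 * 4KiB = 256KiB) =
 * 256KiB, rounded down to a sectorsize multiple. Data extents are then
 * capped at that size via fs_info->max_extent_size.
 */
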
int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info,
				unsigned long long *mount_opt)
{
	if (!btrfs_is_zoned(info))
		return 0;

	/*
	 * Space cache writing is not COWed. Disable that to avoid write errors
	 * in sequential zones.
	 */
	if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
		btrfs_err(info, "zoned: space cache v1 is not supported");
		return -EINVAL;
	}

	if (btrfs_raw_test_opt(*mount_opt, NODATACOW)) {
		btrfs_err(info, "zoned: NODATACOW not supported");
		return -EINVAL;
	}

	if (btrfs_raw_test_opt(*mount_opt, DISCARD_ASYNC)) {
		btrfs_info(info,
			   "zoned: async discard ignored and disabled for zoned mode");
		btrfs_clear_opt(*mount_opt, DISCARD_ASYNC);
	}

	return 0;
}

static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
			   int rw, u64 *bytenr_ret)
{
	u64 wp;
	int ret;

	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
		return 0;
	}

	ret = sb_write_pointer(bdev, zones, &wp);
	if (ret != -ENOENT && ret < 0)
		return ret;

	if (rw == WRITE) {
		struct blk_zone *reset = NULL;

		if (wp == zones[0].start << SECTOR_SHIFT)
			reset = &zones[0];
		else if (wp == zones[1].start << SECTOR_SHIFT)
			reset = &zones[1];

		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
			unsigned int nofs_flags;

			ASSERT(sb_zone_is_full(reset));

			nofs_flags = memalloc_nofs_save();
			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					       reset->start, reset->len);
			memalloc_nofs_restore(nofs_flags);
			if (ret)
				return ret;

			reset->cond = BLK_ZONE_COND_EMPTY;
			reset->wp = reset->start;
		}
	} else if (ret != -ENOENT) {
		/*
		 * For READ, we want the previous one. Move write pointer to
		 * the end of a zone, if it is at the head of a zone.
		 */
		u64 zone_end = 0;

		if (wp == zones[0].start << SECTOR_SHIFT)
			zone_end = zones[1].start + zones[1].capacity;
		else if (wp == zones[1].start << SECTOR_SHIFT)
			zone_end = zones[0].start + zones[0].capacity;
		if (zone_end)
			wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
					BTRFS_SUPER_INFO_SIZE);

		wp -= BTRFS_SUPER_INFO_SIZE;
	}

	*bytenr_ret = wp;
	return 0;
}

int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret)
{
	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
	sector_t zone_sectors;
	u32 sb_zone;
	int ret;
	u8 zone_sectors_shift;
	sector_t nr_sectors;
	u32 nr_zones;

	if (!bdev_is_zoned(bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	ASSERT(rw == READ || rw == WRITE);

	zone_sectors = bdev_zone_sectors(bdev);
	if (!is_power_of_2(zone_sectors))
		return -EINVAL;
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
				  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
				  zones);
	if (ret < 0)
		return ret;
	if (ret != BTRFS_NR_SB_LOG_ZONES)
		return -EIO;

	return sb_log_location(bdev, zones, rw, bytenr_ret);
}

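/*
 * READ-path example for sb_log_location(): suppose both log zones are full
 * and zone 0 holds the newer superblock. sb_write_pointer() then returns
 * zones[1].start, which sits at the head of a zone, so the location is
 * moved to the last BTRFS_SUPER_INFO_SIZE-aligned slot at the end of
 * zone 0's capacity and backed off by one superblock to point at the most
 * recent copy.
 */
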
int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	u32 zone_num;

	/*
	 * For a zoned filesystem on a non-zoned block device, use the same
	 * super block locations as a regular filesystem. Doing so, the super
	 * block can always be retrieved and the zoned flag of the volume
	 * detected from the super block information.
	 */
	if (!bdev_is_zoned(device->bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return -ENOENT;

	return sb_log_location(device->bdev,
			       &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
			       rw, bytenr_ret);
}

static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
				  int mirror)
{
	u32 zone_num;

	if (!zinfo)
		return false;

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return false;

	if (!test_bit(zone_num, zinfo->seq_zones))
		return false;

	return true;
}

int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	struct blk_zone *zone;
	int i;

	if (!is_sb_log_zone(zinfo, mirror))
		return 0;

	zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		/* Advance to the next zone */
		if (zone->cond == BLK_ZONE_COND_FULL) {
			zone++;
			continue;
		}

		if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		zone->wp += SUPER_INFO_SECTORS;

		if (sb_zone_is_full(zone)) {
			/*
			 * No room left to write new superblock. Since
			 * superblock is written with REQ_SYNC, it is safe to
			 * finish the zone now.
			 *
			 * If the write pointer is exactly at the capacity,
			 * explicit ZONE_FINISH is not necessary.
			 */
			if (zone->wp != zone->start + zone->capacity) {
				unsigned int nofs_flags;
				int ret;

				nofs_flags = memalloc_nofs_save();
				ret = blkdev_zone_mgmt(device->bdev,
						REQ_OP_ZONE_FINISH, zone->start,
						zone->len);
				memalloc_nofs_restore(nofs_flags);
				if (ret)
					return ret;
			}

			zone->wp = zone->start + zone->len;
			zone->cond = BLK_ZONE_COND_FULL;
		}
		return 0;
	}

	/* All the zones are FULL. Should not reach here. */
	ASSERT(0);
	return -EIO;
}

int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
	unsigned int nofs_flags;
	sector_t zone_sectors;
	sector_t nr_sectors;
	u8 zone_sectors_shift;
	u32 sb_zone;
	u32 nr_zones;
	int ret;

	zone_sectors = bdev_zone_sectors(bdev);
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	nofs_flags = memalloc_nofs_save();
	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
			       zone_start_sector(sb_zone, bdev),
			       zone_sectors * BTRFS_NR_SB_LOG_ZONES);
	memalloc_nofs_restore(nofs_flags);
	return ret;
}

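/*
 * For scale: BTRFS_SUPER_INFO_SIZE is 4KiB (SUPER_INFO_SECTORS == 8), so a
 * log zone with a 256MiB usable capacity holds 65536 superblock copies
 * before btrfs_advance_sb_log() marks it FULL and writes continue in the
 * partner zone. Once both zones fill up, the next sb_log_location(WRITE)
 * resets the zone holding the stale copy.
 */
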
/*
 * Find allocatable zones within a given region.
 *
 * @device:	the device to allocate a region on
 * @hole_start: the position of the hole to allocate the region
 * @num_bytes:	size of wanted region
 * @hole_end:	the end of the hole
 * @return:	position of allocatable zones
 *
 * Allocatable region should not contain any superblock locations.
 */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	u64 nzones = num_bytes >> shift;
	u64 pos = hole_start;
	u64 begin, end;
	bool have_sb;
	int i;

	ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));

	while (pos < hole_end) {
		begin = pos >> shift;
		end = begin + nzones;

		if (end > zinfo->nr_zones)
			return hole_end;

		/* Check if zones in the region are all empty */
		if (btrfs_dev_is_sequential(device, pos) &&
		    !bitmap_test_range_all_set(zinfo->empty_zones, begin, nzones)) {
			pos += zinfo->zone_size;
			continue;
		}

		have_sb = false;
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			u32 sb_zone;
			u64 sb_pos;

			sb_zone = sb_zone_number(shift, i);
			if (!(end <= sb_zone ||
			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
				have_sb = true;
				pos = zone_start_physical(
					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
				break;
			}

			/* We also need to exclude regular superblock positions */
			sb_pos = btrfs_sb_offset(i);
			if (!(pos + num_bytes <= sb_pos ||
			      sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
				have_sb = true;
				pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
					    zinfo->zone_size);
				break;
			}
		}
		if (!have_sb)
			break;
	}

	return pos;
}

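/*
 * Example of the superblock exclusion above: with 256MiB zones, mirror 1
 * occupies log zones 2048 and 2049. A candidate region overlapping either
 * zone is rejected and pos jumps to the start of zone 2050, the first zone
 * past the log pair, before the search continues.
 */
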
static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return true;

	if (!test_bit(zno, zone_info->active_zones)) {
		/* Active zone left? */
		if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
			return false;
		if (test_and_set_bit(zno, zone_info->active_zones)) {
			/* Someone already set the bit */
			atomic_inc(&zone_info->active_zones_left);
		}
	}

	return true;
}

static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return;

	if (test_and_clear_bit(zno, zone_info->active_zones))
		atomic_inc(&zone_info->active_zones_left);
}

int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes)
{
	unsigned int nofs_flags;
	int ret;

	*bytes = 0;
	nofs_flags = memalloc_nofs_save();
	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
			       physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT);
	memalloc_nofs_restore(nofs_flags);
	if (ret)
		return ret;

	*bytes = length;
	while (length) {
		btrfs_dev_set_zone_empty(device, physical);
		btrfs_dev_clear_active_zone(device, physical);
		physical += device->zone_info->zone_size;
		length -= device->zone_info->zone_size;
	}

	return 0;
}

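/*
 * Accounting example for the helpers above: with max_active_zones = 8 and
 * 5 zones currently active, active_zones_left is 3. Two racing callers of
 * btrfs_dev_set_active_zone() on the same zone both decrement the counter,
 * but the loser of test_and_set_bit() gives its reservation back, so the
 * net cost of activating one zone is exactly one.
 */
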
int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	unsigned long begin = start >> shift;
	unsigned long nbits = size >> shift;
	u64 pos;
	int ret;

	ASSERT(IS_ALIGNED(start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(size, zinfo->zone_size));

	if (begin + nbits > zinfo->nr_zones)
		return -ERANGE;

	/* All the zones are conventional */
	if (bitmap_test_range_all_zero(zinfo->seq_zones, begin, nbits))
		return 0;

	/* All the zones are sequential and empty */
	if (bitmap_test_range_all_set(zinfo->seq_zones, begin, nbits) &&
	    bitmap_test_range_all_set(zinfo->empty_zones, begin, nbits))
		return 0;

	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
		u64 reset_bytes;

		if (!btrfs_dev_is_sequential(device, pos) ||
		    btrfs_dev_is_empty_zone(device, pos))
			continue;

		/* Free regions should be empty */
		btrfs_warn_in_rcu(
			device->fs_info,
		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
			rcu_str_deref(device->name), device->devid, pos >> shift);
		WARN_ON_ONCE(1);

		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
					      &reset_bytes);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consisting of conventional zones. It points to the
 * end of the highest addressed extent in the block group as the
 * allocation offset.
 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
				   u64 *offset_ret, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	u64 length;

	/*
	 * Avoid tree lookups for a new block group, there's no use for it.
	 * It must always be 0.
	 *
	 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
	 * For a new block group, this function is called from
	 * btrfs_make_block_group() which is already taking the chunk mutex.
	 * Thus, we cannot call calculate_alloc_pointer() which takes extent
	 * buffer locks to avoid deadlock.
	 */
	if (new) {
		*offset_ret = 0;
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = cache->start + cache->length;
	key.type = 0;
	key.offset = 0;

	root = btrfs_extent_root(fs_info, key.objectid);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* We should not find the exact match */
	if (!ret)
		ret = -EUCLEAN;
	if (ret < 0)
		goto out;

	ret = btrfs_previous_extent_item(root, path, cache->start);
	if (ret) {
		if (ret == 1) {
			ret = 0;
			*offset_ret = 0;
		}
		goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
		length = found_key.offset;
	else
		length = fs_info->nodesize;

	if (!(found_key.objectid >= cache->start &&
	      found_key.objectid + length <= cache->start + cache->length)) {
		ret = -EUCLEAN;
		goto out;
	}
	*offset_ret = found_key.objectid + length - cache->start;
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

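/*
 * Worked example (illustrative addresses): for a block group at
 * [1GiB, 1GiB + 256MiB), the search starts just past the end of the range
 * and walks back to the highest extent item. If that is a data extent at
 * logical 1GiB + 200MiB with length 16KiB, the allocation pointer becomes
 * 200MiB + 16KiB relative to the block group start.
 */
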
struct zone_info {
	u64 physical;
	u64 capacity;
	u64 alloc_offset;
};

static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
				struct zone_info *info, unsigned long *active,
				struct btrfs_chunk_map *map)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct btrfs_device *device;
	int dev_replace_is_ongoing = 0;
	unsigned int nofs_flag;
	struct blk_zone zone;
	int ret;

	info->physical = map->stripes[zone_idx].physical;

	down_read(&dev_replace->rwsem);
	device = map->stripes[zone_idx].dev;

	if (!device->bdev) {
		up_read(&dev_replace->rwsem);
		info->alloc_offset = WP_MISSING_DEV;
		return 0;
	}

	/* Consider a zone as active if we can allow any number of active zones. */
	if (!device->zone_info->max_active_zones)
		__set_bit(zone_idx, active);

	if (!btrfs_dev_is_sequential(device, info->physical)) {
		up_read(&dev_replace->rwsem);
		info->alloc_offset = WP_CONVENTIONAL;
		return 0;
	}

	/* This zone will be used for allocation, so mark this zone non-empty. */
	btrfs_dev_clear_zone_empty(device, info->physical);

	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
		btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);

	/*
	 * The group is mapped to a sequential zone. Get the zone write pointer
	 * to determine the allocation offset within the zone.
	 */
	WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
	nofs_flag = memalloc_nofs_save();
	ret = btrfs_get_dev_zone(device, info->physical, &zone);
	memalloc_nofs_restore(nofs_flag);
	if (ret) {
		up_read(&dev_replace->rwsem);
		if (ret != -EIO && ret != -EOPNOTSUPP)
			return ret;
		info->alloc_offset = WP_MISSING_DEV;
		return 0;
	}

	if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
		btrfs_err_in_rcu(fs_info,
		"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
				 zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
				 device->devid);
		up_read(&dev_replace->rwsem);
		return -EIO;
	}

	info->capacity = (zone.capacity << SECTOR_SHIFT);

	switch (zone.cond) {
	case BLK_ZONE_COND_OFFLINE:
	case BLK_ZONE_COND_READONLY:
		btrfs_err(fs_info,
		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
			  (info->physical >> device->zone_info->zone_size_shift),
			  rcu_str_deref(device->name), device->devid);
		info->alloc_offset = WP_MISSING_DEV;
		break;
	case BLK_ZONE_COND_EMPTY:
		info->alloc_offset = 0;
		break;
	case BLK_ZONE_COND_FULL:
		info->alloc_offset = info->capacity;
		break;
	default:
		/* Partially used zone. */
		info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
		__set_bit(zone_idx, active);
		break;
	}

	up_read(&dev_replace->rwsem);

	return 0;
}

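/*
 * The WP_MISSING_DEV ((u64)-1) and WP_CONVENTIONAL ((u64)-2) sentinels set
 * above are not real offsets; the per-profile loaders below skip or
 * special-case stripes carrying them. For example, a RAID1 stripe on a
 * missing device is simply ignored when comparing write pointers.
 */
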
static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
					 struct zone_info *info,
					 unsigned long *active)
{
	if (info->alloc_offset == WP_MISSING_DEV) {
		btrfs_err(bg->fs_info,
			  "zoned: cannot recover write pointer for zone %llu",
			  info->physical);
		return -EIO;
	}

	bg->alloc_offset = info->alloc_offset;
	bg->zone_capacity = info->capacity;
	if (test_bit(0, active))
		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
	return 0;
}

static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
				      struct btrfs_chunk_map *map,
				      struct zone_info *zone_info,
				      unsigned long *active)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
		btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree");
		return -EINVAL;
	}

	if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
		btrfs_err(bg->fs_info,
			  "zoned: cannot recover write pointer for zone %llu",
			  zone_info[0].physical);
		return -EIO;
	}
	if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
		btrfs_err(bg->fs_info,
			  "zoned: cannot recover write pointer for zone %llu",
			  zone_info[1].physical);
		return -EIO;
	}
	if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
		btrfs_err(bg->fs_info,
			  "zoned: write pointer offset mismatch of zones in DUP profile");
		return -EIO;
	}

	if (test_bit(0, active) != test_bit(1, active)) {
		if (!btrfs_zone_activate(bg))
			return -EIO;
	} else if (test_bit(0, active)) {
		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
	}

	bg->alloc_offset = zone_info[0].alloc_offset;
	bg->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity);
	return 0;
}

static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
					struct btrfs_chunk_map *map,
					struct zone_info *zone_info,
					unsigned long *active)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	int i;

	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
			  btrfs_bg_type_to_raid_name(map->type));
		return -EINVAL;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
			continue;

		if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_err(fs_info,
			"zoned: write pointer offset mismatch of zones in %s profile",
				  btrfs_bg_type_to_raid_name(map->type));
			return -EIO;
		}
		if (test_bit(0, active) != test_bit(i, active)) {
			if (!btrfs_test_opt(fs_info, DEGRADED) &&
			    !btrfs_zone_activate(bg)) {
				return -EIO;
			}
		} else {
			if (test_bit(0, active))
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
		}
		/* In case a device is missing we have a cap of 0, so don't use it. */
		bg->zone_capacity = min_not_zero(zone_info[0].capacity,
						 zone_info[1].capacity);
	}

	if (zone_info[0].alloc_offset != WP_MISSING_DEV)
		bg->alloc_offset = zone_info[0].alloc_offset;
	else
		bg->alloc_offset = zone_info[i - 1].alloc_offset;

	return 0;
}

static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
					struct btrfs_chunk_map *map,
					struct zone_info *zone_info,
					unsigned long *active)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
			  btrfs_bg_type_to_raid_name(map->type));
		return -EINVAL;
	}

	for (int i = 0; i < map->num_stripes; i++) {
		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
			continue;

		if (test_bit(0, active) != test_bit(i, active)) {
			if (!btrfs_zone_activate(bg))
				return -EIO;
		} else {
			if (test_bit(0, active))
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
		}
		bg->zone_capacity += zone_info[i].capacity;
		bg->alloc_offset += zone_info[i].alloc_offset;
	}

	return 0;
}

static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
					 struct btrfs_chunk_map *map,
					 struct zone_info *zone_info,
					 unsigned long *active)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
			  btrfs_bg_type_to_raid_name(map->type));
		return -EINVAL;
	}

	for (int i = 0; i < map->num_stripes; i++) {
		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
			continue;

		if (test_bit(0, active) != test_bit(i, active)) {
			if (!btrfs_zone_activate(bg))
				return -EIO;
		} else {
			if (test_bit(0, active))
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
		}

		if ((i % map->sub_stripes) == 0) {
			bg->zone_capacity += zone_info[i].capacity;
			bg->alloc_offset += zone_info[i].alloc_offset;
		}
	}

	return 0;
}

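/*
 * Capacity accounting example for the striped loaders above: a RAID0 block
 * group across two zones with 236MiB usable capacity each exposes a
 * zone_capacity of 472MiB, and its alloc_offset is the sum of the two
 * per-zone write pointer offsets. RAID10 counts only one stripe per
 * sub-stripe pair, since the mirrored copies hold the same data.
 */
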
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_chunk_map *map;
	u64 logical = cache->start;
	u64 length = cache->length;
	struct zone_info *zone_info = NULL;
	int ret;
	int i;
	unsigned long *active = NULL;
	u64 last_alloc = 0;
	u32 num_sequential = 0, num_conventional = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	/* Sanity check */
	if (!IS_ALIGNED(length, fs_info->zone_size)) {
		btrfs_err(fs_info,
		"zoned: block group %llu len %llu unaligned to zone size %llu",
			  logical, length, fs_info->zone_size);
		return -EIO;
	}

	map = btrfs_find_chunk_map(fs_info, logical, length);
	if (!map)
		return -EINVAL;

	cache->physical_map = map;

	zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
	if (!zone_info) {
		ret = -ENOMEM;
		goto out;
	}

	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
	if (!active) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
		if (ret)
			goto out;

		if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
			num_conventional++;
		else
			num_sequential++;
	}

	if (num_sequential > 0)
		set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);

	if (num_conventional > 0) {
		/* Zone capacity is always zone size in emulation */
		cache->zone_capacity = cache->length;
		ret = calculate_alloc_pointer(cache, &last_alloc, new);
		if (ret) {
			btrfs_err(fs_info,
			"zoned: failed to determine allocation offset of bg %llu",
				  cache->start);
			goto out;
		} else if (map->num_stripes == num_conventional) {
			cache->alloc_offset = last_alloc;
			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
			goto out;
		}
	}

	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case 0: /* single */
		ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
		break;
	case BTRFS_BLOCK_GROUP_DUP:
		ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
		break;
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_RAID1C3:
	case BTRFS_BLOCK_GROUP_RAID1C4:
		ret = btrfs_load_block_group_raid1(cache, map, zone_info, active);
		break;
	case BTRFS_BLOCK_GROUP_RAID0:
		ret = btrfs_load_block_group_raid0(cache, map, zone_info, active);
		break;
	case BTRFS_BLOCK_GROUP_RAID10:
		ret = btrfs_load_block_group_raid10(cache, map, zone_info, active);
		break;
	case BTRFS_BLOCK_GROUP_RAID5:
	case BTRFS_BLOCK_GROUP_RAID6:
	default:
		btrfs_err(fs_info, "zoned: profile %s not yet supported",
			  btrfs_bg_type_to_raid_name(map->type));
		ret = -EINVAL;
		goto out;
	}

out:
	/* Reject non SINGLE data profiles without RST */
	if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
	    (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
	    !fs_info->stripe_root) {
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
			  btrfs_bg_type_to_raid_name(map->type));
		return -EINVAL;
	}

	if (cache->alloc_offset > cache->zone_capacity) {
		btrfs_err(fs_info,
"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
			  cache->alloc_offset, cache->zone_capacity,
			  cache->start);
		ret = -EIO;
	}

	/* An extent is allocated after the write pointer */
	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
		btrfs_err(fs_info,
			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
			  logical, last_alloc, cache->alloc_offset);
		ret = -EIO;
	}

	if (!ret) {
		cache->meta_write_pointer = cache->alloc_offset + cache->start;
		if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
			btrfs_get_block_group(cache);
			spin_lock(&fs_info->zone_active_bgs_lock);
			list_add_tail(&cache->active_bg_list,
				      &fs_info->zone_active_bgs);
			spin_unlock(&fs_info->zone_active_bgs_lock);
		}
	} else {
		btrfs_free_chunk_map(cache->physical_map);
		cache->physical_map = NULL;
	}
	bitmap_free(active);
	kfree(zone_info);

	return ret;
}

void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
{
	u64 unusable, free;

	if (!btrfs_is_zoned(cache->fs_info))
		return;

	WARN_ON(cache->bytes_super != 0);
	unusable = (cache->alloc_offset - cache->used) +
		   (cache->length - cache->zone_capacity);
	free = cache->zone_capacity - cache->alloc_offset;

	/* We only need ->free_space in ALLOC_SEQ block groups */
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->free_space_ctl->free_space = free;
	cache->zone_unusable = unusable;
}

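/*
 * Worked example for the math above (illustrative numbers): a 256MiB block
 * group with a 250MiB zone capacity, alloc_offset of 100MiB, and 80MiB of
 * live data has unusable = (100 - 80) + (256 - 250) = 26MiB and
 * free = 250 - 100 = 150MiB. Bytes behind the write pointer that are no
 * longer referenced only become free space again once the zone is reset.
 */
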
bool btrfs_use_zone_append(struct btrfs_bio *bbio)
{
	u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct btrfs_block_group *cache;
	bool ret = false;

	if (!btrfs_is_zoned(fs_info))
		return false;

	if (!inode || !is_data_inode(inode))
		return false;

	if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
		return false;

	/*
	 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
	 * extent layout the relocation code has.
	 * Furthermore we have set aside our own block group from which only the
	 * relocation "process" can allocate and make sure only one process at a
	 * time can add pages to an extent that gets relocated, so it's safe to
	 * use regular REQ_OP_WRITE for this special case.
	 */
	if (btrfs_is_data_reloc_root(inode->root))
		return false;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);
	if (!cache)
		return false;

	ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
	btrfs_put_block_group(cache);

	return ret;
}

void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
{
	const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	struct btrfs_ordered_sum *sum = bbio->sums;

	if (physical < bbio->orig_physical)
		sum->logical -= bbio->orig_physical - physical;
	else
		sum->logical += physical - bbio->orig_physical;
}

static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
					u64 logical)
{
	struct extent_map_tree *em_tree = &ordered->inode->extent_tree;
	struct extent_map *em;

	ordered->disk_bytenr = logical;

	write_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, ordered->file_offset,
				   ordered->num_bytes);
	/* The em should be a new COW extent, thus it should not have an offset. */
	ASSERT(em->offset == 0);
	em->disk_bytenr = logical;
	free_extent_map(em);
	write_unlock(&em_tree->lock);
}

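/*
 * Example of the shift handled above: a zone append bio is submitted
 * against the expected physical location, but the device decides the
 * actual location only at completion. If the bio lands 64KiB past
 * bbio->orig_physical, btrfs_record_physical_zoned() shifts sum->logical
 * by +64KiB, and btrfs_rewrite_logical_zoned() later updates the ordered
 * extent and extent map to the address that was really written.
 */
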
static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
				      u64 logical, u64 len)
{
	struct btrfs_ordered_extent *new;

	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
	    split_extent_map(ordered->inode, ordered->file_offset,
			     ordered->num_bytes, len, logical))
		return false;

	new = btrfs_split_ordered_extent(ordered, len);
	if (IS_ERR(new))
		return false;
	new->disk_bytenr = logical;
	btrfs_finish_one_ordered(new);
	return true;
}

void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_sum *sum;
	u64 logical, len;

	/*
	 * Write to pre-allocated region is for the data relocation, and so
	 * it should use WRITE operation. No split/rewrite are necessary.
	 */
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
		return;

	/* The ordered->list could only be empty in the pre-alloc case handled above. */
	ASSERT(!list_empty(&ordered->list));
	sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
	logical = sum->logical;
	len = sum->len;

	while (len < ordered->disk_num_bytes) {
		sum = list_next_entry(sum, list);
		if (sum->logical == logical + len) {
			len += sum->len;
			continue;
		}
		if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
			set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
			btrfs_err(fs_info, "failed to split ordered extent");
			goto out;
		}
		logical = sum->logical;
		len = sum->len;
	}

	if (ordered->disk_bytenr != logical)
		btrfs_rewrite_logical_zoned(ordered, logical);

out:
	/*
	 * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures
	 * were allocated by btrfs_alloc_dummy_sum only to record the logical
	 * addresses and don't contain actual checksums. We thus must free them
	 * here so that we don't attempt to log the csums later.
	 */
	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
	    test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state)) {
		while ((sum = list_first_entry_or_null(&ordered->list,
						       typeof(*sum), list))) {
			list_del(&sum->list);
			kfree(sum);
		}
	}
}

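/*
 * Splitting example for the loop above: an ordered extent of 1MiB whose
 * bios completed at physical X for the first 768KiB and at a discontiguous
 * Y for the last 256KiB is split at the discontinuity. The first part is
 * finished as its own 768KiB ordered extent at X; the remainder continues
 * at Y and gets its disk_bytenr rewritten if it no longer matches.
 */
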
void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_sum *sum;
	u64 logical, len;

	/*
	 * A write to a pre-allocated region is only used for data relocation,
	 * and so it uses a regular WRITE operation. No split/rewrite is
	 * necessary.
	 */
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
		return;

	/*
	 * ordered->list can only be empty in the pre-alloc case handled
	 * above, so it must be non-empty here.
	 */
	ASSERT(!list_empty(&ordered->list));
	sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
	logical = sum->logical;
	len = sum->len;

	while (len < ordered->disk_num_bytes) {
		sum = list_next_entry(sum, list);
		if (sum->logical == logical + len) {
			len += sum->len;
			continue;
		}
		if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
			set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
			btrfs_err(fs_info, "failed to split ordered extent");
			goto out;
		}
		logical = sum->logical;
		len = sum->len;
	}

	if (ordered->disk_bytenr != logical)
		btrfs_rewrite_logical_zoned(ordered, logical);

out:
	/*
	 * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures
	 * were allocated by btrfs_alloc_dummy_sum only to record the logical
	 * addresses and don't contain actual checksums. We thus must free them
	 * here so that we don't attempt to log the csums later.
	 */
	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
	    test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state)) {
		while ((sum = list_first_entry_or_null(&ordered->list,
						       typeof(*sum), list))) {
			list_del(&sum->list);
			kfree(sum);
		}
	}
}
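
/*
 * Make sure the metadata/system block group @ctx->zoned_bg is active, so
 * that the extent buffers in it can be written out.
 *
 * If the zone is not active yet, try to activate it. For a non-tree-log
 * block group this may require finishing the previously active block group
 * first, i.e. pivoting the single active metadata/system block group
 * tracked in @active_bg.
 *
 * Returns true when the block group is active and writable.
 */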
static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
			       struct btrfs_block_group **active_bg)
{
	const struct writeback_control *wbc = ctx->wbc;
	struct btrfs_block_group *block_group = ctx->zoned_bg;
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
		return true;

	if (fs_info->treelog_bg == block_group->start) {
		if (!btrfs_zone_activate(block_group)) {
			int ret_fin = btrfs_zone_finish_one_bg(fs_info);

			if (ret_fin != 1 || !btrfs_zone_activate(block_group))
				return false;
		}
	} else if (*active_bg != block_group) {
		struct btrfs_block_group *tgt = *active_bg;

		/* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */
		lockdep_assert_held(&fs_info->zoned_meta_io_lock);

		if (tgt) {
			/*
			 * If there is unsent IO left in the allocated area,
			 * we cannot wait for it to complete, as that may
			 * cause a deadlock.
			 */
			if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
				if (wbc->sync_mode == WB_SYNC_NONE ||
				    (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync))
					return false;
			}

			/* Pivot active metadata/system block group. */
			btrfs_zoned_meta_io_unlock(fs_info);
			wait_eb_writebacks(tgt);
			do_zone_finish(tgt, true);
			btrfs_zoned_meta_io_lock(fs_info);
			if (*active_bg == tgt) {
				btrfs_put_block_group(tgt);
				*active_bg = NULL;
			}
		}
		if (!btrfs_zone_activate(block_group))
			return false;
		if (*active_bg != block_group) {
			ASSERT(*active_bg == NULL);
			*active_bg = block_group;
			btrfs_get_block_group(block_group);
		}
	}

	return true;
}

/*
 * Check if @ctx->eb is aligned to the write pointer.
 *
 * Return:
 *   0:       @ctx->eb is at the write pointer. You can write it.
 *   -EAGAIN: There is a hole. The caller should handle the case.
 *   -EBUSY:  There is a hole, but the caller can just bail out.
 */
int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				   struct btrfs_eb_write_context *ctx)
{
	const struct writeback_control *wbc = ctx->wbc;
	const struct extent_buffer *eb = ctx->eb;
	struct btrfs_block_group *block_group = ctx->zoned_bg;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	if (block_group) {
		if (block_group->start > eb->start ||
		    block_group->start + block_group->length <= eb->start) {
			btrfs_put_block_group(block_group);
			block_group = NULL;
			ctx->zoned_bg = NULL;
		}
	}

	if (!block_group) {
		block_group = btrfs_lookup_block_group(fs_info, eb->start);
		if (!block_group)
			return 0;
		ctx->zoned_bg = block_group;
	}

	if (block_group->meta_write_pointer == eb->start) {
		struct btrfs_block_group **tgt;

		if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
			return 0;

		if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)
			tgt = &fs_info->active_system_bg;
		else
			tgt = &fs_info->active_meta_bg;
		if (check_bg_is_active(ctx, tgt))
			return 0;
	}

	/*
	 * Since we may release fs_info->zoned_meta_io_lock, someone else may
	 * already have started writing this eb. In that case, we can just
	 * bail out.
	 */
	if (block_group->meta_write_pointer > eb->start)
		return -EBUSY;

	/* If for_sync, this hole will be filled with a transaction commit. */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
		return -EAGAIN;
	return -EBUSY;
}

int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
{
	if (!btrfs_dev_is_sequential(device, physical))
		return -EOPNOTSUPP;

	return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
				    length >> SECTOR_SHIFT, GFP_NOFS, 0);
}
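
/*
 * Read the blk_zone info covering @logical.
 *
 * Map @logical to its physical location(s) and report the zone from the
 * first present and working mirror device. RAID56 profiles are rejected
 * with -EINVAL, as a physical address in a striped profile does not
 * correspond to a single device zone.
 */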
static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
			  struct blk_zone *zone)
{
	struct btrfs_io_context *bioc = NULL;
	u64 mapped_length = PAGE_SIZE;
	unsigned int nofs_flag;
	int nmirrors;
	int i, ret;

	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			      &mapped_length, &bioc, NULL, NULL);
	if (ret || !bioc || mapped_length < PAGE_SIZE) {
		ret = -EIO;
		goto out_put_bioc;
	}

	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EINVAL;
		goto out_put_bioc;
	}

	nofs_flag = memalloc_nofs_save();
	nmirrors = (int)bioc->num_stripes;
	for (i = 0; i < nmirrors; i++) {
		u64 physical = bioc->stripes[i].physical;
		struct btrfs_device *dev = bioc->stripes[i].dev;

		/* Missing device */
		if (!dev->bdev)
			continue;

		ret = btrfs_get_dev_zone(dev, physical, zone);
		/* Failing device */
		if (ret == -EIO || ret == -EOPNOTSUPP)
			continue;
		break;
	}
	memalloc_nofs_restore(nofs_flag);
out_put_bioc:
	btrfs_put_bioc(bioc);
	return ret;
}

/*
 * Synchronize the write pointer in the zone at @physical_start on @tgt_dev
 * by filling zeros from @physical_pos up to the write pointer of the
 * dev-replace source device.
 */
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
				  u64 physical_start, u64 physical_pos)
{
	struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
	struct blk_zone zone;
	u64 length;
	u64 wp;
	int ret;

	if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
		return 0;

	ret = read_zone_info(fs_info, logical, &zone);
	if (ret)
		return ret;

	wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);

	if (physical_pos == wp)
		return 0;

	if (physical_pos > wp)
		return -EUCLEAN;

	length = wp - physical_pos;
	return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
}

/*
 * Activate a block group and the underlying device zones.
 *
 * @block_group: the block group to activate
 *
 * Return: true on success, false otherwise
 */
bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_chunk_map *map;
	struct btrfs_device *device;
	u64 physical;
	const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA);
	bool ret;
	int i;

	if (!btrfs_is_zoned(block_group->fs_info))
		return true;

	map = block_group->physical_map;

	spin_lock(&fs_info->zone_active_bgs_lock);
	spin_lock(&block_group->lock);
	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
		ret = true;
		goto out_unlock;
	}

	/* No space left */
	if (btrfs_zoned_bg_is_full(block_group)) {
		ret = false;
		goto out_unlock;
	}

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_zoned_device_info *zinfo;
		int reserved = 0;

		device = map->stripes[i].dev;
		physical = map->stripes[i].physical;
		zinfo = device->zone_info;

		if (zinfo->max_active_zones == 0)
			continue;

		/*
		 * For a data block group, leave active zones for one
		 * metadata block group and one system block group.
		 */
		if (is_data)
			reserved = zinfo->reserved_active_zones;
		if (atomic_read(&zinfo->active_zones_left) <= reserved) {
			ret = false;
			goto out_unlock;
		}

		if (!btrfs_dev_set_active_zone(device, physical)) {
			/* Cannot activate the zone */
			ret = false;
			goto out_unlock;
		}
		if (!is_data)
			zinfo->reserved_active_zones--;
	}

	/* Successfully activated all the zones */
	set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
	spin_unlock(&block_group->lock);

	/* For the active block group list */
	btrfs_get_block_group(block_group);
	list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	return true;

out_unlock:
	spin_unlock(&block_group->lock);
	spin_unlock(&fs_info->zone_active_bgs_lock);
	return ret;
}

/* Wait for writeback of all extent buffers in @block_group to finish. */
static void wait_eb_writebacks(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	const u64 end = block_group->start + block_group->length;
	struct radix_tree_iter iter;
	struct extent_buffer *eb;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
				 block_group->start >> fs_info->sectorsize_bits) {
		eb = radix_tree_deref_slot(slot);
		if (!eb)
			continue;
		if (radix_tree_deref_retry(eb)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (eb->start < block_group->start)
			continue;
		if (eb->start >= end)
			break;

		slot = radix_tree_iter_resume(slot, &iter);
		rcu_read_unlock();
		wait_on_extent_buffer_writeback(eb);
		rcu_read_lock();
	}
	rcu_read_unlock();
}
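
/*
 * Deactivate a block group and finish the underlying device zones.
 *
 * @fully_written tells whether all the allocated space in the block group
 * has already been written. If not, the block group is first marked
 * read-only, and in-flight writes (reservations, ordered extents and
 * extent buffer writeback) are drained before the zones are finished, so
 * that no write can race with the REQ_OP_ZONE_FINISH commands.
 */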
static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_chunk_map *map;
	const bool is_metadata = (block_group->flags &
			(BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int ret = 0;
	int i;

	spin_lock(&block_group->lock);
	if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
		spin_unlock(&block_group->lock);
		return 0;
	}

	/* Check if we have unwritten allocated space */
	if (is_metadata &&
	    block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
		spin_unlock(&block_group->lock);
		return -EAGAIN;
	}

	/*
	 * If we are sure that the block group is full (= no more room left for
	 * new allocation) and the IO for the last usable block is completed, we
	 * don't need to wait for the other IOs. This holds because we ensure
	 * sequential IO submission using the ZONE_APPEND command for data and
	 * block_group->meta_write_pointer for metadata.
	 */
	if (!fully_written) {
		if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
			spin_unlock(&block_group->lock);
			return -EAGAIN;
		}
		spin_unlock(&block_group->lock);

		ret = btrfs_inc_block_group_ro(block_group, false);
		if (ret)
			return ret;

		/* Ensure all writes in this block group finish */
		btrfs_wait_block_group_reservations(block_group);
		/* No need to wait for NOCOW writers. Zoned mode does not allow that. */
		btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group);
		/* Wait for extent buffers to be written. */
		if (is_metadata)
			wait_eb_writebacks(block_group);

		spin_lock(&block_group->lock);

		/*
		 * Bail out if someone already deactivated the block group, if
		 * space is still reserved in it, or if it is being used for
		 * data relocation.
		 */
		if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
			      &block_group->runtime_flags)) {
			spin_unlock(&block_group->lock);
			btrfs_dec_block_group_ro(block_group);
			return 0;
		}

		if (block_group->reserved ||
		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
			     &block_group->runtime_flags)) {
			spin_unlock(&block_group->lock);
			btrfs_dec_block_group_ro(block_group);
			return -EAGAIN;
		}
	}

	clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
	block_group->alloc_offset = block_group->zone_capacity;
	if (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM))
		block_group->meta_write_pointer = block_group->start +
						  block_group->zone_capacity;
	block_group->free_space_ctl->free_space = 0;
	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);
	spin_unlock(&block_group->lock);

	down_read(&dev_replace->rwsem);
	map = block_group->physical_map;
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		const u64 physical = map->stripes[i].physical;
		struct btrfs_zoned_device_info *zinfo = device->zone_info;
		unsigned int nofs_flags;

		if (zinfo->max_active_zones == 0)
			continue;

		nofs_flags = memalloc_nofs_save();
		ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
				       physical >> SECTOR_SHIFT,
				       zinfo->zone_size >> SECTOR_SHIFT);
		memalloc_nofs_restore(nofs_flags);

		if (ret) {
			up_read(&dev_replace->rwsem);
			return ret;
		}

		if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
			zinfo->reserved_active_zones++;
		btrfs_dev_clear_active_zone(device, physical);
	}
	up_read(&dev_replace->rwsem);

	if (!fully_written)
		btrfs_dec_block_group_ro(block_group);

	spin_lock(&fs_info->zone_active_bgs_lock);
	ASSERT(!list_empty(&block_group->active_bg_list));
	list_del_init(&block_group->active_bg_list);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	/* For active_bg_list */
	btrfs_put_block_group(block_group);

	clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);

	return 0;
}

int btrfs_zone_finish(struct btrfs_block_group *block_group)
{
	if (!btrfs_is_zoned(block_group->fs_info))
		return 0;

	return do_zone_finish(block_group, false);
}

/*
 * Check whether a new block group with the given profile @flags could still
 * activate all of its zones on at least one device in @fs_devices. If not,
 * set BTRFS_FS_NEED_ZONE_FINISH so that an active zone gets finished before
 * new allocations are attempted.
 */
bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
{
	struct btrfs_fs_info *fs_info = fs_devices->fs_info;
	struct btrfs_device *device;
	bool ret = false;

	if (!btrfs_is_zoned(fs_info))
		return true;

	/* Check if there is a device with active zones left */
	mutex_lock(&fs_info->chunk_mutex);
	spin_lock(&fs_info->zone_active_bgs_lock);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		struct btrfs_zoned_device_info *zinfo = device->zone_info;
		int reserved = 0;

		if (!device->bdev)
			continue;

		if (!zinfo->max_active_zones) {
			ret = true;
			break;
		}

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			reserved = zinfo->reserved_active_zones;

		switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		case 0: /* single */
			ret = (atomic_read(&zinfo->active_zones_left) >= (1 + reserved));
			break;
		case BTRFS_BLOCK_GROUP_DUP:
			ret = (atomic_read(&zinfo->active_zones_left) >= (2 + reserved));
			break;
		}
		if (ret)
			break;
	}
	spin_unlock(&fs_info->zone_active_bgs_lock);
	mutex_unlock(&fs_info->chunk_mutex);

	if (!ret)
		set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);

	return ret;
}
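
/*
 * Finish the zone of the block group containing @logical once the write at
 * [@logical, @logical + @length) has completed and no further allocation
 * can fit into the remaining zone capacity.
 */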
void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
{
	struct btrfs_block_group *block_group;
	u64 min_alloc_bytes;

	if (!btrfs_is_zoned(fs_info))
		return;

	block_group = btrfs_lookup_block_group(fs_info, logical);
	ASSERT(block_group);

	/* No MIXED_BG on zoned btrfs. */
	if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
		min_alloc_bytes = fs_info->sectorsize;
	else
		min_alloc_bytes = fs_info->nodesize;

	/* Bail out if we can allocate more data from this block group. */
	if (logical + length + min_alloc_bytes <=
	    block_group->start + block_group->zone_capacity)
		goto out;

	do_zone_finish(block_group, true);

out:
	btrfs_put_block_group(block_group);
}

static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
{
	struct btrfs_block_group *bg =
		container_of(work, struct btrfs_block_group, zone_finish_work);

	wait_on_extent_buffer_writeback(bg->last_eb);
	free_extent_buffer(bg->last_eb);
	btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length);
	btrfs_put_block_group(bg);
}

/*
 * Schedule zone finishing of @bg once the writeback of @eb completes, but
 * only if another extent buffer of the same size would no longer fit
 * between @eb and the end of the zone capacity.
 */
void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
				   struct extent_buffer *eb)
{
	if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
	    eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
		return;

	if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
		btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
			  bg->start);
		return;
	}

	/* For the work */
	btrfs_get_block_group(bg);
	atomic_inc(&eb->refs);
	bg->last_eb = eb;
	INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
	queue_work(system_unbound_wq, &bg->zone_finish_work);
}

void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->relocation_bg_lock);
	if (fs_info->data_reloc_bg == bg->start)
		fs_info->data_reloc_bg = 0;
	spin_unlock(&fs_info->relocation_bg_lock);
}

void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	if (!btrfs_is_zoned(fs_info))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->zone_info) {
			vfree(device->zone_info->zone_cache);
			device->zone_info->zone_cache = NULL;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}

/*
 * Decide whether background reclaim of zoned block groups should run. It
 * should once the space used across all devices reaches
 * fs_info->bg_reclaim_threshold percent of the total device size.
 */
bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 used = 0;
	u64 total = 0;
	u64 factor;

	ASSERT(btrfs_is_zoned(fs_info));

	if (fs_info->bg_reclaim_threshold == 0)
		return false;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;

		total += device->disk_total_bytes;
		used += device->bytes_used;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	factor = div64_u64(used * 100, total);
	return factor >= fs_info->bg_reclaim_threshold;
}

void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
				       u64 length)
{
	struct btrfs_block_group *block_group;

	if (!btrfs_is_zoned(fs_info))
		return;

	block_group = btrfs_lookup_block_group(fs_info, logical);
	/* It should be called on a previous data relocation block group. */
	ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));

	spin_lock(&block_group->lock);
	if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
		goto out;

	/* All relocation extents are written. */
	if (block_group->start + block_group->alloc_offset == logical + length) {
		/*
		 * Now, release this block group for further allocations and
		 * zone finish.
		 */
		clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
			  &block_group->runtime_flags);
	}

out:
	spin_unlock(&block_group->lock);
	btrfs_put_block_group(block_group);
}
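
/*
 * Pick the active block group with the least remaining free space, i.e. the
 * cheapest one to fill up, and finish its zone to free up an active zone
 * resource. Callers such as check_bg_is_active() retry activation after
 * this returns 1.
 *
 * Return: 1 if a block group was finished, 0 if no suitable candidate was
 * found, or a negative errno on failure.
 */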
int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_block_group *min_bg = NULL;
	u64 min_avail = U64_MAX;
	int ret;

	spin_lock(&fs_info->zone_active_bgs_lock);
	list_for_each_entry(block_group, &fs_info->zone_active_bgs,
			    active_bg_list) {
		u64 avail;

		spin_lock(&block_group->lock);
		if (block_group->reserved || block_group->alloc_offset == 0 ||
		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
			spin_unlock(&block_group->lock);
			continue;
		}

		avail = block_group->zone_capacity - block_group->alloc_offset;
		if (min_avail > avail) {
			if (min_bg)
				btrfs_put_block_group(min_bg);
			min_bg = block_group;
			min_avail = avail;
			btrfs_get_block_group(min_bg);
		}
		spin_unlock(&block_group->lock);
	}
	spin_unlock(&fs_info->zone_active_bgs_lock);

	if (!min_bg)
		return 0;

	ret = btrfs_zone_finish(min_bg);
	btrfs_put_block_group(min_bg);

	return ret < 0 ? ret : 1;
}

/*
 * Try to activate one metadata/system block group in @space_info. If none
 * can be activated and @do_finish is set, keep finishing nearly full block
 * groups via btrfs_zone_finish_one_bg() and retry until an activation
 * succeeds or no more progress can be made.
 *
 * Return: 1 if a block group was activated, 0 if nothing was done, or a
 * negative errno on failure.
 */
int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				bool do_finish)
{
	struct btrfs_block_group *bg;
	int index;

	if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
		return 0;

	for (;;) {
		int ret;
		bool need_finish = false;

		down_read(&space_info->groups_sem);
		for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
			list_for_each_entry(bg, &space_info->block_groups[index],
					    list) {
				if (!spin_trylock(&bg->lock))
					continue;
				if (btrfs_zoned_bg_is_full(bg) ||
				    test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
					     &bg->runtime_flags)) {
					spin_unlock(&bg->lock);
					continue;
				}
				spin_unlock(&bg->lock);

				if (btrfs_zone_activate(bg)) {
					up_read(&space_info->groups_sem);
					return 1;
				}

				need_finish = true;
			}
		}
		up_read(&space_info->groups_sem);

		if (!do_finish || !need_finish)
			break;

		ret = btrfs_zone_finish_one_bg(fs_info);
		if (ret == 0)
			break;
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * Reserve zones for one metadata block group, one tree-log block group, and
 * one system block group.
 */
void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_block_group *block_group;
	struct btrfs_device *device;
	/* Reserve zones for normal SINGLE metadata and tree-log block group. */
	unsigned int metadata_reserve = 2;
	/* Reserve a zone for SINGLE system block group. */
	unsigned int system_reserve = 1;

	if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
		return;

	/*
	 * This function is called from the mount context, so no other process
	 * is touching these bits. There is no need for read_seqretry().
	 */
	if (fs_info->avail_metadata_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
		metadata_reserve = 4;
	if (fs_info->avail_system_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
		system_reserve = 2;

	/* Apply the reservation on all the devices. */
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;

		device->zone_info->reserved_active_zones =
			metadata_reserve + system_reserve;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	/* Release reservation for currently active block groups. */
	spin_lock(&fs_info->zone_active_bgs_lock);
	list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
		struct btrfs_chunk_map *map = block_group->physical_map;

		if (!(block_group->flags &
		      (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
			continue;

		for (int i = 0; i < map->num_stripes; i++)
			map->stripes[i].dev->zone_info->reserved_active_zones--;
	}
	spin_unlock(&fs_info->zone_active_bgs_lock);
}