// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/atomic.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
#include "disk-io.h"
#include "block-group.h"
#include "dev-replace.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "bio.h"
#include "transaction.h"
#include "sysfs.h"

/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES		4096
/* Invalid allocation pointer value for missing devices */
#define WP_MISSING_DEV			((u64)-1)
/* Pseudo write pointer value for conventional zone */
#define WP_CONVENTIONAL			((u64)-2)

/*
 * Location of the first zone of superblock logging zone pairs.
 *
 * - primary superblock: 0B (zone 0)
 * - first copy: 512G (zone starting at that offset)
 * - second copy: 4T (zone starting at that offset)
 */
#define BTRFS_SB_LOG_PRIMARY_OFFSET	(0ULL)
#define BTRFS_SB_LOG_FIRST_OFFSET	(512ULL * SZ_1G)
#define BTRFS_SB_LOG_SECOND_OFFSET	(4096ULL * SZ_1G)

#define BTRFS_SB_LOG_FIRST_SHIFT	ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
#define BTRFS_SB_LOG_SECOND_SHIFT	ilog2(BTRFS_SB_LOG_SECOND_OFFSET)

/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES		2

/* Default number of max active zones when the device has no limits. */
#define BTRFS_DEFAULT_MAX_ACTIVE_ZONES	128

/*
 * Minimum number of active zones we need:
 *
 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
 * - 1 zone for tree-log dedicated block group
 * - 1 zone for relocation
 */
#define BTRFS_MIN_ACTIVE_ZONES		(BTRFS_SUPER_MIRROR_MAX + 5)

/*
 * Minimum / maximum supported zone size. Currently, SMR disks have a zone
 * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
 * We do not expect the zone size to become larger than 8GiB or smaller than
 * 4MiB in the near future.
 */
#define BTRFS_MAX_ZONE_SIZE		SZ_8G
#define BTRFS_MIN_ZONE_SIZE		SZ_4M

#define SUPER_INFO_SECTORS	((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)

static void wait_eb_writebacks(struct btrfs_block_group *block_group);
static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written);

static inline bool sb_zone_is_full(const struct blk_zone *zone)
{
	return (zone->cond == BLK_ZONE_COND_FULL) ||
		(zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
}

static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct blk_zone *zones = data;

	memcpy(&zones[idx], zone, sizeof(*zone));

	return 0;
}

static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
			    u64 *wp_ret)
{
	bool empty[BTRFS_NR_SB_LOG_ZONES];
	bool full[BTRFS_NR_SB_LOG_ZONES];
	sector_t sector;

	for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL,
		       "zones[%d].type=%d", i, zones[i].type);
		empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
		full[i] = sb_zone_is_full(&zones[i]);
	}

	/*
	 * Possible states of log buffer zones
	 *
	 *           Empty[0]  In use[0]  Full[0]
	 * Empty[1]         *          0        1
	 * In use[1]        x          x        1
	 * Full[1]          0          0        C
	 *
	 * Log position:
	 *   *: Special case, no superblock is written
	 *   0: Use write pointer of zones[0]
	 *   1: Use write pointer of zones[1]
	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
	 *      one determined by generation
	 *   x: Invalid state
	 */

	if (empty[0] && empty[1]) {
		/* Special case to distinguish no superblock to read */
		*wp_ret = zones[0].start << SECTOR_SHIFT;
		return -ENOENT;
	} else if (full[0] && full[1]) {
		/* Compare two super blocks */
		struct address_space *mapping = bdev->bd_mapping;
		struct page *page[BTRFS_NR_SB_LOG_ZONES];
		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];

		for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
			u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
			u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
				     BTRFS_SUPER_INFO_SIZE;

			page[i] = read_cache_page_gfp(mapping,
						      bytenr >> PAGE_SHIFT, GFP_NOFS);
			if (IS_ERR(page[i])) {
				if (i == 1)
					btrfs_release_disk_super(super[0]);
				return PTR_ERR(page[i]);
			}
			super[i] = page_address(page[i]);
		}

		if (btrfs_super_generation(super[0]) >
		    btrfs_super_generation(super[1]))
			sector = zones[1].start;
		else
			sector = zones[0].start;

		for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
			btrfs_release_disk_super(super[i]);
	} else if (!full[0] && (empty[1] || full[1])) {
		sector = zones[0].wp;
	} else if (full[0]) {
		sector = zones[1].wp;
	} else {
		return -EUCLEAN;
	}
	*wp_ret = sector << SECTOR_SHIFT;
	return 0;
}
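
/*
 * Worked example of the state table above (illustrative, not from a real
 * trace): if zone 0 is FULL and zone 1 is partially written ("In use"), we
 * are in the full[0] row and the next superblock goes to the write pointer
 * of zones[1]. Once zone 1 also fills up, both are FULL (the "C" case): the
 * generations of the two last-written superblocks are compared and the zone
 * holding the *older* copy is returned, so that the caller can reset and
 * reuse it.
 */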

/*
 * Get the first zone number of the superblock mirror
 */
static inline u32 sb_zone_number(int shift, int mirror)
{
	u64 zone = U64_MAX;

	ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX, "mirror=%d", mirror);
	switch (mirror) {
	case 0: zone = 0; break;
	case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
	case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
	}

	ASSERT(zone <= U32_MAX, "zone=%llu", zone);

	return (u32)zone;
}
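
/*
 * Example (hypothetical device): with a 256MiB zone size, shift is 28.
 * Mirror 1 lives at 512G = 2^39, so its first log zone is
 * 1 << (39 - 28) = zone 2048; mirror 2 lives at 4T = 2^42, giving zone
 * 1 << (42 - 28) = 16384. With BTRFS_NR_SB_LOG_ZONES pairing, zones
 * 2048/2049 and 16384/16385 end up reserved for those mirrors.
 */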

static inline sector_t zone_start_sector(u32 zone_number,
					 struct block_device *bdev)
{
	return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
}

static inline u64 zone_start_physical(u32 zone_number,
				      struct btrfs_zoned_device_info *zone_info)
{
	return (u64)zone_number << zone_info->zone_size_shift;
}

/*
 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the
 * block device into static-sized chunks and fakes a conventional zone on
 * each of them.
 */
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
				struct blk_zone *zones, unsigned int nr_zones)
{
	const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
	sector_t bdev_size = bdev_nr_sectors(device->bdev);
	unsigned int i;

	pos >>= SECTOR_SHIFT;
	for (i = 0; i < nr_zones; i++) {
		zones[i].start = i * zone_sectors + pos;
		zones[i].len = zone_sectors;
		zones[i].capacity = zone_sectors;
		zones[i].wp = zones[i].start + zone_sectors;
		zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
		zones[i].cond = BLK_ZONE_COND_NOT_WP;

		if (zones[i].wp >= bdev_size) {
			i++;
			break;
		}
	}

	return i;
}
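
/*
 * Sketch of the emulation (assuming a hypothetical 1GiB regular device and
 * a 256MiB emulated zone size): starting from pos 0, the device is reported
 * as four conventional zones at sectors 0, 0x80000, 0x100000 and 0x180000,
 * each with cond == NOT_WP and the pseudo write pointer parked at the zone
 * end. The early break above stops reporting once a zone reaches the device
 * end, so nothing beyond the device is ever reported.
 */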

static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	int ret;

	if (!*nr_zones)
		return 0;

	if (!bdev_is_zoned(device->bdev)) {
		ret = emulate_report_zones(device, pos, zones, *nr_zones);
		*nr_zones = ret;
		return 0;
	}

	/* Check cache */
	if (zinfo->zone_cache) {
		unsigned int i;
		u32 zno;

		ASSERT(IS_ALIGNED(pos, zinfo->zone_size),
		       "pos=%llu zinfo->zone_size=%llu", pos, zinfo->zone_size);
		zno = pos >> zinfo->zone_size_shift;
		/*
		 * We cannot report zones beyond the zone end. So it is OK to
		 * cap *nr_zones at the end.
		 */
		*nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);

		for (i = 0; i < *nr_zones; i++) {
			struct blk_zone *zone_info;

			zone_info = &zinfo->zone_cache[zno + i];
			if (!zone_info->len)
				break;
		}

		if (i == *nr_zones) {
			/* Cache hit on all the zones */
			memcpy(zones, zinfo->zone_cache + zno,
			       sizeof(*zinfo->zone_cache) * *nr_zones);
			return 0;
		}
	}

	ret = blkdev_report_zones_cached(device->bdev, pos >> SECTOR_SHIFT,
					 *nr_zones, copy_zone_info_cb, zones);
	if (ret < 0) {
		btrfs_err(device->fs_info,
			  "zoned: failed to read zone %llu on %s (devid %llu)",
			  pos, rcu_dereference(device->name),
			  device->devid);
		return ret;
	}
	*nr_zones = ret;
	if (unlikely(!ret))
		return -EIO;

	/* Populate cache */
	if (zinfo->zone_cache) {
		u32 zno = pos >> zinfo->zone_size_shift;

		memcpy(zinfo->zone_cache + zno, zones,
		       sizeof(*zinfo->zone_cache) * *nr_zones);
	}

	return 0;
}
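
/*
 * A minimal usage sketch (hypothetical caller): report a single zone at byte
 * offset pos and fall back to an error if the device returned none.
 *
 *	struct blk_zone zone;
 *	unsigned int nr = 1;
 *	int ret = btrfs_get_dev_zones(device, pos, &zone, &nr);
 *	if (ret || nr != 1)
 *		return ret ? ret : -EIO;
 *
 * This is what btrfs_get_dev_zone() below wraps. Repeated reports for the
 * same zones are served from zinfo->zone_cache once it is populated.
 */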

/* The emulated zone size is determined from the size of a device extent. */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_dev_extent *dext;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			return ret;
		/* No dev extents at all? Not good */
		if (unlikely(ret > 0))
			return -EUCLEAN;
	}

	leaf = path->nodes[0];
	dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
	fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
	return 0;
}

int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* We can skip reading zone info for missing devices */
		if (!device->bdev)
			continue;

		ret = btrfs_get_dev_zone_info(device, true);
		if (ret)
			break;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_zoned_device_info *zone_info = NULL;
	struct block_device *bdev = device->bdev;
	unsigned int max_active_zones;
	unsigned int nactive;
	sector_t nr_sectors;
	sector_t sector = 0;
	struct blk_zone *zones = NULL;
	unsigned int i, nreported = 0, nr_zones;
	sector_t zone_sectors;
	char *model, *emulated;
	int ret;

	/*
	 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
	 * yet be set.
	 */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	if (device->zone_info)
		return 0;

	zone_info = kzalloc_obj(*zone_info);
	if (!zone_info)
		return -ENOMEM;

	device->zone_info = zone_info;

	if (!bdev_is_zoned(bdev)) {
		if (!fs_info->zone_size) {
			ret = calculate_emulated_zone_size(fs_info);
			if (ret)
				goto out;
		}

		ASSERT(fs_info->zone_size);
		zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
	} else {
		zone_sectors = bdev_zone_sectors(bdev);
	}

	ASSERT(is_power_of_two_u64(zone_sectors));
	zone_info->zone_size = zone_sectors << SECTOR_SHIFT;

	/* We reject devices with a zone size larger than 8GiB */
	if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
		btrfs_err(fs_info,
			"zoned: %s: zone size %llu larger than supported maximum %llu",
			rcu_dereference(device->name),
			zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
		ret = -EINVAL;
		goto out;
	} else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
		btrfs_err(fs_info,
			"zoned: %s: zone size %llu smaller than supported minimum %u",
			rcu_dereference(device->name),
			zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	nr_sectors = bdev_nr_sectors(bdev);
	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
	if (!IS_ALIGNED(nr_sectors, zone_sectors))
		zone_info->nr_zones++;

	max_active_zones = min_not_zero(bdev_max_active_zones(bdev),
					bdev_max_open_zones(bdev));
	if (!max_active_zones && zone_info->nr_zones > BTRFS_DEFAULT_MAX_ACTIVE_ZONES)
		max_active_zones = BTRFS_DEFAULT_MAX_ACTIVE_ZONES;
	if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
		btrfs_err(fs_info,
"zoned: %s: max active zones %u is too small, need at least %u active zones",
			  rcu_dereference(device->name), max_active_zones,
			  BTRFS_MIN_ACTIVE_ZONES);
		ret = -EINVAL;
		goto out;
	}
	zone_info->max_active_zones = max_active_zones;

	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->active_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zones = kvzalloc_objs(struct blk_zone, BTRFS_REPORT_NR_ZONES);
	if (!zones) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Enable zone cache only for a zoned device. On a non-zoned device, we
	 * fill the zone info with emulated CONVENTIONAL zones, so no need to
	 * use the cache.
	 */
	if (populate_cache && bdev_is_zoned(device->bdev)) {
		zone_info->zone_cache = vcalloc(zone_info->nr_zones,
						sizeof(struct blk_zone));
		if (!zone_info->zone_cache) {
			btrfs_err(device->fs_info,
				  "zoned: failed to allocate zone cache for %s",
				  rcu_dereference(device->name));
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Get zone types */
	nactive = 0;
	while (sector < nr_sectors) {
		nr_zones = BTRFS_REPORT_NR_ZONES;
		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
					  &nr_zones);
		if (ret)
			goto out;

		for (i = 0; i < nr_zones; i++) {
			if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
				__set_bit(nreported, zone_info->seq_zones);
			switch (zones[i].cond) {
			case BLK_ZONE_COND_EMPTY:
				__set_bit(nreported, zone_info->empty_zones);
				break;
			case BLK_ZONE_COND_IMP_OPEN:
			case BLK_ZONE_COND_EXP_OPEN:
			case BLK_ZONE_COND_CLOSED:
			case BLK_ZONE_COND_ACTIVE:
				__set_bit(nreported, zone_info->active_zones);
				nactive++;
				break;
			}
			nreported++;
		}
		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
	}

	if (unlikely(nreported != zone_info->nr_zones)) {
		btrfs_err(device->fs_info,
			  "inconsistent number of zones on %s (%u/%u)",
			  rcu_dereference(device->name), nreported,
			  zone_info->nr_zones);
		ret = -EIO;
		goto out;
	}

	if (max_active_zones) {
		if (unlikely(nactive > max_active_zones)) {
			if (bdev_max_active_zones(bdev) == 0) {
				max_active_zones = 0;
				zone_info->max_active_zones = 0;
				goto validate;
			}
			btrfs_err(device->fs_info,
			"zoned: %u active zones on %s exceeds max_active_zones %u",
				  nactive, rcu_dereference(device->name),
				  max_active_zones);
			ret = -EIO;
			goto out;
		}
		atomic_set(&zone_info->active_zones_left,
			   max_active_zones - nactive);
		set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
	}

validate:
	/* Validate superblock log */
	nr_zones = BTRFS_NR_SB_LOG_ZONES;
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		u32 sb_zone;
		u64 sb_wp;
		int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;

		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
		if (sb_zone + 1 >= zone_info->nr_zones)
			continue;

		ret = btrfs_get_dev_zones(device,
					  zone_start_physical(sb_zone, zone_info),
					  &zone_info->sb_zones[sb_pos],
					  &nr_zones);
		if (ret)
			goto out;

		if (unlikely(nr_zones != BTRFS_NR_SB_LOG_ZONES)) {
			btrfs_err(device->fs_info,
	"zoned: failed to read super block log zone info at devid %llu zone %u",
				  device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}

		/*
		 * If zones[0] is conventional, always use the beginning of the
		 * zone to record superblock. No need to validate in that case.
		 */
		if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
		    BLK_ZONE_TYPE_CONVENTIONAL)
			continue;

		ret = sb_write_pointer(device->bdev,
				       &zone_info->sb_zones[sb_pos], &sb_wp);
		if (unlikely(ret != -ENOENT && ret)) {
			btrfs_err(device->fs_info,
			"zoned: super block log zone corrupted devid %llu zone %u",
				  device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}
	}

	kvfree(zones);

	if (bdev_is_zoned(bdev)) {
		model = "host-managed zoned";
		emulated = "";
	} else {
		model = "regular";
		emulated = "emulated ";
	}

	btrfs_info(fs_info,
		   "%s block device %s, %u %szones of %llu bytes",
		   model, rcu_dereference(device->name), zone_info->nr_zones,
		   emulated, zone_info->zone_size);

	return 0;

out:
	kvfree(zones);
	btrfs_destroy_dev_zone_info(device);
	return ret;
}

void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return;

	bitmap_free(zone_info->active_zones);
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	vfree(zone_info->zone_cache);
	kfree(zone_info);
	device->zone_info = NULL;
}

struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
{
	struct btrfs_zoned_device_info *zone_info;

	zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
	if (!zone_info)
		return NULL;

	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones)
		goto out;

	bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
		    zone_info->nr_zones);

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones)
		goto out;

	bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
		    zone_info->nr_zones);

	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->active_zones)
		goto out;

	bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
		    zone_info->nr_zones);
	zone_info->zone_cache = NULL;

	return zone_info;

out:
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	bitmap_free(zone_info->active_zones);
	kfree(zone_info);
	return NULL;
}

static int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos, struct blk_zone *zone)
{
	unsigned int nr_zones = 1;
	int ret;

	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
	if (ret != 0 || !nr_zones)
		return ret ? ret : -EIO;

	return 0;
}

static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;

	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
		if (device->bdev && bdev_is_zoned(device->bdev)) {
			btrfs_err(fs_info,
				  "zoned: mode not enabled but zoned device found: %pg",
				  device->bdev);
			return -EINVAL;
		}
	}

	return 0;
}

int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{
	struct queue_limits *lim = &fs_info->limits;
	struct btrfs_device *device;
	u64 zone_size = 0;
	int ret;

	/*
	 * Host-managed devices can't be used without the ZONED flag. With the
	 * ZONED flag, all devices can be used, using zone emulation if
	 * required.
	 */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return btrfs_check_for_zoned_device(fs_info);

	blk_set_stacking_limits(lim);

	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
		struct btrfs_zoned_device_info *zone_info = device->zone_info;

		if (!device->bdev)
			continue;

		if (!zone_size) {
			zone_size = zone_info->zone_size;
		} else if (zone_info->zone_size != zone_size) {
			btrfs_err(fs_info,
		"zoned: unequal block device zone sizes: have %llu found %llu",
				  zone_info->zone_size, zone_size);
			return -EINVAL;
		}

		/*
		 * With the zoned emulation, we can have a non-zoned device in
		 * zoned mode. In this case, we don't have a valid max zone
		 * append size.
		 */
		if (bdev_is_zoned(device->bdev))
			blk_stack_limits(lim, bdev_limits(device->bdev), 0);
	}

	ret = blk_validate_limits(lim);
	if (ret) {
		btrfs_err(fs_info, "zoned: failed to validate queue limits");
		return ret;
	}

	/*
	 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
	 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
	 * check the alignment here.
	 */
	if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
		btrfs_err(fs_info,
			  "zoned: zone size %llu not aligned to stripe %u",
			  zone_size, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		btrfs_err(fs_info, "zoned: mixed block groups not supported");
		return -EINVAL;
	}

	fs_info->zone_size = zone_size;
	/*
	 * Also limit max_zone_append_size by max_segments * PAGE_SIZE.
	 * Technically, we can have multiple pages per segment. But, since
	 * we add the pages one by one to a bio, and cannot increase the
	 * metadata reservation even if it increases the number of extents, it
	 * is safe to stick with the limit.
	 */
	fs_info->max_zone_append_size = ALIGN_DOWN(
		min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT,
		     (u64)lim->max_sectors << SECTOR_SHIFT,
		     (u64)lim->max_segments << PAGE_SHIFT),
		fs_info->sectorsize);
	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;

	fs_info->max_extent_size = min_not_zero(fs_info->max_extent_size,
						fs_info->max_zone_append_size);

	/*
	 * Check mount options here, because we might change fs_info->zoned
	 * from fs_info->zone_size.
	 */
	ret = btrfs_check_mountopts_zoned(fs_info, &fs_info->mount_opt);
	if (ret)
		return ret;

	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
	return 0;
}

int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info,
				unsigned long long *mount_opt)
{
	if (!btrfs_is_zoned(info))
		return 0;

	/*
	 * Space cache writing is not COWed. Disable that to avoid write errors
	 * in sequential zones.
	 */
	if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
		btrfs_err(info, "zoned: space cache v1 is not supported");
		return -EINVAL;
	}

	if (btrfs_raw_test_opt(*mount_opt, NODATACOW)) {
		btrfs_err(info, "zoned: NODATACOW not supported");
		return -EINVAL;
	}

	if (btrfs_raw_test_opt(*mount_opt, DISCARD_ASYNC)) {
		btrfs_info(info,
			   "zoned: async discard ignored and disabled for zoned mode");
		btrfs_clear_opt(*mount_opt, DISCARD_ASYNC);
	}

	return 0;
}

static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
			   int rw, u64 *bytenr_ret)
{
	u64 wp;
	int ret;

	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
		return 0;
	}

	ret = sb_write_pointer(bdev, zones, &wp);
	if (ret != -ENOENT && ret < 0)
		return ret;

	if (rw == WRITE) {
		struct blk_zone *reset = NULL;

		if (wp == zones[0].start << SECTOR_SHIFT)
			reset = &zones[0];
		else if (wp == zones[1].start << SECTOR_SHIFT)
			reset = &zones[1];

		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
			unsigned int nofs_flags;

			ASSERT(sb_zone_is_full(reset));

			nofs_flags = memalloc_nofs_save();
			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					       reset->start, reset->len);
			memalloc_nofs_restore(nofs_flags);
			if (ret)
				return ret;

			reset->cond = BLK_ZONE_COND_EMPTY;
			reset->wp = reset->start;
		}
	} else if (ret != -ENOENT) {
		/*
		 * For READ, we want the previous one. Move write pointer to
		 * the end of a zone, if it is at the head of a zone.
		 */
		u64 zone_end = 0;

		if (wp == zones[0].start << SECTOR_SHIFT)
			zone_end = zones[1].start + zones[1].capacity;
		else if (wp == zones[1].start << SECTOR_SHIFT)
			zone_end = zones[0].start + zones[0].capacity;
		if (zone_end)
			wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
					BTRFS_SUPER_INFO_SIZE);

		wp -= BTRFS_SUPER_INFO_SIZE;
	}

	*bytenr_ret = wp;
	return 0;
}

int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret)
{
	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
	sector_t zone_sectors;
	u32 sb_zone;
	int ret;
	u8 zone_sectors_shift;
	sector_t nr_sectors;
	u32 nr_zones;

	if (!bdev_is_zoned(bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	ASSERT(rw == READ || rw == WRITE);

	zone_sectors = bdev_zone_sectors(bdev);
	if (!is_power_of_2(zone_sectors))
		return -EINVAL;
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	ret = blkdev_report_zones_cached(bdev, zone_start_sector(sb_zone, bdev),
					 BTRFS_NR_SB_LOG_ZONES,
					 copy_zone_info_cb, zones);
	if (ret < 0)
		return ret;
	if (unlikely(ret != BTRFS_NR_SB_LOG_ZONES))
		return -EIO;

	return sb_log_location(bdev, zones, rw, bytenr_ret);
}

int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	u32 zone_num;

	/*
	 * For a zoned filesystem on a non-zoned block device, use the same
	 * super block locations as a regular filesystem. Doing so, the super
	 * block can always be retrieved and the zoned flag of the volume
	 * detected from the super block information.
	 */
	if (!bdev_is_zoned(device->bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return -ENOENT;

	return sb_log_location(device->bdev,
			       &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
			       rw, bytenr_ret);
}

static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
				  int mirror)
{
	u32 zone_num;

	if (!zinfo)
		return false;

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return false;

	if (!test_bit(zone_num, zinfo->seq_zones))
		return false;

	return true;
}

int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	struct blk_zone *zone;
	int i;

	if (!is_sb_log_zone(zinfo, mirror))
		return 0;

	zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		/* Advance the next zone */
		if (zone->cond == BLK_ZONE_COND_FULL) {
			zone++;
			continue;
		}

		if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		zone->wp += SUPER_INFO_SECTORS;

		if (sb_zone_is_full(zone)) {
			/*
			 * No room left to write new superblock. Since
			 * superblock is written with REQ_SYNC, it is safe to
			 * finish the zone now.
			 *
			 * If the write pointer is exactly at the capacity,
			 * explicit ZONE_FINISH is not necessary.
			 */
			if (zone->wp != zone->start + zone->capacity) {
				unsigned int nofs_flags;
				int ret;

				nofs_flags = memalloc_nofs_save();
				ret = blkdev_zone_mgmt(device->bdev,
						REQ_OP_ZONE_FINISH, zone->start,
						zone->len);
				memalloc_nofs_restore(nofs_flags);
				if (ret)
					return ret;
			}

			zone->wp = zone->start + zone->len;
			zone->cond = BLK_ZONE_COND_FULL;
		}
		return 0;
	}

	/* All the zones are FULL. Should not reach here. */
	DEBUG_WARN("unexpected state, all zones full");
	return -EIO;
}
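
/*
 * Example walk-through (illustrative numbers): with 4KiB superblocks,
 * SUPER_INFO_SECTORS is 8, so every successful superblock write advances the
 * cached write pointer of the current log zone by 8 sectors. Once fewer than
 * 8 sectors of capacity remain, the zone is finished (explicitly, unless the
 * write pointer already sits exactly at start + capacity), marked FULL, and
 * the next write proceeds in the second zone of the pair.
 */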

int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
	unsigned int nofs_flags;
	sector_t zone_sectors;
	sector_t nr_sectors;
	u8 zone_sectors_shift;
	u32 sb_zone;
	u32 nr_zones;
	int ret;

	zone_sectors = bdev_zone_sectors(bdev);
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	nofs_flags = memalloc_nofs_save();
	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
			       zone_start_sector(sb_zone, bdev),
			       zone_sectors * BTRFS_NR_SB_LOG_ZONES);
	memalloc_nofs_restore(nofs_flags);
	return ret;
}

/*
 * Find allocatable zones within a given region.
 *
 * @device:	the device to allocate a region on
 * @hole_start:	the position of the hole to allocate the region
 * @num_bytes:	size of wanted region
 * @hole_end:	the end of the hole
 * @return:	position of allocatable zones
 *
 * Allocatable region should not contain any superblock locations.
 */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	u64 nzones = num_bytes >> shift;
	u64 pos = hole_start;
	u64 begin, end;
	bool have_sb;
	int i;

	ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size),
	       "hole_start=%llu zinfo->zone_size=%llu", hole_start, zinfo->zone_size);
	ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size),
	       "num_bytes=%llu zinfo->zone_size=%llu", num_bytes, zinfo->zone_size);

	while (pos < hole_end) {
		begin = pos >> shift;
		end = begin + nzones;

		if (end > zinfo->nr_zones)
			return hole_end;

		/* Check if zones in the region are all empty */
		if (btrfs_dev_is_sequential(device, pos) &&
		    !bitmap_test_range_all_set(zinfo->empty_zones, begin, nzones)) {
			pos += zinfo->zone_size;
			continue;
		}

		have_sb = false;
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			u32 sb_zone;
			u64 sb_pos;

			sb_zone = sb_zone_number(shift, i);
			if (!(end <= sb_zone ||
			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
				have_sb = true;
				pos = zone_start_physical(
					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
				break;
			}

			/* We also need to exclude regular superblock positions */
			sb_pos = btrfs_sb_offset(i);
			if (!(pos + num_bytes <= sb_pos ||
			      sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
				have_sb = true;
				pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
					    zinfo->zone_size);
				break;
			}
		}
		if (!have_sb)
			break;
	}

	return pos;
}
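
/*
 * Example of the superblock exclusion (hypothetical 256MiB zones): mirror 1
 * occupies zones 2048 and 2049 (see sb_zone_number()). A candidate region
 * overlapping either zone is rejected and the search restarts at the first
 * byte of zone 2050, i.e. zone_start_physical(2050, zinfo). The regular
 * (non-zoned) superblock offsets are excluded the same way, with the
 * restart position rounded up to the next zone boundary.
 */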

static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return true;

	if (!test_bit(zno, zone_info->active_zones)) {
		/* Active zone left? */
		if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
			return false;
		if (test_and_set_bit(zno, zone_info->active_zones)) {
			/* Someone already set the bit */
			atomic_inc(&zone_info->active_zones_left);
		}
	}

	return true;
}
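
/*
 * The accounting above is lock-free: atomic_dec_if_positive() reserves one
 * slot from active_zones_left, and the subsequent test_and_set_bit() detects
 * a racing winner, in which case the reservation is handed back with
 * atomic_inc(). Losing the race is not an error; the zone simply ends up
 * counted as active exactly once.
 */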

static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return;

	if (test_and_clear_bit(zno, zone_info->active_zones))
		atomic_inc(&zone_info->active_zones_left);
}

int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes)
{
	unsigned int nofs_flags;
	int ret;

	*bytes = 0;
	nofs_flags = memalloc_nofs_save();
	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
			       physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT);
	memalloc_nofs_restore(nofs_flags);
	if (ret)
		return ret;

	*bytes = length;
	while (length) {
		btrfs_dev_set_zone_empty(device, physical);
		btrfs_dev_clear_active_zone(device, physical);
		physical += device->zone_info->zone_size;
		length -= device->zone_info->zone_size;
	}

	return 0;
}

int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	unsigned long begin = start >> shift;
	unsigned long nbits = size >> shift;
	u64 pos;
	int ret;

	ASSERT(IS_ALIGNED(start, zinfo->zone_size),
	       "start=%llu, zinfo->zone_size=%llu", start, zinfo->zone_size);
	ASSERT(IS_ALIGNED(size, zinfo->zone_size),
	       "size=%llu, zinfo->zone_size=%llu", size, zinfo->zone_size);

	if (begin + nbits > zinfo->nr_zones)
		return -ERANGE;

	/* All the zones are conventional */
	if (bitmap_test_range_all_zero(zinfo->seq_zones, begin, nbits))
		return 0;

	/* All the zones are sequential and empty */
	if (bitmap_test_range_all_set(zinfo->seq_zones, begin, nbits) &&
	    bitmap_test_range_all_set(zinfo->empty_zones, begin, nbits))
		return 0;

	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
		u64 reset_bytes;

		if (!btrfs_dev_is_sequential(device, pos) ||
		    btrfs_dev_is_empty_zone(device, pos))
			continue;

		/* Free regions should be empty */
		btrfs_warn(
			device->fs_info,
		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
			rcu_dereference(device->name), device->devid, pos >> shift);
		WARN_ON_ONCE(1);

		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
					      &reset_bytes);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consisting of conventional zones. It points to the
 * end of the highest-addressed extent in the block group as an allocation
 * offset.
 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
				   u64 *offset_ret, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_root *root;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	struct btrfs_key found_key;
	const u64 bg_end = btrfs_block_group_end(cache);
	int ret;
	u64 length;

	/*
	 * Avoid tree lookups for a new block group, there's no use for it.
	 * It must always be 0.
	 *
	 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
	 * For a new block group, this function is called from
	 * btrfs_make_block_group() which is already taking the chunk mutex.
	 * Thus, we cannot call calculate_alloc_pointer() which takes extent
	 * buffer locks to avoid deadlock.
	 */
	if (new) {
		*offset_ret = 0;
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bg_end;
	key.type = 0;
	key.offset = 0;

	root = btrfs_extent_root(fs_info, key.objectid);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* We should not find the exact match */
	if (unlikely(!ret))
		ret = -EUCLEAN;
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(root, path, cache->start);
	if (ret) {
		if (ret == 1) {
			ret = 0;
			*offset_ret = 0;
		}
		return ret;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
		length = found_key.offset;
	else
		length = fs_info->nodesize;

	if (unlikely(!(found_key.objectid >= cache->start &&
		       found_key.objectid + length <= bg_end))) {
		return -EUCLEAN;
	}
	*offset_ret = found_key.objectid + length - cache->start;
	return 0;
}
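
/*
 * Example (illustrative): if the highest-addressed extent item in the block
 * group is (objectid = cache->start + 60M, EXTENT_ITEM, offset = 4M), the
 * allocation pointer becomes 60M + 4M relative to the group start, i.e. 64M.
 * For a metadata item the length is fs_info->nodesize instead of key.offset.
 */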

struct zone_info {
	u64 physical;
	u64 capacity;
	u64 alloc_offset;
};

static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
				struct zone_info *info, unsigned long *active,
				struct btrfs_chunk_map *map, bool new)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct btrfs_device *device;
	int dev_replace_is_ongoing = 0;
	unsigned int nofs_flag;
	struct blk_zone zone;
	int ret;

	info->physical = map->stripes[zone_idx].physical;

	down_read(&dev_replace->rwsem);
	device = map->stripes[zone_idx].dev;

	if (!device->bdev) {
		up_read(&dev_replace->rwsem);
		info->alloc_offset = WP_MISSING_DEV;
		return 0;
	}

	/* Consider a zone as active if we can allow any number of active zones. */
	if (!device->zone_info->max_active_zones)
		__set_bit(zone_idx, active);

	if (!btrfs_dev_is_sequential(device, info->physical)) {
		up_read(&dev_replace->rwsem);
		info->alloc_offset = WP_CONVENTIONAL;
		info->capacity = device->zone_info->zone_size;
		return 0;
	}

	ASSERT(!new || btrfs_dev_is_empty_zone(device, info->physical));

	/* This zone will be used for allocation, so mark this zone non-empty. */
	btrfs_dev_clear_zone_empty(device, info->physical);

	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
		btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);

	/*
	 * The group is mapped to a sequential zone. Get the zone write pointer
	 * to determine the allocation offset within the zone.
	 */
	WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));

	if (new) {
		sector_t capacity;

		capacity = bdev_zone_capacity(device->bdev, info->physical >> SECTOR_SHIFT);
		up_read(&dev_replace->rwsem);
		info->alloc_offset = 0;
		info->capacity = capacity << SECTOR_SHIFT;

		return 0;
	}

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_get_dev_zone(device, info->physical, &zone);
	memalloc_nofs_restore(nofs_flag);
	if (ret) {
		up_read(&dev_replace->rwsem);
		if (ret != -EIO && ret != -EOPNOTSUPP)
			return ret;
		info->alloc_offset = WP_MISSING_DEV;
		return 0;
	}

	if (unlikely(zone.type == BLK_ZONE_TYPE_CONVENTIONAL)) {
		btrfs_err(fs_info,
		"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
			  zone.start << SECTOR_SHIFT, rcu_dereference(device->name),
			  device->devid);
		up_read(&dev_replace->rwsem);
		return -EIO;
	}

	info->capacity = (zone.capacity << SECTOR_SHIFT);

	switch (zone.cond) {
	case BLK_ZONE_COND_OFFLINE:
	case BLK_ZONE_COND_READONLY:
		btrfs_err(fs_info,
		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
			  (info->physical >> device->zone_info->zone_size_shift),
			  rcu_dereference(device->name), device->devid);
		info->alloc_offset = WP_MISSING_DEV;
		break;
	case BLK_ZONE_COND_EMPTY:
		info->alloc_offset = 0;
		break;
	case BLK_ZONE_COND_FULL:
		info->alloc_offset = info->capacity;
		break;
	default:
		/* Partially used zone. */
		info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
		__set_bit(zone_idx, active);
		break;
	}

	up_read(&dev_replace->rwsem);

	return 0;
}
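
/*
 * Summary of the mapping established above:
 *
 *   zone condition                    info->alloc_offset
 *   EMPTY                             0
 *   FULL                              info->capacity
 *   OFFLINE/READONLY                  WP_MISSING_DEV
 *   anything else (partially used)    zone.wp - zone.start (also marked active)
 *
 * Conventional zones and missing devices are handled earlier and never reach
 * the switch statement.
 */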

static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
					 struct zone_info *info,
					 unsigned long *active)
{
	if (unlikely(info->alloc_offset == WP_MISSING_DEV)) {
		btrfs_err(bg->fs_info,
			  "zoned: cannot recover write pointer for zone %llu",
			  info->physical);
		return -EIO;
	}

	bg->alloc_offset = info->alloc_offset;
	bg->zone_capacity = info->capacity;
	if (test_bit(0, active))
		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
	return 0;
}

static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
				      struct btrfs_chunk_map *map,
				      struct zone_info *zone_info,
				      unsigned long *active,
				      u64 last_alloc)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
		btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree");
		return -EINVAL;
	}

	bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);

	if (unlikely(zone_info[0].alloc_offset == WP_MISSING_DEV)) {
		btrfs_err(fs_info,
			  "zoned: cannot recover write pointer for zone %llu",
			  zone_info[0].physical);
		return -EIO;
	}
	if (unlikely(zone_info[1].alloc_offset == WP_MISSING_DEV)) {
		btrfs_err(fs_info,
			  "zoned: cannot recover write pointer for zone %llu",
			  zone_info[1].physical);
		return -EIO;
	}

	/*
	 * When the last extent is removed, last_alloc can be smaller than the
	 * other write pointer. In that case, last_alloc should be moved to the
	 * corresponding write pointer position.
	 */
	for (int i = 0; i < map->num_stripes; i++) {
		if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
			continue;
		if (last_alloc <= zone_info[i].alloc_offset) {
			last_alloc = zone_info[i].alloc_offset;
			break;
		}
	}

	if (zone_info[0].alloc_offset == WP_CONVENTIONAL)
		zone_info[0].alloc_offset = last_alloc;

	if (zone_info[1].alloc_offset == WP_CONVENTIONAL)
		zone_info[1].alloc_offset = last_alloc;

	if (unlikely(zone_info[0].alloc_offset != zone_info[1].alloc_offset)) {
		btrfs_err(fs_info,
			  "zoned: write pointer offset mismatch of zones in DUP profile");
		return -EIO;
	}

	if (test_bit(0, active) != test_bit(1, active)) {
		if (unlikely(!btrfs_zone_activate(bg)))
			return -EIO;
	} else if (test_bit(0, active)) {
		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
	}

	bg->alloc_offset = zone_info[0].alloc_offset;
	return 0;
}

static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
					struct btrfs_chunk_map *map,
					struct zone_info *zone_info,
					unsigned long *active,
					u64 last_alloc)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	int i;

	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
			  btrfs_bg_type_to_raid_name(map->type));
		return -EINVAL;
	}

	/* In case a device is missing we have a cap of 0, so don't use it. */
	bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);

	/*
	 * When the last extent is removed, last_alloc can be smaller than the
	 * other write pointer. In that case, last_alloc should be moved to the
	 * corresponding write pointer position.
	 */
	for (i = 0; i < map->num_stripes; i++) {
		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
			continue;
		if (last_alloc <= zone_info[i].alloc_offset) {
			last_alloc = zone_info[i].alloc_offset;
			break;
		}
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (zone_info[i].alloc_offset == WP_MISSING_DEV)
			continue;

		if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
			zone_info[i].alloc_offset = last_alloc;

		if (unlikely((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
			     !btrfs_test_opt(fs_info, DEGRADED))) {
			btrfs_err(fs_info,
			"zoned: write pointer offset mismatch of zones in %s profile",
				  btrfs_bg_type_to_raid_name(map->type));
			return -EIO;
		}
		if (test_bit(0, active) != test_bit(i, active)) {
			if (unlikely(!btrfs_test_opt(fs_info, DEGRADED) &&
				     !btrfs_zone_activate(bg))) {
				return -EIO;
			}
		} else {
			if (test_bit(0, active))
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
		}
	}

	if (zone_info[0].alloc_offset != WP_MISSING_DEV)
		bg->alloc_offset = zone_info[0].alloc_offset;
	else
		bg->alloc_offset = zone_info[i - 1].alloc_offset;

	return 0;
}

static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
					struct btrfs_chunk_map *map,
					struct zone_info *zone_info,
					unsigned long *active,
					u64 last_alloc)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	u64 stripe_nr = 0, stripe_offset = 0;
	u64 prev_offset = 0;
	u32 stripe_index = 0;
	bool has_partial = false, has_conventional = false;

	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
			  btrfs_bg_type_to_raid_name(map->type));
		return -EINVAL;
	}

	/*
	 * When the last extent is removed, last_alloc can be smaller than the
	 * other write pointer. In that case, last_alloc should be moved to the
	 * corresponding write pointer position.
	 */
	for (int i = 0; i < map->num_stripes; i++) {
		u64 alloc;

		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
			continue;

		stripe_nr = zone_info[i].alloc_offset >> BTRFS_STRIPE_LEN_SHIFT;
		stripe_offset = zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK;
		if (stripe_offset == 0 && stripe_nr > 0) {
			stripe_nr--;
			stripe_offset = BTRFS_STRIPE_LEN;
		}
		alloc = ((stripe_nr * map->num_stripes + i) << BTRFS_STRIPE_LEN_SHIFT) +
			stripe_offset;
		last_alloc = max(last_alloc, alloc);

		/* Partially written stripe found. It should be last. */
		if (zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK)
			break;
	}
	stripe_nr = 0;
	stripe_offset = 0;

	if (last_alloc) {
		u32 factor = map->num_stripes;

		stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
		stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
	}

	for (int i = 0; i < map->num_stripes; i++) {
		if (zone_info[i].alloc_offset == WP_MISSING_DEV)
			continue;

		if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
			has_conventional = true;
			zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);

			if (stripe_index > i)
				zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
			else if (stripe_index == i)
				zone_info[i].alloc_offset += stripe_offset;
		}

		/* Verification */
		if (i != 0) {
			if (unlikely(prev_offset < zone_info[i].alloc_offset)) {
				btrfs_err(fs_info,
			"zoned: stripe position disorder found in block group %llu",
					  bg->start);
				return -EIO;
			}

			if (unlikely(has_partial &&
				     (zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK))) {
				btrfs_err(fs_info,
		"zoned: multiple partial written stripe found in block group %llu",
					  bg->start);
				return -EIO;
			}
		}
		prev_offset = zone_info[i].alloc_offset;

		if ((zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK) != 0)
			has_partial = true;

		if (test_bit(0, active) != test_bit(i, active)) {
			if (unlikely(!btrfs_zone_activate(bg)))
				return -EIO;
		} else {
			if (test_bit(0, active))
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
		}
		bg->zone_capacity += zone_info[i].capacity;
		bg->alloc_offset += zone_info[i].alloc_offset;
	}

	/* Check if all devices stay in the same stripe row. */
	if (unlikely(zone_info[0].alloc_offset -
		     zone_info[map->num_stripes - 1].alloc_offset > BTRFS_STRIPE_LEN)) {
		btrfs_err(fs_info, "zoned: stripe gap too large in block group %llu",
			  bg->start);
		return -EIO;
	}

	if (unlikely(has_conventional && bg->alloc_offset < last_alloc)) {
		btrfs_err(fs_info,
			  "zoned: allocated extent stays beyond write pointers %llu %llu",
			  bg->alloc_offset, last_alloc);
		return -EIO;
	}

	return 0;
}
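
/*
 * Worked example of the RAID0 math above (hypothetical 2-stripe group,
 * BTRFS_STRIPE_LEN = 64K): a per-device write pointer of 192K on stripe
 * i = 1 gives stripe_nr = 3 and stripe_offset = 0, which is normalized to
 * stripe_nr = 2, stripe_offset = 64K. The logical allocation is then
 * ((2 * 2 + 1) << 16) + 64K = 384K. Conventional zones inherit their
 * position from last_alloc via the stripe_nr/stripe_index split computed
 * with div_u64_rem().
 */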
1673
btrfs_load_block_group_raid10(struct btrfs_block_group * bg,struct btrfs_chunk_map * map,struct zone_info * zone_info,unsigned long * active,u64 last_alloc)1674 static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
1675 struct btrfs_chunk_map *map,
1676 struct zone_info *zone_info,
1677 unsigned long *active,
1678 u64 last_alloc)
1679 {
1680 struct btrfs_fs_info *fs_info = bg->fs_info;
1681 u64 AUTO_KFREE(raid0_allocs);
1682 u64 stripe_nr = 0, stripe_offset = 0;
1683 u32 stripe_index = 0;
1684 bool has_partial = false, has_conventional = false;
1685 u64 prev_offset = 0;
1686
1687 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1688 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1689 btrfs_bg_type_to_raid_name(map->type));
1690 return -EINVAL;
1691 }
1692
1693 raid0_allocs = kcalloc(map->num_stripes / map->sub_stripes, sizeof(*raid0_allocs),
1694 GFP_NOFS);
1695 if (!raid0_allocs)
1696 return -ENOMEM;
1697
1698 /*
1699 * When the last extent is removed, last_alloc can be smaller than the other write
1700 * pointer. In that case, last_alloc should be moved to the corresponding write
1701 * pointer position.
1702 */
1703 for (int i = 0; i < map->num_stripes; i += map->sub_stripes) {
1704 u64 alloc = zone_info[i].alloc_offset;
1705
1706 for (int j = 1; j < map->sub_stripes; j++) {
1707 int idx = i + j;
1708
1709 if (zone_info[idx].alloc_offset == WP_MISSING_DEV ||
1710 zone_info[idx].alloc_offset == WP_CONVENTIONAL)
1711 continue;
1712 if (alloc == WP_MISSING_DEV || alloc == WP_CONVENTIONAL) {
1713 alloc = zone_info[idx].alloc_offset;
1714 } else if (unlikely(zone_info[idx].alloc_offset != alloc)) {
1715 btrfs_err(fs_info,
1716 "zoned: write pointer mismatch found in block group %llu",
1717 bg->start);
1718 return -EIO;
1719 }
1720 }
1721
1722 raid0_allocs[i / map->sub_stripes] = alloc;
1723 if (alloc == WP_CONVENTIONAL)
1724 continue;
1725 if (unlikely(alloc == WP_MISSING_DEV)) {
1726 btrfs_err(fs_info,
1727 "zoned: cannot recover write pointer of block group %llu due to missing device",
1728 bg->start);
1729 return -EIO;
1730 }
1731
1732 stripe_nr = alloc >> BTRFS_STRIPE_LEN_SHIFT;
1733 stripe_offset = alloc & BTRFS_STRIPE_LEN_MASK;
1734 if (stripe_offset == 0 && stripe_nr > 0) {
1735 stripe_nr--;
1736 stripe_offset = BTRFS_STRIPE_LEN;
1737 }
1738
1739 alloc = ((stripe_nr * (map->num_stripes / map->sub_stripes) +
1740 (i / map->sub_stripes)) <<
1741 BTRFS_STRIPE_LEN_SHIFT) + stripe_offset;
1742 last_alloc = max(last_alloc, alloc);
1743 }
1744 stripe_nr = 0;
1745 stripe_offset = 0;
1746
1747 if (last_alloc) {
1748 u32 factor = map->num_stripes / map->sub_stripes;
1749
1750 stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
1751 stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
1752 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
1753 }
1754
1755 for (int i = 0; i < map->num_stripes; i++) {
1756 int idx = i / map->sub_stripes;
1757
1758 if (raid0_allocs[idx] == WP_CONVENTIONAL) {
1759 has_conventional = true;
1760 raid0_allocs[idx] = btrfs_stripe_nr_to_offset(stripe_nr);
1761
1762 if (stripe_index > idx)
1763 raid0_allocs[idx] += BTRFS_STRIPE_LEN;
1764 else if (stripe_index == idx)
1765 raid0_allocs[idx] += stripe_offset;
1766 }
1767
1768 if ((i % map->sub_stripes) == 0) {
1769 /* Verification */
1770 if (i != 0) {
1771 if (unlikely(prev_offset < raid0_allocs[idx])) {
1772 btrfs_err(fs_info,
1773 "zoned: stripe position disorder found in block group %llu",
1774 bg->start);
1775 return -EIO;
1776 }
1777
1778 if (unlikely(has_partial &&
1779 (raid0_allocs[idx] & BTRFS_STRIPE_LEN_MASK))) {
1780 btrfs_err(fs_info,
1781 "zoned: multiple partial written stripe found in block group %llu",
1782 bg->start);
1783 return -EIO;
1784 }
1785 }
1786 prev_offset = raid0_allocs[idx];
1787
1788 if ((raid0_allocs[idx] & BTRFS_STRIPE_LEN_MASK) != 0)
1789 has_partial = true;
1790 }
1791
1792 if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1793 zone_info[i].alloc_offset == WP_CONVENTIONAL)
1794 zone_info[i].alloc_offset = raid0_allocs[idx];
1795
1796 if (test_bit(0, active) != test_bit(i, active)) {
1797 if (unlikely(!btrfs_zone_activate(bg)))
1798 return -EIO;
1799 } else if (test_bit(0, active)) {
1800 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1801 }
1802
1803 if ((i % map->sub_stripes) == 0) {
1804 bg->zone_capacity += zone_info[i].capacity;
1805 bg->alloc_offset += zone_info[i].alloc_offset;
1806 }
1807 }
1808
1809 /* Check if all devices stay in the same stripe row. */
1810 if (unlikely(zone_info[0].alloc_offset -
1811 zone_info[map->num_stripes - 1].alloc_offset > BTRFS_STRIPE_LEN)) {
1812 btrfs_err(fs_info, "zoned: stripe gap too large in block group %llu",
1813 bg->start);
1814 return -EIO;
1815 }
1816
1817 if (unlikely(has_conventional && bg->alloc_offset < last_alloc)) {
1818 btrfs_err(fs_info, "zoned: allocated extent extends beyond write pointers %llu %llu",
1819 bg->alloc_offset, last_alloc);
1820 return -EIO;
1821 }
1822
1823 return 0;
1824 }
1825
1826 EXPORT_FOR_TESTS
1827 int btrfs_load_block_group_by_raid_type(struct btrfs_block_group *bg,
1828 struct btrfs_chunk_map *map,
1829 struct zone_info *zone_info,
1830 unsigned long *active, u64 last_alloc)
1831 {
1832 struct btrfs_fs_info *fs_info = bg->fs_info;
1833 u64 profile;
1834 int ret;
1835
1836 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
1837 switch (profile) {
1838 case 0: /* single */
1839 ret = btrfs_load_block_group_single(bg, &zone_info[0], active);
1840 break;
1841 case BTRFS_BLOCK_GROUP_DUP:
1842 ret = btrfs_load_block_group_dup(bg, map, zone_info, active, last_alloc);
1843 break;
1844 case BTRFS_BLOCK_GROUP_RAID1:
1845 case BTRFS_BLOCK_GROUP_RAID1C3:
1846 case BTRFS_BLOCK_GROUP_RAID1C4:
1847 ret = btrfs_load_block_group_raid1(bg, map, zone_info, active, last_alloc);
1848 break;
1849 case BTRFS_BLOCK_GROUP_RAID0:
1850 ret = btrfs_load_block_group_raid0(bg, map, zone_info, active, last_alloc);
1851 break;
1852 case BTRFS_BLOCK_GROUP_RAID10:
1853 ret = btrfs_load_block_group_raid10(bg, map, zone_info, active, last_alloc);
1854 break;
1855 case BTRFS_BLOCK_GROUP_RAID5:
1856 case BTRFS_BLOCK_GROUP_RAID6:
1857 default:
1858 btrfs_err(fs_info, "zoned: profile %s not yet supported",
1859 btrfs_bg_type_to_raid_name(map->type));
1860 return -EINVAL;
1861 }
1862
1863 if (ret == -EIO && profile != 0 && profile != BTRFS_BLOCK_GROUP_RAID0 &&
1864 profile != BTRFS_BLOCK_GROUP_RAID10) {
1865 /*
1866 * Detected broken write pointer. Make this block group
1867 * unallocatable by setting the allocation pointer at the end of
1868 * allocatable region. Relocating this block group will fix the
1869 * mismatch.
1870 *
1871 * Currently, we cannot handle the RAID0 or RAID10 cases like this
1872 * because we don't have a proper zone_capacity value. But reading
1873 * from such a block group won't work anyway due to the missing
1874 * stripe.
1875 */
1876 bg->alloc_offset = bg->zone_capacity;
1877 }
1878
1879 return ret;
1880 }
1881
1882 int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
1883 {
1884 struct btrfs_fs_info *fs_info = cache->fs_info;
1885 struct btrfs_chunk_map *map;
1886 u64 logical = cache->start;
1887 u64 length = cache->length;
1888 struct zone_info AUTO_KFREE(zone_info);
1889 int ret;
1890 int i;
1891 unsigned long *active = NULL;
1892 u64 last_alloc = 0;
1893 u32 num_sequential = 0, num_conventional = 0;
1894
1895 if (!btrfs_is_zoned(fs_info))
1896 return 0;
1897
1898 /* Sanity check */
1899 if (unlikely(!IS_ALIGNED(length, fs_info->zone_size))) {
1900 btrfs_err(fs_info,
1901 "zoned: block group %llu len %llu unaligned to zone size %llu",
1902 logical, length, fs_info->zone_size);
1903 return -EIO;
1904 }
1905
1906 map = btrfs_find_chunk_map(fs_info, logical, length);
1907 if (!map)
1908 return -EINVAL;
1909
1910 cache->physical_map = map;
1911
1912 zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
1913 if (!zone_info) {
1914 ret = -ENOMEM;
1915 goto out;
1916 }
1917
1918 active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
1919 if (!active) {
1920 ret = -ENOMEM;
1921 goto out;
1922 }
1923
1924 for (i = 0; i < map->num_stripes; i++) {
1925 ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map, new);
1926 if (ret)
1927 goto out;
1928
1929 if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
1930 num_conventional++;
1931 else
1932 num_sequential++;
1933 }
1934
1935 if (num_sequential > 0)
1936 set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1937
1938 if (num_conventional > 0) {
1939 ret = calculate_alloc_pointer(cache, &last_alloc, new);
1940 if (ret) {
1941 btrfs_err(fs_info,
1942 "zoned: failed to determine allocation offset of bg %llu",
1943 cache->start);
1944 goto out;
1945 } else if (map->num_stripes == num_conventional) {
1946 cache->alloc_offset = last_alloc;
1947 cache->zone_capacity = cache->length;
1948 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
1949 goto out;
1950 }
1951 }
1952
1953 ret = btrfs_load_block_group_by_raid_type(cache, map, zone_info, active, last_alloc);
1954
1955 out:
1956 /* Reject non-SINGLE data profiles without RST */
1957 if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
1958 (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
1959 !fs_info->stripe_root) {
1960 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1961 btrfs_bg_type_to_raid_name(map->type));
1962 ret = -EINVAL;
1963 }
1964
1965 if (unlikely(cache->alloc_offset > cache->zone_capacity)) {
1966 btrfs_err(fs_info,
1967 "zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
1968 cache->alloc_offset, cache->zone_capacity,
1969 cache->start);
1970 ret = -EIO;
1971 }
1972
1973 /* An extent is allocated after the write pointer */
1974 if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
1975 btrfs_err(fs_info,
1976 "zoned: got wrong write pointer in BG %llu: %llu > %llu",
1977 logical, last_alloc, cache->alloc_offset);
1978 ret = -EIO;
1979 }
1980
1981 if (!ret) {
1982 cache->meta_write_pointer = cache->alloc_offset + cache->start;
1983 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
1984 btrfs_get_block_group(cache);
1985 spin_lock(&fs_info->zone_active_bgs_lock);
1986 list_add_tail(&cache->active_bg_list,
1987 &fs_info->zone_active_bgs);
1988 spin_unlock(&fs_info->zone_active_bgs_lock);
1989 }
1990 } else {
1991 btrfs_free_chunk_map(cache->physical_map);
1992 cache->physical_map = NULL;
1993 }
1994 bitmap_free(active);
1995
1996 return ret;
1997 }
1998
1999 void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
2000 {
2001 u64 unusable, free;
2002
2003 if (!btrfs_is_zoned(cache->fs_info))
2004 return;
2005
2006 WARN_ON(cache->bytes_super != 0);
2007 unusable = (cache->alloc_offset - cache->used) +
2008 (cache->length - cache->zone_capacity);
2009 free = cache->zone_capacity - cache->alloc_offset;
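/*
 * A worked example with hypothetical numbers: a 256M block group with
 * zone_capacity == 224M, alloc_offset == 100M and used == 80M gives
 * unusable == (100M - 80M) + (256M - 224M) == 52M and
 * free == 224M - 100M == 124M. The 20M of freed-but-not-reset space
 * plus the 32M capacity shortfall stay unusable until a zone reset.
 */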
2010
2011 /* We only need ->free_space in ALLOC_SEQ block groups */
2012 cache->cached = BTRFS_CACHE_FINISHED;
2013 cache->free_space_ctl->free_space = free;
2014 cache->zone_unusable = unusable;
2015 }
2016
2017 bool btrfs_use_zone_append(struct btrfs_bio *bbio)
2018 {
2019 u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
2020 struct btrfs_inode *inode = bbio->inode;
2021 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2022 struct btrfs_block_group *cache;
2023 bool ret = false;
2024
2025 if (!btrfs_is_zoned(fs_info))
2026 return false;
2027
2028 if (!is_data_inode(inode))
2029 return false;
2030
2031 if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
2032 return false;
2033
2034 /*
2035 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
2036 * extent layout the relocation code has.
2037 * Furthermore, we have set aside our own block group from which only the
2038 * relocation "process" can allocate, and we make sure only one process
2039 * at a time can add pages to an extent that gets relocated, so it's
2040 * safe to use a regular REQ_OP_WRITE for this special case.
2041 */
2042 if (btrfs_is_data_reloc_root(inode->root))
2043 return false;
2044
2045 cache = btrfs_lookup_block_group(fs_info, start);
2046 ASSERT(cache);
2047 if (!cache)
2048 return false;
2049
2050 ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
2051 btrfs_put_block_group(cache);
2052
2053 return ret;
2054 }
2055
2056 void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
2057 {
2058 const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
2059 struct btrfs_ordered_sum *sum = bbio->sums;
2060
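/*
 * Illustrative numbers: if the extent was expected at
 * orig_physical == 1G but the ZONE_APPEND completion reports
 * physical == 1G + 128K, sum->logical is advanced by 128K below so the
 * recorded logical address matches where the data actually landed.
 */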
2061 if (physical < bbio->orig_physical)
2062 sum->logical -= bbio->orig_physical - physical;
2063 else
2064 sum->logical += physical - bbio->orig_physical;
2065 }
2066
2067 static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
2068 u64 logical)
2069 {
2070 struct extent_map_tree *em_tree = &ordered->inode->extent_tree;
2071 struct extent_map *em;
2072
2073 ordered->disk_bytenr = logical;
2074
2075 write_lock(&em_tree->lock);
2076 em = btrfs_search_extent_mapping(em_tree, ordered->file_offset,
2077 ordered->num_bytes);
2078 /* The em should be a new COW extent, thus it should not have an offset. */
2079 ASSERT(em->offset == 0, "em->offset=%llu", em->offset);
2080 em->disk_bytenr = logical;
2081 btrfs_free_extent_map(em);
2082 write_unlock(&em_tree->lock);
2083 }
2084
2085 static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
2086 u64 logical, u64 len)
2087 {
2088 struct btrfs_ordered_extent *new;
2089
2090 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
2091 btrfs_split_extent_map(ordered->inode, ordered->file_offset,
2092 ordered->num_bytes, len, logical))
2093 return false;
2094
2095 new = btrfs_split_ordered_extent(ordered, len);
2096 if (IS_ERR(new))
2097 return false;
2098 new->disk_bytenr = logical;
2099 btrfs_finish_one_ordered(new);
2100 return true;
2101 }
2102
2103 void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
2104 {
2105 struct btrfs_inode *inode = ordered->inode;
2106 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2107 struct btrfs_ordered_sum *sum;
2108 u64 logical, len;
2109
2110 /*
2111 * A write to a pre-allocated region only happens for data relocation,
2112 * so it uses a regular WRITE operation. No split/rewrite is necessary.
2113 */
2114 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
2115 return;
2116
2117 /* The list can only be empty in the pre-alloc case handled above. */
2118 ASSERT(!list_empty(&ordered->list));
2119 sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
2120 logical = sum->logical;
2121 len = sum->len;
2122
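/*
 * Hypothetical walk of the loop below: with disk_num_bytes == 256K and
 * sums covering [X, X + 128K), [X + 128K, X + 192K) and [Y, Y + 64K),
 * the first two ranges are merged into one 192K chunk, and the jump to
 * Y forces a split of the ordered extent at 192K before continuing.
 */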
2123 while (len < ordered->disk_num_bytes) {
2124 sum = list_next_entry(sum, list);
2125 if (sum->logical == logical + len) {
2126 len += sum->len;
2127 continue;
2128 }
2129 if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
2130 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
2131 btrfs_err(fs_info, "failed to split ordered extent");
2132 goto out;
2133 }
2134 logical = sum->logical;
2135 len = sum->len;
2136 }
2137
2138 if (ordered->disk_bytenr != logical)
2139 btrfs_rewrite_logical_zoned(ordered, logical);
2140
2141 out:
2142 /*
2143 * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures
2144 * were allocated by btrfs_alloc_dummy_sum only to record the logical
2145 * addresses and don't contain actual checksums. We thus must free them
2146 * here so that we don't attempt to log the csums later.
2147 */
2148 if ((inode->flags & BTRFS_INODE_NODATASUM) ||
2149 test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state)) {
2150 while ((sum = list_first_entry_or_null(&ordered->list,
2151 typeof(*sum), list))) {
2152 list_del(&sum->list);
2153 kfree(sum);
2154 }
2155 }
2156 }
2157
2158 static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
2159 struct btrfs_block_group **active_bg)
2160 {
2161 const struct writeback_control *wbc = ctx->wbc;
2162 struct btrfs_block_group *block_group = ctx->zoned_bg;
2163 struct btrfs_fs_info *fs_info = block_group->fs_info;
2164
2165 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
2166 return true;
2167
2168 if (fs_info->treelog_bg == block_group->start) {
2169 if (!btrfs_zone_activate(block_group)) {
2170 int ret_fin = btrfs_zone_finish_one_bg(fs_info);
2171
2172 if (ret_fin != 1 || !btrfs_zone_activate(block_group))
2173 return false;
2174 }
2175 } else if (*active_bg != block_group) {
2176 struct btrfs_block_group *tgt = *active_bg;
2177
2178 /* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */
2179 lockdep_assert_held(&fs_info->zoned_meta_io_lock);
2180
2181 if (tgt) {
2182 /*
2183 * If there are unsent IOs left in the allocated area,
2184 * we cannot wait for them, as doing so may deadlock.
2185 */
2186 if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
2187 if (wbc->sync_mode == WB_SYNC_NONE ||
2188 (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync))
2189 return false;
2190 }
2191
2192 /* Pivot active metadata/system block group. */
2193 btrfs_zoned_meta_io_unlock(fs_info);
2194 wait_eb_writebacks(tgt);
2195 do_zone_finish(tgt, true);
2196 btrfs_zoned_meta_io_lock(fs_info);
2197 if (*active_bg == tgt) {
2198 btrfs_put_block_group(tgt);
2199 *active_bg = NULL;
2200 }
2201 }
2202 if (!btrfs_zone_activate(block_group))
2203 return false;
2204 if (*active_bg != block_group) {
2205 ASSERT(*active_bg == NULL);
2206 *active_bg = block_group;
2207 btrfs_get_block_group(block_group);
2208 }
2209 }
2210
2211 return true;
2212 }
2213
2214 /*
2215 * Check if @ctx->eb is aligned to the write pointer.
2216 *
2217 * Return:
2218 * 0: @ctx->eb is at the write pointer. You can write it.
2219 * -EAGAIN: There is a hole. The caller should handle the case.
2220 * -EBUSY: There is a hole, but the caller can just bail out.
2221 */
2222 int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
2223 struct btrfs_eb_write_context *ctx)
2224 {
2225 const struct writeback_control *wbc = ctx->wbc;
2226 const struct extent_buffer *eb = ctx->eb;
2227 struct btrfs_block_group *block_group = ctx->zoned_bg;
2228
2229 if (!btrfs_is_zoned(fs_info))
2230 return 0;
2231
2232 if (block_group) {
2233 if (block_group->start > eb->start ||
2234 btrfs_block_group_end(block_group) <= eb->start) {
2235 btrfs_put_block_group(block_group);
2236 block_group = NULL;
2237 ctx->zoned_bg = NULL;
2238 }
2239 }
2240
2241 if (!block_group) {
2242 block_group = btrfs_lookup_block_group(fs_info, eb->start);
2243 if (!block_group)
2244 return 0;
2245 ctx->zoned_bg = block_group;
2246 }
2247
2248 if (block_group->meta_write_pointer == eb->start) {
2249 struct btrfs_block_group **tgt;
2250
2251 if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
2252 return 0;
2253
2254 if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)
2255 tgt = &fs_info->active_system_bg;
2256 else
2257 tgt = &fs_info->active_meta_bg;
2258 if (check_bg_is_active(ctx, tgt))
2259 return 0;
2260 }
2261
2262 /*
2263 * Since we may release fs_info->zoned_meta_io_lock, someone can already
2264 * start writing this eb. In that case, we can just bail out.
2265 */
2266 if (block_group->meta_write_pointer > eb->start)
2267 return -EBUSY;
2268
2269 /* If for_sync, this hole will be filled with transaction commit. */
2270 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
2271 return -EAGAIN;
2272 return -EBUSY;
2273 }
2274
2275 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
2276 {
2277 if (!btrfs_dev_is_sequential(device, physical))
2278 return -EOPNOTSUPP;
2279
2280 return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
2281 length >> SECTOR_SHIFT, GFP_NOFS, 0);
2282 }
2283
2284 static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
2285 struct blk_zone *zone)
2286 {
2287 struct btrfs_io_context *bioc = NULL;
2288 u64 mapped_length = PAGE_SIZE;
2289 unsigned int nofs_flag;
2290 int nmirrors;
2291 int i, ret;
2292
2293 ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2294 &mapped_length, &bioc, NULL, NULL);
2295 if (unlikely(ret || !bioc || mapped_length < PAGE_SIZE)) {
2296 ret = -EIO;
2297 goto out_put_bioc;
2298 }
2299
2300 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2301 ret = -EINVAL;
2302 goto out_put_bioc;
2303 }
2304
2305 nofs_flag = memalloc_nofs_save();
2306 nmirrors = (int)bioc->num_stripes;
2307 for (i = 0; i < nmirrors; i++) {
2308 u64 physical = bioc->stripes[i].physical;
2309 struct btrfs_device *dev = bioc->stripes[i].dev;
2310
2311 /* Missing device */
2312 if (!dev->bdev)
2313 continue;
2314
2315 ret = btrfs_get_dev_zone(dev, physical, zone);
2316 /* Failing device */
2317 if (ret == -EIO || ret == -EOPNOTSUPP)
2318 continue;
2319 break;
2320 }
2321 memalloc_nofs_restore(nofs_flag);
2322 out_put_bioc:
2323 btrfs_put_bioc(bioc);
2324 return ret;
2325 }
2326
2327 /*
2328 * Synchronize the write pointer of the zone at @physical_start on @tgt_dev
2329 * by writing zeroes from @physical_pos up to the write pointer of the
2330 * dev-replace source device.
2331 */
2332 int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
2333 u64 physical_start, u64 physical_pos)
2334 {
2335 struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
2336 struct blk_zone zone;
2337 u64 length;
2338 u64 wp;
2339 int ret;
2340
2341 if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
2342 return 0;
2343
2344 ret = read_zone_info(fs_info, logical, &zone);
2345 if (ret)
2346 return ret;
2347
2348 wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
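/*
 * Sketch with hypothetical values: if the source zone reports
 * zone.wp - zone.start == 1024 sectors, then wp == physical_start + 512K.
 * A target at physical_pos == physical_start + 384K gets the remaining
 * 128K zeroed out below; a target ahead of wp is corruption (-EUCLEAN).
 */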
2349
2350 if (physical_pos == wp)
2351 return 0;
2352
2353 if (unlikely(physical_pos > wp))
2354 return -EUCLEAN;
2355
2356 length = wp - physical_pos;
2357 return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
2358 }
2359
2360 /*
2361 * Activate block group and underlying device zones
2362 *
2363 * @block_group: the block group to activate
2364 *
2365 * Return: true on success, false otherwise
2366 */
2367 bool btrfs_zone_activate(struct btrfs_block_group *block_group)
2368 {
2369 struct btrfs_fs_info *fs_info = block_group->fs_info;
2370 struct btrfs_chunk_map *map;
2371 struct btrfs_device *device;
2372 u64 physical;
2373 const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA);
2374 bool ret;
2375 int i;
2376
2377 if (!btrfs_is_zoned(block_group->fs_info))
2378 return true;
2379
2380 map = block_group->physical_map;
2381
2382 spin_lock(&fs_info->zone_active_bgs_lock);
2383 spin_lock(&block_group->lock);
2384 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
2385 ret = true;
2386 goto out_unlock;
2387 }
2388
2389 if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) {
2390 /* The caller should check if the block group is full. */
2391 if (WARN_ON_ONCE(btrfs_zoned_bg_is_full(block_group))) {
2392 ret = false;
2393 goto out_unlock;
2394 }
2395 } else {
2396 /* If anything was written already, it should have been active. */
2397 WARN_ON_ONCE(block_group->meta_write_pointer != block_group->start);
2398 }
2399
2400 for (i = 0; i < map->num_stripes; i++) {
2401 struct btrfs_zoned_device_info *zinfo;
2402 int reserved = 0;
2403
2404 device = map->stripes[i].dev;
2405 physical = map->stripes[i].physical;
2406 zinfo = device->zone_info;
2407
2408 if (!device->bdev)
2409 continue;
2410
2411 if (zinfo->max_active_zones == 0)
2412 continue;
2413
2414 /*
2415 * For a data block group, leave active zones for one
2416 * metadata block group and one system block group.
2417 */
2418 if (is_data)
2419 reserved = zinfo->reserved_active_zones;
2420 if (atomic_read(&zinfo->active_zones_left) <= reserved) {
2421 ret = false;
2422 goto out_unlock;
2423 }
2424
2425 if (!btrfs_dev_set_active_zone(device, physical)) {
2426 /* Cannot activate the zone */
2427 ret = false;
2428 goto out_unlock;
2429 }
2430 if (!is_data)
2431 zinfo->reserved_active_zones--;
2432 }
2433
2434 /* Successfully activated all the zones */
2435 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2436 spin_unlock(&block_group->lock);
2437
2438 /* For the active block group list */
2439 btrfs_get_block_group(block_group);
2440 list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
2441 spin_unlock(&fs_info->zone_active_bgs_lock);
2442
2443 return true;
2444
2445 out_unlock:
2446 spin_unlock(&block_group->lock);
2447 spin_unlock(&fs_info->zone_active_bgs_lock);
2448 return ret;
2449 }
2450
2451 static void wait_eb_writebacks(struct btrfs_block_group *block_group)
2452 {
2453 struct btrfs_fs_info *fs_info = block_group->fs_info;
2454 const u64 end = btrfs_block_group_end(block_group);
2455 struct extent_buffer *eb;
2456 unsigned long index, start = (block_group->start >> fs_info->nodesize_bits);
2457
2458 rcu_read_lock();
2459 xa_for_each_start(&fs_info->buffer_tree, index, eb, start) {
2460 if (eb->start < block_group->start)
2461 continue;
2462 if (eb->start >= end)
2463 break;
2464 rcu_read_unlock();
2465 wait_on_extent_buffer_writeback(eb);
2466 rcu_read_lock();
2467 }
2468 rcu_read_unlock();
2469 }
2470
2471 static int call_zone_finish(struct btrfs_block_group *block_group,
2472 struct btrfs_io_stripe *stripe)
2473 {
2474 struct btrfs_device *device = stripe->dev;
2475 const u64 physical = stripe->physical;
2476 struct btrfs_zoned_device_info *zinfo = device->zone_info;
2477 int ret;
2478
2479 if (!device->bdev)
2480 return 0;
2481
2482 if (zinfo->max_active_zones == 0)
2483 return 0;
2484
2485 if (btrfs_dev_is_sequential(device, physical)) {
2486 unsigned int nofs_flags;
2487
2488 nofs_flags = memalloc_nofs_save();
2489 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
2490 physical >> SECTOR_SHIFT,
2491 zinfo->zone_size >> SECTOR_SHIFT);
2492 memalloc_nofs_restore(nofs_flags);
2493
2494 if (ret)
2495 return ret;
2496 }
2497
2498 if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
2499 zinfo->reserved_active_zones++;
2500 btrfs_dev_clear_active_zone(device, physical);
2501
2502 return 0;
2503 }
2504
2505 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
2506 {
2507 struct btrfs_fs_info *fs_info = block_group->fs_info;
2508 struct btrfs_chunk_map *map;
2509 const bool is_metadata = (block_group->flags &
2510 (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
2511 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2512 int ret = 0;
2513 int i;
2514
2515 spin_lock(&block_group->lock);
2516 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
2517 spin_unlock(&block_group->lock);
2518 return 0;
2519 }
2520
2521 /* Check if we have unwritten allocated space */
2522 if (is_metadata &&
2523 block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
2524 spin_unlock(&block_group->lock);
2525 return -EAGAIN;
2526 }
2527
2528 /*
2529 * If we are sure that the block group is full (= no more room left for
2530 * new allocation) and the IO for the last usable block is completed, we
2531 * don't need to wait for the other IOs. This holds because we ensure
2532 * sequential IO submission using the ZONE_APPEND command for data
2533 * and block_group->meta_write_pointer for metadata.
2534 */
2535 if (!fully_written) {
2536 if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
2537 spin_unlock(&block_group->lock);
2538 return -EAGAIN;
2539 }
2540 spin_unlock(&block_group->lock);
2541
2542 ret = btrfs_inc_block_group_ro(block_group, false);
2543 if (ret)
2544 return ret;
2545
2546 /* Ensure all writes in this block group finish */
2547 btrfs_wait_block_group_reservations(block_group);
2548 /* No need to wait for NOCOW writers. Zoned mode does not allow that */
2549 btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group);
2550 /* Wait for extent buffers to be written. */
2551 if (is_metadata)
2552 wait_eb_writebacks(block_group);
2553
2554 spin_lock(&block_group->lock);
2555
2556 /*
2557 * Bail out if someone already deactivated the block group, or
2558 * allocated space is left in the block group.
2559 */
2560 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2561 &block_group->runtime_flags)) {
2562 spin_unlock(&block_group->lock);
2563 btrfs_dec_block_group_ro(block_group);
2564 return 0;
2565 }
2566
2567 if (block_group->reserved ||
2568 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2569 &block_group->runtime_flags)) {
2570 spin_unlock(&block_group->lock);
2571 btrfs_dec_block_group_ro(block_group);
2572 return -EAGAIN;
2573 }
2574 }
2575
2576 clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2577 block_group->alloc_offset = block_group->zone_capacity;
2578 if (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM))
2579 block_group->meta_write_pointer = block_group->start +
2580 block_group->zone_capacity;
2581 block_group->free_space_ctl->free_space = 0;
2582 btrfs_clear_treelog_bg(block_group);
2583 btrfs_clear_data_reloc_bg(block_group);
2584 spin_unlock(&block_group->lock);
2585
2586 down_read(&dev_replace->rwsem);
2587 map = block_group->physical_map;
2588 for (i = 0; i < map->num_stripes; i++) {
2590 ret = call_zone_finish(block_group, &map->stripes[i]);
2591 if (ret) {
2592 up_read(&dev_replace->rwsem);
2593 return ret;
2594 }
2595 }
2596 up_read(&dev_replace->rwsem);
2597
2598 if (!fully_written)
2599 btrfs_dec_block_group_ro(block_group);
2600
2601 spin_lock(&fs_info->zone_active_bgs_lock);
2602 ASSERT(!list_empty(&block_group->active_bg_list));
2603 list_del_init(&block_group->active_bg_list);
2604 spin_unlock(&fs_info->zone_active_bgs_lock);
2605
2606 /* For active_bg_list */
2607 btrfs_put_block_group(block_group);
2608
2609 clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2610
2611 return 0;
2612 }
2613
2614 int btrfs_zone_finish(struct btrfs_block_group *block_group)
2615 {
2616 if (!btrfs_is_zoned(block_group->fs_info))
2617 return 0;
2618
2619 return do_zone_finish(block_group, false);
2620 }
2621
2622 bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
2623 {
2624 struct btrfs_fs_info *fs_info = fs_devices->fs_info;
2625 struct btrfs_device *device;
2626 bool ret = false;
2627
2628 if (!btrfs_is_zoned(fs_info))
2629 return true;
2630
2631 if (test_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags))
2632 return false;
2633
2634 /* Check if there is a device with active zones left */
2635 mutex_lock(&fs_info->chunk_mutex);
2636 spin_lock(&fs_info->zone_active_bgs_lock);
2637 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
2638 struct btrfs_zoned_device_info *zinfo = device->zone_info;
2639 int reserved = 0;
2640
2641 if (!device->bdev)
2642 continue;
2643
2644 if (!zinfo->max_active_zones) {
2645 ret = true;
2646 break;
2647 }
2648
2649 if (flags & BTRFS_BLOCK_GROUP_DATA)
2650 reserved = zinfo->reserved_active_zones;
2651
2652 switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
2653 case 0: /* single */
2654 ret = (atomic_read(&zinfo->active_zones_left) >= (1 + reserved));
2655 break;
2656 case BTRFS_BLOCK_GROUP_DUP:
2657 ret = (atomic_read(&zinfo->active_zones_left) >= (2 + reserved));
2658 break;
2659 }
2660 if (ret)
2661 break;
2662 }
2663 spin_unlock(&fs_info->zone_active_bgs_lock);
2664 mutex_unlock(&fs_info->chunk_mutex);
2665
2666 if (!ret)
2667 set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2668
2669 return ret;
2670 }
2671
2672 int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
2673 {
2674 struct btrfs_block_group *block_group;
2675 u64 min_alloc_bytes;
2676
2677 if (!btrfs_is_zoned(fs_info))
2678 return 0;
2679
2680 block_group = btrfs_lookup_block_group(fs_info, logical);
2681 if (WARN_ON_ONCE(!block_group))
2682 return -ENOENT;
2683
2684 /* No MIXED_BG on zoned btrfs. */
2685 if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
2686 min_alloc_bytes = fs_info->sectorsize;
2687 else
2688 min_alloc_bytes = fs_info->nodesize;
2689
2690 /* Bail out if we can allocate more data from this block group. */
2691 if (logical + length + min_alloc_bytes <=
2692 block_group->start + block_group->zone_capacity)
2693 goto out;
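/*
 * Numeric sketch of the bail-out above (hypothetical): for a data
 * block group (min_alloc_bytes == sectorsize, e.g. 4K) with 256M of
 * zone_capacity, an IO ending at least 4K before the capacity limit
 * leaves room for another allocation and returns early; an IO
 * reaching the final 4K finishes the zone below.
 */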
2694
2695 do_zone_finish(block_group, true);
2696
2697 out:
2698 btrfs_put_block_group(block_group);
2699 return 0;
2700 }
2701
2702 static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
2703 {
2704 int ret;
2705 struct btrfs_block_group *bg =
2706 container_of(work, struct btrfs_block_group, zone_finish_work);
2707
2708 wait_on_extent_buffer_writeback(bg->last_eb);
2709 free_extent_buffer(bg->last_eb);
2710 ret = do_zone_finish(bg, true);
2711 if (ret)
2712 btrfs_handle_fs_error(bg->fs_info, ret,
2713 "Failed to finish block-group's zone");
2714 btrfs_put_block_group(bg);
2715 }
2716
2717 void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
2718 struct extent_buffer *eb)
2719 {
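/*
 * The condition below schedules a zone finish once fewer than two more
 * extent buffers fit below zone_capacity. Hypothetical numbers: with
 * 16K nodes and zone_capacity == 256M, an eb at offset 256M - 32K into
 * the block group still returns early, while one at 256M - 16K
 * schedules the finish work.
 */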
2720 if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
2721 eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
2722 return;
2723
2724 if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
2725 btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
2726 bg->start);
2727 return;
2728 }
2729
2730 /* For the work */
2731 btrfs_get_block_group(bg);
2732 refcount_inc(&eb->refs);
2733 bg->last_eb = eb;
2734 INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
2735 queue_work(system_dfl_wq, &bg->zone_finish_work);
2736 }
2737
2738 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
2739 {
2740 struct btrfs_fs_info *fs_info = bg->fs_info;
2741
2742 spin_lock(&fs_info->relocation_bg_lock);
2743 if (fs_info->data_reloc_bg == bg->start)
2744 fs_info->data_reloc_bg = 0;
2745 spin_unlock(&fs_info->relocation_bg_lock);
2746 }
2747
2748 void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info)
2749 {
2750 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
2751 struct btrfs_space_info *space_info = data_sinfo;
2752 struct btrfs_trans_handle *trans;
2753 struct btrfs_block_group *bg;
2754 struct list_head *bg_list;
2755 u64 alloc_flags;
2756 bool first = true;
2757 bool did_chunk_alloc = false;
2758 int index;
2759 int ret;
2760
2761 if (!btrfs_is_zoned(fs_info))
2762 return;
2763
2764 if (fs_info->data_reloc_bg)
2765 return;
2766
2767 if (sb_rdonly(fs_info->sb))
2768 return;
2769
2770 alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags);
2771 index = btrfs_bg_flags_to_raid_index(alloc_flags);
2772
2773 /* Scan the data space_info to find empty block groups. Take the second one. */
2774 again:
2775 bg_list = &space_info->block_groups[index];
2776 list_for_each_entry(bg, bg_list, list) {
2777 if (bg->alloc_offset != 0)
2778 continue;
2779
2780 if (first) {
2781 first = false;
2782 continue;
2783 }
2784
2785 if (space_info == data_sinfo) {
2786 /* Migrate the block group to the data relocation space_info. */
2787 struct btrfs_space_info *reloc_sinfo = data_sinfo->sub_group[0];
2788 int factor;
2789
2790 ASSERT(reloc_sinfo->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC,
2791 "reloc_sinfo->subgroup_id=%d", reloc_sinfo->subgroup_id);
2792 factor = btrfs_bg_type_to_factor(bg->flags);
2793
2794 down_write(&space_info->groups_sem);
2795 list_del_init(&bg->list);
2796 /* The list cannot be empty, as we picked the second empty block group. */
2797 ASSERT(!list_empty(&space_info->block_groups[index]));
2798 up_write(&space_info->groups_sem);
2799
2800 spin_lock(&space_info->lock);
2801 space_info->total_bytes -= bg->length;
2802 space_info->disk_total -= bg->length * factor;
2803 space_info->disk_total -= bg->zone_unusable;
2804 /* No allocation has ever happened in this block group. */
2805 ASSERT(bg->used == 0, "bg->used=%llu", bg->used);
2806 /* No super block in a block group on the zoned setup. */
2807 ASSERT(bg->bytes_super == 0, "bg->bytes_super=%llu", bg->bytes_super);
2808 spin_unlock(&space_info->lock);
2809
2810 bg->space_info = reloc_sinfo;
2811 if (reloc_sinfo->block_group_kobjs[index] == NULL)
2812 btrfs_sysfs_add_block_group_type(bg);
2813
2814 btrfs_add_bg_to_space_info(fs_info, bg);
2815 }
2816
2817 fs_info->data_reloc_bg = bg->start;
2818 set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &bg->runtime_flags);
2819 btrfs_zone_activate(bg);
2820
2821 return;
2822 }
2823
2824 if (did_chunk_alloc)
2825 return;
2826
2827 trans = btrfs_join_transaction(fs_info->tree_root);
2828 if (IS_ERR(trans))
2829 return;
2830
2831 /* Allocate new BG in the data relocation space_info. */
2832 space_info = data_sinfo->sub_group[0];
2833 ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC,
2834 "space_info->subgroup_id=%d", space_info->subgroup_id);
2835 ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE);
2836 btrfs_end_transaction(trans);
2837 if (ret == 1) {
2838 /*
2839 * We allocated a new block group in the data relocation space_info. We
2840 * can take that one.
2841 */
2842 first = false;
2843 did_chunk_alloc = true;
2844 goto again;
2845 }
2846 }
2847
2848 void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
2849 {
2850 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2851 struct btrfs_device *device;
2852
2853 if (!btrfs_is_zoned(fs_info))
2854 return;
2855
2856 mutex_lock(&fs_devices->device_list_mutex);
2857 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2858 if (device->zone_info) {
2859 vfree(device->zone_info->zone_cache);
2860 device->zone_info->zone_cache = NULL;
2861 }
2862 }
2863 mutex_unlock(&fs_devices->device_list_mutex);
2864 }
2865
2866 bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info)
2867 {
2868 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2869 struct btrfs_device *device;
2870 u64 total = btrfs_super_total_bytes(fs_info->super_copy);
2871 u64 used = 0;
2872 u64 factor;
2873
2874 ASSERT(btrfs_is_zoned(fs_info));
2875
2876 if (fs_info->bg_reclaim_threshold == 0)
2877 return false;
2878
2879 mutex_lock(&fs_devices->device_list_mutex);
2880 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2881 if (!device->bdev)
2882 continue;
2883
2884 used += device->bytes_used;
2885 }
2886 mutex_unlock(&fs_devices->device_list_mutex);
2887
2888 factor = div64_u64(used * 100, total);
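/*
 * Example with hypothetical numbers: 1T of total device bytes with
 * 800G used gives factor == 800G * 100 / 1T == 78, so this returns
 * true for any nonzero bg_reclaim_threshold up to 78.
 */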
2889 return factor >= fs_info->bg_reclaim_threshold;
2890 }
2891
2892 void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
2893 u64 length)
2894 {
2895 struct btrfs_block_group *block_group;
2896
2897 if (!btrfs_is_zoned(fs_info))
2898 return;
2899
2900 block_group = btrfs_lookup_block_group(fs_info, logical);
2901 /* It should be called on a previous data relocation block group. */
2902 ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
2903
2904 spin_lock(&block_group->lock);
2905 if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
2906 goto out;
2907
2908 /* All relocation extents are written. */
2909 if (block_group->start + block_group->alloc_offset == logical + length) {
2910 /*
2911 * Now, release this block group for further allocations and
2912 * zone finish.
2913 */
2914 clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2915 &block_group->runtime_flags);
2916 }
2917
2918 out:
2919 spin_unlock(&block_group->lock);
2920 btrfs_put_block_group(block_group);
2921 }
2922
2923 int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
2924 {
2925 struct btrfs_block_group *block_group;
2926 struct btrfs_block_group *min_bg = NULL;
2927 u64 min_avail = U64_MAX;
2928 int ret;
2929
2930 spin_lock(&fs_info->zone_active_bgs_lock);
2931 list_for_each_entry(block_group, &fs_info->zone_active_bgs,
2932 active_bg_list) {
2933 u64 avail;
2934
2935 spin_lock(&block_group->lock);
2936 if (block_group->reserved || block_group->alloc_offset == 0 ||
2937 !(block_group->flags & BTRFS_BLOCK_GROUP_DATA) ||
2938 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
2939 spin_unlock(&block_group->lock);
2940 continue;
2941 }
2942
2943 avail = block_group->zone_capacity - block_group->alloc_offset;
2944 if (min_avail > avail) {
2945 if (min_bg)
2946 btrfs_put_block_group(min_bg);
2947 min_bg = block_group;
2948 min_avail = avail;
2949 btrfs_get_block_group(min_bg);
2950 }
2951 spin_unlock(&block_group->lock);
2952 }
2953 spin_unlock(&fs_info->zone_active_bgs_lock);
2954
2955 if (!min_bg)
2956 return 0;
2957
2958 ret = btrfs_zone_finish(min_bg);
2959 btrfs_put_block_group(min_bg);
2960
2961 return ret < 0 ? ret : 1;
2962 }
2963
2964 int btrfs_zoned_activate_one_bg(struct btrfs_space_info *space_info, bool do_finish)
2965 {
2966 struct btrfs_fs_info *fs_info = space_info->fs_info;
2967 struct btrfs_block_group *bg;
2968 int index;
2969
2970 if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
2971 return 0;
2972
2973 for (;;) {
2974 int ret;
2975 bool need_finish = false;
2976
2977 down_read(&space_info->groups_sem);
2978 for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
2979 list_for_each_entry(bg, &space_info->block_groups[index],
2980 list) {
2981 if (!spin_trylock(&bg->lock))
2982 continue;
2983 if (btrfs_zoned_bg_is_full(bg) ||
2984 test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2985 &bg->runtime_flags)) {
2986 spin_unlock(&bg->lock);
2987 continue;
2988 }
2989 spin_unlock(&bg->lock);
2990
2991 if (btrfs_zone_activate(bg)) {
2992 up_read(&space_info->groups_sem);
2993 return 1;
2994 }
2995
2996 need_finish = true;
2997 }
2998 }
2999 up_read(&space_info->groups_sem);
3000
3001 if (!do_finish || !need_finish)
3002 break;
3003
3004 ret = btrfs_zone_finish_one_bg(fs_info);
3005 if (ret == 0)
3006 break;
3007 if (ret < 0)
3008 return ret;
3009 }
3010
3011 return 0;
3012 }
3013
3014 /*
3015 * Reserve zones for one metadata block group, one tree-log block group, and one
3016 * system block group.
3017 */
3018 void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
3019 {
3020 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3021 struct btrfs_block_group *block_group;
3022 struct btrfs_device *device;
3023 /* Reserve zones for normal SINGLE metadata and tree-log block group. */
3024 unsigned int metadata_reserve = 2;
3025 /* Reserve a zone for SINGLE system block group. */
3026 unsigned int system_reserve = 1;
3027
3028 if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
3029 return;
3030
3031 /*
3032 * This function is called from the mount context. So, there is no
3033 * parallel process touching the bits. No need for read_seqretry().
3034 */
3035 if (fs_info->avail_metadata_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
3036 metadata_reserve = 4;
3037 if (fs_info->avail_system_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
3038 system_reserve = 2;
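/*
 * Reservation arithmetic, for illustration: with SINGLE metadata and
 * SINGLE system profiles each device reserves 2 + 1 == 3 active zones;
 * with DUP for both it is 4 + 2 == 6. Each stripe of an already active
 * metadata/system block group then returns one zone below.
 */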
3039
3040 /* Apply the reservation on all the devices. */
3041 mutex_lock(&fs_devices->device_list_mutex);
3042 list_for_each_entry(device, &fs_devices->devices, dev_list) {
3043 if (!device->bdev)
3044 continue;
3045
3046 device->zone_info->reserved_active_zones =
3047 metadata_reserve + system_reserve;
3048 }
3049 mutex_unlock(&fs_devices->device_list_mutex);
3050
3051 /* Release reservation for currently active block groups. */
3052 spin_lock(&fs_info->zone_active_bgs_lock);
3053 list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
3054 struct btrfs_chunk_map *map = block_group->physical_map;
3055
3056 if (!(block_group->flags &
3057 (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
3058 continue;
3059
3060 for (int i = 0; i < map->num_stripes; i++)
3061 map->stripes[i].dev->zone_info->reserved_active_zones--;
3062 }
3063 spin_unlock(&fs_info->zone_active_bgs_lock);
3064 }
3065
3066 /*
3067 * Reset the zones of unused block groups from @space_info->bytes_zone_unusable.
3068 *
3069 * @space_info: the space to work on
3070 * @num_bytes: targeting reclaim bytes
3071 *
3072 * This one resets the zones of a block group, so we can reuse the region
3073 * without removing the block group. On the other hand, btrfs_delete_unused_bgs()
3074 * just removes a block group and frees up the underlying zones. So, we still
3075 * need to allocate a new block group to reuse the zones.
3076 *
3077 * Resetting is faster than deleting/recreating a block group. It is similar
3078 * to freeing the logical space on the regular mode. However, we cannot change
3079 * the block group's profile with this operation.
3080 */
3081 int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info, u64 num_bytes)
3082 {
3083 struct btrfs_fs_info *fs_info = space_info->fs_info;
3084 const sector_t zone_size_sectors = fs_info->zone_size >> SECTOR_SHIFT;
3085
3086 if (!btrfs_is_zoned(fs_info))
3087 return 0;
3088
3089 while (num_bytes > 0) {
3090 struct btrfs_chunk_map *map;
3091 struct btrfs_block_group *bg = NULL;
3092 bool found = false;
3093 u64 reclaimed = 0;
3094
3095 /*
3096 * Here, we choose a fully zone_unusable block group. It's
3097 * technically possible to reset a partly zone_unusable block
3098 * group, which still has some free space left. However,
3099 * handling that needs to cope with the allocation side, which
3100 * makes the logic more complex. So, let's handle the easy case
3101 * for now.
3102 */
3103 spin_lock(&fs_info->unused_bgs_lock);
3104 list_for_each_entry(bg, &fs_info->unused_bgs, bg_list) {
3105 if ((bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != space_info->flags)
3106 continue;
3107
3108 /*
3109 * Use trylock to avoid locking order violation. In
3110 * btrfs_reclaim_bgs_work(), the lock order is
3111 * &bg->lock -> &fs_info->unused_bgs_lock. We skip a
3112 * block group if we cannot take its lock.
3113 */
3114 if (!spin_trylock(&bg->lock))
3115 continue;
3116 if (btrfs_is_block_group_used(bg) || bg->zone_unusable < bg->length) {
3117 spin_unlock(&bg->lock);
3118 continue;
3119 }
3120 spin_unlock(&bg->lock);
3121 found = true;
3122 break;
3123 }
3124 if (!found) {
3125 spin_unlock(&fs_info->unused_bgs_lock);
3126 return 0;
3127 }
3128
3129 list_del_init(&bg->bg_list);
3130 btrfs_put_block_group(bg);
3131 spin_unlock(&fs_info->unused_bgs_lock);
3132
3133 /*
3134 * Since the block group is fully zone_unusable and we cannot
3135 * allocate from this block group anymore, we don't need to set
3136 * this block group read-only.
3137 */
3138
3139 down_read(&fs_info->dev_replace.rwsem);
3140 map = bg->physical_map;
3141 for (int i = 0; i < map->num_stripes; i++) {
3142 struct btrfs_io_stripe *stripe = &map->stripes[i];
3143 unsigned int nofs_flags;
3144 int ret;
3145
3146 nofs_flags = memalloc_nofs_save();
3147 ret = blkdev_zone_mgmt(stripe->dev->bdev, REQ_OP_ZONE_RESET,
3148 stripe->physical >> SECTOR_SHIFT,
3149 zone_size_sectors);
3150 memalloc_nofs_restore(nofs_flags);
3151
3152 if (ret) {
3153 up_read(&fs_info->dev_replace.rwsem);
3154 return ret;
3155 }
3156 }
3157 up_read(&fs_info->dev_replace.rwsem);
3158
3159 spin_lock(&space_info->lock);
3160 spin_lock(&bg->lock);
3161 ASSERT(!btrfs_is_block_group_used(bg));
3162 if (bg->ro) {
3163 spin_unlock(&bg->lock);
3164 spin_unlock(&space_info->lock);
3165 continue;
3166 }
3167
3168 reclaimed = bg->alloc_offset;
3169 bg->zone_unusable = bg->length - bg->zone_capacity;
3170 bg->alloc_offset = 0;
3171 /*
3172 * This holds because we currently only reset block groups that
3173 * were fully used and then completely freed.
3174 */
3175 ASSERT(reclaimed == bg->zone_capacity,
3176 "reclaimed=%llu bg->zone_capacity=%llu", reclaimed, bg->zone_capacity);
3177 bg->free_space_ctl->free_space += reclaimed;
3178 space_info->bytes_zone_unusable -= reclaimed;
3179 spin_unlock(&bg->lock);
3180 btrfs_return_free_space(space_info, reclaimed);
3181 spin_unlock(&space_info->lock);
3182
3183 if (num_bytes <= reclaimed)
3184 break;
3185 num_bytes -= reclaimed;
3186 }
3187
3188 return 0;
3189 }
3190
3191 void btrfs_show_zoned_stats(struct btrfs_fs_info *fs_info, struct seq_file *seq)
3192 {
3193 struct btrfs_block_group *bg;
3194 u64 data_reloc_bg;
3195 u64 treelog_bg;
3196
3197 seq_puts(seq, "\n zoned statistics:\n");
3198
3199 spin_lock(&fs_info->zone_active_bgs_lock);
3200 seq_printf(seq, "\tactive block-groups: %zu\n",
3201 list_count_nodes(&fs_info->zone_active_bgs));
3202 spin_unlock(&fs_info->zone_active_bgs_lock);
3203
3204 spin_lock(&fs_info->unused_bgs_lock);
3205 seq_printf(seq, "\t reclaimable: %zu\n",
3206 list_count_nodes(&fs_info->reclaim_bgs));
3207 seq_printf(seq, "\t unused: %zu\n", list_count_nodes(&fs_info->unused_bgs));
3208 spin_unlock(&fs_info->unused_bgs_lock);
3209
3210 seq_printf(seq, "\t need reclaim: %s\n",
3211 str_true_false(btrfs_zoned_should_reclaim(fs_info)));
3212
3213 data_reloc_bg = data_race(fs_info->data_reloc_bg);
3214 if (data_reloc_bg)
3215 seq_printf(seq, "\tdata relocation block-group: %llu\n",
3216 data_reloc_bg);
3217 treelog_bg = data_race(fs_info->treelog_bg);
3218 if (treelog_bg)
3219 seq_printf(seq, "\ttree-log block-group: %llu\n", treelog_bg);
3220
3221 spin_lock(&fs_info->zone_active_bgs_lock);
3222 seq_puts(seq, "\tactive zones:\n");
3223 list_for_each_entry(bg, &fs_info->zone_active_bgs, active_bg_list) {
3224 u64 start;
3225 u64 alloc_offset;
3226 u64 used;
3227 u64 reserved;
3228 u64 zone_unusable;
3229 const char *typestr = btrfs_space_info_type_str(bg->space_info);
3230
3231 spin_lock(&bg->lock);
3232 start = bg->start;
3233 alloc_offset = bg->alloc_offset;
3234 used = bg->used;
3235 reserved = bg->reserved;
3236 zone_unusable = bg->zone_unusable;
3237 spin_unlock(&bg->lock);
3238
3239 seq_printf(seq,
3240 "\t start: %llu, wp: %llu used: %llu, reserved: %llu, unusable: %llu (%s)\n",
3241 start, alloc_offset, used, reserved, zone_unusable, typestr);
3242 }
3243 spin_unlock(&fs_info->zone_active_bgs_lock);
3244 }
3245