1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/blkdev.h>
6 #include <linux/sched/mm.h>
7 #include <linux/atomic.h>
8 #include <linux/vmalloc.h>
9 #include "ctree.h"
10 #include "volumes.h"
11 #include "zoned.h"
12 #include "disk-io.h"
13 #include "block-group.h"
14 #include "dev-replace.h"
15 #include "space-info.h"
16 #include "fs.h"
17 #include "accessors.h"
18 #include "bio.h"
19 #include "transaction.h"
20 #include "sysfs.h"
21
22 /* Maximum number of zones to report per blkdev_report_zones() call */
23 #define BTRFS_REPORT_NR_ZONES 4096
24 /* Invalid allocation pointer value for missing devices */
25 #define WP_MISSING_DEV ((u64)-1)
26 /* Pseudo write pointer value for conventional zone */
27 #define WP_CONVENTIONAL ((u64)-2)
28
29 /*
30 * Location of the first zone of superblock logging zone pairs.
31 *
32 * - primary superblock: 0B (zone 0)
33 * - first copy: 512G (zone starting at that offset)
34 * - second copy: 4T (zone starting at that offset)
35 */
36 #define BTRFS_SB_LOG_PRIMARY_OFFSET (0ULL)
37 #define BTRFS_SB_LOG_FIRST_OFFSET (512ULL * SZ_1G)
38 #define BTRFS_SB_LOG_SECOND_OFFSET (4096ULL * SZ_1G)
39
40 #define BTRFS_SB_LOG_FIRST_SHIFT ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
41 #define BTRFS_SB_LOG_SECOND_SHIFT ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
42
43 /* Number of superblock log zones */
44 #define BTRFS_NR_SB_LOG_ZONES 2
45
46 /* Default number of max active zones when the device has no limits. */
47 #define BTRFS_DEFAULT_MAX_ACTIVE_ZONES 128
48
49 /*
 * Minimum number of active zones we need:
51 *
52 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
53 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
54 * - 1 zone for tree-log dedicated block group
55 * - 1 zone for relocation
56 */
57 #define BTRFS_MIN_ACTIVE_ZONES (BTRFS_SUPER_MIRROR_MAX + 5)
58
59 /*
60 * Minimum / maximum supported zone size. Currently, SMR disks have a zone
61 * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
62 * We do not expect the zone size to become larger than 8GiB or smaller than
63 * 4MiB in the near future.
64 */
65 #define BTRFS_MAX_ZONE_SIZE SZ_8G
66 #define BTRFS_MIN_ZONE_SIZE SZ_4M
67
68 #define SUPER_INFO_SECTORS ((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
69
70 static void wait_eb_writebacks(struct btrfs_block_group *block_group);
71 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written);
72
static inline bool sb_zone_is_full(const struct blk_zone *zone)
74 {
75 return (zone->cond == BLK_ZONE_COND_FULL) ||
76 (zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
77 }
78
static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
80 {
81 struct blk_zone *zones = data;
82
83 memcpy(&zones[idx], zone, sizeof(*zone));
84
85 return 0;
86 }
87
static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
89 u64 *wp_ret)
90 {
91 bool empty[BTRFS_NR_SB_LOG_ZONES];
92 bool full[BTRFS_NR_SB_LOG_ZONES];
93 sector_t sector;
94
95 for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
96 ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL,
97 "zones[%d].type=%d", i, zones[i].type);
98 empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
99 full[i] = sb_zone_is_full(&zones[i]);
100 }
101
102 /*
103 * Possible states of log buffer zones
104 *
105 * Empty[0] In use[0] Full[0]
106 * Empty[1] * 0 1
107 * In use[1] x x 1
108 * Full[1] 0 0 C
109 *
110 * Log position:
111 * *: Special case, no superblock is written
112 * 0: Use write pointer of zones[0]
113 * 1: Use write pointer of zones[1]
114 * C: Compare super blocks from zones[0] and zones[1], use the latest
115 * one determined by generation
116 * x: Invalid state
117 */
118
119 if (empty[0] && empty[1]) {
120 /* Special case to distinguish no superblock to read */
121 *wp_ret = zones[0].start << SECTOR_SHIFT;
122 return -ENOENT;
123 } else if (full[0] && full[1]) {
124 /* Compare two super blocks */
125 struct address_space *mapping = bdev->bd_mapping;
126 struct page *page[BTRFS_NR_SB_LOG_ZONES];
127 struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
128
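		/*
		 * Read the last superblock slot below each zone's capacity so
		 * the generations of the two copies can be compared.
		 */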
129 for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
130 u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
131 u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
132 BTRFS_SUPER_INFO_SIZE;
133
134 page[i] = read_cache_page_gfp(mapping,
135 bytenr >> PAGE_SHIFT, GFP_NOFS);
136 if (IS_ERR(page[i])) {
137 if (i == 1)
138 btrfs_release_disk_super(super[0]);
139 return PTR_ERR(page[i]);
140 }
141 super[i] = page_address(page[i]);
142 }
143
144 if (btrfs_super_generation(super[0]) >
145 btrfs_super_generation(super[1]))
146 sector = zones[1].start;
147 else
148 sector = zones[0].start;
149
150 for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
151 btrfs_release_disk_super(super[i]);
152 } else if (!full[0] && (empty[1] || full[1])) {
153 sector = zones[0].wp;
154 } else if (full[0]) {
155 sector = zones[1].wp;
156 } else {
157 return -EUCLEAN;
158 }
159 *wp_ret = sector << SECTOR_SHIFT;
160 return 0;
161 }
162
163 /*
164 * Get the first zone number of the superblock mirror
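 *
 * For example (illustrative numbers), with a 256MiB zone size
 * (zone_size_shift = 28) mirror 1 maps to zone 2048 (512GiB / 256MiB) and
 * mirror 2 to zone 16384 (4TiB / 256MiB); mirror 0 is always zone 0.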
165 */
static inline u32 sb_zone_number(int shift, int mirror)
167 {
168 u64 zone = U64_MAX;
169
170 ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX, "mirror=%d", mirror);
171 switch (mirror) {
172 case 0: zone = 0; break;
173 case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
174 case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
175 }
176
177 ASSERT(zone <= U32_MAX, "zone=%llu", zone);
178
179 return (u32)zone;
180 }
181
static inline sector_t zone_start_sector(u32 zone_number,
183 struct block_device *bdev)
184 {
185 return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
186 }
187
static inline u64 zone_start_physical(u32 zone_number,
189 struct btrfs_zoned_device_info *zone_info)
190 {
191 return (u64)zone_number << zone_info->zone_size_shift;
192 }
193
194 /*
 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
 * device into fixed-size chunks and fakes a conventional zone on each of
 * them.
198 */
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
200 struct blk_zone *zones, unsigned int nr_zones)
201 {
202 const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
203 sector_t bdev_size = bdev_nr_sectors(device->bdev);
204 unsigned int i;
205
206 pos >>= SECTOR_SHIFT;
207 for (i = 0; i < nr_zones; i++) {
208 zones[i].start = i * zone_sectors + pos;
209 zones[i].len = zone_sectors;
210 zones[i].capacity = zone_sectors;
211 zones[i].wp = zones[i].start + zone_sectors;
212 zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
213 zones[i].cond = BLK_ZONE_COND_NOT_WP;
214
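		/* Include this final zone and stop once the device end is reached. */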
215 if (zones[i].wp >= bdev_size) {
216 i++;
217 break;
218 }
219 }
220
221 return i;
222 }
223
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
225 struct blk_zone *zones, unsigned int *nr_zones)
226 {
227 struct btrfs_zoned_device_info *zinfo = device->zone_info;
228 int ret;
229
230 if (!*nr_zones)
231 return 0;
232
233 if (!bdev_is_zoned(device->bdev)) {
234 ret = emulate_report_zones(device, pos, zones, *nr_zones);
235 *nr_zones = ret;
236 return 0;
237 }
238
239 /* Check cache */
240 if (zinfo->zone_cache) {
241 unsigned int i;
242 u32 zno;
243
244 ASSERT(IS_ALIGNED(pos, zinfo->zone_size),
245 "pos=%llu zinfo->zone_size=%llu", pos, zinfo->zone_size);
246 zno = pos >> zinfo->zone_size_shift;
247 /*
		 * We cannot report zones beyond the last zone of the device,
		 * so it is OK to cap *nr_zones accordingly.
250 */
251 *nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);
252
253 for (i = 0; i < *nr_zones; i++) {
254 struct blk_zone *zone_info;
255
256 zone_info = &zinfo->zone_cache[zno + i];
257 if (!zone_info->len)
258 break;
259 }
260
261 if (i == *nr_zones) {
262 /* Cache hit on all the zones */
263 memcpy(zones, zinfo->zone_cache + zno,
264 sizeof(*zinfo->zone_cache) * *nr_zones);
265 return 0;
266 }
267 }
268
269 ret = blkdev_report_zones_cached(device->bdev, pos >> SECTOR_SHIFT,
270 *nr_zones, copy_zone_info_cb, zones);
271 if (ret < 0) {
272 btrfs_err(device->fs_info,
273 "zoned: failed to read zone %llu on %s (devid %llu)",
274 pos, rcu_dereference(device->name),
275 device->devid);
276 return ret;
277 }
278 *nr_zones = ret;
279 if (unlikely(!ret))
280 return -EIO;
281
282 /* Populate cache */
283 if (zinfo->zone_cache) {
284 u32 zno = pos >> zinfo->zone_size_shift;
285
286 memcpy(zinfo->zone_cache + zno, zones,
287 sizeof(*zinfo->zone_cache) * *nr_zones);
288 }
289
290 return 0;
291 }
292
/* The emulated zone size is determined from the size of the first device extent. */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
295 {
296 BTRFS_PATH_AUTO_FREE(path);
297 struct btrfs_root *root = fs_info->dev_root;
298 struct btrfs_key key;
299 struct extent_buffer *leaf;
300 struct btrfs_dev_extent *dext;
301 int ret = 0;
302
303 key.objectid = 1;
304 key.type = BTRFS_DEV_EXTENT_KEY;
305 key.offset = 0;
306
307 path = btrfs_alloc_path();
308 if (!path)
309 return -ENOMEM;
310
311 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
312 if (ret < 0)
313 return ret;
314
315 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
316 ret = btrfs_next_leaf(root, path);
317 if (ret < 0)
318 return ret;
319 /* No dev extents at all? Not good */
320 if (unlikely(ret > 0))
321 return -EUCLEAN;
322 }
323
324 leaf = path->nodes[0];
325 dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
326 fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
327 return 0;
328 }
329
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
331 {
332 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
333 struct btrfs_device *device;
334 int ret = 0;
335
	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
337 if (!btrfs_fs_incompat(fs_info, ZONED))
338 return 0;
339
340 mutex_lock(&fs_devices->device_list_mutex);
341 list_for_each_entry(device, &fs_devices->devices, dev_list) {
342 /* We can skip reading of zone info for missing devices */
343 if (!device->bdev)
344 continue;
345
346 ret = btrfs_get_dev_zone_info(device, true);
347 if (ret)
348 break;
349 }
350 mutex_unlock(&fs_devices->device_list_mutex);
351
352 return ret;
353 }
354
int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
356 {
357 struct btrfs_fs_info *fs_info = device->fs_info;
358 struct btrfs_zoned_device_info *zone_info = NULL;
359 struct block_device *bdev = device->bdev;
360 unsigned int max_active_zones;
361 unsigned int nactive;
362 sector_t nr_sectors;
363 sector_t sector = 0;
364 struct blk_zone *zones = NULL;
365 unsigned int i, nreported = 0, nr_zones;
366 sector_t zone_sectors;
367 char *model, *emulated;
368 int ret;
369
370 /*
371 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
372 * yet be set.
373 */
374 if (!btrfs_fs_incompat(fs_info, ZONED))
375 return 0;
376
377 if (device->zone_info)
378 return 0;
379
380 zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
381 if (!zone_info)
382 return -ENOMEM;
383
384 device->zone_info = zone_info;
385
386 if (!bdev_is_zoned(bdev)) {
387 if (!fs_info->zone_size) {
388 ret = calculate_emulated_zone_size(fs_info);
389 if (ret)
390 goto out;
391 }
392
393 ASSERT(fs_info->zone_size);
394 zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
395 } else {
396 zone_sectors = bdev_zone_sectors(bdev);
397 }
398
399 ASSERT(is_power_of_two_u64(zone_sectors));
400 zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
401
402 /* We reject devices with a zone size larger than 8GB */
403 if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
404 btrfs_err(fs_info,
405 "zoned: %s: zone size %llu larger than supported maximum %llu",
406 rcu_dereference(device->name),
407 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
408 ret = -EINVAL;
409 goto out;
410 } else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
411 btrfs_err(fs_info,
412 "zoned: %s: zone size %llu smaller than supported minimum %u",
413 rcu_dereference(device->name),
414 zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
415 ret = -EINVAL;
416 goto out;
417 }
418
419 nr_sectors = bdev_nr_sectors(bdev);
420 zone_info->zone_size_shift = ilog2(zone_info->zone_size);
421 zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
422 if (!IS_ALIGNED(nr_sectors, zone_sectors))
423 zone_info->nr_zones++;
424
425 max_active_zones = min_not_zero(bdev_max_active_zones(bdev),
426 bdev_max_open_zones(bdev));
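	/* If the device reports no limit, emulate one on devices with many zones. */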
427 if (!max_active_zones && zone_info->nr_zones > BTRFS_DEFAULT_MAX_ACTIVE_ZONES)
428 max_active_zones = BTRFS_DEFAULT_MAX_ACTIVE_ZONES;
429 if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
430 btrfs_err(fs_info,
431 "zoned: %s: max active zones %u is too small, need at least %u active zones",
432 rcu_dereference(device->name), max_active_zones,
433 BTRFS_MIN_ACTIVE_ZONES);
434 ret = -EINVAL;
435 goto out;
436 }
437 zone_info->max_active_zones = max_active_zones;
438
439 zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
440 if (!zone_info->seq_zones) {
441 ret = -ENOMEM;
442 goto out;
443 }
444
445 zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
446 if (!zone_info->empty_zones) {
447 ret = -ENOMEM;
448 goto out;
449 }
450
451 zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
452 if (!zone_info->active_zones) {
453 ret = -ENOMEM;
454 goto out;
455 }
456
457 zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
458 if (!zones) {
459 ret = -ENOMEM;
460 goto out;
461 }
462
463 /*
464 * Enable zone cache only for a zoned device. On a non-zoned device, we
465 * fill the zone info with emulated CONVENTIONAL zones, so no need to
466 * use the cache.
467 */
468 if (populate_cache && bdev_is_zoned(device->bdev)) {
469 zone_info->zone_cache = vcalloc(zone_info->nr_zones,
470 sizeof(struct blk_zone));
471 if (!zone_info->zone_cache) {
472 btrfs_err(device->fs_info,
473 "zoned: failed to allocate zone cache for %s",
474 rcu_dereference(device->name));
475 ret = -ENOMEM;
476 goto out;
477 }
478 }
479
	/* Get the zone types */
481 nactive = 0;
482 while (sector < nr_sectors) {
483 nr_zones = BTRFS_REPORT_NR_ZONES;
484 ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
485 &nr_zones);
486 if (ret)
487 goto out;
488
489 for (i = 0; i < nr_zones; i++) {
490 if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
491 __set_bit(nreported, zone_info->seq_zones);
492 switch (zones[i].cond) {
493 case BLK_ZONE_COND_EMPTY:
494 __set_bit(nreported, zone_info->empty_zones);
495 break;
496 case BLK_ZONE_COND_IMP_OPEN:
497 case BLK_ZONE_COND_EXP_OPEN:
498 case BLK_ZONE_COND_CLOSED:
499 case BLK_ZONE_COND_ACTIVE:
500 __set_bit(nreported, zone_info->active_zones);
501 nactive++;
502 break;
503 }
504 nreported++;
505 }
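		/* Continue reporting from the zone following the last one returned. */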
506 sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
507 }
508
509 if (unlikely(nreported != zone_info->nr_zones)) {
510 btrfs_err(device->fs_info,
511 "inconsistent number of zones on %s (%u/%u)",
512 rcu_dereference(device->name), nreported,
513 zone_info->nr_zones);
514 ret = -EIO;
515 goto out;
516 }
517
518 if (max_active_zones) {
519 if (unlikely(nactive > max_active_zones)) {
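			/*
			 * If the device itself reports no max_active_zones
			 * limit, the limit in use is only emulated, so drop
			 * it instead of failing the mount.
			 */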
520 if (bdev_max_active_zones(bdev) == 0) {
521 max_active_zones = 0;
522 zone_info->max_active_zones = 0;
523 goto validate;
524 }
525 btrfs_err(device->fs_info,
526 "zoned: %u active zones on %s exceeds max_active_zones %u",
527 nactive, rcu_dereference(device->name),
528 max_active_zones);
529 ret = -EIO;
530 goto out;
531 }
532 atomic_set(&zone_info->active_zones_left,
533 max_active_zones - nactive);
534 set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
535 }
536
537 validate:
538 /* Validate superblock log */
539 nr_zones = BTRFS_NR_SB_LOG_ZONES;
540 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
541 u32 sb_zone;
542 u64 sb_wp;
543 int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;
544
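		/* Each superblock mirror logs to a pair of consecutive zones. */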
545 sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
546 if (sb_zone + 1 >= zone_info->nr_zones)
547 continue;
548
549 ret = btrfs_get_dev_zones(device,
550 zone_start_physical(sb_zone, zone_info),
551 &zone_info->sb_zones[sb_pos],
552 &nr_zones);
553 if (ret)
554 goto out;
555
556 if (unlikely(nr_zones != BTRFS_NR_SB_LOG_ZONES)) {
557 btrfs_err(device->fs_info,
558 "zoned: failed to read super block log zone info at devid %llu zone %u",
559 device->devid, sb_zone);
560 ret = -EUCLEAN;
561 goto out;
562 }
563
564 /*
565 * If zones[0] is conventional, always use the beginning of the
566 * zone to record superblock. No need to validate in that case.
567 */
568 if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
569 BLK_ZONE_TYPE_CONVENTIONAL)
570 continue;
571
572 ret = sb_write_pointer(device->bdev,
573 &zone_info->sb_zones[sb_pos], &sb_wp);
574 if (unlikely(ret != -ENOENT && ret)) {
575 btrfs_err(device->fs_info,
576 "zoned: super block log zone corrupted devid %llu zone %u",
577 device->devid, sb_zone);
578 ret = -EUCLEAN;
579 goto out;
580 }
581 }
582
584 kvfree(zones);
585
586 if (bdev_is_zoned(bdev)) {
587 model = "host-managed zoned";
588 emulated = "";
589 } else {
590 model = "regular";
591 emulated = "emulated ";
592 }
593
594 btrfs_info(fs_info,
595 "%s block device %s, %u %szones of %llu bytes",
596 model, rcu_dereference(device->name), zone_info->nr_zones,
597 emulated, zone_info->zone_size);
598
599 return 0;
600
601 out:
602 kvfree(zones);
603 btrfs_destroy_dev_zone_info(device);
604 return ret;
605 }
606
void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
608 {
609 struct btrfs_zoned_device_info *zone_info = device->zone_info;
610
611 if (!zone_info)
612 return;
613
614 bitmap_free(zone_info->active_zones);
615 bitmap_free(zone_info->seq_zones);
616 bitmap_free(zone_info->empty_zones);
617 vfree(zone_info->zone_cache);
618 kfree(zone_info);
619 device->zone_info = NULL;
620 }
621
struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
623 {
624 struct btrfs_zoned_device_info *zone_info;
625
626 zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
627 if (!zone_info)
628 return NULL;
629
630 zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
631 if (!zone_info->seq_zones)
632 goto out;
633
634 bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
635 zone_info->nr_zones);
636
637 zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
638 if (!zone_info->empty_zones)
639 goto out;
640
641 bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
642 zone_info->nr_zones);
643
644 zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
645 if (!zone_info->active_zones)
646 goto out;
647
648 bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
649 zone_info->nr_zones);
650 zone_info->zone_cache = NULL;
651
652 return zone_info;
653
654 out:
655 bitmap_free(zone_info->seq_zones);
656 bitmap_free(zone_info->empty_zones);
657 bitmap_free(zone_info->active_zones);
658 kfree(zone_info);
659 return NULL;
660 }
661
static int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos, struct blk_zone *zone)
663 {
664 unsigned int nr_zones = 1;
665 int ret;
666
667 ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
668 if (ret != 0 || !nr_zones)
669 return ret ? ret : -EIO;
670
671 return 0;
672 }
673
static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
675 {
676 struct btrfs_device *device;
677
678 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
679 if (device->bdev && bdev_is_zoned(device->bdev)) {
680 btrfs_err(fs_info,
681 "zoned: mode not enabled but zoned device found: %pg",
682 device->bdev);
683 return -EINVAL;
684 }
685 }
686
687 return 0;
688 }
689
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
691 {
692 struct queue_limits *lim = &fs_info->limits;
693 struct btrfs_device *device;
694 u64 zone_size = 0;
695 int ret;
696
697 /*
	 * Host-managed devices can't be used without the ZONED flag. With the
	 * ZONED flag, all devices can be used, using zone emulation if required.
700 */
701 if (!btrfs_fs_incompat(fs_info, ZONED))
702 return btrfs_check_for_zoned_device(fs_info);
703
704 blk_set_stacking_limits(lim);
705
706 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
707 struct btrfs_zoned_device_info *zone_info = device->zone_info;
708
709 if (!device->bdev)
710 continue;
711
712 if (!zone_size) {
713 zone_size = zone_info->zone_size;
714 } else if (zone_info->zone_size != zone_size) {
715 btrfs_err(fs_info,
716 "zoned: unequal block device zone sizes: have %llu found %llu",
717 zone_info->zone_size, zone_size);
718 return -EINVAL;
719 }
720
721 /*
		 * With zone emulation, we can have a non-zoned device in zoned
		 * mode. In this case, we don't have a valid max zone append
		 * size.
725 */
726 if (bdev_is_zoned(device->bdev))
727 blk_stack_limits(lim, bdev_limits(device->bdev), 0);
728 }
729
730 ret = blk_validate_limits(lim);
731 if (ret) {
732 btrfs_err(fs_info, "zoned: failed to validate queue limits");
733 return ret;
734 }
735
736 /*
737 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
738 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
739 * check the alignment here.
740 */
741 if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
742 btrfs_err(fs_info,
743 "zoned: zone size %llu not aligned to stripe %u",
744 zone_size, BTRFS_STRIPE_LEN);
745 return -EINVAL;
746 }
747
748 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
749 btrfs_err(fs_info, "zoned: mixed block groups not supported");
750 return -EINVAL;
751 }
752
753 fs_info->zone_size = zone_size;
754 /*
	 * Also limit max_zone_append_size by max_segments * PAGE_SIZE.
	 * Technically, a segment can span multiple pages, but since we add
	 * pages to a bio one by one and cannot increase the metadata
	 * reservation even if that increases the number of extents, it is
	 * safe to stick with this limit.
760 */
761 fs_info->max_zone_append_size = ALIGN_DOWN(
762 min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT,
763 (u64)lim->max_sectors << SECTOR_SHIFT,
764 (u64)lim->max_segments << PAGE_SHIFT),
765 fs_info->sectorsize);
766 fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
767
768 fs_info->max_extent_size = min_not_zero(fs_info->max_extent_size,
769 fs_info->max_zone_append_size);
770
771 /*
772 * Check mount options here, because we might change fs_info->zoned
773 * from fs_info->zone_size.
774 */
775 ret = btrfs_check_mountopts_zoned(fs_info, &fs_info->mount_opt);
776 if (ret)
777 return ret;
778
779 btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
780 return 0;
781 }
782
int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info,
784 unsigned long long *mount_opt)
785 {
786 if (!btrfs_is_zoned(info))
787 return 0;
788
789 /*
790 * Space cache writing is not COWed. Disable that to avoid write errors
791 * in sequential zones.
792 */
793 if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
794 btrfs_err(info, "zoned: space cache v1 is not supported");
795 return -EINVAL;
796 }
797
798 if (btrfs_raw_test_opt(*mount_opt, NODATACOW)) {
799 btrfs_err(info, "zoned: NODATACOW not supported");
800 return -EINVAL;
801 }
802
803 if (btrfs_raw_test_opt(*mount_opt, DISCARD_ASYNC)) {
804 btrfs_info(info,
805 "zoned: async discard ignored and disabled for zoned mode");
806 btrfs_clear_opt(*mount_opt, DISCARD_ASYNC);
807 }
808
809 return 0;
810 }
811
static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
813 int rw, u64 *bytenr_ret)
814 {
815 u64 wp;
816 int ret;
817
818 if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
819 *bytenr_ret = zones[0].start << SECTOR_SHIFT;
820 return 0;
821 }
822
823 ret = sb_write_pointer(bdev, zones, &wp);
824 if (ret != -ENOENT && ret < 0)
825 return ret;
826
827 if (rw == WRITE) {
828 struct blk_zone *reset = NULL;
829
830 if (wp == zones[0].start << SECTOR_SHIFT)
831 reset = &zones[0];
832 else if (wp == zones[1].start << SECTOR_SHIFT)
833 reset = &zones[1];
834
835 if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
836 unsigned int nofs_flags;
837
838 ASSERT(sb_zone_is_full(reset));
839
840 nofs_flags = memalloc_nofs_save();
841 ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
842 reset->start, reset->len);
843 memalloc_nofs_restore(nofs_flags);
844 if (ret)
845 return ret;
846
847 reset->cond = BLK_ZONE_COND_EMPTY;
848 reset->wp = reset->start;
849 }
850 } else if (ret != -ENOENT) {
851 /*
852 * For READ, we want the previous one. Move write pointer to
853 * the end of a zone, if it is at the head of a zone.
854 */
855 u64 zone_end = 0;
856
857 if (wp == zones[0].start << SECTOR_SHIFT)
858 zone_end = zones[1].start + zones[1].capacity;
859 else if (wp == zones[1].start << SECTOR_SHIFT)
860 zone_end = zones[0].start + zones[0].capacity;
861 if (zone_end)
862 wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
863 BTRFS_SUPER_INFO_SIZE);
864
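		/* Step back to the start of the most recently written superblock. */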
865 wp -= BTRFS_SUPER_INFO_SIZE;
866 }
867
868 *bytenr_ret = wp;
869 return 0;
870
871 }
872
int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
874 u64 *bytenr_ret)
875 {
876 struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
877 sector_t zone_sectors;
878 u32 sb_zone;
879 int ret;
880 u8 zone_sectors_shift;
881 sector_t nr_sectors;
882 u32 nr_zones;
883
884 if (!bdev_is_zoned(bdev)) {
885 *bytenr_ret = btrfs_sb_offset(mirror);
886 return 0;
887 }
888
889 ASSERT(rw == READ || rw == WRITE);
890
891 zone_sectors = bdev_zone_sectors(bdev);
892 if (!is_power_of_2(zone_sectors))
893 return -EINVAL;
894 zone_sectors_shift = ilog2(zone_sectors);
895 nr_sectors = bdev_nr_sectors(bdev);
896 nr_zones = nr_sectors >> zone_sectors_shift;
897
898 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
899 if (sb_zone + 1 >= nr_zones)
900 return -ENOENT;
901
902 ret = blkdev_report_zones_cached(bdev, zone_start_sector(sb_zone, bdev),
903 BTRFS_NR_SB_LOG_ZONES,
904 copy_zone_info_cb, zones);
905 if (ret < 0)
906 return ret;
907 if (unlikely(ret != BTRFS_NR_SB_LOG_ZONES))
908 return -EIO;
909
910 return sb_log_location(bdev, zones, rw, bytenr_ret);
911 }
912
int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
914 u64 *bytenr_ret)
915 {
916 struct btrfs_zoned_device_info *zinfo = device->zone_info;
917 u32 zone_num;
918
919 /*
920 * For a zoned filesystem on a non-zoned block device, use the same
921 * super block locations as regular filesystem. Doing so, the super
922 * block can always be retrieved and the zoned flag of the volume
923 * detected from the super block information.
924 */
925 if (!bdev_is_zoned(device->bdev)) {
926 *bytenr_ret = btrfs_sb_offset(mirror);
927 return 0;
928 }
929
930 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
931 if (zone_num + 1 >= zinfo->nr_zones)
932 return -ENOENT;
933
934 return sb_log_location(device->bdev,
935 &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
936 rw, bytenr_ret);
937 }
938
static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
940 int mirror)
941 {
942 u32 zone_num;
943
944 if (!zinfo)
945 return false;
946
947 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
948 if (zone_num + 1 >= zinfo->nr_zones)
949 return false;
950
951 if (!test_bit(zone_num, zinfo->seq_zones))
952 return false;
953
954 return true;
955 }
956
int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
958 {
959 struct btrfs_zoned_device_info *zinfo = device->zone_info;
960 struct blk_zone *zone;
961 int i;
962
963 if (!is_sb_log_zone(zinfo, mirror))
964 return 0;
965
966 zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
967 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		/* Advance to the next zone */
969 if (zone->cond == BLK_ZONE_COND_FULL) {
970 zone++;
971 continue;
972 }
973
974 if (zone->cond == BLK_ZONE_COND_EMPTY)
975 zone->cond = BLK_ZONE_COND_IMP_OPEN;
976
977 zone->wp += SUPER_INFO_SECTORS;
978
979 if (sb_zone_is_full(zone)) {
980 /*
981 * No room left to write new superblock. Since
982 * superblock is written with REQ_SYNC, it is safe to
983 * finish the zone now.
984 *
985 * If the write pointer is exactly at the capacity,
986 * explicit ZONE_FINISH is not necessary.
987 */
988 if (zone->wp != zone->start + zone->capacity) {
989 unsigned int nofs_flags;
990 int ret;
991
992 nofs_flags = memalloc_nofs_save();
993 ret = blkdev_zone_mgmt(device->bdev,
994 REQ_OP_ZONE_FINISH, zone->start,
995 zone->len);
996 memalloc_nofs_restore(nofs_flags);
997 if (ret)
998 return ret;
999 }
1000
1001 zone->wp = zone->start + zone->len;
1002 zone->cond = BLK_ZONE_COND_FULL;
1003 }
1004 return 0;
1005 }
1006
1007 /* All the zones are FULL. Should not reach here. */
1008 DEBUG_WARN("unexpected state, all zones full");
1009 return -EIO;
1010 }
1011
int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
1013 {
1014 unsigned int nofs_flags;
1015 sector_t zone_sectors;
1016 sector_t nr_sectors;
1017 u8 zone_sectors_shift;
1018 u32 sb_zone;
1019 u32 nr_zones;
1020 int ret;
1021
1022 zone_sectors = bdev_zone_sectors(bdev);
1023 zone_sectors_shift = ilog2(zone_sectors);
1024 nr_sectors = bdev_nr_sectors(bdev);
1025 nr_zones = nr_sectors >> zone_sectors_shift;
1026
1027 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
1028 if (sb_zone + 1 >= nr_zones)
1029 return -ENOENT;
1030
1031 nofs_flags = memalloc_nofs_save();
1032 ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1033 zone_start_sector(sb_zone, bdev),
1034 zone_sectors * BTRFS_NR_SB_LOG_ZONES);
1035 memalloc_nofs_restore(nofs_flags);
1036 return ret;
1037 }
1038
1039 /*
1040 * Find allocatable zones within a given region.
1041 *
1042 * @device: the device to allocate a region on
1043 * @hole_start: the position of the hole to allocate the region
1044 * @num_bytes: size of wanted region
1045 * @hole_end: the end of the hole
1046 * @return: position of allocatable zones
1047 *
1048 * Allocatable region should not contain any superblock locations.
1049 */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
1051 u64 hole_end, u64 num_bytes)
1052 {
1053 struct btrfs_zoned_device_info *zinfo = device->zone_info;
1054 const u8 shift = zinfo->zone_size_shift;
1055 u64 nzones = num_bytes >> shift;
1056 u64 pos = hole_start;
1057 u64 begin, end;
1058 bool have_sb;
1059 int i;
1060
1061 ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size),
1062 "hole_start=%llu zinfo->zone_size=%llu", hole_start, zinfo->zone_size);
1063 ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size),
1064 "num_bytes=%llu zinfo->zone_size=%llu", num_bytes, zinfo->zone_size);
1065
1066 while (pos < hole_end) {
1067 begin = pos >> shift;
1068 end = begin + nzones;
1069
1070 if (end > zinfo->nr_zones)
1071 return hole_end;
1072
1073 /* Check if zones in the region are all empty */
1074 if (btrfs_dev_is_sequential(device, pos) &&
1075 !bitmap_test_range_all_set(zinfo->empty_zones, begin, nzones)) {
1076 pos += zinfo->zone_size;
1077 continue;
1078 }
1079
1080 have_sb = false;
1081 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1082 u32 sb_zone;
1083 u64 sb_pos;
1084
1085 sb_zone = sb_zone_number(shift, i);
1086 if (!(end <= sb_zone ||
1087 sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
1088 have_sb = true;
1089 pos = zone_start_physical(
1090 sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
1091 break;
1092 }
1093
1094 /* We also need to exclude regular superblock positions */
1095 sb_pos = btrfs_sb_offset(i);
1096 if (!(pos + num_bytes <= sb_pos ||
1097 sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
1098 have_sb = true;
1099 pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
1100 zinfo->zone_size);
1101 break;
1102 }
1103 }
1104 if (!have_sb)
1105 break;
1106 }
1107
1108 return pos;
1109 }
1110
static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
1112 {
1113 struct btrfs_zoned_device_info *zone_info = device->zone_info;
1114 unsigned int zno = (pos >> zone_info->zone_size_shift);
1115
1116 /* We can use any number of zones */
1117 if (zone_info->max_active_zones == 0)
1118 return true;
1119
1120 if (!test_bit(zno, zone_info->active_zones)) {
1121 /* Active zone left? */
1122 if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
1123 return false;
1124 if (test_and_set_bit(zno, zone_info->active_zones)) {
1125 /* Someone already set the bit */
1126 atomic_inc(&zone_info->active_zones_left);
1127 }
1128 }
1129
1130 return true;
1131 }
1132
static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
1134 {
1135 struct btrfs_zoned_device_info *zone_info = device->zone_info;
1136 unsigned int zno = (pos >> zone_info->zone_size_shift);
1137
1138 /* We can use any number of zones */
1139 if (zone_info->max_active_zones == 0)
1140 return;
1141
1142 if (test_and_clear_bit(zno, zone_info->active_zones))
1143 atomic_inc(&zone_info->active_zones_left);
1144 }
1145
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
1147 u64 length, u64 *bytes)
1148 {
1149 unsigned int nofs_flags;
1150 int ret;
1151
1152 *bytes = 0;
1153 nofs_flags = memalloc_nofs_save();
1154 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
1155 physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT);
1156 memalloc_nofs_restore(nofs_flags);
1157 if (ret)
1158 return ret;
1159
1160 *bytes = length;
1161 while (length) {
1162 btrfs_dev_set_zone_empty(device, physical);
1163 btrfs_dev_clear_active_zone(device, physical);
1164 physical += device->zone_info->zone_size;
1165 length -= device->zone_info->zone_size;
1166 }
1167
1168 return 0;
1169 }
1170
int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
1172 {
1173 struct btrfs_zoned_device_info *zinfo = device->zone_info;
1174 const u8 shift = zinfo->zone_size_shift;
1175 unsigned long begin = start >> shift;
1176 unsigned long nbits = size >> shift;
1177 u64 pos;
1178 int ret;
1179
1180 ASSERT(IS_ALIGNED(start, zinfo->zone_size),
1181 "start=%llu, zinfo->zone_size=%llu", start, zinfo->zone_size);
1182 ASSERT(IS_ALIGNED(size, zinfo->zone_size),
1183 "size=%llu, zinfo->zone_size=%llu", size, zinfo->zone_size);
1184
1185 if (begin + nbits > zinfo->nr_zones)
1186 return -ERANGE;
1187
1188 /* All the zones are conventional */
1189 if (bitmap_test_range_all_zero(zinfo->seq_zones, begin, nbits))
1190 return 0;
1191
1192 /* All the zones are sequential and empty */
1193 if (bitmap_test_range_all_set(zinfo->seq_zones, begin, nbits) &&
1194 bitmap_test_range_all_set(zinfo->empty_zones, begin, nbits))
1195 return 0;
1196
1197 for (pos = start; pos < start + size; pos += zinfo->zone_size) {
1198 u64 reset_bytes;
1199
1200 if (!btrfs_dev_is_sequential(device, pos) ||
1201 btrfs_dev_is_empty_zone(device, pos))
1202 continue;
1203
1204 /* Free regions should be empty */
1205 btrfs_warn(
1206 device->fs_info,
1207 "zoned: resetting device %s (devid %llu) zone %llu for allocation",
1208 rcu_dereference(device->name), device->devid, pos >> shift);
1209 WARN_ON_ONCE(1);
1210
1211 ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
1212 &reset_bytes);
1213 if (ret)
1214 return ret;
1215 }
1216
1217 return 0;
1218 }
1219
1220 /*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consisting of conventional zones. It points to the end
 * of the highest-addressed extent in the block group, which is used as the
 * allocation offset.
1225 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
1227 u64 *offset_ret, bool new)
1228 {
1229 struct btrfs_fs_info *fs_info = cache->fs_info;
1230 struct btrfs_root *root;
1231 BTRFS_PATH_AUTO_FREE(path);
1232 struct btrfs_key key;
1233 struct btrfs_key found_key;
1234 int ret;
1235 u64 length;
1236
1237 /*
1238 * Avoid tree lookups for a new block group, there's no use for it.
1239 * It must always be 0.
1240 *
1241 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
	 * For a new block group, this function is called from
1243 * btrfs_make_block_group() which is already taking the chunk mutex.
1244 * Thus, we cannot call calculate_alloc_pointer() which takes extent
1245 * buffer locks to avoid deadlock.
1246 */
1247 if (new) {
1248 *offset_ret = 0;
1249 return 0;
1250 }
1251
1252 path = btrfs_alloc_path();
1253 if (!path)
1254 return -ENOMEM;
1255
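	/*
	 * Search for the slot just past the end of the block group and step
	 * back to the last extent item inside it.
	 */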
1256 key.objectid = cache->start + cache->length;
1257 key.type = 0;
1258 key.offset = 0;
1259
1260 root = btrfs_extent_root(fs_info, key.objectid);
1261 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1262 /* We should not find the exact match */
1263 if (unlikely(!ret))
1264 ret = -EUCLEAN;
1265 if (ret < 0)
1266 return ret;
1267
1268 ret = btrfs_previous_extent_item(root, path, cache->start);
1269 if (ret) {
1270 if (ret == 1) {
1271 ret = 0;
1272 *offset_ret = 0;
1273 }
1274 return ret;
1275 }
1276
1277 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
1278
1279 if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
1280 length = found_key.offset;
1281 else
1282 length = fs_info->nodesize;
1283
1284 if (unlikely(!(found_key.objectid >= cache->start &&
1285 found_key.objectid + length <= cache->start + cache->length))) {
1286 return -EUCLEAN;
1287 }
1288 *offset_ret = found_key.objectid + length - cache->start;
1289 return 0;
1290 }
1291
1292 struct zone_info {
1293 u64 physical;
1294 u64 capacity;
1295 u64 alloc_offset;
1296 };
1297
static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
1299 struct zone_info *info, unsigned long *active,
1300 struct btrfs_chunk_map *map, bool new)
1301 {
1302 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1303 struct btrfs_device *device;
1304 int dev_replace_is_ongoing = 0;
1305 unsigned int nofs_flag;
1306 struct blk_zone zone;
1307 int ret;
1308
1309 info->physical = map->stripes[zone_idx].physical;
1310
1311 down_read(&dev_replace->rwsem);
1312 device = map->stripes[zone_idx].dev;
1313
1314 if (!device->bdev) {
1315 up_read(&dev_replace->rwsem);
1316 info->alloc_offset = WP_MISSING_DEV;
1317 return 0;
1318 }
1319
1320 /* Consider a zone as active if we can allow any number of active zones. */
1321 if (!device->zone_info->max_active_zones)
1322 __set_bit(zone_idx, active);
1323
1324 if (!btrfs_dev_is_sequential(device, info->physical)) {
1325 up_read(&dev_replace->rwsem);
1326 info->alloc_offset = WP_CONVENTIONAL;
1327 info->capacity = device->zone_info->zone_size;
1328 return 0;
1329 }
1330
1331 ASSERT(!new || btrfs_dev_is_empty_zone(device, info->physical));
1332
1333 /* This zone will be used for allocation, so mark this zone non-empty. */
1334 btrfs_dev_clear_zone_empty(device, info->physical);
1335
1336 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
1337 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
1338 btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
1339
1340 /*
1341 * The group is mapped to a sequential zone. Get the zone write pointer
1342 * to determine the allocation offset within the zone.
1343 */
1344 WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
1345
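	/* A new block group starts empty; just record the zone capacity. */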
1346 if (new) {
1347 sector_t capacity;
1348
1349 capacity = bdev_zone_capacity(device->bdev, info->physical >> SECTOR_SHIFT);
1350 up_read(&dev_replace->rwsem);
1351 info->alloc_offset = 0;
1352 info->capacity = capacity << SECTOR_SHIFT;
1353
1354 return 0;
1355 }
1356
1357 nofs_flag = memalloc_nofs_save();
1358 ret = btrfs_get_dev_zone(device, info->physical, &zone);
1359 memalloc_nofs_restore(nofs_flag);
1360 if (ret) {
1361 up_read(&dev_replace->rwsem);
1362 if (ret != -EIO && ret != -EOPNOTSUPP)
1363 return ret;
1364 info->alloc_offset = WP_MISSING_DEV;
1365 return 0;
1366 }
1367
1368 if (unlikely(zone.type == BLK_ZONE_TYPE_CONVENTIONAL)) {
1369 btrfs_err(fs_info,
1370 "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
1371 zone.start << SECTOR_SHIFT, rcu_dereference(device->name),
1372 device->devid);
1373 up_read(&dev_replace->rwsem);
1374 return -EIO;
1375 }
1376
1377 info->capacity = (zone.capacity << SECTOR_SHIFT);
1378
1379 switch (zone.cond) {
1380 case BLK_ZONE_COND_OFFLINE:
1381 case BLK_ZONE_COND_READONLY:
1382 btrfs_err(fs_info,
1383 "zoned: offline/readonly zone %llu on device %s (devid %llu)",
1384 (info->physical >> device->zone_info->zone_size_shift),
1385 rcu_dereference(device->name), device->devid);
1386 info->alloc_offset = WP_MISSING_DEV;
1387 break;
1388 case BLK_ZONE_COND_EMPTY:
1389 info->alloc_offset = 0;
1390 break;
1391 case BLK_ZONE_COND_FULL:
1392 info->alloc_offset = info->capacity;
1393 break;
1394 default:
1395 /* Partially used zone. */
1396 info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
1397 __set_bit(zone_idx, active);
1398 break;
1399 }
1400
1401 up_read(&dev_replace->rwsem);
1402
1403 return 0;
1404 }
1405
static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
1407 struct zone_info *info,
1408 unsigned long *active)
1409 {
1410 if (unlikely(info->alloc_offset == WP_MISSING_DEV)) {
1411 btrfs_err(bg->fs_info,
1412 "zoned: cannot recover write pointer for zone %llu",
1413 info->physical);
1414 return -EIO;
1415 }
1416
1417 bg->alloc_offset = info->alloc_offset;
1418 bg->zone_capacity = info->capacity;
1419 if (test_bit(0, active))
1420 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1421 return 0;
1422 }
1423
static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
1425 struct btrfs_chunk_map *map,
1426 struct zone_info *zone_info,
1427 unsigned long *active,
1428 u64 last_alloc)
1429 {
1430 struct btrfs_fs_info *fs_info = bg->fs_info;
1431
1432 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1433 btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree");
1434 return -EINVAL;
1435 }
1436
1437 bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
1438
1439 if (unlikely(zone_info[0].alloc_offset == WP_MISSING_DEV)) {
1440 btrfs_err(bg->fs_info,
1441 "zoned: cannot recover write pointer for zone %llu",
1442 zone_info[0].physical);
1443 return -EIO;
1444 }
1445 if (unlikely(zone_info[1].alloc_offset == WP_MISSING_DEV)) {
1446 btrfs_err(bg->fs_info,
1447 "zoned: cannot recover write pointer for zone %llu",
1448 zone_info[1].physical);
1449 return -EIO;
1450 }
1451
1452 if (zone_info[0].alloc_offset == WP_CONVENTIONAL)
1453 zone_info[0].alloc_offset = last_alloc;
1454
1455 if (zone_info[1].alloc_offset == WP_CONVENTIONAL)
1456 zone_info[1].alloc_offset = last_alloc;
1457
1458 if (unlikely(zone_info[0].alloc_offset != zone_info[1].alloc_offset)) {
1459 btrfs_err(bg->fs_info,
1460 "zoned: write pointer offset mismatch of zones in DUP profile");
1461 return -EIO;
1462 }
1463
1464 if (test_bit(0, active) != test_bit(1, active)) {
1465 if (unlikely(!btrfs_zone_activate(bg)))
1466 return -EIO;
1467 } else if (test_bit(0, active)) {
1468 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1469 }
1470
1471 bg->alloc_offset = zone_info[0].alloc_offset;
1472 return 0;
1473 }
1474
static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
1476 struct btrfs_chunk_map *map,
1477 struct zone_info *zone_info,
1478 unsigned long *active,
1479 u64 last_alloc)
1480 {
1481 struct btrfs_fs_info *fs_info = bg->fs_info;
1482 int i;
1483
1484 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1485 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1486 btrfs_bg_type_to_raid_name(map->type));
1487 return -EINVAL;
1488 }
1489
1490 /* In case a device is missing we have a cap of 0, so don't use it. */
1491 bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
1492
1493 for (i = 0; i < map->num_stripes; i++) {
1494 if (zone_info[i].alloc_offset == WP_MISSING_DEV)
1495 continue;
1496
1497 if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
1498 zone_info[i].alloc_offset = last_alloc;
1499
1500 if (unlikely((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
1501 !btrfs_test_opt(fs_info, DEGRADED))) {
1502 btrfs_err(fs_info,
1503 "zoned: write pointer offset mismatch of zones in %s profile",
1504 btrfs_bg_type_to_raid_name(map->type));
1505 return -EIO;
1506 }
1507 if (test_bit(0, active) != test_bit(i, active)) {
1508 if (unlikely(!btrfs_test_opt(fs_info, DEGRADED) &&
1509 !btrfs_zone_activate(bg))) {
1510 return -EIO;
1511 }
1512 } else {
1513 if (test_bit(0, active))
1514 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1515 }
1516 }
1517
1518 if (zone_info[0].alloc_offset != WP_MISSING_DEV)
1519 bg->alloc_offset = zone_info[0].alloc_offset;
1520 else
1521 bg->alloc_offset = zone_info[i - 1].alloc_offset;
1522
1523 return 0;
1524 }
1525
static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
1527 struct btrfs_chunk_map *map,
1528 struct zone_info *zone_info,
1529 unsigned long *active,
1530 u64 last_alloc)
1531 {
1532 struct btrfs_fs_info *fs_info = bg->fs_info;
1533 u64 stripe_nr = 0, stripe_offset = 0;
1534 u32 stripe_index = 0;
1535
1536 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1537 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1538 btrfs_bg_type_to_raid_name(map->type));
1539 return -EINVAL;
1540 }
1541
1542 if (last_alloc) {
1543 u32 factor = map->num_stripes;
1544
1545 stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
1546 stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
1547 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
1548 }
1549
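	/*
	 * Distribute last_alloc over the stripes of the conventional zones:
	 * every stripe gets the full rounds, stripes before the partially
	 * filled one get an extra BTRFS_STRIPE_LEN, and the partial stripe
	 * gets the remainder. E.g. (illustrative) with 2 stripes and
	 * last_alloc = 3 * BTRFS_STRIPE_LEN + 4K, stripe 0 gets
	 * 2 * BTRFS_STRIPE_LEN and stripe 1 gets BTRFS_STRIPE_LEN + 4K.
	 */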
1550 for (int i = 0; i < map->num_stripes; i++) {
1551 if (zone_info[i].alloc_offset == WP_MISSING_DEV)
1552 continue;
1553
1554 if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
1555
1556 zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
1557
1558 if (stripe_index > i)
1559 zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
1560 else if (stripe_index == i)
1561 zone_info[i].alloc_offset += stripe_offset;
1562 }
1563
1564 if (test_bit(0, active) != test_bit(i, active)) {
1565 if (unlikely(!btrfs_zone_activate(bg)))
1566 return -EIO;
1567 } else {
1568 if (test_bit(0, active))
1569 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1570 }
1571 bg->zone_capacity += zone_info[i].capacity;
1572 bg->alloc_offset += zone_info[i].alloc_offset;
1573 }
1574
1575 return 0;
1576 }
1577
static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
1579 struct btrfs_chunk_map *map,
1580 struct zone_info *zone_info,
1581 unsigned long *active,
1582 u64 last_alloc)
1583 {
1584 struct btrfs_fs_info *fs_info = bg->fs_info;
1585 u64 stripe_nr = 0, stripe_offset = 0;
1586 u32 stripe_index = 0;
1587
1588 if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1589 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1590 btrfs_bg_type_to_raid_name(map->type));
1591 return -EINVAL;
1592 }
1593
1594 if (last_alloc) {
1595 u32 factor = map->num_stripes / map->sub_stripes;
1596
1597 stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
1598 stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
1599 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
1600 }
1601
1602 for (int i = 0; i < map->num_stripes; i++) {
1603 if (zone_info[i].alloc_offset == WP_MISSING_DEV)
1604 continue;
1605
1606 if (test_bit(0, active) != test_bit(i, active)) {
1607 if (unlikely(!btrfs_zone_activate(bg)))
1608 return -EIO;
1609 } else {
1610 if (test_bit(0, active))
1611 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1612 }
1613
1614 if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
1615 zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
1616
1617 if (stripe_index > (i / map->sub_stripes))
1618 zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
1619 else if (stripe_index == (i / map->sub_stripes))
1620 zone_info[i].alloc_offset += stripe_offset;
1621 }
1622
1623 if ((i % map->sub_stripes) == 0) {
1624 bg->zone_capacity += zone_info[i].capacity;
1625 bg->alloc_offset += zone_info[i].alloc_offset;
1626 }
1627 }
1628
1629 return 0;
1630 }
1631
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
1633 {
1634 struct btrfs_fs_info *fs_info = cache->fs_info;
1635 struct btrfs_chunk_map *map;
1636 u64 logical = cache->start;
1637 u64 length = cache->length;
1638 struct zone_info AUTO_KFREE(zone_info);
1639 int ret;
1640 int i;
1641 unsigned long *active = NULL;
1642 u64 last_alloc = 0;
1643 u32 num_sequential = 0, num_conventional = 0;
1644 u64 profile;
1645
1646 if (!btrfs_is_zoned(fs_info))
1647 return 0;
1648
1649 /* Sanity check */
1650 if (unlikely(!IS_ALIGNED(length, fs_info->zone_size))) {
1651 btrfs_err(fs_info,
1652 "zoned: block group %llu len %llu unaligned to zone size %llu",
1653 logical, length, fs_info->zone_size);
1654 return -EIO;
1655 }
1656
1657 map = btrfs_find_chunk_map(fs_info, logical, length);
1658 if (!map)
1659 return -EINVAL;
1660
1661 cache->physical_map = map;
1662
1663 zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
1664 if (!zone_info) {
1665 ret = -ENOMEM;
1666 goto out;
1667 }
1668
1669 active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
1670 if (!active) {
1671 ret = -ENOMEM;
1672 goto out;
1673 }
1674
1675 for (i = 0; i < map->num_stripes; i++) {
1676 ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map, new);
1677 if (ret)
1678 goto out;
1679
1680 if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
1681 num_conventional++;
1682 else
1683 num_sequential++;
1684 }
1685
1686 if (num_sequential > 0)
1687 set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1688
1689 if (num_conventional > 0) {
1690 ret = calculate_alloc_pointer(cache, &last_alloc, new);
1691 if (ret) {
1692 btrfs_err(fs_info,
1693 "zoned: failed to determine allocation offset of bg %llu",
1694 cache->start);
1695 goto out;
1696 } else if (map->num_stripes == num_conventional) {
1697 cache->alloc_offset = last_alloc;
1698 cache->zone_capacity = cache->length;
1699 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
1700 goto out;
1701 }
1702 }
1703
1704 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
1705 switch (profile) {
1706 case 0: /* single */
1707 ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
1708 break;
1709 case BTRFS_BLOCK_GROUP_DUP:
1710 ret = btrfs_load_block_group_dup(cache, map, zone_info, active,
1711 last_alloc);
1712 break;
1713 case BTRFS_BLOCK_GROUP_RAID1:
1714 case BTRFS_BLOCK_GROUP_RAID1C3:
1715 case BTRFS_BLOCK_GROUP_RAID1C4:
1716 ret = btrfs_load_block_group_raid1(cache, map, zone_info,
1717 active, last_alloc);
1718 break;
1719 case BTRFS_BLOCK_GROUP_RAID0:
1720 ret = btrfs_load_block_group_raid0(cache, map, zone_info,
1721 active, last_alloc);
1722 break;
1723 case BTRFS_BLOCK_GROUP_RAID10:
1724 ret = btrfs_load_block_group_raid10(cache, map, zone_info,
1725 active, last_alloc);
1726 break;
1727 case BTRFS_BLOCK_GROUP_RAID5:
1728 case BTRFS_BLOCK_GROUP_RAID6:
1729 default:
1730 btrfs_err(fs_info, "zoned: profile %s not yet supported",
1731 btrfs_bg_type_to_raid_name(map->type));
1732 ret = -EINVAL;
1733 goto out;
1734 }
1735
1736 if (ret == -EIO && profile != 0 && profile != BTRFS_BLOCK_GROUP_RAID0 &&
1737 profile != BTRFS_BLOCK_GROUP_RAID10) {
1738 /*
		 * Detected a broken write pointer. Make this block group
		 * unallocatable by setting the allocation pointer to the end
		 * of the allocatable region. Relocating this block group will
		 * fix the mismatch.
		 *
		 * Currently, we cannot handle the RAID0 or RAID10 case like
		 * this because we don't have a proper zone_capacity value.
		 * But reading from such a block group won't work anyway
		 * because of the missing stripe.
1748 */
1749 cache->alloc_offset = cache->zone_capacity;
1750 }
1751
1752 out:
1753 /* Reject non SINGLE data profiles without RST */
1754 if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
1755 (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
1756 !fs_info->stripe_root) {
1757 btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1758 btrfs_bg_type_to_raid_name(map->type));
1759 ret = -EINVAL;
1760 }
1761
1762 if (unlikely(cache->alloc_offset > cache->zone_capacity)) {
1763 btrfs_err(fs_info,
1764 "zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
1765 cache->alloc_offset, cache->zone_capacity,
1766 cache->start);
1767 ret = -EIO;
1768 }
1769
1770 /* An extent is allocated after the write pointer */
1771 if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
1772 btrfs_err(fs_info,
1773 "zoned: got wrong write pointer in BG %llu: %llu > %llu",
1774 logical, last_alloc, cache->alloc_offset);
1775 ret = -EIO;
1776 }
1777
1778 if (!ret) {
1779 cache->meta_write_pointer = cache->alloc_offset + cache->start;
1780 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
1781 btrfs_get_block_group(cache);
1782 spin_lock(&fs_info->zone_active_bgs_lock);
1783 list_add_tail(&cache->active_bg_list,
1784 &fs_info->zone_active_bgs);
1785 spin_unlock(&fs_info->zone_active_bgs_lock);
1786 }
1787 } else {
1788 btrfs_free_chunk_map(cache->physical_map);
1789 cache->physical_map = NULL;
1790 }
1791 bitmap_free(active);
1792
1793 return ret;
1794 }
1795
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
1797 {
1798 u64 unusable, free;
1799
1800 if (!btrfs_is_zoned(cache->fs_info))
1801 return;
1802
1803 WARN_ON(cache->bytes_super != 0);
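	/*
	 * Unusable space is what was allocated and then freed below the write
	 * pointer (only reclaimable by a zone reset) plus the gap between the
	 * zone capacity and the zone size.
	 */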
1804 unusable = (cache->alloc_offset - cache->used) +
1805 (cache->length - cache->zone_capacity);
1806 free = cache->zone_capacity - cache->alloc_offset;
1807
1808 /* We only need ->free_space in ALLOC_SEQ block groups */
1809 cache->cached = BTRFS_CACHE_FINISHED;
1810 cache->free_space_ctl->free_space = free;
1811 cache->zone_unusable = unusable;
1812 }
1813
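/*
 * Decide whether a bio should be submitted with REQ_OP_ZONE_APPEND.
 *
 * Only data writes on a zoned filesystem use zone append, and only when they
 * target a block group backed by a sequential write required zone. Metadata,
 * reads and data relocation writes keep using regular REQ_OP_WRITE.
 */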
1814 bool btrfs_use_zone_append(struct btrfs_bio *bbio)
1815 {
1816 u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
1817 struct btrfs_inode *inode = bbio->inode;
1818 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1819 struct btrfs_block_group *cache;
1820 bool ret = false;
1821
1822 if (!btrfs_is_zoned(fs_info))
1823 return false;
1824
1825 if (!is_data_inode(inode))
1826 return false;
1827
1828 if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
1829 return false;
1830
1831 /*
1832  * Using REQ_OP_ZONE_APPEND for relocation can break assumptions the
1833  * relocation code has about the extent layout.
1834  * Furthermore, we have set aside our own block group from which only
1835  * the relocation "process" can allocate, and we make sure only one
1836  * process at a time can add pages to an extent that gets relocated.
1837  * So it is safe to use regular REQ_OP_WRITE for this special case.
1838  */
1839 if (btrfs_is_data_reloc_root(inode->root))
1840 return false;
1841
1842 cache = btrfs_lookup_block_group(fs_info, start);
1843 ASSERT(cache);
1844 if (!cache)
1845 return false;
1846
1847 ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1848 btrfs_put_block_group(cache);
1849
1850 return ret;
1851 }
1852
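/*
 * Record where a ZONE_APPEND write actually landed.
 *
 * A zone append bio only learns its final location at completion time, so
 * adjust the logical address stored in the ordered sum by the difference
 * between the physical address the write completed at and the one it was
 * originally mapped to.
 */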
1853 void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
1854 {
1855 const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
1856 struct btrfs_ordered_sum *sum = bbio->sums;
1857
1858 if (physical < bbio->orig_physical)
1859 sum->logical -= bbio->orig_physical - physical;
1860 else
1861 sum->logical += physical - bbio->orig_physical;
1862 }
1863
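/*
 * Point the ordered extent and its freshly created COW extent map at
 * @logical, the logical address matching where the ZONE_APPEND writes
 * actually landed.
 */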
1864 static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
1865 u64 logical)
1866 {
1867 struct extent_map_tree *em_tree = &ordered->inode->extent_tree;
1868 struct extent_map *em;
1869
1870 ordered->disk_bytenr = logical;
1871
1872 write_lock(&em_tree->lock);
1873 em = btrfs_search_extent_mapping(em_tree, ordered->file_offset,
1874 ordered->num_bytes);
1875 /* The em should be a new COW extent, thus it should not have an offset. */
1876 ASSERT(em->offset == 0, "em->offset=%llu", em->offset);
1877 em->disk_bytenr = logical;
1878 btrfs_free_extent_map(em);
1879 write_unlock(&em_tree->lock);
1880 }
1881
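/*
 * Split off the first @len bytes of @ordered into a new ordered extent
 * located at @logical and complete it. Returns false if splitting the extent
 * map or the ordered extent fails.
 */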
1882 static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
1883 u64 logical, u64 len)
1884 {
1885 struct btrfs_ordered_extent *new;
1886
1887 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
1888 btrfs_split_extent_map(ordered->inode, ordered->file_offset,
1889 ordered->num_bytes, len, logical))
1890 return false;
1891
1892 new = btrfs_split_ordered_extent(ordered, len);
1893 if (IS_ERR(new))
1894 return false;
1895 new->disk_bytenr = logical;
1896 btrfs_finish_one_ordered(new);
1897 return true;
1898 }
1899
1900 void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
1901 {
1902 struct btrfs_inode *inode = ordered->inode;
1903 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1904 struct btrfs_ordered_sum *sum;
1905 u64 logical, len;
1906
1907 /*
1908  * A write to a pre-allocated region is only done for data relocation and
1909  * uses a regular WRITE operation. No split/rewrite is necessary.
1910 */
1911 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
1912 return;
1913
1914 ASSERT(!list_empty(&ordered->list));
1915 /* The list could only have been empty in the pre-alloc case handled above. */
1916 sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
1917 logical = sum->logical;
1918 len = sum->len;
1919
1920 while (len < ordered->disk_num_bytes) {
1921 sum = list_next_entry(sum, list);
1922 if (sum->logical == logical + len) {
1923 len += sum->len;
1924 continue;
1925 }
1926 if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
1927 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
1928 btrfs_err(fs_info, "failed to split ordered extent");
1929 goto out;
1930 }
1931 logical = sum->logical;
1932 len = sum->len;
1933 }
1934
1935 if (ordered->disk_bytenr != logical)
1936 btrfs_rewrite_logical_zoned(ordered, logical);
1937
1938 out:
1939 /*
1940 * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures
1941 * were allocated by btrfs_alloc_dummy_sum only to record the logical
1942 * addresses and don't contain actual checksums. We thus must free them
1943 * here so that we don't attempt to log the csums later.
1944 */
1945 if ((inode->flags & BTRFS_INODE_NODATASUM) ||
1946 test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state)) {
1947 while ((sum = list_first_entry_or_null(&ordered->list,
1948 typeof(*sum), list))) {
1949 list_del(&sum->list);
1950 kfree(sum);
1951 }
1952 }
1953 }
1954
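/*
 * Make sure the block group we are about to write an extent buffer into is
 * active. The tree-log block group is activated directly, finishing another
 * zone first if needed. For regular metadata/system block groups only one
 * block group is kept active at a time: finish the one currently tracked in
 * *active_bg and pivot *active_bg to @ctx->zoned_bg. Returns false if the
 * block group cannot be activated.
 */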
1955 static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
1956 struct btrfs_block_group **active_bg)
1957 {
1958 const struct writeback_control *wbc = ctx->wbc;
1959 struct btrfs_block_group *block_group = ctx->zoned_bg;
1960 struct btrfs_fs_info *fs_info = block_group->fs_info;
1961
1962 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
1963 return true;
1964
1965 if (fs_info->treelog_bg == block_group->start) {
1966 if (!btrfs_zone_activate(block_group)) {
1967 int ret_fin = btrfs_zone_finish_one_bg(fs_info);
1968
1969 if (ret_fin != 1 || !btrfs_zone_activate(block_group))
1970 return false;
1971 }
1972 } else if (*active_bg != block_group) {
1973 struct btrfs_block_group *tgt = *active_bg;
1974
1975 /* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */
1976 lockdep_assert_held(&fs_info->zoned_meta_io_lock);
1977
1978 if (tgt) {
1979 /*
1980  * If there are unsent IOs left in the allocated area,
1981  * we cannot wait for them as that may cause a deadlock.
1982 */
1983 if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
1984 if (wbc->sync_mode == WB_SYNC_NONE ||
1985 (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync))
1986 return false;
1987 }
1988
1989 /* Pivot active metadata/system block group. */
1990 btrfs_zoned_meta_io_unlock(fs_info);
1991 wait_eb_writebacks(tgt);
1992 do_zone_finish(tgt, true);
1993 btrfs_zoned_meta_io_lock(fs_info);
1994 if (*active_bg == tgt) {
1995 btrfs_put_block_group(tgt);
1996 *active_bg = NULL;
1997 }
1998 }
1999 if (!btrfs_zone_activate(block_group))
2000 return false;
2001 if (*active_bg != block_group) {
2002 ASSERT(*active_bg == NULL);
2003 *active_bg = block_group;
2004 btrfs_get_block_group(block_group);
2005 }
2006 }
2007
2008 return true;
2009 }
2010
2011 /*
2012 * Check if @ctx->eb is aligned to the write pointer.
2013 *
2014 * Return:
2015 * 0: @ctx->eb is at the write pointer. You can write it.
2016 * -EAGAIN: There is a hole. The caller should handle the case.
2017 * -EBUSY: There is a hole, but the caller can just bail out.
2018 */
2019 int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
2020 struct btrfs_eb_write_context *ctx)
2021 {
2022 const struct writeback_control *wbc = ctx->wbc;
2023 const struct extent_buffer *eb = ctx->eb;
2024 struct btrfs_block_group *block_group = ctx->zoned_bg;
2025
2026 if (!btrfs_is_zoned(fs_info))
2027 return 0;
2028
2029 if (block_group) {
2030 if (block_group->start > eb->start ||
2031 block_group->start + block_group->length <= eb->start) {
2032 btrfs_put_block_group(block_group);
2033 block_group = NULL;
2034 ctx->zoned_bg = NULL;
2035 }
2036 }
2037
2038 if (!block_group) {
2039 block_group = btrfs_lookup_block_group(fs_info, eb->start);
2040 if (!block_group)
2041 return 0;
2042 ctx->zoned_bg = block_group;
2043 }
2044
2045 if (block_group->meta_write_pointer == eb->start) {
2046 struct btrfs_block_group **tgt;
2047
2048 if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
2049 return 0;
2050
2051 if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)
2052 tgt = &fs_info->active_system_bg;
2053 else
2054 tgt = &fs_info->active_meta_bg;
2055 if (check_bg_is_active(ctx, tgt))
2056 return 0;
2057 }
2058
2059 /*
2060  * Since we may have released fs_info->zoned_meta_io_lock, someone else may
2061  * have already started writing this eb. In that case, we can just bail out.
2062 */
2063 if (block_group->meta_write_pointer > eb->start)
2064 return -EBUSY;
2065
2066 /* If for_sync, this hole will be filled by a transaction commit. */
2067 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
2068 return -EAGAIN;
2069 return -EBUSY;
2070 }
2071
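/*
 * Zero out a region of a zoned device with blkdev_issue_zeroout(). Returns
 * -EOPNOTSUPP if @physical is not on a sequential write required zone.
 */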
2072 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
2073 {
2074 if (!btrfs_dev_is_sequential(device, physical))
2075 return -EOPNOTSUPP;
2076
2077 return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
2078 length >> SECTOR_SHIFT, GFP_NOFS, 0);
2079 }
2080
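/*
 * Read the zone covering @logical into @zone, trying each mirror in turn and
 * skipping missing or failing devices. RAID5/6 profiles are not supported.
 */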
2081 static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
2082 struct blk_zone *zone)
2083 {
2084 struct btrfs_io_context *bioc = NULL;
2085 u64 mapped_length = PAGE_SIZE;
2086 unsigned int nofs_flag;
2087 int nmirrors;
2088 int i, ret;
2089
2090 ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2091 &mapped_length, &bioc, NULL, NULL);
2092 if (unlikely(ret || !bioc || mapped_length < PAGE_SIZE)) {
2093 ret = -EIO;
2094 goto out_put_bioc;
2095 }
2096
2097 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2098 ret = -EINVAL;
2099 goto out_put_bioc;
2100 }
2101
2102 nofs_flag = memalloc_nofs_save();
2103 nmirrors = (int)bioc->num_stripes;
2104 for (i = 0; i < nmirrors; i++) {
2105 u64 physical = bioc->stripes[i].physical;
2106 struct btrfs_device *dev = bioc->stripes[i].dev;
2107
2108 /* Missing device */
2109 if (!dev->bdev)
2110 continue;
2111
2112 ret = btrfs_get_dev_zone(dev, physical, zone);
2113 /* Failing device */
2114 if (ret == -EIO || ret == -EOPNOTSUPP)
2115 continue;
2116 break;
2117 }
2118 memalloc_nofs_restore(nofs_flag);
2119 out_put_bioc:
2120 btrfs_put_bioc(bioc);
2121 return ret;
2122 }
2123
2124 /*
2125  * Synchronize the write pointer of the zone at @physical_start on @tgt_dev by
2126  * writing zeros from @physical_pos up to the write pointer of the dev-replace
2127  * source device.
2128 */
2129 int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
2130 u64 physical_start, u64 physical_pos)
2131 {
2132 struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
2133 struct blk_zone zone;
2134 u64 length;
2135 u64 wp;
2136 int ret;
2137
2138 if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
2139 return 0;
2140
2141 ret = read_zone_info(fs_info, logical, &zone);
2142 if (ret)
2143 return ret;
2144
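/*
 * Rebase the source zone's write pointer (a sector offset within the zone)
 * onto @physical_start of the target device, converting it to bytes.
 */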
2145 wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
2146
2147 if (physical_pos == wp)
2148 return 0;
2149
2150 if (unlikely(physical_pos > wp))
2151 return -EUCLEAN;
2152
2153 length = wp - physical_pos;
2154 return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
2155 }
2156
2157 /*
2158 * Activate block group and underlying device zones
2159 *
2160 * @block_group: the block group to activate
2161 *
2162 * Return: true on success, false otherwise
2163 */
2164 bool btrfs_zone_activate(struct btrfs_block_group *block_group)
2165 {
2166 struct btrfs_fs_info *fs_info = block_group->fs_info;
2167 struct btrfs_chunk_map *map;
2168 struct btrfs_device *device;
2169 u64 physical;
2170 const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA);
2171 bool ret;
2172 int i;
2173
2174 if (!btrfs_is_zoned(block_group->fs_info))
2175 return true;
2176
2177 map = block_group->physical_map;
2178
2179 spin_lock(&fs_info->zone_active_bgs_lock);
2180 spin_lock(&block_group->lock);
2181 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
2182 ret = true;
2183 goto out_unlock;
2184 }
2185
2186 if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) {
2187 /* The caller should check if the block group is full. */
2188 if (WARN_ON_ONCE(btrfs_zoned_bg_is_full(block_group))) {
2189 ret = false;
2190 goto out_unlock;
2191 }
2192 } else {
2193 /* If anything had been written, the block group would already be active. */
2194 WARN_ON_ONCE(block_group->meta_write_pointer != block_group->start);
2195 }
2196
2197 for (i = 0; i < map->num_stripes; i++) {
2198 struct btrfs_zoned_device_info *zinfo;
2199 int reserved = 0;
2200
2201 device = map->stripes[i].dev;
2202 physical = map->stripes[i].physical;
2203 zinfo = device->zone_info;
2204
2205 if (!device->bdev)
2206 continue;
2207
2208 if (zinfo->max_active_zones == 0)
2209 continue;
2210
2211 if (is_data)
2212 reserved = zinfo->reserved_active_zones;
2213 /*
2214 * For the data block group, leave active zones for one
2215 * metadata block group and one system block group.
2216 */
2217 if (atomic_read(&zinfo->active_zones_left) <= reserved) {
2218 ret = false;
2219 goto out_unlock;
2220 }
2221
2222 if (!btrfs_dev_set_active_zone(device, physical)) {
2223 /* Cannot activate the zone */
2224 ret = false;
2225 goto out_unlock;
2226 }
2227 if (!is_data)
2228 zinfo->reserved_active_zones--;
2229 }
2230
2231 /* Successfully activated all the zones */
2232 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2233 spin_unlock(&block_group->lock);
2234
2235 /* For the active block group list */
2236 btrfs_get_block_group(block_group);
2237 list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
2238 spin_unlock(&fs_info->zone_active_bgs_lock);
2239
2240 return true;
2241
2242 out_unlock:
2243 spin_unlock(&block_group->lock);
2244 spin_unlock(&fs_info->zone_active_bgs_lock);
2245 return ret;
2246 }
2247
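/*
 * Wait until writeback has finished for every extent buffer that lives inside
 * the block group, so the zone can be finished without racing with in-flight
 * metadata writes.
 */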
2248 static void wait_eb_writebacks(struct btrfs_block_group *block_group)
2249 {
2250 struct btrfs_fs_info *fs_info = block_group->fs_info;
2251 const u64 end = block_group->start + block_group->length;
2252 struct extent_buffer *eb;
2253 unsigned long index, start = (block_group->start >> fs_info->nodesize_bits);
2254
2255 rcu_read_lock();
2256 xa_for_each_start(&fs_info->buffer_tree, index, eb, start) {
2257 if (eb->start < block_group->start)
2258 continue;
2259 if (eb->start >= end)
2260 break;
2261 rcu_read_unlock();
2262 wait_on_extent_buffer_writeback(eb);
2263 rcu_read_lock();
2264 }
2265 rcu_read_unlock();
2266 }
2267
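/*
 * Issue REQ_OP_ZONE_FINISH for the device zone backing one stripe of the
 * block group (sequential zones only) and release the device's active zone
 * resource, returning the reservation for non-data block groups.
 */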
2268 static int call_zone_finish(struct btrfs_block_group *block_group,
2269 struct btrfs_io_stripe *stripe)
2270 {
2271 struct btrfs_device *device = stripe->dev;
2272 const u64 physical = stripe->physical;
2273 struct btrfs_zoned_device_info *zinfo = device->zone_info;
2274 int ret;
2275
2276 if (!device->bdev)
2277 return 0;
2278
2279 if (zinfo->max_active_zones == 0)
2280 return 0;
2281
2282 if (btrfs_dev_is_sequential(device, physical)) {
2283 unsigned int nofs_flags;
2284
2285 nofs_flags = memalloc_nofs_save();
2286 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
2287 physical >> SECTOR_SHIFT,
2288 zinfo->zone_size >> SECTOR_SHIFT);
2289 memalloc_nofs_restore(nofs_flags);
2290
2291 if (ret)
2292 return ret;
2293 }
2294
2295 if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
2296 zinfo->reserved_active_zones++;
2297 btrfs_dev_clear_active_zone(device, physical);
2298
2299 return 0;
2300 }
2301
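/*
 * Finish all device zones of a block group and deactivate it.
 *
 * With @fully_written the caller guarantees all IO to the usable area has
 * already completed. Otherwise the block group is temporarily marked
 * read-only and reservations, ordered extents and extent buffer writebacks
 * are flushed first. Returns -EAGAIN if the block group still has allocated
 * but unwritten metadata, reserved space, or is the data relocation block
 * group.
 */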
2302 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
2303 {
2304 struct btrfs_fs_info *fs_info = block_group->fs_info;
2305 struct btrfs_chunk_map *map;
2306 const bool is_metadata = (block_group->flags &
2307 (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
2308 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2309 int ret = 0;
2310 int i;
2311
2312 spin_lock(&block_group->lock);
2313 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
2314 spin_unlock(&block_group->lock);
2315 return 0;
2316 }
2317
2318 /* Check if we have unwritten allocated space */
2319 if (is_metadata &&
2320 block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
2321 spin_unlock(&block_group->lock);
2322 return -EAGAIN;
2323 }
2324
2325 /*
2326 * If we are sure that the block group is full (= no more room left for
2327 * new allocation) and the IO for the last usable block is completed, we
2328 * don't need to wait for the other IOs. This holds because we ensure
2329 * the sequential IO submissions using the ZONE_APPEND command for data
2330 * and block_group->meta_write_pointer for metadata.
2331 */
2332 if (!fully_written) {
2333 if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
2334 spin_unlock(&block_group->lock);
2335 return -EAGAIN;
2336 }
2337 spin_unlock(&block_group->lock);
2338
2339 ret = btrfs_inc_block_group_ro(block_group, false);
2340 if (ret)
2341 return ret;
2342
2343 /* Ensure all writes in this block group finish */
2344 btrfs_wait_block_group_reservations(block_group);
2345 /* No need to wait for NOCOW writers. Zoned mode does not allow that */
2346 btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group);
2347 /* Wait for extent buffers to be written. */
2348 if (is_metadata)
2349 wait_eb_writebacks(block_group);
2350
2351 spin_lock(&block_group->lock);
2352
2353 /*
2354 * Bail out if someone already deactivated the block group, or
2355 * allocated space is left in the block group.
2356 */
2357 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2358 &block_group->runtime_flags)) {
2359 spin_unlock(&block_group->lock);
2360 btrfs_dec_block_group_ro(block_group);
2361 return 0;
2362 }
2363
2364 if (block_group->reserved ||
2365 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2366 &block_group->runtime_flags)) {
2367 spin_unlock(&block_group->lock);
2368 btrfs_dec_block_group_ro(block_group);
2369 return -EAGAIN;
2370 }
2371 }
2372
2373 clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2374 block_group->alloc_offset = block_group->zone_capacity;
2375 if (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM))
2376 block_group->meta_write_pointer = block_group->start +
2377 block_group->zone_capacity;
2378 block_group->free_space_ctl->free_space = 0;
2379 btrfs_clear_treelog_bg(block_group);
2380 btrfs_clear_data_reloc_bg(block_group);
2381 spin_unlock(&block_group->lock);
2382
2383 down_read(&dev_replace->rwsem);
2384 map = block_group->physical_map;
2385 for (i = 0; i < map->num_stripes; i++) {
2386
2387 ret = call_zone_finish(block_group, &map->stripes[i]);
2388 if (ret) {
2389 up_read(&dev_replace->rwsem);
2390 return ret;
2391 }
2392 }
2393 up_read(&dev_replace->rwsem);
2394
2395 if (!fully_written)
2396 btrfs_dec_block_group_ro(block_group);
2397
2398 spin_lock(&fs_info->zone_active_bgs_lock);
2399 ASSERT(!list_empty(&block_group->active_bg_list));
2400 list_del_init(&block_group->active_bg_list);
2401 spin_unlock(&fs_info->zone_active_bgs_lock);
2402
2403 /* For active_bg_list */
2404 btrfs_put_block_group(block_group);
2405
2406 clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2407
2408 return 0;
2409 }
2410
2411 int btrfs_zone_finish(struct btrfs_block_group *block_group)
2412 {
2413 if (!btrfs_is_zoned(block_group->fs_info))
2414 return 0;
2415
2416 return do_zone_finish(block_group, false);
2417 }
2418
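/*
 * Check whether at least one device can still activate a new zone for a block
 * group with profile @flags, honoring the zones reserved for metadata when
 * asked about a data profile. Sets BTRFS_FS_NEED_ZONE_FINISH when no device
 * qualifies.
 */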
2419 bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
2420 {
2421 struct btrfs_fs_info *fs_info = fs_devices->fs_info;
2422 struct btrfs_device *device;
2423 bool ret = false;
2424
2425 if (!btrfs_is_zoned(fs_info))
2426 return true;
2427
2428 if (test_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags))
2429 return false;
2430
2431 /* Check if there is a device with active zones left */
2432 mutex_lock(&fs_info->chunk_mutex);
2433 spin_lock(&fs_info->zone_active_bgs_lock);
2434 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
2435 struct btrfs_zoned_device_info *zinfo = device->zone_info;
2436 int reserved = 0;
2437
2438 if (!device->bdev)
2439 continue;
2440
2441 if (!zinfo->max_active_zones) {
2442 ret = true;
2443 break;
2444 }
2445
2446 if (flags & BTRFS_BLOCK_GROUP_DATA)
2447 reserved = zinfo->reserved_active_zones;
2448
2449 switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
2450 case 0: /* single */
2451 ret = (atomic_read(&zinfo->active_zones_left) >= (1 + reserved));
2452 break;
2453 case BTRFS_BLOCK_GROUP_DUP:
2454 ret = (atomic_read(&zinfo->active_zones_left) >= (2 + reserved));
2455 break;
2456 }
2457 if (ret)
2458 break;
2459 }
2460 spin_unlock(&fs_info->zone_active_bgs_lock);
2461 mutex_unlock(&fs_info->chunk_mutex);
2462
2463 if (!ret)
2464 set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2465
2466 return ret;
2467 }
2468
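/*
 * Called on write completion: if the range that just finished leaves no room
 * for even one more allocation unit in the block group, finish its zones so
 * the active zone resources are released early.
 */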
2469 int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
2470 {
2471 struct btrfs_block_group *block_group;
2472 u64 min_alloc_bytes;
2473
2474 if (!btrfs_is_zoned(fs_info))
2475 return 0;
2476
2477 block_group = btrfs_lookup_block_group(fs_info, logical);
2478 if (WARN_ON_ONCE(!block_group))
2479 return -ENOENT;
2480
2481 /* No MIXED_BG on zoned btrfs. */
2482 if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
2483 min_alloc_bytes = fs_info->sectorsize;
2484 else
2485 min_alloc_bytes = fs_info->nodesize;
2486
2487 /* Bail out if we can allocate more data from this block group. */
2488 if (logical + length + min_alloc_bytes <=
2489 block_group->start + block_group->zone_capacity)
2490 goto out;
2491
2492 do_zone_finish(block_group, true);
2493
2494 out:
2495 btrfs_put_block_group(block_group);
2496 return 0;
2497 }
2498
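/*
 * Deferred zone finishing: wait for the writeback of the last extent buffer
 * in the block group, then finish its zones.
 */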
2499 static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
2500 {
2501 int ret;
2502 struct btrfs_block_group *bg =
2503 container_of(work, struct btrfs_block_group, zone_finish_work);
2504
2505 wait_on_extent_buffer_writeback(bg->last_eb);
2506 free_extent_buffer(bg->last_eb);
2507 ret = do_zone_finish(bg, true);
2508 if (ret)
2509 btrfs_handle_fs_error(bg->fs_info, ret,
2510 "Failed to finish block-group's zone");
2511 btrfs_put_block_group(bg);
2512 }
2513
2514 void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
2515 struct extent_buffer *eb)
2516 {
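/*
 * Nothing to do for conventional zones, or while at least one more tree block
 * of this size still fits below the zone capacity after this eb.
 */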
2517 if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
2518 eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
2519 return;
2520
2521 if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
2522 btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
2523 bg->start);
2524 return;
2525 }
2526
2527 /* For the work */
2528 btrfs_get_block_group(bg);
2529 refcount_inc(&eb->refs);
2530 bg->last_eb = eb;
2531 INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
2532 queue_work(system_dfl_wq, &bg->zone_finish_work);
2533 }
2534
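/* Clear fs_info->data_reloc_bg if it still points to this block group. */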
2535 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
2536 {
2537 struct btrfs_fs_info *fs_info = bg->fs_info;
2538
2539 spin_lock(&fs_info->relocation_bg_lock);
2540 if (fs_info->data_reloc_bg == bg->start)
2541 fs_info->data_reloc_bg = 0;
2542 spin_unlock(&fs_info->relocation_bg_lock);
2543 }
2544
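/*
 * Make sure there is a block group dedicated to data relocation. Scan the
 * data space_info for empty block groups (skipping the first empty one),
 * migrate the chosen one into the data relocation sub space_info and activate
 * it. If no candidate is found, force-allocate a new chunk in that sub
 * space_info and retry.
 */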
2545 void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info)
2546 {
2547 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
2548 struct btrfs_space_info *space_info = data_sinfo;
2549 struct btrfs_trans_handle *trans;
2550 struct btrfs_block_group *bg;
2551 struct list_head *bg_list;
2552 u64 alloc_flags;
2553 bool first = true;
2554 bool did_chunk_alloc = false;
2555 int index;
2556 int ret;
2557
2558 if (!btrfs_is_zoned(fs_info))
2559 return;
2560
2561 if (fs_info->data_reloc_bg)
2562 return;
2563
2564 if (sb_rdonly(fs_info->sb))
2565 return;
2566
2567 alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags);
2568 index = btrfs_bg_flags_to_raid_index(alloc_flags);
2569
2570 /* Scan the data space_info to find empty block groups. Take the second one. */
2571 again:
2572 bg_list = &space_info->block_groups[index];
2573 list_for_each_entry(bg, bg_list, list) {
2574 if (bg->alloc_offset != 0)
2575 continue;
2576
2577 if (first) {
2578 first = false;
2579 continue;
2580 }
2581
2582 if (space_info == data_sinfo) {
2583 /* Migrate the block group to the data relocation space_info. */
2584 struct btrfs_space_info *reloc_sinfo = data_sinfo->sub_group[0];
2585 int factor;
2586
2587 ASSERT(reloc_sinfo->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC,
2588 "reloc_sinfo->subgroup_id=%d", reloc_sinfo->subgroup_id);
2589 factor = btrfs_bg_type_to_factor(bg->flags);
2590
2591 down_write(&space_info->groups_sem);
2592 list_del_init(&bg->list);
2593 /* The list cannot become empty because we chose the second empty block group. */
2594 ASSERT(!list_empty(&space_info->block_groups[index]));
2595 up_write(&space_info->groups_sem);
2596
2597 spin_lock(&space_info->lock);
2598 space_info->total_bytes -= bg->length;
2599 space_info->disk_total -= bg->length * factor;
2600 space_info->disk_total -= bg->zone_unusable;
2601 /* No allocation has ever happened in this block group. */
2602 ASSERT(bg->used == 0, "bg->used=%llu", bg->used);
2603 /* No super block in a block group on the zoned setup. */
2604 ASSERT(bg->bytes_super == 0, "bg->bytes_super=%llu", bg->bytes_super);
2605 spin_unlock(&space_info->lock);
2606
2607 bg->space_info = reloc_sinfo;
2608 if (reloc_sinfo->block_group_kobjs[index] == NULL)
2609 btrfs_sysfs_add_block_group_type(bg);
2610
2611 btrfs_add_bg_to_space_info(fs_info, bg);
2612 }
2613
2614 fs_info->data_reloc_bg = bg->start;
2615 set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &bg->runtime_flags);
2616 btrfs_zone_activate(bg);
2617
2618 return;
2619 }
2620
2621 if (did_chunk_alloc)
2622 return;
2623
2624 trans = btrfs_join_transaction(fs_info->tree_root);
2625 if (IS_ERR(trans))
2626 return;
2627
2628 /* Allocate new BG in the data relocation space_info. */
2629 space_info = data_sinfo->sub_group[0];
2630 ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC,
2631 "space_info->subgroup_id=%d", space_info->subgroup_id);
2632 ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE);
2633 btrfs_end_transaction(trans);
2634 if (ret == 1) {
2635 /*
2636 * We allocated a new block group in the data relocation space_info. We
2637 * can take that one.
2638 */
2639 first = false;
2640 did_chunk_alloc = true;
2641 goto again;
2642 }
2643 }
2644
2645 void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
2646 {
2647 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2648 struct btrfs_device *device;
2649
2650 if (!btrfs_is_zoned(fs_info))
2651 return;
2652
2653 mutex_lock(&fs_devices->device_list_mutex);
2654 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2655 if (device->zone_info) {
2656 vfree(device->zone_info->zone_cache);
2657 device->zone_info->zone_cache = NULL;
2658 }
2659 }
2660 mutex_unlock(&fs_devices->device_list_mutex);
2661 }
2662
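/*
 * Return true when the percentage of bytes used across all devices is at or
 * above fs_info->bg_reclaim_threshold, signalling that zoned block group
 * reclaim should run. A threshold of 0 disables reclaim.
 */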
2663 bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info)
2664 {
2665 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2666 struct btrfs_device *device;
2667 u64 total = btrfs_super_total_bytes(fs_info->super_copy);
2668 u64 used = 0;
2669 u64 factor;
2670
2671 ASSERT(btrfs_is_zoned(fs_info));
2672
2673 if (fs_info->bg_reclaim_threshold == 0)
2674 return false;
2675
2676 mutex_lock(&fs_devices->device_list_mutex);
2677 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2678 if (!device->bdev)
2679 continue;
2680
2681 used += device->bytes_used;
2682 }
2683 mutex_unlock(&fs_devices->device_list_mutex);
2684
2685 factor = div64_u64(used * 100, total);
2686 return factor >= fs_info->bg_reclaim_threshold;
2687 }
2688
2689 void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
2690 u64 length)
2691 {
2692 struct btrfs_block_group *block_group;
2693
2694 if (!btrfs_is_zoned(fs_info))
2695 return;
2696
2697 block_group = btrfs_lookup_block_group(fs_info, logical);
2698 /* It should be called on a previous data relocation block group. */
2699 ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
2700
2701 spin_lock(&block_group->lock);
2702 if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
2703 goto out;
2704
2705 /* All relocation extents are written. */
2706 if (block_group->start + block_group->alloc_offset == logical + length) {
2707 /*
2708 * Now, release this block group for further allocations and
2709 * zone finish.
2710 */
2711 clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2712 &block_group->runtime_flags);
2713 }
2714
2715 out:
2716 spin_unlock(&block_group->lock);
2717 btrfs_put_block_group(block_group);
2718 }
2719
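/*
 * Pick the active data block group with the least remaining capacity
 * (excluding reserved, empty and data relocation block groups) and finish it.
 * Returns 1 if a block group was finished, 0 if there was no candidate, or a
 * negative errno.
 */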
2720 int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
2721 {
2722 struct btrfs_block_group *block_group;
2723 struct btrfs_block_group *min_bg = NULL;
2724 u64 min_avail = U64_MAX;
2725 int ret;
2726
2727 spin_lock(&fs_info->zone_active_bgs_lock);
2728 list_for_each_entry(block_group, &fs_info->zone_active_bgs,
2729 active_bg_list) {
2730 u64 avail;
2731
2732 spin_lock(&block_group->lock);
2733 if (block_group->reserved || block_group->alloc_offset == 0 ||
2734 !(block_group->flags & BTRFS_BLOCK_GROUP_DATA) ||
2735 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
2736 spin_unlock(&block_group->lock);
2737 continue;
2738 }
2739
2740 avail = block_group->zone_capacity - block_group->alloc_offset;
2741 if (min_avail > avail) {
2742 if (min_bg)
2743 btrfs_put_block_group(min_bg);
2744 min_bg = block_group;
2745 min_avail = avail;
2746 btrfs_get_block_group(min_bg);
2747 }
2748 spin_unlock(&block_group->lock);
2749 }
2750 spin_unlock(&fs_info->zone_active_bgs_lock);
2751
2752 if (!min_bg)
2753 return 0;
2754
2755 ret = btrfs_zone_finish(min_bg);
2756 btrfs_put_block_group(min_bg);
2757
2758 return ret < 0 ? ret : 1;
2759 }
2760
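/*
 * Try to activate one existing block group of @space_info (metadata/system
 * only). Returns 1 once a block group has been activated and 0 if none could
 * be. With @do_finish, keep finishing the fullest active data block group to
 * release active zone resources and retry until activation succeeds or
 * nothing is left to finish.
 */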
2761 int btrfs_zoned_activate_one_bg(struct btrfs_space_info *space_info, bool do_finish)
2762 {
2763 struct btrfs_fs_info *fs_info = space_info->fs_info;
2764 struct btrfs_block_group *bg;
2765 int index;
2766
2767 if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
2768 return 0;
2769
2770 for (;;) {
2771 int ret;
2772 bool need_finish = false;
2773
2774 down_read(&space_info->groups_sem);
2775 for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
2776 list_for_each_entry(bg, &space_info->block_groups[index],
2777 list) {
2778 if (!spin_trylock(&bg->lock))
2779 continue;
2780 if (btrfs_zoned_bg_is_full(bg) ||
2781 test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2782 &bg->runtime_flags)) {
2783 spin_unlock(&bg->lock);
2784 continue;
2785 }
2786 spin_unlock(&bg->lock);
2787
2788 if (btrfs_zone_activate(bg)) {
2789 up_read(&space_info->groups_sem);
2790 return 1;
2791 }
2792
2793 need_finish = true;
2794 }
2795 }
2796 up_read(&space_info->groups_sem);
2797
2798 if (!do_finish || !need_finish)
2799 break;
2800
2801 ret = btrfs_zone_finish_one_bg(fs_info);
2802 if (ret == 0)
2803 break;
2804 if (ret < 0)
2805 return ret;
2806 }
2807
2808 return 0;
2809 }
2810
2811 /*
2812 * Reserve zones for one metadata block group, one tree-log block group, and one
2813 * system block group.
2814 */
2815 void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
2816 {
2817 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2818 struct btrfs_block_group *block_group;
2819 struct btrfs_device *device;
2820 /* Reserve zones for normal SINGLE metadata and tree-log block group. */
2821 unsigned int metadata_reserve = 2;
2822 /* Reserve a zone for SINGLE system block group. */
2823 unsigned int system_reserve = 1;
2824
2825 if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
2826 return;
2827
2828 /*
2829 * This function is called from the mount context. So, there is no
2830 * parallel process touching the bits. No need for read_seqretry().
2831 */
2832 if (fs_info->avail_metadata_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
2833 metadata_reserve = 4;
2834 if (fs_info->avail_system_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
2835 system_reserve = 2;
2836
2837 /* Apply the reservation on all the devices. */
2838 mutex_lock(&fs_devices->device_list_mutex);
2839 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2840 if (!device->bdev)
2841 continue;
2842
2843 device->zone_info->reserved_active_zones =
2844 metadata_reserve + system_reserve;
2845 }
2846 mutex_unlock(&fs_devices->device_list_mutex);
2847
2848 /* Release reservation for currently active block groups. */
2849 spin_lock(&fs_info->zone_active_bgs_lock);
2850 list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
2851 struct btrfs_chunk_map *map = block_group->physical_map;
2852
2853 if (!(block_group->flags &
2854 (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
2855 continue;
2856
2857 for (int i = 0; i < map->num_stripes; i++)
2858 map->stripes[i].dev->zone_info->reserved_active_zones--;
2859 }
2860 spin_unlock(&fs_info->zone_active_bgs_lock);
2861 }
2862
2863 /*
2864 * Reset the zones of unused block groups from @space_info->bytes_zone_unusable.
2865 *
2866  * @space_info:	the space to work on
2867  * @num_bytes:	target number of bytes to reclaim
2868  *
2869  * This function resets the zones of a block group, so we can reuse the region
2870  * without removing the block group. In contrast, btrfs_delete_unused_bgs()
2871  * removes the block group and frees up the underlying zones, so a new block
2872  * group has to be allocated before the zones can be reused.
2873  *
2874  * Resetting is faster than deleting/recreating a block group. It is similar
2875  * to freeing logical space in non-zoned mode. However, the block group's
2876  * profile cannot be changed by this operation.
2877 */
2878 int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info, u64 num_bytes)
2879 {
2880 struct btrfs_fs_info *fs_info = space_info->fs_info;
2881 const sector_t zone_size_sectors = fs_info->zone_size >> SECTOR_SHIFT;
2882
2883 if (!btrfs_is_zoned(fs_info))
2884 return 0;
2885
2886 while (num_bytes > 0) {
2887 struct btrfs_chunk_map *map;
2888 struct btrfs_block_group *bg = NULL;
2889 bool found = false;
2890 u64 reclaimed = 0;
2891
2892 /*
2893 * Here, we choose a fully zone_unusable block group. It's
2894 * technically possible to reset a partly zone_unusable block
2895 * group, which still has some free space left. However,
2896 * handling that needs to cope with the allocation side, which
2897 * makes the logic more complex. So, let's handle the easy case
2898 * for now.
2899 */
2900 spin_lock(&fs_info->unused_bgs_lock);
2901 list_for_each_entry(bg, &fs_info->unused_bgs, bg_list) {
2902 if ((bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != space_info->flags)
2903 continue;
2904
2905 /*
2906 * Use trylock to avoid locking order violation. In
2907 * btrfs_reclaim_bgs_work(), the lock order is
2908 * &bg->lock -> &fs_info->unused_bgs_lock. We skip a
2909 * block group if we cannot take its lock.
2910 */
2911 if (!spin_trylock(&bg->lock))
2912 continue;
2913 if (btrfs_is_block_group_used(bg) || bg->zone_unusable < bg->length) {
2914 spin_unlock(&bg->lock);
2915 continue;
2916 }
2917 spin_unlock(&bg->lock);
2918 found = true;
2919 break;
2920 }
2921 if (!found) {
2922 spin_unlock(&fs_info->unused_bgs_lock);
2923 return 0;
2924 }
2925
2926 list_del_init(&bg->bg_list);
2927 btrfs_put_block_group(bg);
2928 spin_unlock(&fs_info->unused_bgs_lock);
2929
2930 /*
2931 * Since the block group is fully zone_unusable and we cannot
2932 * allocate from this block group anymore, we don't need to set
2933 * this block group read-only.
2934 */
2935
2936 down_read(&fs_info->dev_replace.rwsem);
2937 map = bg->physical_map;
2938 for (int i = 0; i < map->num_stripes; i++) {
2939 struct btrfs_io_stripe *stripe = &map->stripes[i];
2940 unsigned int nofs_flags;
2941 int ret;
2942
2943 nofs_flags = memalloc_nofs_save();
2944 ret = blkdev_zone_mgmt(stripe->dev->bdev, REQ_OP_ZONE_RESET,
2945 stripe->physical >> SECTOR_SHIFT,
2946 zone_size_sectors);
2947 memalloc_nofs_restore(nofs_flags);
2948
2949 if (ret) {
2950 up_read(&fs_info->dev_replace.rwsem);
2951 return ret;
2952 }
2953 }
2954 up_read(&fs_info->dev_replace.rwsem);
2955
2956 spin_lock(&space_info->lock);
2957 spin_lock(&bg->lock);
2958 ASSERT(!btrfs_is_block_group_used(bg));
2959 if (bg->ro) {
2960 spin_unlock(&bg->lock);
2961 spin_unlock(&space_info->lock);
2962 continue;
2963 }
2964
2965 reclaimed = bg->alloc_offset;
2966 bg->zone_unusable = bg->length - bg->zone_capacity;
2967 bg->alloc_offset = 0;
2968 /*
2969  * This holds because we currently only reset block groups that were fully
2970  * used and then completely freed.
2971 */
2972 ASSERT(reclaimed == bg->zone_capacity,
2973 "reclaimed=%llu bg->zone_capacity=%llu", reclaimed, bg->zone_capacity);
2974 bg->free_space_ctl->free_space += reclaimed;
2975 space_info->bytes_zone_unusable -= reclaimed;
2976 spin_unlock(&bg->lock);
2977 btrfs_return_free_space(space_info, reclaimed);
2978 spin_unlock(&space_info->lock);
2979
2980 if (num_bytes <= reclaimed)
2981 break;
2982 num_bytes -= reclaimed;
2983 }
2984
2985 return 0;
2986 }
2987