xref: /linux/fs/btrfs/zoned.c (revision da5b2ad1c2f18834cb1ce429e2e5a5cf5cbdf21b)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/blkdev.h>
6 #include <linux/sched/mm.h>
7 #include <linux/atomic.h>
8 #include <linux/vmalloc.h>
9 #include "ctree.h"
10 #include "volumes.h"
11 #include "zoned.h"
12 #include "rcu-string.h"
13 #include "disk-io.h"
14 #include "block-group.h"
15 #include "dev-replace.h"
16 #include "space-info.h"
17 #include "fs.h"
18 #include "accessors.h"
19 #include "bio.h"
20 
21 /* Maximum number of zones to report per blkdev_report_zones() call */
22 #define BTRFS_REPORT_NR_ZONES   4096
23 /* Invalid allocation pointer value for missing devices */
24 #define WP_MISSING_DEV ((u64)-1)
25 /* Pseudo write pointer value for conventional zone */
26 #define WP_CONVENTIONAL ((u64)-2)
27 
28 /*
29  * Location of the first zone of superblock logging zone pairs.
30  *
31  * - primary superblock:    0B (zone 0)
32  * - first copy:          512G (zone starting at that offset)
33  * - second copy:           4T (zone starting at that offset)
34  */
35 #define BTRFS_SB_LOG_PRIMARY_OFFSET	(0ULL)
36 #define BTRFS_SB_LOG_FIRST_OFFSET	(512ULL * SZ_1G)
37 #define BTRFS_SB_LOG_SECOND_OFFSET	(4096ULL * SZ_1G)
38 
39 #define BTRFS_SB_LOG_FIRST_SHIFT	const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
40 #define BTRFS_SB_LOG_SECOND_SHIFT	const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
41 
42 /* Number of superblock log zones */
43 #define BTRFS_NR_SB_LOG_ZONES 2
44 
45 /*
46  * Minimum number of active zones we need:
47  *
48  * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
49  * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
50  * - 1 zone for tree-log dedicated block group
51  * - 1 zone for relocation
52  */
53 #define BTRFS_MIN_ACTIVE_ZONES		(BTRFS_SUPER_MIRROR_MAX + 5)
54 
55 /*
56  * Minimum / maximum supported zone size. Currently, SMR disks have a zone
57  * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
58  * We do not expect the zone size to become larger than 8GiB or smaller than
59  * 4MiB in the near future.
60  */
61 #define BTRFS_MAX_ZONE_SIZE		SZ_8G
62 #define BTRFS_MIN_ZONE_SIZE		SZ_4M
63 
64 #define SUPER_INFO_SECTORS	((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
65 
66 static void wait_eb_writebacks(struct btrfs_block_group *block_group);
67 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written);
68 
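/*
 * A superblock log zone is considered full when the zone condition is FULL or
 * when the remaining capacity is too small to hold another superblock copy.
 */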
69 static inline bool sb_zone_is_full(const struct blk_zone *zone)
70 {
71 	return (zone->cond == BLK_ZONE_COND_FULL) ||
72 		(zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
73 }
74 
75 static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
76 {
77 	struct blk_zone *zones = data;
78 
79 	memcpy(&zones[idx], zone, sizeof(*zone));
80 
81 	return 0;
82 }
83 
84 static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
85 			    u64 *wp_ret)
86 {
87 	bool empty[BTRFS_NR_SB_LOG_ZONES];
88 	bool full[BTRFS_NR_SB_LOG_ZONES];
89 	sector_t sector;
90 
91 	for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
92 		ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
93 		empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
94 		full[i] = sb_zone_is_full(&zones[i]);
95 	}
96 
97 	/*
98 	 * Possible states of log buffer zones
99 	 *
100 	 *           Empty[0]  In use[0]  Full[0]
101 	 * Empty[1]         *          0        1
102 	 * In use[1]        x          x        1
103 	 * Full[1]          0          0        C
104 	 *
105 	 * Log position:
106 	 *   *: Special case, no superblock is written
107 	 *   0: Use write pointer of zones[0]
108 	 *   1: Use write pointer of zones[1]
109 	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
110 	 *      one determined by generation
111 	 *   x: Invalid state
112 	 */
113 
114 	if (empty[0] && empty[1]) {
115 		/* Special case to distinguish no superblock to read */
116 		*wp_ret = zones[0].start << SECTOR_SHIFT;
117 		return -ENOENT;
118 	} else if (full[0] && full[1]) {
119 		/* Compare two super blocks */
120 		struct address_space *mapping = bdev->bd_mapping;
121 		struct page *page[BTRFS_NR_SB_LOG_ZONES];
122 		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
123 
124 		for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
125 			u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
126 			u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
127 						BTRFS_SUPER_INFO_SIZE;
128 
129 			page[i] = read_cache_page_gfp(mapping,
130 					bytenr >> PAGE_SHIFT, GFP_NOFS);
131 			if (IS_ERR(page[i])) {
132 				if (i == 1)
133 					btrfs_release_disk_super(super[0]);
134 				return PTR_ERR(page[i]);
135 			}
136 			super[i] = page_address(page[i]);
137 		}
138 
139 		if (btrfs_super_generation(super[0]) >
140 		    btrfs_super_generation(super[1]))
141 			sector = zones[1].start;
142 		else
143 			sector = zones[0].start;
144 
145 		for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
146 			btrfs_release_disk_super(super[i]);
147 	} else if (!full[0] && (empty[1] || full[1])) {
148 		sector = zones[0].wp;
149 	} else if (full[0]) {
150 		sector = zones[1].wp;
151 	} else {
152 		return -EUCLEAN;
153 	}
154 	*wp_ret = sector << SECTOR_SHIFT;
155 	return 0;
156 }
157 
158 /*
159  * Get the first zone number of the superblock mirror
160  */
161 static inline u32 sb_zone_number(int shift, int mirror)
162 {
163 	u64 zone = U64_MAX;
164 
165 	ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
166 	switch (mirror) {
167 	case 0: zone = 0; break;
168 	case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
169 	case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
170 	}
171 
172 	ASSERT(zone <= U32_MAX);
173 
174 	return (u32)zone;
175 }
176 
177 static inline sector_t zone_start_sector(u32 zone_number,
178 					 struct block_device *bdev)
179 {
180 	return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
181 }
182 
183 static inline u64 zone_start_physical(u32 zone_number,
184 				      struct btrfs_zoned_device_info *zone_info)
185 {
186 	return (u64)zone_number << zone_info->zone_size_shift;
187 }
188 
189 /*
190  * Emulate blkdev_report_zones() for a non-zoned device. It slices the block
191  * device into fixed-size chunks and fakes a conventional zone on each of
192  * them.
193  */
194 static int emulate_report_zones(struct btrfs_device *device, u64 pos,
195 				struct blk_zone *zones, unsigned int nr_zones)
196 {
197 	const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
198 	sector_t bdev_size = bdev_nr_sectors(device->bdev);
199 	unsigned int i;
200 
201 	pos >>= SECTOR_SHIFT;
202 	for (i = 0; i < nr_zones; i++) {
203 		zones[i].start = i * zone_sectors + pos;
204 		zones[i].len = zone_sectors;
205 		zones[i].capacity = zone_sectors;
206 		zones[i].wp = zones[i].start + zone_sectors;
207 		zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
208 		zones[i].cond = BLK_ZONE_COND_NOT_WP;
209 
210 		if (zones[i].wp >= bdev_size) {
211 			i++;
212 			break;
213 		}
214 	}
215 
216 	return i;
217 }
218 
219 static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
220 			       struct blk_zone *zones, unsigned int *nr_zones)
221 {
222 	struct btrfs_zoned_device_info *zinfo = device->zone_info;
223 	int ret;
224 
225 	if (!*nr_zones)
226 		return 0;
227 
228 	if (!bdev_is_zoned(device->bdev)) {
229 		ret = emulate_report_zones(device, pos, zones, *nr_zones);
230 		*nr_zones = ret;
231 		return 0;
232 	}
233 
234 	/* Check cache */
235 	if (zinfo->zone_cache) {
236 		unsigned int i;
237 		u32 zno;
238 
239 		ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
240 		zno = pos >> zinfo->zone_size_shift;
241 		/*
242 		 * We cannot report zones beyond the device end. So, it is OK to
243 		 * cap *nr_zones to the number of zones remaining up to the end.
244 		 */
245 		*nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);
246 
247 		for (i = 0; i < *nr_zones; i++) {
248 			struct blk_zone *zone_info;
249 
250 			zone_info = &zinfo->zone_cache[zno + i];
251 			if (!zone_info->len)
252 				break;
253 		}
254 
255 		if (i == *nr_zones) {
256 			/* Cache hit on all the zones */
257 			memcpy(zones, zinfo->zone_cache + zno,
258 			       sizeof(*zinfo->zone_cache) * *nr_zones);
259 			return 0;
260 		}
261 	}
262 
263 	ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
264 				  copy_zone_info_cb, zones);
265 	if (ret < 0) {
266 		btrfs_err_in_rcu(device->fs_info,
267 				 "zoned: failed to read zone %llu on %s (devid %llu)",
268 				 pos, rcu_str_deref(device->name),
269 				 device->devid);
270 		return ret;
271 	}
272 	*nr_zones = ret;
273 	if (!ret)
274 		return -EIO;
275 
276 	/* Populate cache */
277 	if (zinfo->zone_cache) {
278 		u32 zno = pos >> zinfo->zone_size_shift;
279 
280 		memcpy(zinfo->zone_cache + zno, zones,
281 		       sizeof(*zinfo->zone_cache) * *nr_zones);
282 	}
283 
284 	return 0;
285 }
286 
287 /* The emulated zone size is determined from the size of the first device extent. */
288 static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
289 {
290 	struct btrfs_path *path;
291 	struct btrfs_root *root = fs_info->dev_root;
292 	struct btrfs_key key;
293 	struct extent_buffer *leaf;
294 	struct btrfs_dev_extent *dext;
295 	int ret = 0;
296 
297 	key.objectid = 1;
298 	key.type = BTRFS_DEV_EXTENT_KEY;
299 	key.offset = 0;
300 
301 	path = btrfs_alloc_path();
302 	if (!path)
303 		return -ENOMEM;
304 
305 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
306 	if (ret < 0)
307 		goto out;
308 
309 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
310 		ret = btrfs_next_leaf(root, path);
311 		if (ret < 0)
312 			goto out;
313 		/* No dev extents at all? Not good */
314 		if (ret > 0) {
315 			ret = -EUCLEAN;
316 			goto out;
317 		}
318 	}
319 
320 	leaf = path->nodes[0];
321 	dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
322 	fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
323 	ret = 0;
324 
325 out:
326 	btrfs_free_path(path);
327 
328 	return ret;
329 }
330 
331 int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
332 {
333 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
334 	struct btrfs_device *device;
335 	int ret = 0;
336 
337 	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
338 	if (!btrfs_fs_incompat(fs_info, ZONED))
339 		return 0;
340 
341 	mutex_lock(&fs_devices->device_list_mutex);
342 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
343 		/* We can skip reading zone info for missing devices. */
344 		if (!device->bdev)
345 			continue;
346 
347 		ret = btrfs_get_dev_zone_info(device, true);
348 		if (ret)
349 			break;
350 	}
351 	mutex_unlock(&fs_devices->device_list_mutex);
352 
353 	return ret;
354 }
355 
356 int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
357 {
358 	struct btrfs_fs_info *fs_info = device->fs_info;
359 	struct btrfs_zoned_device_info *zone_info = NULL;
360 	struct block_device *bdev = device->bdev;
361 	unsigned int max_active_zones;
362 	unsigned int nactive;
363 	sector_t nr_sectors;
364 	sector_t sector = 0;
365 	struct blk_zone *zones = NULL;
366 	unsigned int i, nreported = 0, nr_zones;
367 	sector_t zone_sectors;
368 	char *model, *emulated;
369 	int ret;
370 
371 	/*
372 	 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
373 	 * yet be set.
374 	 */
375 	if (!btrfs_fs_incompat(fs_info, ZONED))
376 		return 0;
377 
378 	if (device->zone_info)
379 		return 0;
380 
381 	zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
382 	if (!zone_info)
383 		return -ENOMEM;
384 
385 	device->zone_info = zone_info;
386 
387 	if (!bdev_is_zoned(bdev)) {
388 		if (!fs_info->zone_size) {
389 			ret = calculate_emulated_zone_size(fs_info);
390 			if (ret)
391 				goto out;
392 		}
393 
394 		ASSERT(fs_info->zone_size);
395 		zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
396 	} else {
397 		zone_sectors = bdev_zone_sectors(bdev);
398 	}
399 
400 	ASSERT(is_power_of_two_u64(zone_sectors));
401 	zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
402 
403 	/* We reject devices with a zone size larger than 8GiB. */
404 	if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
405 		btrfs_err_in_rcu(fs_info,
406 		"zoned: %s: zone size %llu larger than supported maximum %llu",
407 				 rcu_str_deref(device->name),
408 				 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
409 		ret = -EINVAL;
410 		goto out;
411 	} else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
412 		btrfs_err_in_rcu(fs_info,
413 		"zoned: %s: zone size %llu smaller than supported minimum %u",
414 				 rcu_str_deref(device->name),
415 				 zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
416 		ret = -EINVAL;
417 		goto out;
418 	}
419 
420 	nr_sectors = bdev_nr_sectors(bdev);
421 	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
422 	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
423 	if (!IS_ALIGNED(nr_sectors, zone_sectors))
424 		zone_info->nr_zones++;
425 
426 	max_active_zones = bdev_max_active_zones(bdev);
427 	if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
428 		btrfs_err_in_rcu(fs_info,
429 "zoned: %s: max active zones %u is too small, need at least %u active zones",
430 				 rcu_str_deref(device->name), max_active_zones,
431 				 BTRFS_MIN_ACTIVE_ZONES);
432 		ret = -EINVAL;
433 		goto out;
434 	}
435 	zone_info->max_active_zones = max_active_zones;
436 
437 	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
438 	if (!zone_info->seq_zones) {
439 		ret = -ENOMEM;
440 		goto out;
441 	}
442 
443 	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
444 	if (!zone_info->empty_zones) {
445 		ret = -ENOMEM;
446 		goto out;
447 	}
448 
449 	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
450 	if (!zone_info->active_zones) {
451 		ret = -ENOMEM;
452 		goto out;
453 	}
454 
455 	zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
456 	if (!zones) {
457 		ret = -ENOMEM;
458 		goto out;
459 	}
460 
461 	/*
462 	 * Enable zone cache only for a zoned device. On a non-zoned device, we
463 	 * fill the zone info with emulated CONVENTIONAL zones, so no need to
464 	 * use the cache.
465 	 */
466 	if (populate_cache && bdev_is_zoned(device->bdev)) {
467 		zone_info->zone_cache = vcalloc(zone_info->nr_zones,
468 						sizeof(struct blk_zone));
469 		if (!zone_info->zone_cache) {
470 			btrfs_err_in_rcu(device->fs_info,
471 				"zoned: failed to allocate zone cache for %s",
472 				rcu_str_deref(device->name));
473 			ret = -ENOMEM;
474 			goto out;
475 		}
476 	}
477 
478 	/* Get the type and condition of each zone. */
479 	nactive = 0;
480 	while (sector < nr_sectors) {
481 		nr_zones = BTRFS_REPORT_NR_ZONES;
482 		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
483 					  &nr_zones);
484 		if (ret)
485 			goto out;
486 
487 		for (i = 0; i < nr_zones; i++) {
488 			if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
489 				__set_bit(nreported, zone_info->seq_zones);
490 			switch (zones[i].cond) {
491 			case BLK_ZONE_COND_EMPTY:
492 				__set_bit(nreported, zone_info->empty_zones);
493 				break;
494 			case BLK_ZONE_COND_IMP_OPEN:
495 			case BLK_ZONE_COND_EXP_OPEN:
496 			case BLK_ZONE_COND_CLOSED:
497 				__set_bit(nreported, zone_info->active_zones);
498 				nactive++;
499 				break;
500 			}
501 			nreported++;
502 		}
503 		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
504 	}
505 
506 	if (nreported != zone_info->nr_zones) {
507 		btrfs_err_in_rcu(device->fs_info,
508 				 "inconsistent number of zones on %s (%u/%u)",
509 				 rcu_str_deref(device->name), nreported,
510 				 zone_info->nr_zones);
511 		ret = -EIO;
512 		goto out;
513 	}
514 
515 	if (max_active_zones) {
516 		if (nactive > max_active_zones) {
517 			btrfs_err_in_rcu(device->fs_info,
518 			"zoned: %u active zones on %s exceeds max_active_zones %u",
519 					 nactive, rcu_str_deref(device->name),
520 					 max_active_zones);
521 			ret = -EIO;
522 			goto out;
523 		}
524 		atomic_set(&zone_info->active_zones_left,
525 			   max_active_zones - nactive);
526 		set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
527 	}
528 
529 	/* Validate superblock log */
530 	nr_zones = BTRFS_NR_SB_LOG_ZONES;
531 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
532 		u32 sb_zone;
533 		u64 sb_wp;
534 		int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;
535 
536 		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
537 		if (sb_zone + 1 >= zone_info->nr_zones)
538 			continue;
539 
540 		ret = btrfs_get_dev_zones(device,
541 					  zone_start_physical(sb_zone, zone_info),
542 					  &zone_info->sb_zones[sb_pos],
543 					  &nr_zones);
544 		if (ret)
545 			goto out;
546 
547 		if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
548 			btrfs_err_in_rcu(device->fs_info,
549 	"zoned: failed to read super block log zone info at devid %llu zone %u",
550 					 device->devid, sb_zone);
551 			ret = -EUCLEAN;
552 			goto out;
553 		}
554 
555 		/*
556 		 * If zones[0] is conventional, always use the beginning of the
557 		 * zone to record the superblock. No need to validate in that case.
558 		 */
559 		if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
560 		    BLK_ZONE_TYPE_CONVENTIONAL)
561 			continue;
562 
563 		ret = sb_write_pointer(device->bdev,
564 				       &zone_info->sb_zones[sb_pos], &sb_wp);
565 		if (ret != -ENOENT && ret) {
566 			btrfs_err_in_rcu(device->fs_info,
567 			"zoned: super block log zone corrupted devid %llu zone %u",
568 					 device->devid, sb_zone);
569 			ret = -EUCLEAN;
570 			goto out;
571 		}
572 	}
573 
574 
575 	kvfree(zones);
576 
577 	if (bdev_is_zoned(bdev)) {
578 		model = "host-managed zoned";
579 		emulated = "";
580 	} else {
581 		model = "regular";
582 		emulated = "emulated ";
583 	}
584 
585 	btrfs_info_in_rcu(fs_info,
586 		"%s block device %s, %u %szones of %llu bytes",
587 		model, rcu_str_deref(device->name), zone_info->nr_zones,
588 		emulated, zone_info->zone_size);
589 
590 	return 0;
591 
592 out:
593 	kvfree(zones);
594 	btrfs_destroy_dev_zone_info(device);
595 	return ret;
596 }
597 
598 void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
599 {
600 	struct btrfs_zoned_device_info *zone_info = device->zone_info;
601 
602 	if (!zone_info)
603 		return;
604 
605 	bitmap_free(zone_info->active_zones);
606 	bitmap_free(zone_info->seq_zones);
607 	bitmap_free(zone_info->empty_zones);
608 	vfree(zone_info->zone_cache);
609 	kfree(zone_info);
610 	device->zone_info = NULL;
611 }
612 
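/*
 * Clone a device's zone info, deep-copying the seq/empty/active zone bitmaps.
 * The zone cache is intentionally not cloned; the copy starts without one.
 */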
613 struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
614 {
615 	struct btrfs_zoned_device_info *zone_info;
616 
617 	zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
618 	if (!zone_info)
619 		return NULL;
620 
621 	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
622 	if (!zone_info->seq_zones)
623 		goto out;
624 
625 	bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
626 		    zone_info->nr_zones);
627 
628 	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
629 	if (!zone_info->empty_zones)
630 		goto out;
631 
632 	bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
633 		    zone_info->nr_zones);
634 
635 	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
636 	if (!zone_info->active_zones)
637 		goto out;
638 
639 	bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
640 		    zone_info->nr_zones);
641 	zone_info->zone_cache = NULL;
642 
643 	return zone_info;
644 
645 out:
646 	bitmap_free(zone_info->seq_zones);
647 	bitmap_free(zone_info->empty_zones);
648 	bitmap_free(zone_info->active_zones);
649 	kfree(zone_info);
650 	return NULL;
651 }
652 
653 static int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos, struct blk_zone *zone)
654 {
655 	unsigned int nr_zones = 1;
656 	int ret;
657 
658 	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
659 	if (ret != 0 || !nr_zones)
660 		return ret ? ret : -EIO;
661 
662 	return 0;
663 }
664 
665 static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
666 {
667 	struct btrfs_device *device;
668 
669 	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
670 		if (device->bdev && bdev_is_zoned(device->bdev)) {
671 			btrfs_err(fs_info,
672 				"zoned: mode not enabled but zoned device found: %pg",
673 				device->bdev);
674 			return -EINVAL;
675 		}
676 	}
677 
678 	return 0;
679 }
680 
681 int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
682 {
683 	struct queue_limits *lim = &fs_info->limits;
684 	struct btrfs_device *device;
685 	u64 zone_size = 0;
686 	int ret;
687 
688 	/*
689 	 * Host-Managed devices can't be used without the ZONED flag.  With the
690 	 * ZONED all devices can be used, using zone emulation if required.
691 	 */
692 	if (!btrfs_fs_incompat(fs_info, ZONED))
693 		return btrfs_check_for_zoned_device(fs_info);
694 
695 	blk_set_stacking_limits(lim);
696 
697 	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
698 		struct btrfs_zoned_device_info *zone_info = device->zone_info;
699 
700 		if (!device->bdev)
701 			continue;
702 
703 		if (!zone_size) {
704 			zone_size = zone_info->zone_size;
705 		} else if (zone_info->zone_size != zone_size) {
706 			btrfs_err(fs_info,
707 		"zoned: unequal block device zone sizes: have %llu found %llu",
708 				  zone_info->zone_size, zone_size);
709 			return -EINVAL;
710 		}
711 
712 		/*
713 		 * With zoned emulation, we can have a non-zoned device in zoned
714 		 * mode. In this case, we don't have a valid max zone
715 		 * append size.
716 		 */
717 		if (bdev_is_zoned(device->bdev)) {
718 			blk_stack_limits(lim,
719 					 &bdev_get_queue(device->bdev)->limits,
720 					 0);
721 		}
722 	}
723 
724 	/*
725 	 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
726 	 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
727 	 * check the alignment here.
728 	 */
729 	if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
730 		btrfs_err(fs_info,
731 			  "zoned: zone size %llu not aligned to stripe %u",
732 			  zone_size, BTRFS_STRIPE_LEN);
733 		return -EINVAL;
734 	}
735 
736 	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
737 		btrfs_err(fs_info, "zoned: mixed block groups not supported");
738 		return -EINVAL;
739 	}
740 
741 	fs_info->zone_size = zone_size;
742 	/*
743 	 * Also limit max_zone_append_size by max_segments * PAGE_SIZE.
744 	 * Technically, we can have multiple pages per segment. But, since
745 	 * we add the pages one by one to a bio, and cannot increase the
746 	 * metadata reservation even if it increases the number of extents, it
747 	 * is safe to stick with the limit.
748 	 */
749 	fs_info->max_zone_append_size = ALIGN_DOWN(
750 		min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT,
751 		     (u64)lim->max_sectors << SECTOR_SHIFT,
752 		     (u64)lim->max_segments << PAGE_SHIFT),
753 		fs_info->sectorsize);
754 	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
755 	if (fs_info->max_zone_append_size < fs_info->max_extent_size)
756 		fs_info->max_extent_size = fs_info->max_zone_append_size;
757 
758 	/*
759 	 * Check mount options here, because we have just set fs_info->zone_size
760 	 * above, which determines the result of btrfs_is_zoned().
761 	 */
762 	ret = btrfs_check_mountopts_zoned(fs_info, &fs_info->mount_opt);
763 	if (ret)
764 		return ret;
765 
766 	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
767 	return 0;
768 }
769 
770 int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info,
771 				unsigned long long *mount_opt)
772 {
773 	if (!btrfs_is_zoned(info))
774 		return 0;
775 
776 	/*
777 	 * Space cache writing is not COWed. Disable that to avoid write errors
778 	 * in sequential zones.
779 	 */
780 	if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
781 		btrfs_err(info, "zoned: space cache v1 is not supported");
782 		return -EINVAL;
783 	}
784 
785 	if (btrfs_raw_test_opt(*mount_opt, NODATACOW)) {
786 		btrfs_err(info, "zoned: NODATACOW not supported");
787 		return -EINVAL;
788 	}
789 
790 	if (btrfs_raw_test_opt(*mount_opt, DISCARD_ASYNC)) {
791 		btrfs_info(info,
792 			   "zoned: async discard ignored and disabled for zoned mode");
793 		btrfs_clear_opt(*mount_opt, DISCARD_ASYNC);
794 	}
795 
796 	return 0;
797 }
798 
799 static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
800 			   int rw, u64 *bytenr_ret)
801 {
802 	u64 wp;
803 	int ret;
804 
805 	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
806 		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
807 		return 0;
808 	}
809 
810 	ret = sb_write_pointer(bdev, zones, &wp);
811 	if (ret != -ENOENT && ret < 0)
812 		return ret;
813 
814 	if (rw == WRITE) {
815 		struct blk_zone *reset = NULL;
816 
817 		if (wp == zones[0].start << SECTOR_SHIFT)
818 			reset = &zones[0];
819 		else if (wp == zones[1].start << SECTOR_SHIFT)
820 			reset = &zones[1];
821 
822 		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
823 			unsigned int nofs_flags;
824 
825 			ASSERT(sb_zone_is_full(reset));
826 
827 			nofs_flags = memalloc_nofs_save();
828 			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
829 					       reset->start, reset->len);
830 			memalloc_nofs_restore(nofs_flags);
831 			if (ret)
832 				return ret;
833 
834 			reset->cond = BLK_ZONE_COND_EMPTY;
835 			reset->wp = reset->start;
836 		}
837 	} else if (ret != -ENOENT) {
838 		/*
839 		 * For READ, we want the previous one. Move write pointer to
840 		 * the end of a zone, if it is at the head of a zone.
841 		 */
842 		u64 zone_end = 0;
843 
844 		if (wp == zones[0].start << SECTOR_SHIFT)
845 			zone_end = zones[1].start + zones[1].capacity;
846 		else if (wp == zones[1].start << SECTOR_SHIFT)
847 			zone_end = zones[0].start + zones[0].capacity;
848 		if (zone_end)
849 			wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
850 					BTRFS_SUPER_INFO_SIZE);
851 
852 		wp -= BTRFS_SUPER_INFO_SIZE;
853 	}
854 
855 	*bytenr_ret = wp;
856 	return 0;
857 
858 }
859 
860 int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
861 			       u64 *bytenr_ret)
862 {
863 	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
864 	sector_t zone_sectors;
865 	u32 sb_zone;
866 	int ret;
867 	u8 zone_sectors_shift;
868 	sector_t nr_sectors;
869 	u32 nr_zones;
870 
871 	if (!bdev_is_zoned(bdev)) {
872 		*bytenr_ret = btrfs_sb_offset(mirror);
873 		return 0;
874 	}
875 
876 	ASSERT(rw == READ || rw == WRITE);
877 
878 	zone_sectors = bdev_zone_sectors(bdev);
879 	if (!is_power_of_2(zone_sectors))
880 		return -EINVAL;
881 	zone_sectors_shift = ilog2(zone_sectors);
882 	nr_sectors = bdev_nr_sectors(bdev);
883 	nr_zones = nr_sectors >> zone_sectors_shift;
884 
885 	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
886 	if (sb_zone + 1 >= nr_zones)
887 		return -ENOENT;
888 
889 	ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
890 				  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
891 				  zones);
892 	if (ret < 0)
893 		return ret;
894 	if (ret != BTRFS_NR_SB_LOG_ZONES)
895 		return -EIO;
896 
897 	return sb_log_location(bdev, zones, rw, bytenr_ret);
898 }
899 
900 int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
901 			  u64 *bytenr_ret)
902 {
903 	struct btrfs_zoned_device_info *zinfo = device->zone_info;
904 	u32 zone_num;
905 
906 	/*
907 	 * For a zoned filesystem on a non-zoned block device, use the same
908 	 * super block locations as a regular filesystem. That way, the super
909 	 * block can always be retrieved and the zoned flag of the volume can be
910 	 * detected from the super block information.
911 	 */
912 	if (!bdev_is_zoned(device->bdev)) {
913 		*bytenr_ret = btrfs_sb_offset(mirror);
914 		return 0;
915 	}
916 
917 	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
918 	if (zone_num + 1 >= zinfo->nr_zones)
919 		return -ENOENT;
920 
921 	return sb_log_location(device->bdev,
922 			       &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
923 			       rw, bytenr_ret);
924 }
925 
926 static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
927 				  int mirror)
928 {
929 	u32 zone_num;
930 
931 	if (!zinfo)
932 		return false;
933 
934 	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
935 	if (zone_num + 1 >= zinfo->nr_zones)
936 		return false;
937 
938 	if (!test_bit(zone_num, zinfo->seq_zones))
939 		return false;
940 
941 	return true;
942 }
943 
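/*
 * Advance the cached write pointer of the superblock log zones after one
 * superblock copy (SUPER_INFO_SECTORS) has been written to @mirror. If the
 * zone can no longer hold another copy, it is finished and marked FULL.
 */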
944 int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
945 {
946 	struct btrfs_zoned_device_info *zinfo = device->zone_info;
947 	struct blk_zone *zone;
948 	int i;
949 
950 	if (!is_sb_log_zone(zinfo, mirror))
951 		return 0;
952 
953 	zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
954 	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
955 		/* Advance to the next zone. */
956 		if (zone->cond == BLK_ZONE_COND_FULL) {
957 			zone++;
958 			continue;
959 		}
960 
961 		if (zone->cond == BLK_ZONE_COND_EMPTY)
962 			zone->cond = BLK_ZONE_COND_IMP_OPEN;
963 
964 		zone->wp += SUPER_INFO_SECTORS;
965 
966 		if (sb_zone_is_full(zone)) {
967 			/*
968 			 * No room left to write a new superblock. Since the
969 			 * superblock is written with REQ_SYNC, it is safe to
970 			 * finish the zone now.
971 			 *
972 			 * If the write pointer is exactly at the capacity,
973 			 * explicit ZONE_FINISH is not necessary.
974 			 */
975 			if (zone->wp != zone->start + zone->capacity) {
976 				unsigned int nofs_flags;
977 				int ret;
978 
979 				nofs_flags = memalloc_nofs_save();
980 				ret = blkdev_zone_mgmt(device->bdev,
981 						REQ_OP_ZONE_FINISH, zone->start,
982 						zone->len);
983 				memalloc_nofs_restore(nofs_flags);
984 				if (ret)
985 					return ret;
986 			}
987 
988 			zone->wp = zone->start + zone->len;
989 			zone->cond = BLK_ZONE_COND_FULL;
990 		}
991 		return 0;
992 	}
993 
994 	/* All the zones are FULL. Should not reach here. */
995 	ASSERT(0);
996 	return -EIO;
997 }
998 
999 int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
1000 {
1001 	unsigned int nofs_flags;
1002 	sector_t zone_sectors;
1003 	sector_t nr_sectors;
1004 	u8 zone_sectors_shift;
1005 	u32 sb_zone;
1006 	u32 nr_zones;
1007 	int ret;
1008 
1009 	zone_sectors = bdev_zone_sectors(bdev);
1010 	zone_sectors_shift = ilog2(zone_sectors);
1011 	nr_sectors = bdev_nr_sectors(bdev);
1012 	nr_zones = nr_sectors >> zone_sectors_shift;
1013 
1014 	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
1015 	if (sb_zone + 1 >= nr_zones)
1016 		return -ENOENT;
1017 
1018 	nofs_flags = memalloc_nofs_save();
1019 	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1020 			       zone_start_sector(sb_zone, bdev),
1021 			       zone_sectors * BTRFS_NR_SB_LOG_ZONES);
1022 	memalloc_nofs_restore(nofs_flags);
1023 	return ret;
1024 }
1025 
1026 /*
1027  * Find allocatable zones within a given region.
1028  *
1029  * @device:	the device to allocate a region on
1030  * @hole_start: the position of the hole to allocate the region in
1031  * @hole_end:	the end of the hole
1032  * @num_bytes:	size of the wanted region
1033  * @return:	position of allocatable zones
1034  *
1035  * The allocatable region must not contain any superblock locations.
1036  */
1037 u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
1038 				 u64 hole_end, u64 num_bytes)
1039 {
1040 	struct btrfs_zoned_device_info *zinfo = device->zone_info;
1041 	const u8 shift = zinfo->zone_size_shift;
1042 	u64 nzones = num_bytes >> shift;
1043 	u64 pos = hole_start;
1044 	u64 begin, end;
1045 	bool have_sb;
1046 	int i;
1047 
1048 	ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
1049 	ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));
1050 
1051 	while (pos < hole_end) {
1052 		begin = pos >> shift;
1053 		end = begin + nzones;
1054 
1055 		if (end > zinfo->nr_zones)
1056 			return hole_end;
1057 
1058 		/* Check if zones in the region are all empty */
1059 		if (btrfs_dev_is_sequential(device, pos) &&
1060 		    !bitmap_test_range_all_set(zinfo->empty_zones, begin, nzones)) {
1061 			pos += zinfo->zone_size;
1062 			continue;
1063 		}
1064 
1065 		have_sb = false;
1066 		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1067 			u32 sb_zone;
1068 			u64 sb_pos;
1069 
1070 			sb_zone = sb_zone_number(shift, i);
1071 			if (!(end <= sb_zone ||
1072 			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
1073 				have_sb = true;
1074 				pos = zone_start_physical(
1075 					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
1076 				break;
1077 			}
1078 
1079 			/* We also need to exclude regular superblock positions */
1080 			sb_pos = btrfs_sb_offset(i);
1081 			if (!(pos + num_bytes <= sb_pos ||
1082 			      sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
1083 				have_sb = true;
1084 				pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
1085 					    zinfo->zone_size);
1086 				break;
1087 			}
1088 		}
1089 		if (!have_sb)
1090 			break;
1091 	}
1092 
1093 	return pos;
1094 }
1095 
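/*
 * Account the zone containing @pos as active, consuming one entry from the
 * device's active zone budget. Returns true if the zone is (or already was)
 * accounted active, or if the device has no active zone limit at all.
 */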
1096 static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
1097 {
1098 	struct btrfs_zoned_device_info *zone_info = device->zone_info;
1099 	unsigned int zno = (pos >> zone_info->zone_size_shift);
1100 
1101 	/* We can use any number of zones */
1102 	if (zone_info->max_active_zones == 0)
1103 		return true;
1104 
1105 	if (!test_bit(zno, zone_info->active_zones)) {
1106 		/* Active zone left? */
1107 		if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
1108 			return false;
1109 		if (test_and_set_bit(zno, zone_info->active_zones)) {
1110 			/* Someone already set the bit */
1111 			atomic_inc(&zone_info->active_zones_left);
1112 		}
1113 	}
1114 
1115 	return true;
1116 }
1117 
1118 static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
1119 {
1120 	struct btrfs_zoned_device_info *zone_info = device->zone_info;
1121 	unsigned int zno = (pos >> zone_info->zone_size_shift);
1122 
1123 	/* We can use any number of zones */
1124 	if (zone_info->max_active_zones == 0)
1125 		return;
1126 
1127 	if (test_and_clear_bit(zno, zone_info->active_zones))
1128 		atomic_inc(&zone_info->active_zones_left);
1129 }
1130 
1131 int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
1132 			    u64 length, u64 *bytes)
1133 {
1134 	unsigned int nofs_flags;
1135 	int ret;
1136 
1137 	*bytes = 0;
1138 	nofs_flags = memalloc_nofs_save();
1139 	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
1140 			       physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT);
1141 	memalloc_nofs_restore(nofs_flags);
1142 	if (ret)
1143 		return ret;
1144 
1145 	*bytes = length;
1146 	while (length) {
1147 		btrfs_dev_set_zone_empty(device, physical);
1148 		btrfs_dev_clear_active_zone(device, physical);
1149 		physical += device->zone_info->zone_size;
1150 		length -= device->zone_info->zone_size;
1151 	}
1152 
1153 	return 0;
1154 }
1155 
1156 int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
1157 {
1158 	struct btrfs_zoned_device_info *zinfo = device->zone_info;
1159 	const u8 shift = zinfo->zone_size_shift;
1160 	unsigned long begin = start >> shift;
1161 	unsigned long nbits = size >> shift;
1162 	u64 pos;
1163 	int ret;
1164 
1165 	ASSERT(IS_ALIGNED(start, zinfo->zone_size));
1166 	ASSERT(IS_ALIGNED(size, zinfo->zone_size));
1167 
1168 	if (begin + nbits > zinfo->nr_zones)
1169 		return -ERANGE;
1170 
1171 	/* All the zones are conventional */
1172 	if (bitmap_test_range_all_zero(zinfo->seq_zones, begin, nbits))
1173 		return 0;
1174 
1175 	/* All the zones are sequential and empty */
1176 	if (bitmap_test_range_all_set(zinfo->seq_zones, begin, nbits) &&
1177 	    bitmap_test_range_all_set(zinfo->empty_zones, begin, nbits))
1178 		return 0;
1179 
1180 	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
1181 		u64 reset_bytes;
1182 
1183 		if (!btrfs_dev_is_sequential(device, pos) ||
1184 		    btrfs_dev_is_empty_zone(device, pos))
1185 			continue;
1186 
1187 		/* Free regions should be empty */
1188 		btrfs_warn_in_rcu(
1189 			device->fs_info,
1190 		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
1191 			rcu_str_deref(device->name), device->devid, pos >> shift);
1192 		WARN_ON_ONCE(1);
1193 
1194 		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
1195 					      &reset_bytes);
1196 		if (ret)
1197 			return ret;
1198 	}
1199 
1200 	return 0;
1201 }
1202 
1203 /*
1204  * Calculate an allocation pointer from the extent allocation information
1205  * for a block group consisting of conventional zones. It points to the
1206  * end of the highest-addressed extent in the block group, which is used as
1207  * the allocation offset.
1208  */
1209 static int calculate_alloc_pointer(struct btrfs_block_group *cache,
1210 				   u64 *offset_ret, bool new)
1211 {
1212 	struct btrfs_fs_info *fs_info = cache->fs_info;
1213 	struct btrfs_root *root;
1214 	struct btrfs_path *path;
1215 	struct btrfs_key key;
1216 	struct btrfs_key found_key;
1217 	int ret;
1218 	u64 length;
1219 
1220 	/*
1221 	 * Avoid tree lookups for a new block group, there's no use for it.
1222 	 * It must always be 0.
1223 	 *
1224 	 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
1225 	 * For a new block group, this function is called from
1226 	 * btrfs_make_block_group() which is already taking the chunk mutex.
1227 	 * Thus, we cannot call calculate_alloc_pointer() which takes extent
1228 	 * buffer locks to avoid deadlock.
1229 	 */
1230 	if (new) {
1231 		*offset_ret = 0;
1232 		return 0;
1233 	}
1234 
1235 	path = btrfs_alloc_path();
1236 	if (!path)
1237 		return -ENOMEM;
1238 
1239 	key.objectid = cache->start + cache->length;
1240 	key.type = 0;
1241 	key.offset = 0;
1242 
1243 	root = btrfs_extent_root(fs_info, key.objectid);
1244 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1245 	/* We should not find the exact match */
1246 	if (!ret)
1247 		ret = -EUCLEAN;
1248 	if (ret < 0)
1249 		goto out;
1250 
1251 	ret = btrfs_previous_extent_item(root, path, cache->start);
1252 	if (ret) {
1253 		if (ret == 1) {
1254 			ret = 0;
1255 			*offset_ret = 0;
1256 		}
1257 		goto out;
1258 	}
1259 
1260 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
1261 
1262 	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
1263 		length = found_key.offset;
1264 	else
1265 		length = fs_info->nodesize;
1266 
1267 	if (!(found_key.objectid >= cache->start &&
1268 	       found_key.objectid + length <= cache->start + cache->length)) {
1269 		ret = -EUCLEAN;
1270 		goto out;
1271 	}
1272 	*offset_ret = found_key.objectid + length - cache->start;
1273 	ret = 0;
1274 
1275 out:
1276 	btrfs_free_path(path);
1277 	return ret;
1278 }
1279 
1280 struct zone_info {
1281 	u64 physical;
1282 	u64 capacity;
1283 	u64 alloc_offset;
1284 };
1285 
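/*
 * Load the zone backing stripe @zone_idx of chunk @map: fill @info with the
 * physical start, zone capacity and write-pointer based allocation offset
 * (or WP_MISSING_DEV / WP_CONVENTIONAL for missing devices and conventional
 * zones), and set the stripe's bit in @active if the zone counts as active.
 */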
1286 static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
1287 				struct zone_info *info, unsigned long *active,
1288 				struct btrfs_chunk_map *map)
1289 {
1290 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1291 	struct btrfs_device *device;
1292 	int dev_replace_is_ongoing = 0;
1293 	unsigned int nofs_flag;
1294 	struct blk_zone zone;
1295 	int ret;
1296 
1297 	info->physical = map->stripes[zone_idx].physical;
1298 
1299 	down_read(&dev_replace->rwsem);
1300 	device = map->stripes[zone_idx].dev;
1301 
1302 	if (!device->bdev) {
1303 		up_read(&dev_replace->rwsem);
1304 		info->alloc_offset = WP_MISSING_DEV;
1305 		return 0;
1306 	}
1307 
1308 	/* Consider a zone as active if we can allow any number of active zones. */
1309 	if (!device->zone_info->max_active_zones)
1310 		__set_bit(zone_idx, active);
1311 
1312 	if (!btrfs_dev_is_sequential(device, info->physical)) {
1313 		up_read(&dev_replace->rwsem);
1314 		info->alloc_offset = WP_CONVENTIONAL;
1315 		return 0;
1316 	}
1317 
1318 	/* This zone will be used for allocation, so mark this zone non-empty. */
1319 	btrfs_dev_clear_zone_empty(device, info->physical);
1320 
1321 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
1322 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
1323 		btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
1324 
1325 	/*
1326 	 * The group is mapped to a sequential zone. Get the zone write pointer
1327 	 * to determine the allocation offset within the zone.
1328 	 */
1329 	WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
1330 	nofs_flag = memalloc_nofs_save();
1331 	ret = btrfs_get_dev_zone(device, info->physical, &zone);
1332 	memalloc_nofs_restore(nofs_flag);
1333 	if (ret) {
1334 		up_read(&dev_replace->rwsem);
1335 		if (ret != -EIO && ret != -EOPNOTSUPP)
1336 			return ret;
1337 		info->alloc_offset = WP_MISSING_DEV;
1338 		return 0;
1339 	}
1340 
1341 	if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
1342 		btrfs_err_in_rcu(fs_info,
1343 		"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
1344 			zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
1345 			device->devid);
1346 		up_read(&dev_replace->rwsem);
1347 		return -EIO;
1348 	}
1349 
1350 	info->capacity = (zone.capacity << SECTOR_SHIFT);
1351 
1352 	switch (zone.cond) {
1353 	case BLK_ZONE_COND_OFFLINE:
1354 	case BLK_ZONE_COND_READONLY:
1355 		btrfs_err(fs_info,
1356 		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
1357 			  (info->physical >> device->zone_info->zone_size_shift),
1358 			  rcu_str_deref(device->name), device->devid);
1359 		info->alloc_offset = WP_MISSING_DEV;
1360 		break;
1361 	case BLK_ZONE_COND_EMPTY:
1362 		info->alloc_offset = 0;
1363 		break;
1364 	case BLK_ZONE_COND_FULL:
1365 		info->alloc_offset = info->capacity;
1366 		break;
1367 	default:
1368 		/* Partially used zone. */
1369 		info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
1370 		__set_bit(zone_idx, active);
1371 		break;
1372 	}
1373 
1374 	up_read(&dev_replace->rwsem);
1375 
1376 	return 0;
1377 }
1378 
1379 static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
1380 					 struct zone_info *info,
1381 					 unsigned long *active)
1382 {
1383 	if (info->alloc_offset == WP_MISSING_DEV) {
1384 		btrfs_err(bg->fs_info,
1385 			"zoned: cannot recover write pointer for zone %llu",
1386 			info->physical);
1387 		return -EIO;
1388 	}
1389 
1390 	bg->alloc_offset = info->alloc_offset;
1391 	bg->zone_capacity = info->capacity;
1392 	if (test_bit(0, active))
1393 		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1394 	return 0;
1395 }
1396 
1397 static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
1398 				      struct btrfs_chunk_map *map,
1399 				      struct zone_info *zone_info,
1400 				      unsigned long *active)
1401 {
1402 	struct btrfs_fs_info *fs_info = bg->fs_info;
1403 
1404 	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1405 		btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree");
1406 		return -EINVAL;
1407 	}
1408 
1409 	bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
1410 
1411 	if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
1412 		btrfs_err(bg->fs_info,
1413 			  "zoned: cannot recover write pointer for zone %llu",
1414 			  zone_info[0].physical);
1415 		return -EIO;
1416 	}
1417 	if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
1418 		btrfs_err(bg->fs_info,
1419 			  "zoned: cannot recover write pointer for zone %llu",
1420 			  zone_info[1].physical);
1421 		return -EIO;
1422 	}
1423 	if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
1424 		btrfs_err(bg->fs_info,
1425 			  "zoned: write pointer offset mismatch of zones in DUP profile");
1426 		return -EIO;
1427 	}
1428 
1429 	if (test_bit(0, active) != test_bit(1, active)) {
1430 		if (!btrfs_zone_activate(bg))
1431 			return -EIO;
1432 	} else if (test_bit(0, active)) {
1433 		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1434 	}
1435 
1436 	bg->alloc_offset = zone_info[0].alloc_offset;
1437 	return 0;
1438 }
1439 
1440 static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
1441 					struct btrfs_chunk_map *map,
1442 					struct zone_info *zone_info,
1443 					unsigned long *active)
1444 {
1445 	struct btrfs_fs_info *fs_info = bg->fs_info;
1446 	int i;
1447 
1448 	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1449 		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1450 			  btrfs_bg_type_to_raid_name(map->type));
1451 		return -EINVAL;
1452 	}
1453 
1454 	/* In case a device is missing we have a cap of 0, so don't use it. */
1455 	bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
1456 
1457 	for (i = 0; i < map->num_stripes; i++) {
1458 		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1459 		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
1460 			continue;
1461 
1462 		if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
1463 		    !btrfs_test_opt(fs_info, DEGRADED)) {
1464 			btrfs_err(fs_info,
1465 			"zoned: write pointer offset mismatch of zones in %s profile",
1466 				  btrfs_bg_type_to_raid_name(map->type));
1467 			return -EIO;
1468 		}
1469 		if (test_bit(0, active) != test_bit(i, active)) {
1470 			if (!btrfs_test_opt(fs_info, DEGRADED) &&
1471 			    !btrfs_zone_activate(bg)) {
1472 				return -EIO;
1473 			}
1474 		} else {
1475 			if (test_bit(0, active))
1476 				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1477 		}
1478 	}
1479 
1480 	if (zone_info[0].alloc_offset != WP_MISSING_DEV)
1481 		bg->alloc_offset = zone_info[0].alloc_offset;
1482 	else
1483 		bg->alloc_offset = zone_info[i - 1].alloc_offset;
1484 
1485 	return 0;
1486 }
1487 
1488 static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
1489 					struct btrfs_chunk_map *map,
1490 					struct zone_info *zone_info,
1491 					unsigned long *active)
1492 {
1493 	struct btrfs_fs_info *fs_info = bg->fs_info;
1494 
1495 	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1496 		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1497 			  btrfs_bg_type_to_raid_name(map->type));
1498 		return -EINVAL;
1499 	}
1500 
1501 	for (int i = 0; i < map->num_stripes; i++) {
1502 		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1503 		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
1504 			continue;
1505 
1506 		if (test_bit(0, active) != test_bit(i, active)) {
1507 			if (!btrfs_zone_activate(bg))
1508 				return -EIO;
1509 		} else {
1510 			if (test_bit(0, active))
1511 				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1512 		}
1513 		bg->zone_capacity += zone_info[i].capacity;
1514 		bg->alloc_offset += zone_info[i].alloc_offset;
1515 	}
1516 
1517 	return 0;
1518 }
1519 
1520 static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
1521 					 struct btrfs_chunk_map *map,
1522 					 struct zone_info *zone_info,
1523 					 unsigned long *active)
1524 {
1525 	struct btrfs_fs_info *fs_info = bg->fs_info;
1526 
1527 	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1528 		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1529 			  btrfs_bg_type_to_raid_name(map->type));
1530 		return -EINVAL;
1531 	}
1532 
1533 	for (int i = 0; i < map->num_stripes; i++) {
1534 		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1535 		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
1536 			continue;
1537 
1538 		if (test_bit(0, active) != test_bit(i, active)) {
1539 			if (!btrfs_zone_activate(bg))
1540 				return -EIO;
1541 		} else {
1542 			if (test_bit(0, active))
1543 				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1544 		}
1545 
1546 		if ((i % map->sub_stripes) == 0) {
1547 			bg->zone_capacity += zone_info[i].capacity;
1548 			bg->alloc_offset += zone_info[i].alloc_offset;
1549 		}
1550 	}
1551 
1552 	return 0;
1553 }
1554 
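/*
 * Load the zone info of every stripe backing @cache and derive the block
 * group's allocation offset, zone capacity and active state according to its
 * RAID profile. Conventional zones fall back to calculate_alloc_pointer().
 */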
1555 int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
1556 {
1557 	struct btrfs_fs_info *fs_info = cache->fs_info;
1558 	struct btrfs_chunk_map *map;
1559 	u64 logical = cache->start;
1560 	u64 length = cache->length;
1561 	struct zone_info *zone_info = NULL;
1562 	int ret;
1563 	int i;
1564 	unsigned long *active = NULL;
1565 	u64 last_alloc = 0;
1566 	u32 num_sequential = 0, num_conventional = 0;
1567 	u64 profile;
1568 
1569 	if (!btrfs_is_zoned(fs_info))
1570 		return 0;
1571 
1572 	/* Sanity check */
1573 	if (!IS_ALIGNED(length, fs_info->zone_size)) {
1574 		btrfs_err(fs_info,
1575 		"zoned: block group %llu len %llu unaligned to zone size %llu",
1576 			  logical, length, fs_info->zone_size);
1577 		return -EIO;
1578 	}
1579 
1580 	map = btrfs_find_chunk_map(fs_info, logical, length);
1581 	if (!map)
1582 		return -EINVAL;
1583 
1584 	cache->physical_map = map;
1585 
1586 	zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
1587 	if (!zone_info) {
1588 		ret = -ENOMEM;
1589 		goto out;
1590 	}
1591 
1592 	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
1593 	if (!active) {
1594 		ret = -ENOMEM;
1595 		goto out;
1596 	}
1597 
1598 	for (i = 0; i < map->num_stripes; i++) {
1599 		ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
1600 		if (ret)
1601 			goto out;
1602 
1603 		if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
1604 			num_conventional++;
1605 		else
1606 			num_sequential++;
1607 	}
1608 
1609 	if (num_sequential > 0)
1610 		set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1611 
1612 	if (num_conventional > 0) {
1613 		/* Zone capacity is always zone size in emulation */
1614 		cache->zone_capacity = cache->length;
1615 		ret = calculate_alloc_pointer(cache, &last_alloc, new);
1616 		if (ret) {
1617 			btrfs_err(fs_info,
1618 			"zoned: failed to determine allocation offset of bg %llu",
1619 				  cache->start);
1620 			goto out;
1621 		} else if (map->num_stripes == num_conventional) {
1622 			cache->alloc_offset = last_alloc;
1623 			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
1624 			goto out;
1625 		}
1626 	}
1627 
1628 	profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
1629 	switch (profile) {
1630 	case 0: /* single */
1631 		ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
1632 		break;
1633 	case BTRFS_BLOCK_GROUP_DUP:
1634 		ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
1635 		break;
1636 	case BTRFS_BLOCK_GROUP_RAID1:
1637 	case BTRFS_BLOCK_GROUP_RAID1C3:
1638 	case BTRFS_BLOCK_GROUP_RAID1C4:
1639 		ret = btrfs_load_block_group_raid1(cache, map, zone_info, active);
1640 		break;
1641 	case BTRFS_BLOCK_GROUP_RAID0:
1642 		ret = btrfs_load_block_group_raid0(cache, map, zone_info, active);
1643 		break;
1644 	case BTRFS_BLOCK_GROUP_RAID10:
1645 		ret = btrfs_load_block_group_raid10(cache, map, zone_info, active);
1646 		break;
1647 	case BTRFS_BLOCK_GROUP_RAID5:
1648 	case BTRFS_BLOCK_GROUP_RAID6:
1649 	default:
1650 		btrfs_err(fs_info, "zoned: profile %s not yet supported",
1651 			  btrfs_bg_type_to_raid_name(map->type));
1652 		ret = -EINVAL;
1653 		goto out;
1654 	}
1655 
1656 	if (ret == -EIO && profile != 0 && profile != BTRFS_BLOCK_GROUP_RAID0 &&
1657 	    profile != BTRFS_BLOCK_GROUP_RAID10) {
1658 		/*
1659 		 * Detected a broken write pointer. Make this block group
1660 		 * unallocatable by setting the allocation pointer to the end of
1661 		 * the allocatable region. Relocating this block group will fix
1662 		 * the mismatch.
1663 		 *
1664 		 * Currently, we cannot handle the RAID0 or RAID10 case like this
1665 		 * because we don't have a proper zone_capacity value. But
1666 		 * reading from such a block group won't work anyway because of
1667 		 * the missing stripe.
1668 		 */
1669 		cache->alloc_offset = cache->zone_capacity;
1670 		ret = 0;
1671 	}
1672 
1673 out:
1674 	/* Reject non-SINGLE data profiles without RST. */
1675 	if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
1676 	    (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
1677 	    !fs_info->stripe_root) {
1678 		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1679 			  btrfs_bg_type_to_raid_name(map->type));
1680 		return -EINVAL;
1681 	}
1682 
1683 	if (cache->alloc_offset > cache->zone_capacity) {
1684 		btrfs_err(fs_info,
1685 "zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
1686 			  cache->alloc_offset, cache->zone_capacity,
1687 			  cache->start);
1688 		ret = -EIO;
1689 	}
1690 
1691 	/* An extent is allocated after the write pointer */
1692 	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
1693 		btrfs_err(fs_info,
1694 			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
1695 			  logical, last_alloc, cache->alloc_offset);
1696 		ret = -EIO;
1697 	}
1698 
1699 	if (!ret) {
1700 		cache->meta_write_pointer = cache->alloc_offset + cache->start;
1701 		if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
1702 			btrfs_get_block_group(cache);
1703 			spin_lock(&fs_info->zone_active_bgs_lock);
1704 			list_add_tail(&cache->active_bg_list,
1705 				      &fs_info->zone_active_bgs);
1706 			spin_unlock(&fs_info->zone_active_bgs_lock);
1707 		}
1708 	} else {
1709 		btrfs_free_chunk_map(cache->physical_map);
1710 		cache->physical_map = NULL;
1711 	}
1712 	bitmap_free(active);
1713 	kfree(zone_info);
1714 
1715 	return ret;
1716 }
1717 
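/*
 * For a zoned block group, the bytes between the used counter and the write
 * pointer cannot be reused before a zone reset, and the slack between zone
 * capacity and zone size is never allocatable. Account both as zone_unusable
 * and expose the rest as free space.
 */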
1718 void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
1719 {
1720 	u64 unusable, free;
1721 
1722 	if (!btrfs_is_zoned(cache->fs_info))
1723 		return;
1724 
1725 	WARN_ON(cache->bytes_super != 0);
1726 	unusable = (cache->alloc_offset - cache->used) +
1727 		   (cache->length - cache->zone_capacity);
1728 	free = cache->zone_capacity - cache->alloc_offset;
1729 
1730 	/* We only need ->free_space in ALLOC_SEQ block groups */
1731 	cache->cached = BTRFS_CACHE_FINISHED;
1732 	cache->free_space_ctl->free_space = free;
1733 	cache->zone_unusable = unusable;
1734 }
1735 
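/*
 * Decide whether a write bio must be submitted as a zone append: only data
 * inode writes targeting a sequential-zone block group qualify, and data
 * relocation writes are excluded (see the comment below).
 */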
1736 bool btrfs_use_zone_append(struct btrfs_bio *bbio)
1737 {
1738 	u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
1739 	struct btrfs_inode *inode = bbio->inode;
1740 	struct btrfs_fs_info *fs_info = bbio->fs_info;
1741 	struct btrfs_block_group *cache;
1742 	bool ret = false;
1743 
1744 	if (!btrfs_is_zoned(fs_info))
1745 		return false;
1746 
1747 	if (!inode || !is_data_inode(inode))
1748 		return false;
1749 
1750 	if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
1751 		return false;
1752 
1753 	/*
1754 	 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
1755 	 * extent layout the relocation code has.
1756 	 * Furthermore, we have set aside our own block group from which only the
1757 	 * relocation "process" can allocate, and we make sure only one process at
1758 	 * a time can add pages to an extent that gets relocated, so it's safe to
1759 	 * use a regular REQ_OP_WRITE for this special case.
1760 	 */
1761 	if (btrfs_is_data_reloc_root(inode->root))
1762 		return false;
1763 
1764 	cache = btrfs_lookup_block_group(fs_info, start);
1765 	ASSERT(cache);
1766 	if (!cache)
1767 		return false;
1768 
1769 	ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1770 	btrfs_put_block_group(cache);
1771 
1772 	return ret;
1773 }
1774 
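/*
 * After a zone append write completes, adjust the recorded logical address of
 * the ordered sum by the delta between the physical location the device
 * actually wrote to and the physical location originally assigned to the bio.
 */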
1775 void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
1776 {
1777 	const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
1778 	struct btrfs_ordered_sum *sum = bbio->sums;
1779 
1780 	if (physical < bbio->orig_physical)
1781 		sum->logical -= bbio->orig_physical - physical;
1782 	else
1783 		sum->logical += physical - bbio->orig_physical;
1784 }
1785 
1786 static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
1787 					u64 logical)
1788 {
1789 	struct extent_map_tree *em_tree = &ordered->inode->extent_tree;
1790 	struct extent_map *em;
1791 
1792 	ordered->disk_bytenr = logical;
1793 
1794 	write_lock(&em_tree->lock);
1795 	em = search_extent_mapping(em_tree, ordered->file_offset,
1796 				   ordered->num_bytes);
1797 	/* The em should be a new COW extent, thus it should not have an offset. */
1798 	ASSERT(em->offset == 0);
1799 	em->disk_bytenr = logical;
1800 	free_extent_map(em);
1801 	write_unlock(&em_tree->lock);
1802 }
1803 
1804 static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
1805 				      u64 logical, u64 len)
1806 {
1807 	struct btrfs_ordered_extent *new;
1808 
1809 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
1810 	    split_extent_map(ordered->inode, ordered->file_offset,
1811 			     ordered->num_bytes, len, logical))
1812 		return false;
1813 
1814 	new = btrfs_split_ordered_extent(ordered, len);
1815 	if (IS_ERR(new))
1816 		return false;
1817 	new->disk_bytenr = logical;
1818 	btrfs_finish_one_ordered(new);
1819 	return true;
1820 }
1821 
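/*
 * Fix up an ordered extent after its zone append writes have completed
 *
 * @ordered: the ordered extent to fix up
 *
 * Zone append may place the data of one ordered extent at non-contiguous
 * physical addresses. Walk the recorded checksum entries, split the ordered
 * extent at every discontinuity, and rewrite the logical address to where the
 * data actually landed.
 */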
1822 void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
1823 {
1824 	struct btrfs_inode *inode = ordered->inode;
1825 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1826 	struct btrfs_ordered_sum *sum;
1827 	u64 logical, len;
1828 
1829 	/*
1830 	 * A write to a pre-allocated region is only done for data relocation and
1831 	 * thus uses a regular WRITE operation. No split/rewrite is necessary.
1832 	 */
1833 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
1834 		return;
1835 
1836 	ASSERT(!list_empty(&ordered->list));
1837 	/* ordered->list could only be empty in the pre-alloc case handled above. */
1838 	sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
1839 	logical = sum->logical;
1840 	len = sum->len;
1841 
1842 	while (len < ordered->disk_num_bytes) {
1843 		sum = list_next_entry(sum, list);
1844 		if (sum->logical == logical + len) {
1845 			len += sum->len;
1846 			continue;
1847 		}
1848 		if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
1849 			set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
1850 			btrfs_err(fs_info, "failed to split ordered extent");
1851 			goto out;
1852 		}
1853 		logical = sum->logical;
1854 		len = sum->len;
1855 	}
1856 
1857 	if (ordered->disk_bytenr != logical)
1858 		btrfs_rewrite_logical_zoned(ordered, logical);
1859 
1860 out:
1861 	/*
1862 	 * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures
1863 	 * were allocated by btrfs_alloc_dummy_sum only to record the logical
1864 	 * addresses and don't contain actual checksums.  We thus must free them
1865 	 * here so that we don't attempt to log the csums later.
1866 	 */
1867 	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
1868 	    test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state)) {
1869 		while ((sum = list_first_entry_or_null(&ordered->list,
1870 						       typeof(*sum), list))) {
1871 			list_del(&sum->list);
1872 			kfree(sum);
1873 		}
1874 	}
1875 }
1876 
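/*
 * Ensure that @ctx->zoned_bg is active, activating it if necessary.
 *
 * For the tree-log block group, finish another block group to free an active
 * zone when activation fails. For other metadata/system block groups, finish
 * and replace the currently tracked active block group (*active_bg).
 *
 * Return: true if the block group is (now) active, false otherwise
 */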
1877 static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
1878 			       struct btrfs_block_group **active_bg)
1879 {
1880 	const struct writeback_control *wbc = ctx->wbc;
1881 	struct btrfs_block_group *block_group = ctx->zoned_bg;
1882 	struct btrfs_fs_info *fs_info = block_group->fs_info;
1883 
1884 	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
1885 		return true;
1886 
1887 	if (fs_info->treelog_bg == block_group->start) {
1888 		if (!btrfs_zone_activate(block_group)) {
1889 			int ret_fin = btrfs_zone_finish_one_bg(fs_info);
1890 
1891 			if (ret_fin != 1 || !btrfs_zone_activate(block_group))
1892 				return false;
1893 		}
1894 	} else if (*active_bg != block_group) {
1895 		struct btrfs_block_group *tgt = *active_bg;
1896 
1897 		/* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */
1898 		lockdep_assert_held(&fs_info->zoned_meta_io_lock);
1899 
1900 		if (tgt) {
1901 			/*
1902 			 * If there is unsent IO left in the allocated area, we
1903 			 * cannot wait for it, as that may cause a deadlock.
1904 			 */
1905 			if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
1906 				if (wbc->sync_mode == WB_SYNC_NONE ||
1907 				    (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync))
1908 					return false;
1909 			}
1910 
1911 			/* Pivot active metadata/system block group. */
1912 			btrfs_zoned_meta_io_unlock(fs_info);
1913 			wait_eb_writebacks(tgt);
1914 			do_zone_finish(tgt, true);
1915 			btrfs_zoned_meta_io_lock(fs_info);
1916 			if (*active_bg == tgt) {
1917 				btrfs_put_block_group(tgt);
1918 				*active_bg = NULL;
1919 			}
1920 		}
1921 		if (!btrfs_zone_activate(block_group))
1922 			return false;
1923 		if (*active_bg != block_group) {
1924 			ASSERT(*active_bg == NULL);
1925 			*active_bg = block_group;
1926 			btrfs_get_block_group(block_group);
1927 		}
1928 	}
1929 
1930 	return true;
1931 }
1932 
1933 /*
1934  * Check if @ctx->eb is aligned to the write pointer.
1935  *
1936  * Return:
1937  *   0:        @ctx->eb is at the write pointer. You can write it.
1938  *   -EAGAIN:  There is a hole. The caller should handle the case.
1939  *   -EBUSY:   There is a hole, but the caller can just bail out.
1940  */
1941 int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
1942 				   struct btrfs_eb_write_context *ctx)
1943 {
1944 	const struct writeback_control *wbc = ctx->wbc;
1945 	const struct extent_buffer *eb = ctx->eb;
1946 	struct btrfs_block_group *block_group = ctx->zoned_bg;
1947 
1948 	if (!btrfs_is_zoned(fs_info))
1949 		return 0;
1950 
1951 	if (block_group) {
1952 		if (block_group->start > eb->start ||
1953 		    block_group->start + block_group->length <= eb->start) {
1954 			btrfs_put_block_group(block_group);
1955 			block_group = NULL;
1956 			ctx->zoned_bg = NULL;
1957 		}
1958 	}
1959 
1960 	if (!block_group) {
1961 		block_group = btrfs_lookup_block_group(fs_info, eb->start);
1962 		if (!block_group)
1963 			return 0;
1964 		ctx->zoned_bg = block_group;
1965 	}
1966 
1967 	if (block_group->meta_write_pointer == eb->start) {
1968 		struct btrfs_block_group **tgt;
1969 
1970 		if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
1971 			return 0;
1972 
1973 		if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)
1974 			tgt = &fs_info->active_system_bg;
1975 		else
1976 			tgt = &fs_info->active_meta_bg;
1977 		if (check_bg_is_active(ctx, tgt))
1978 			return 0;
1979 	}
1980 
1981 	/*
1982 	 * Since we may release fs_info->zoned_meta_io_lock, someone else may have
1983 	 * already started writing this eb. In that case, we can just bail out.
1984 	 */
1985 	if (block_group->meta_write_pointer > eb->start)
1986 		return -EBUSY;
1987 
1988 	/* If for_sync, this hole will be filled by a transaction commit. */
1989 	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
1990 		return -EAGAIN;
1991 	return -EBUSY;
1992 }
1993 
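/*
 * Zero out a part of a sequential zone on @device
 *
 * Return: 0 on success, -EOPNOTSUPP if @physical is not on a sequential zone,
 * or an error from blkdev_issue_zeroout()
 */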
1994 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
1995 {
1996 	if (!btrfs_dev_is_sequential(device, physical))
1997 		return -EOPNOTSUPP;
1998 
1999 	return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
2000 				    length >> SECTOR_SHIFT, GFP_NOFS, 0);
2001 }
2002 
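/*
 * Read the zone backing @logical from the first healthy mirror
 *
 * RAID56 is rejected, as it is not supported on zoned filesystems. Missing or
 * failing devices are skipped.
 */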
2003 static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
2004 			  struct blk_zone *zone)
2005 {
2006 	struct btrfs_io_context *bioc = NULL;
2007 	u64 mapped_length = PAGE_SIZE;
2008 	unsigned int nofs_flag;
2009 	int nmirrors;
2010 	int i, ret;
2011 
2012 	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2013 			      &mapped_length, &bioc, NULL, NULL);
2014 	if (ret || !bioc || mapped_length < PAGE_SIZE) {
2015 		ret = -EIO;
2016 		goto out_put_bioc;
2017 	}
2018 
2019 	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2020 		ret = -EINVAL;
2021 		goto out_put_bioc;
2022 	}
2023 
2024 	nofs_flag = memalloc_nofs_save();
2025 	nmirrors = (int)bioc->num_stripes;
2026 	for (i = 0; i < nmirrors; i++) {
2027 		u64 physical = bioc->stripes[i].physical;
2028 		struct btrfs_device *dev = bioc->stripes[i].dev;
2029 
2030 		/* Missing device */
2031 		if (!dev->bdev)
2032 			continue;
2033 
2034 		ret = btrfs_get_dev_zone(dev, physical, zone);
2035 		/* Failing device */
2036 		if (ret == -EIO || ret == -EOPNOTSUPP)
2037 			continue;
2038 		break;
2039 	}
2040 	memalloc_nofs_restore(nofs_flag);
2041 out_put_bioc:
2042 	btrfs_put_bioc(bioc);
2043 	return ret;
2044 }
2045 
2046 /*
2047  * Synchronize the write pointer in the zone at @physical_start on @tgt_dev by
2048  * filling zeros from @physical_pos up to the write pointer of the dev-replace
2049  * source device.
2050  */
2051 int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
2052 				  u64 physical_start, u64 physical_pos)
2053 {
2054 	struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
2055 	struct blk_zone zone;
2056 	u64 length;
2057 	u64 wp;
2058 	int ret;
2059 
2060 	if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
2061 		return 0;
2062 
2063 	ret = read_zone_info(fs_info, logical, &zone);
2064 	if (ret)
2065 		return ret;
2066 
2067 	wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
2068 
2069 	if (physical_pos == wp)
2070 		return 0;
2071 
2072 	if (physical_pos > wp)
2073 		return -EUCLEAN;
2074 
2075 	length = wp - physical_pos;
2076 	return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
2077 }
2078 
2079 /*
2080  * Activate block group and underlying device zones
2081  *
2082  * @block_group: the block group to activate
2083  *
2084  * Return: true on success, false otherwise
2085  */
2086 bool btrfs_zone_activate(struct btrfs_block_group *block_group)
2087 {
2088 	struct btrfs_fs_info *fs_info = block_group->fs_info;
2089 	struct btrfs_chunk_map *map;
2090 	struct btrfs_device *device;
2091 	u64 physical;
2092 	const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA);
2093 	bool ret;
2094 	int i;
2095 
2096 	if (!btrfs_is_zoned(block_group->fs_info))
2097 		return true;
2098 
2099 	map = block_group->physical_map;
2100 
2101 	spin_lock(&fs_info->zone_active_bgs_lock);
2102 	spin_lock(&block_group->lock);
2103 	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
2104 		ret = true;
2105 		goto out_unlock;
2106 	}
2107 
2108 	/* No space left */
2109 	if (btrfs_zoned_bg_is_full(block_group)) {
2110 		ret = false;
2111 		goto out_unlock;
2112 	}
2113 
2114 	for (i = 0; i < map->num_stripes; i++) {
2115 		struct btrfs_zoned_device_info *zinfo;
2116 		int reserved = 0;
2117 
2118 		device = map->stripes[i].dev;
2119 		physical = map->stripes[i].physical;
2120 		zinfo = device->zone_info;
2121 
2122 		if (zinfo->max_active_zones == 0)
2123 			continue;
2124 
2125 		if (is_data)
2126 			reserved = zinfo->reserved_active_zones;
2127 		/*
2128 		 * For the data block group, leave active zones for one
2129 		 * metadata block group and one system block group.
2130 		 */
2131 		if (atomic_read(&zinfo->active_zones_left) <= reserved) {
2132 			ret = false;
2133 			goto out_unlock;
2134 		}
2135 
2136 		if (!btrfs_dev_set_active_zone(device, physical)) {
2137 			/* Cannot activate the zone */
2138 			ret = false;
2139 			goto out_unlock;
2140 		}
2141 		if (!is_data)
2142 			zinfo->reserved_active_zones--;
2143 	}
2144 
2145 	/* Successfully activated all the zones */
2146 	set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2147 	spin_unlock(&block_group->lock);
2148 
2149 	/* For the active block group list */
2150 	btrfs_get_block_group(block_group);
2151 	list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
2152 	spin_unlock(&fs_info->zone_active_bgs_lock);
2153 
2154 	return true;
2155 
2156 out_unlock:
2157 	spin_unlock(&block_group->lock);
2158 	spin_unlock(&fs_info->zone_active_bgs_lock);
2159 	return ret;
2160 }
2161 
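/*
 * Wait for the writeback of all extent buffers in @block_group to complete.
 */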
2162 static void wait_eb_writebacks(struct btrfs_block_group *block_group)
2163 {
2164 	struct btrfs_fs_info *fs_info = block_group->fs_info;
2165 	const u64 end = block_group->start + block_group->length;
2166 	struct radix_tree_iter iter;
2167 	struct extent_buffer *eb;
2168 	void __rcu **slot;
2169 
2170 	rcu_read_lock();
2171 	radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
2172 				 block_group->start >> fs_info->sectorsize_bits) {
2173 		eb = radix_tree_deref_slot(slot);
2174 		if (!eb)
2175 			continue;
2176 		if (radix_tree_deref_retry(eb)) {
2177 			slot = radix_tree_iter_retry(&iter);
2178 			continue;
2179 		}
2180 
2181 		if (eb->start < block_group->start)
2182 			continue;
2183 		if (eb->start >= end)
2184 			break;
2185 
2186 		slot = radix_tree_iter_resume(slot, &iter);
2187 		rcu_read_unlock();
2188 		wait_on_extent_buffer_writeback(eb);
2189 		rcu_read_lock();
2190 	}
2191 	rcu_read_unlock();
2192 }
2193 
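/*
 * Finish the zones backing a block group and deactivate it
 *
 * @block_group:   the block group to finish
 * @fully_written: true if the caller knows all usable space has been written,
 *                 so there is no need to wait for outstanding IO or to mark
 *                 the block group read-only first
 *
 * Return: 0 on success or if the block group is already inactive, -EAGAIN if
 * allocated but unwritten or reserved space is left, negative errno otherwise
 */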
2194 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
2195 {
2196 	struct btrfs_fs_info *fs_info = block_group->fs_info;
2197 	struct btrfs_chunk_map *map;
2198 	const bool is_metadata = (block_group->flags &
2199 			(BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
2200 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2201 	int ret = 0;
2202 	int i;
2203 
2204 	spin_lock(&block_group->lock);
2205 	if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
2206 		spin_unlock(&block_group->lock);
2207 		return 0;
2208 	}
2209 
2210 	/* Check if we have unwritten allocated space */
2211 	if (is_metadata &&
2212 	    block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
2213 		spin_unlock(&block_group->lock);
2214 		return -EAGAIN;
2215 	}
2216 
2217 	/*
2218 	 * If we are sure that the block group is full (= no more room left for
2219 	 * new allocation) and the IO for the last usable block is completed, we
2220 	 * don't need to wait for the other IOs. This holds because we ensure
2221 	 * the sequential IO submissions using the ZONE_APPEND command for data
2222 	 * and block_group->meta_write_pointer for metadata.
2223 	 */
2224 	if (!fully_written) {
2225 		if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
2226 			spin_unlock(&block_group->lock);
2227 			return -EAGAIN;
2228 		}
2229 		spin_unlock(&block_group->lock);
2230 
2231 		ret = btrfs_inc_block_group_ro(block_group, false);
2232 		if (ret)
2233 			return ret;
2234 
2235 		/* Ensure all writes in this block group finish */
2236 		btrfs_wait_block_group_reservations(block_group);
2237 		/* No need to wait for NOCOW writers. Zoned mode does not allow that. */
2238 		btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group);
2239 		/* Wait for extent buffers to be written. */
2240 		if (is_metadata)
2241 			wait_eb_writebacks(block_group);
2242 
2243 		spin_lock(&block_group->lock);
2244 
2245 		/*
2246 		 * Bail out if someone already deactivated the block group, or
2247 		 * allocated space is left in the block group.
2248 		 */
2249 		if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2250 			      &block_group->runtime_flags)) {
2251 			spin_unlock(&block_group->lock);
2252 			btrfs_dec_block_group_ro(block_group);
2253 			return 0;
2254 		}
2255 
2256 		if (block_group->reserved ||
2257 		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2258 			     &block_group->runtime_flags)) {
2259 			spin_unlock(&block_group->lock);
2260 			btrfs_dec_block_group_ro(block_group);
2261 			return -EAGAIN;
2262 		}
2263 	}
2264 
2265 	clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2266 	block_group->alloc_offset = block_group->zone_capacity;
2267 	if (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM))
2268 		block_group->meta_write_pointer = block_group->start +
2269 						  block_group->zone_capacity;
2270 	block_group->free_space_ctl->free_space = 0;
2271 	btrfs_clear_treelog_bg(block_group);
2272 	btrfs_clear_data_reloc_bg(block_group);
2273 	spin_unlock(&block_group->lock);
2274 
2275 	down_read(&dev_replace->rwsem);
2276 	map = block_group->physical_map;
2277 	for (i = 0; i < map->num_stripes; i++) {
2278 		struct btrfs_device *device = map->stripes[i].dev;
2279 		const u64 physical = map->stripes[i].physical;
2280 		struct btrfs_zoned_device_info *zinfo = device->zone_info;
2281 		unsigned int nofs_flags;
2282 
2283 		if (zinfo->max_active_zones == 0)
2284 			continue;
2285 
2286 		nofs_flags = memalloc_nofs_save();
2287 		ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
2288 				       physical >> SECTOR_SHIFT,
2289 				       zinfo->zone_size >> SECTOR_SHIFT);
2290 		memalloc_nofs_restore(nofs_flags);
2291 
2292 		if (ret) {
2293 			up_read(&dev_replace->rwsem);
2294 			return ret;
2295 		}
2296 
2297 		if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
2298 			zinfo->reserved_active_zones++;
2299 		btrfs_dev_clear_active_zone(device, physical);
2300 	}
2301 	up_read(&dev_replace->rwsem);
2302 
2303 	if (!fully_written)
2304 		btrfs_dec_block_group_ro(block_group);
2305 
2306 	spin_lock(&fs_info->zone_active_bgs_lock);
2307 	ASSERT(!list_empty(&block_group->active_bg_list));
2308 	list_del_init(&block_group->active_bg_list);
2309 	spin_unlock(&fs_info->zone_active_bgs_lock);
2310 
2311 	/* For active_bg_list */
2312 	btrfs_put_block_group(block_group);
2313 
2314 	clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2315 
2316 	return 0;
2317 }
2318 
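/*
 * Finish the zones of @block_group after waiting for its outstanding writes.
 *
 * Return: 0 on success (or on a non-zoned filesystem), negative errno
 * otherwise
 */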
2319 int btrfs_zone_finish(struct btrfs_block_group *block_group)
2320 {
2321 	if (!btrfs_is_zoned(block_group->fs_info))
2322 		return 0;
2323 
2324 	return do_zone_finish(block_group, false);
2325 }
2326 
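/*
 * Check if a new block group with the profile @flags could still be activated
 * on some device.
 *
 * As a side effect, set BTRFS_FS_NEED_ZONE_FINISH if no device has an active
 * zone to spare.
 *
 * Return: true if activation is possible, false otherwise
 */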
2327 bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
2328 {
2329 	struct btrfs_fs_info *fs_info = fs_devices->fs_info;
2330 	struct btrfs_device *device;
2331 	bool ret = false;
2332 
2333 	if (!btrfs_is_zoned(fs_info))
2334 		return true;
2335 
2336 	/* Check if there is a device with active zones left */
2337 	mutex_lock(&fs_info->chunk_mutex);
2338 	spin_lock(&fs_info->zone_active_bgs_lock);
2339 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
2340 		struct btrfs_zoned_device_info *zinfo = device->zone_info;
2341 		int reserved = 0;
2342 
2343 		if (!device->bdev)
2344 			continue;
2345 
2346 		if (!zinfo->max_active_zones) {
2347 			ret = true;
2348 			break;
2349 		}
2350 
2351 		if (flags & BTRFS_BLOCK_GROUP_DATA)
2352 			reserved = zinfo->reserved_active_zones;
2353 
2354 		switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
2355 		case 0: /* single */
2356 			ret = (atomic_read(&zinfo->active_zones_left) >= (1 + reserved));
2357 			break;
2358 		case BTRFS_BLOCK_GROUP_DUP:
2359 			ret = (atomic_read(&zinfo->active_zones_left) >= (2 + reserved));
2360 			break;
2361 		}
2362 		if (ret)
2363 			break;
2364 	}
2365 	spin_unlock(&fs_info->zone_active_bgs_lock);
2366 	mutex_unlock(&fs_info->chunk_mutex);
2367 
2368 	if (!ret)
2369 		set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2370 
2371 	return ret;
2372 }
2373 
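/*
 * Finish the block group covering @logical when the write up to
 * @logical + @length leaves no room for another minimal allocation
 * (sectorsize for data, nodesize for metadata).
 */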
2374 void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
2375 {
2376 	struct btrfs_block_group *block_group;
2377 	u64 min_alloc_bytes;
2378 
2379 	if (!btrfs_is_zoned(fs_info))
2380 		return;
2381 
2382 	block_group = btrfs_lookup_block_group(fs_info, logical);
2383 	ASSERT(block_group);
2384 
2385 	/* No MIXED_BG on zoned btrfs. */
2386 	if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
2387 		min_alloc_bytes = fs_info->sectorsize;
2388 	else
2389 		min_alloc_bytes = fs_info->nodesize;
2390 
2391 	/* Bail out if we can allocate more data from this block group. */
2392 	if (logical + length + min_alloc_bytes <=
2393 	    block_group->start + block_group->zone_capacity)
2394 		goto out;
2395 
2396 	do_zone_finish(block_group, true);
2397 
2398 out:
2399 	btrfs_put_block_group(block_group);
2400 }
2401 
2402 static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
2403 {
2404 	struct btrfs_block_group *bg =
2405 		container_of(work, struct btrfs_block_group, zone_finish_work);
2406 
2407 	wait_on_extent_buffer_writeback(bg->last_eb);
2408 	free_extent_buffer(bg->last_eb);
2409 	btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length);
2410 	btrfs_put_block_group(bg);
2411 }
2412 
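/*
 * Schedule asynchronous zone finishing of @bg once the writeback of @eb has
 * completed, if there is no room for another extent buffer behind @eb within
 * the zone capacity.
 */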
2413 void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
2414 				   struct extent_buffer *eb)
2415 {
2416 	if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
2417 	    eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
2418 		return;
2419 
2420 	if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
2421 		btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
2422 			  bg->start);
2423 		return;
2424 	}
2425 
2426 	/* For the work */
2427 	btrfs_get_block_group(bg);
2428 	atomic_inc(&eb->refs);
2429 	bg->last_eb = eb;
2430 	INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
2431 	queue_work(system_unbound_wq, &bg->zone_finish_work);
2432 }
2433 
2434 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
2435 {
2436 	struct btrfs_fs_info *fs_info = bg->fs_info;
2437 
2438 	spin_lock(&fs_info->relocation_bg_lock);
2439 	if (fs_info->data_reloc_bg == bg->start)
2440 		fs_info->data_reloc_bg = 0;
2441 	spin_unlock(&fs_info->relocation_bg_lock);
2442 }
2443 
2444 void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
2445 {
2446 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2447 	struct btrfs_device *device;
2448 
2449 	if (!btrfs_is_zoned(fs_info))
2450 		return;
2451 
2452 	mutex_lock(&fs_devices->device_list_mutex);
2453 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
2454 		if (device->zone_info) {
2455 			vfree(device->zone_info->zone_cache);
2456 			device->zone_info->zone_cache = NULL;
2457 		}
2458 	}
2459 	mutex_unlock(&fs_devices->device_list_mutex);
2460 }
2461 
2462 bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
2463 {
2464 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2465 	struct btrfs_device *device;
2466 	u64 used = 0;
2467 	u64 total = 0;
2468 	u64 factor;
2469 
2470 	ASSERT(btrfs_is_zoned(fs_info));
2471 
2472 	if (fs_info->bg_reclaim_threshold == 0)
2473 		return false;
2474 
2475 	mutex_lock(&fs_devices->device_list_mutex);
2476 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
2477 		if (!device->bdev)
2478 			continue;
2479 
2480 		total += device->disk_total_bytes;
2481 		used += device->bytes_used;
2482 	}
2483 	mutex_unlock(&fs_devices->device_list_mutex);
2484 
2485 	factor = div64_u64(used * 100, total);
2486 	return factor >= fs_info->bg_reclaim_threshold;
2487 }
2488 
2489 void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
2490 				       u64 length)
2491 {
2492 	struct btrfs_block_group *block_group;
2493 
2494 	if (!btrfs_is_zoned(fs_info))
2495 		return;
2496 
2497 	block_group = btrfs_lookup_block_group(fs_info, logical);
2498 	/* This should only be called on a former data relocation block group. */
2499 	ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
2500 
2501 	spin_lock(&block_group->lock);
2502 	if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
2503 		goto out;
2504 
2505 	/* All relocation extents are written. */
2506 	if (block_group->start + block_group->alloc_offset == logical + length) {
2507 		/*
2508 		 * Now, release this block group for further allocations and
2509 		 * zone finish.
2510 		 */
2511 		clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2512 			  &block_group->runtime_flags);
2513 	}
2514 
2515 out:
2516 	spin_unlock(&block_group->lock);
2517 	btrfs_put_block_group(block_group);
2518 }
2519 
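/*
 * Finish the active block group with the least remaining capacity to free up
 * an active zone.
 *
 * Callers typically use this to make room before retrying activation; a
 * sketch of the pattern used by check_bg_is_active() above:
 *
 *	if (!btrfs_zone_activate(bg) &&
 *	    (btrfs_zone_finish_one_bg(fs_info) != 1 || !btrfs_zone_activate(bg)))
 *		return false;
 *
 * Return: 1 if a block group was finished, 0 if no suitable block group was
 * found, negative errno on failure
 */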
2520 int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
2521 {
2522 	struct btrfs_block_group *block_group;
2523 	struct btrfs_block_group *min_bg = NULL;
2524 	u64 min_avail = U64_MAX;
2525 	int ret;
2526 
2527 	spin_lock(&fs_info->zone_active_bgs_lock);
2528 	list_for_each_entry(block_group, &fs_info->zone_active_bgs,
2529 			    active_bg_list) {
2530 		u64 avail;
2531 
2532 		spin_lock(&block_group->lock);
2533 		if (block_group->reserved || block_group->alloc_offset == 0 ||
2534 		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
2535 		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
2536 			spin_unlock(&block_group->lock);
2537 			continue;
2538 		}
2539 
2540 		avail = block_group->zone_capacity - block_group->alloc_offset;
2541 		if (min_avail > avail) {
2542 			if (min_bg)
2543 				btrfs_put_block_group(min_bg);
2544 			min_bg = block_group;
2545 			min_avail = avail;
2546 			btrfs_get_block_group(min_bg);
2547 		}
2548 		spin_unlock(&block_group->lock);
2549 	}
2550 	spin_unlock(&fs_info->zone_active_bgs_lock);
2551 
2552 	if (!min_bg)
2553 		return 0;
2554 
2555 	ret = btrfs_zone_finish(min_bg);
2556 	btrfs_put_block_group(min_bg);
2557 
2558 	return ret < 0 ? ret : 1;
2559 }
2560 
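/*
 * Try to activate one existing metadata or system block group in @space_info.
 *
 * @do_finish: if activation fails, finish other block groups to release
 *             active zones and retry
 *
 * Return: 1 if a block group was activated, 0 if none could be, negative
 * errno on failure
 */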
2561 int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
2562 				struct btrfs_space_info *space_info,
2563 				bool do_finish)
2564 {
2565 	struct btrfs_block_group *bg;
2566 	int index;
2567 
2568 	if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
2569 		return 0;
2570 
2571 	for (;;) {
2572 		int ret;
2573 		bool need_finish = false;
2574 
2575 		down_read(&space_info->groups_sem);
2576 		for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
2577 			list_for_each_entry(bg, &space_info->block_groups[index],
2578 					    list) {
2579 				if (!spin_trylock(&bg->lock))
2580 					continue;
2581 				if (btrfs_zoned_bg_is_full(bg) ||
2582 				    test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2583 					     &bg->runtime_flags)) {
2584 					spin_unlock(&bg->lock);
2585 					continue;
2586 				}
2587 				spin_unlock(&bg->lock);
2588 
2589 				if (btrfs_zone_activate(bg)) {
2590 					up_read(&space_info->groups_sem);
2591 					return 1;
2592 				}
2593 
2594 				need_finish = true;
2595 			}
2596 		}
2597 		up_read(&space_info->groups_sem);
2598 
2599 		if (!do_finish || !need_finish)
2600 			break;
2601 
2602 		ret = btrfs_zone_finish_one_bg(fs_info);
2603 		if (ret == 0)
2604 			break;
2605 		if (ret < 0)
2606 			return ret;
2607 	}
2608 
2609 	return 0;
2610 }
2611 
2612 /*
2613  * Reserve zones for one metadata block group, one tree-log block group, and one
2614  * system block group.
2615  */
2616 void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
2617 {
2618 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2619 	struct btrfs_block_group *block_group;
2620 	struct btrfs_device *device;
2621 	/* Reserve zones for normal SINGLE metadata and tree-log block group. */
2622 	unsigned int metadata_reserve = 2;
2623 	/* Reserve a zone for SINGLE system block group. */
2624 	unsigned int system_reserve = 1;
2625 
2626 	if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
2627 		return;
2628 
2629 	/*
2630 	 * This function is called from the mount context. So, there is no
2631 	 * parallel process touching the bits. No need for read_seqretry().
2632 	 */
2633 	if (fs_info->avail_metadata_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
2634 		metadata_reserve = 4;
2635 	if (fs_info->avail_system_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
2636 		system_reserve = 2;
2637 
2638 	/* Apply the reservation on all the devices. */
2639 	mutex_lock(&fs_devices->device_list_mutex);
2640 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
2641 		if (!device->bdev)
2642 			continue;
2643 
2644 		device->zone_info->reserved_active_zones =
2645 			metadata_reserve + system_reserve;
2646 	}
2647 	mutex_unlock(&fs_devices->device_list_mutex);
2648 
2649 	/* Release reservation for currently active block groups. */
2650 	spin_lock(&fs_info->zone_active_bgs_lock);
2651 	list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
2652 		struct btrfs_chunk_map *map = block_group->physical_map;
2653 
2654 		if (!(block_group->flags &
2655 		      (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
2656 			continue;
2657 
2658 		for (int i = 0; i < map->num_stripes; i++)
2659 			map->stripes[i].dev->zone_info->reserved_active_zones--;
2660 	}
2661 	spin_unlock(&fs_info->zone_active_bgs_lock);
2662 }
2663