xref: /linux/fs/btrfs/zoned.c (revision 2d1373e4246da3b58e1df058374ed6b101804e07)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/blkdev.h>
6 #include <linux/sched/mm.h>
7 #include <linux/atomic.h>
8 #include <linux/vmalloc.h>
9 #include "ctree.h"
10 #include "volumes.h"
11 #include "zoned.h"
12 #include "disk-io.h"
13 #include "block-group.h"
14 #include "dev-replace.h"
15 #include "space-info.h"
16 #include "fs.h"
17 #include "accessors.h"
18 #include "bio.h"
19 #include "transaction.h"
20 #include "sysfs.h"
21 
22 /* Maximum number of zones to report per blkdev_report_zones() call */
23 #define BTRFS_REPORT_NR_ZONES   4096
24 /* Invalid allocation pointer value for missing devices */
25 #define WP_MISSING_DEV ((u64)-1)
26 /* Pseudo write pointer value for conventional zone */
27 #define WP_CONVENTIONAL ((u64)-2)
28 
29 /*
30  * Location of the first zone of superblock logging zone pairs.
31  *
32  * - primary superblock:    0B (zone 0)
33  * - first copy:          512G (zone starting at that offset)
34  * - second copy:           4T (zone starting at that offset)
35  */
36 #define BTRFS_SB_LOG_PRIMARY_OFFSET	(0ULL)
37 #define BTRFS_SB_LOG_FIRST_OFFSET	(512ULL * SZ_1G)
38 #define BTRFS_SB_LOG_SECOND_OFFSET	(4096ULL * SZ_1G)
39 
40 #define BTRFS_SB_LOG_FIRST_SHIFT	ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
41 #define BTRFS_SB_LOG_SECOND_SHIFT	ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
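/*
 * For illustration, assuming a 256 MiB zone size (zone size shift of 28),
 * sb_zone_number() below resolves these offsets to:
 *
 *   mirror 0 -> zone 0
 *   mirror 1 -> zone 1 << (39 - 28) = 2048   (512 GiB / 256 MiB)
 *   mirror 2 -> zone 1 << (42 - 28) = 16384  (4 TiB / 256 MiB)
 *
 * since ilog2(512 GiB) == 39 and ilog2(4 TiB) == 42.
 */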
42 
43 /* Number of superblock log zones */
44 #define BTRFS_NR_SB_LOG_ZONES 2
45 
46 /* Default number of max active zones when the device has no limits. */
47 #define BTRFS_DEFAULT_MAX_ACTIVE_ZONES	128
48 
49 /*
50  * Minimum number of active zones we need:
51  *
52  * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
53  * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
54  * - 1 zone for tree-log dedicated block group
55  * - 1 zone for relocation
56  */
57 #define BTRFS_MIN_ACTIVE_ZONES		(BTRFS_SUPER_MIRROR_MAX + 5)
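/*
 * For example, with BTRFS_SUPER_MIRROR_MAX == 3 this works out to 8 zones:
 * 3 superblock zones + 3 block group zones (SYSTEM/META/DATA) + 1 tree-log
 * zone + 1 relocation zone.
 */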
58 
59 /*
60  * Minimum / maximum supported zone size. Currently, SMR disks have a zone
61  * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
62  * We do not expect the zone size to become larger than 8GiB or smaller than
63  * 4MiB in the near future.
64  */
65 #define BTRFS_MAX_ZONE_SIZE		SZ_8G
66 #define BTRFS_MIN_ZONE_SIZE		SZ_4M
67 
68 #define SUPER_INFO_SECTORS	((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
69 
70 static void wait_eb_writebacks(struct btrfs_block_group *block_group);
71 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written);
72 
73 static inline bool sb_zone_is_full(const struct blk_zone *zone)
74 {
75 	return (zone->cond == BLK_ZONE_COND_FULL) ||
76 		(zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
77 }
78 
79 static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
80 {
81 	struct blk_zone *zones = data;
82 
83 	memcpy(&zones[idx], zone, sizeof(*zone));
84 
85 	return 0;
86 }
87 
88 static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
89 			    u64 *wp_ret)
90 {
91 	bool empty[BTRFS_NR_SB_LOG_ZONES];
92 	bool full[BTRFS_NR_SB_LOG_ZONES];
93 	sector_t sector;
94 
95 	for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
96 		ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL,
97 		       "zones[%d].type=%d", i, zones[i].type);
98 		empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
99 		full[i] = sb_zone_is_full(&zones[i]);
100 	}
101 
102 	/*
103 	 * Possible states of log buffer zones
104 	 *
105 	 *           Empty[0]  In use[0]  Full[0]
106 	 * Empty[1]         *          0        1
107 	 * In use[1]        x          x        1
108 	 * Full[1]          0          0        C
109 	 *
110 	 * Log position:
111 	 *   *: Special case, no superblock is written
112 	 *   0: Use write pointer of zones[0]
113 	 *   1: Use write pointer of zones[1]
114 	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
115 	 *      one determined by generation
116 	 *   x: Invalid state
117 	 */
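	/*
	 * Example: zones[0] FULL and zones[1] IMP_OPEN corresponds to the
	 * "In use[1]" / "Full[0]" cell above, so the latest superblock was
	 * written to zones[1] and zones[1]'s write pointer is used below.
	 */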
118 
119 	if (empty[0] && empty[1]) {
120 		/* Special case to distinguish no superblock to read */
121 		*wp_ret = zones[0].start << SECTOR_SHIFT;
122 		return -ENOENT;
123 	} else if (full[0] && full[1]) {
124 		/* Compare two super blocks */
125 		struct address_space *mapping = bdev->bd_mapping;
126 		struct page *page[BTRFS_NR_SB_LOG_ZONES];
127 		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
128 
129 		for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
130 			u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
131 			u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
132 						BTRFS_SUPER_INFO_SIZE;
133 
134 			page[i] = read_cache_page_gfp(mapping,
135 					bytenr >> PAGE_SHIFT, GFP_NOFS);
136 			if (IS_ERR(page[i])) {
137 				if (i == 1)
138 					btrfs_release_disk_super(super[0]);
139 				return PTR_ERR(page[i]);
140 			}
141 			super[i] = page_address(page[i]);
142 		}
143 
144 		if (btrfs_super_generation(super[0]) >
145 		    btrfs_super_generation(super[1]))
146 			sector = zones[1].start;
147 		else
148 			sector = zones[0].start;
149 
150 		for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
151 			btrfs_release_disk_super(super[i]);
152 	} else if (!full[0] && (empty[1] || full[1])) {
153 		sector = zones[0].wp;
154 	} else if (full[0]) {
155 		sector = zones[1].wp;
156 	} else {
157 		return -EUCLEAN;
158 	}
159 	*wp_ret = sector << SECTOR_SHIFT;
160 	return 0;
161 }
162 
163 /*
164  * Get the first zone number of the superblock mirror
165  */
166 static inline u32 sb_zone_number(int shift, int mirror)
167 {
168 	u64 zone = U64_MAX;
169 
170 	ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX, "mirror=%d", mirror);
171 	switch (mirror) {
172 	case 0: zone = 0; break;
173 	case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
174 	case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
175 	}
176 
177 	ASSERT(zone <= U32_MAX, "zone=%llu", zone);
178 
179 	return (u32)zone;
180 }
181 
182 static inline sector_t zone_start_sector(u32 zone_number,
183 					 struct block_device *bdev)
184 {
185 	return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
186 }
187 
188 static inline u64 zone_start_physical(u32 zone_number,
189 				      struct btrfs_zoned_device_info *zone_info)
190 {
191 	return (u64)zone_number << zone_info->zone_size_shift;
192 }
193 
194 /*
195  * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
196  * device into fixed-size chunks and fakes a conventional zone on each of
197  * them.
198  */
199 static int emulate_report_zones(struct btrfs_device *device, u64 pos,
200 				struct blk_zone *zones, unsigned int nr_zones)
201 {
202 	const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
203 	sector_t bdev_size = bdev_nr_sectors(device->bdev);
204 	unsigned int i;
205 
206 	pos >>= SECTOR_SHIFT;
207 	for (i = 0; i < nr_zones; i++) {
208 		zones[i].start = i * zone_sectors + pos;
209 		zones[i].len = zone_sectors;
210 		zones[i].capacity = zone_sectors;
211 		zones[i].wp = zones[i].start + zone_sectors;
212 		zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
213 		zones[i].cond = BLK_ZONE_COND_NOT_WP;
214 
215 		if (zones[i].wp >= bdev_size) {
216 			i++;
217 			break;
218 		}
219 	}
220 
221 	return i;
222 }
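/*
 * For illustration, with an emulated zone size of 256 MiB a report starting
 * at pos 0 yields zones of 524288 sectors each, all CONVENTIONAL with
 * cond == NOT_WP and wp == start + len; the loop above stops once a fake
 * zone reaches the end of the device.
 */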
223 
224 static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
225 			       struct blk_zone *zones, unsigned int *nr_zones)
226 {
227 	struct btrfs_zoned_device_info *zinfo = device->zone_info;
228 	int ret;
229 
230 	if (!*nr_zones)
231 		return 0;
232 
233 	if (!bdev_is_zoned(device->bdev)) {
234 		ret = emulate_report_zones(device, pos, zones, *nr_zones);
235 		*nr_zones = ret;
236 		return 0;
237 	}
238 
239 	/* Check cache */
240 	if (zinfo->zone_cache) {
241 		unsigned int i;
242 		u32 zno;
243 
244 		ASSERT(IS_ALIGNED(pos, zinfo->zone_size),
245 		       "pos=%llu zinfo->zone_size=%llu", pos, zinfo->zone_size);
246 		zno = pos >> zinfo->zone_size_shift;
247 		/*
248 		 * We cannot report zones beyond the last zone, so it is OK to
249 		 * cap *nr_zones at the number of remaining zones.
250 		 */
251 		*nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);
252 
253 		for (i = 0; i < *nr_zones; i++) {
254 			struct blk_zone *zone_info;
255 
256 			zone_info = &zinfo->zone_cache[zno + i];
257 			if (!zone_info->len)
258 				break;
259 		}
260 
261 		if (i == *nr_zones) {
262 			/* Cache hit on all the zones */
263 			memcpy(zones, zinfo->zone_cache + zno,
264 			       sizeof(*zinfo->zone_cache) * *nr_zones);
265 			return 0;
266 		}
267 	}
268 
269 	ret = blkdev_report_zones_cached(device->bdev, pos >> SECTOR_SHIFT,
270 					 *nr_zones, copy_zone_info_cb, zones);
271 	if (ret < 0) {
272 		btrfs_err(device->fs_info,
273 				 "zoned: failed to read zone %llu on %s (devid %llu)",
274 				 pos, rcu_dereference(device->name),
275 				 device->devid);
276 		return ret;
277 	}
278 	*nr_zones = ret;
279 	if (unlikely(!ret))
280 		return -EIO;
281 
282 	/* Populate cache */
283 	if (zinfo->zone_cache) {
284 		u32 zno = pos >> zinfo->zone_size_shift;
285 
286 		memcpy(zinfo->zone_cache + zno, zones,
287 		       sizeof(*zinfo->zone_cache) * *nr_zones);
288 	}
289 
290 	return 0;
291 }
292 
293 /* The emulated zone size is determined from the size of the first device extent. */
294 static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
295 {
296 	BTRFS_PATH_AUTO_FREE(path);
297 	struct btrfs_root *root = fs_info->dev_root;
298 	struct btrfs_key key;
299 	struct extent_buffer *leaf;
300 	struct btrfs_dev_extent *dext;
301 	int ret = 0;
302 
303 	key.objectid = 1;
304 	key.type = BTRFS_DEV_EXTENT_KEY;
305 	key.offset = 0;
306 
307 	path = btrfs_alloc_path();
308 	if (!path)
309 		return -ENOMEM;
310 
311 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
312 	if (ret < 0)
313 		return ret;
314 
315 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
316 		ret = btrfs_next_leaf(root, path);
317 		if (ret < 0)
318 			return ret;
319 		/* No dev extents at all? Not good */
320 		if (unlikely(ret > 0))
321 			return -EUCLEAN;
322 	}
323 
324 	leaf = path->nodes[0];
325 	dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
326 	fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
327 	return 0;
328 }
329 
330 int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
331 {
332 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
333 	struct btrfs_device *device;
334 	int ret = 0;
335 
336 	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
337 	if (!btrfs_fs_incompat(fs_info, ZONED))
338 		return 0;
339 
340 	/*
341 	 * No need to take the device_list mutex here, we're still in the mount
342 	 * path and devices cannot be added to or removed from the list yet.
343 	 */
344 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
345 		/* We can skip reading of zone info for missing devices */
346 		if (!device->bdev)
347 			continue;
348 
349 		ret = btrfs_get_dev_zone_info(device, true);
350 		if (ret)
351 			break;
352 	}
353 
354 	return ret;
355 }
356 
357 int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
358 {
359 	struct btrfs_fs_info *fs_info = device->fs_info;
360 	struct btrfs_zoned_device_info *zone_info = NULL;
361 	struct block_device *bdev = device->bdev;
362 	unsigned int max_active_zones;
363 	unsigned int nactive;
364 	sector_t nr_sectors;
365 	sector_t sector = 0;
366 	struct blk_zone *zones = NULL;
367 	unsigned int i, nreported = 0, nr_zones;
368 	sector_t zone_sectors;
369 	char *model, *emulated;
370 	int ret;
371 
372 	/*
373 	 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
374 	 * yet be set.
375 	 */
376 	if (!btrfs_fs_incompat(fs_info, ZONED))
377 		return 0;
378 
379 	if (device->zone_info)
380 		return 0;
381 
382 	zone_info = kzalloc_obj(*zone_info);
383 	if (!zone_info)
384 		return -ENOMEM;
385 
386 	device->zone_info = zone_info;
387 
388 	if (!bdev_is_zoned(bdev)) {
389 		if (!fs_info->zone_size) {
390 			ret = calculate_emulated_zone_size(fs_info);
391 			if (ret)
392 				goto out;
393 		}
394 
395 		ASSERT(fs_info->zone_size);
396 		zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
397 	} else {
398 		zone_sectors = bdev_zone_sectors(bdev);
399 	}
400 
401 	ASSERT(is_power_of_two_u64(zone_sectors));
402 	zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
403 
404 	/* We reject devices with a zone size larger than 8GiB. */
405 	if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
406 		btrfs_err(fs_info,
407 		"zoned: %s: zone size %llu larger than supported maximum %llu",
408 				 rcu_dereference(device->name),
409 				 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
410 		ret = -EINVAL;
411 		goto out;
412 	} else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
413 		btrfs_err(fs_info,
414 		"zoned: %s: zone size %llu smaller than supported minimum %u",
415 				 rcu_dereference(device->name),
416 				 zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
417 		ret = -EINVAL;
418 		goto out;
419 	}
420 
421 	nr_sectors = bdev_nr_sectors(bdev);
422 	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
423 	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
424 	if (!IS_ALIGNED(nr_sectors, zone_sectors))
425 		zone_info->nr_zones++;
426 
427 	max_active_zones = min_not_zero(bdev_max_active_zones(bdev),
428 					bdev_max_open_zones(bdev));
429 	if (!max_active_zones && zone_info->nr_zones > BTRFS_DEFAULT_MAX_ACTIVE_ZONES)
430 		max_active_zones = BTRFS_DEFAULT_MAX_ACTIVE_ZONES;
431 	if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
432 		btrfs_err(fs_info,
433 "zoned: %s: max active zones %u is too small, need at least %u active zones",
434 				 rcu_dereference(device->name), max_active_zones,
435 				 BTRFS_MIN_ACTIVE_ZONES);
436 		ret = -EINVAL;
437 		goto out;
438 	}
439 	zone_info->max_active_zones = max_active_zones;
440 
441 	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
442 	if (!zone_info->seq_zones) {
443 		ret = -ENOMEM;
444 		goto out;
445 	}
446 
447 	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
448 	if (!zone_info->empty_zones) {
449 		ret = -ENOMEM;
450 		goto out;
451 	}
452 
453 	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
454 	if (!zone_info->active_zones) {
455 		ret = -ENOMEM;
456 		goto out;
457 	}
458 
459 	zones = kvzalloc_objs(struct blk_zone, BTRFS_REPORT_NR_ZONES);
460 	if (!zones) {
461 		ret = -ENOMEM;
462 		goto out;
463 	}
464 
465 	/*
466 	 * Enable zone cache only for a zoned device. On a non-zoned device, we
467 	 * fill the zone info with emulated CONVENTIONAL zones, so no need to
468 	 * use the cache.
469 	 */
470 	if (populate_cache && bdev_is_zoned(device->bdev)) {
471 		zone_info->zone_cache = vcalloc(zone_info->nr_zones,
472 						sizeof(struct blk_zone));
473 		if (!zone_info->zone_cache) {
474 			btrfs_err(device->fs_info,
475 				"zoned: failed to allocate zone cache for %s",
476 				rcu_dereference(device->name));
477 			ret = -ENOMEM;
478 			goto out;
479 		}
480 	}
481 
482 	/* Get the zone types */
483 	nactive = 0;
484 	while (sector < nr_sectors) {
485 		nr_zones = BTRFS_REPORT_NR_ZONES;
486 		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
487 					  &nr_zones);
488 		if (ret)
489 			goto out;
490 
491 		for (i = 0; i < nr_zones; i++) {
492 			if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
493 				__set_bit(nreported, zone_info->seq_zones);
494 			switch (zones[i].cond) {
495 			case BLK_ZONE_COND_EMPTY:
496 				__set_bit(nreported, zone_info->empty_zones);
497 				break;
498 			case BLK_ZONE_COND_IMP_OPEN:
499 			case BLK_ZONE_COND_EXP_OPEN:
500 			case BLK_ZONE_COND_CLOSED:
501 			case BLK_ZONE_COND_ACTIVE:
502 				__set_bit(nreported, zone_info->active_zones);
503 				nactive++;
504 				break;
505 			}
506 			nreported++;
507 		}
508 		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
509 	}
510 
511 	if (unlikely(nreported != zone_info->nr_zones)) {
512 		btrfs_err(device->fs_info,
513 				 "inconsistent number of zones on %s (%u/%u)",
514 				 rcu_dereference(device->name), nreported,
515 				 zone_info->nr_zones);
516 		ret = -EIO;
517 		goto out;
518 	}
519 
520 	if (max_active_zones) {
521 		if (unlikely(nactive > max_active_zones)) {
522 			if (bdev_max_active_zones(bdev) == 0) {
523 				max_active_zones = 0;
524 				zone_info->max_active_zones = 0;
525 				goto validate;
526 			}
527 			btrfs_err(device->fs_info,
528 			"zoned: %u active zones on %s exceeds max_active_zones %u",
529 					 nactive, rcu_dereference(device->name),
530 					 max_active_zones);
531 			ret = -EIO;
532 			goto out;
533 		}
534 		atomic_set(&zone_info->active_zones_left,
535 			   max_active_zones - nactive);
536 		set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
537 	}
538 
539 validate:
540 	/* Validate superblock log */
541 	nr_zones = BTRFS_NR_SB_LOG_ZONES;
542 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
543 		u32 sb_zone;
544 		u64 sb_wp;
545 		int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;
546 
547 		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
548 		if (sb_zone + 1 >= zone_info->nr_zones)
549 			continue;
550 
551 		ret = btrfs_get_dev_zones(device,
552 					  zone_start_physical(sb_zone, zone_info),
553 					  &zone_info->sb_zones[sb_pos],
554 					  &nr_zones);
555 		if (ret)
556 			goto out;
557 
558 		if (unlikely(nr_zones != BTRFS_NR_SB_LOG_ZONES)) {
559 			btrfs_err(device->fs_info,
560 	"zoned: failed to read super block log zone info at devid %llu zone %u",
561 					 device->devid, sb_zone);
562 			ret = -EUCLEAN;
563 			goto out;
564 		}
565 
566 		/*
567 		 * If zones[0] is conventional, always use the beginning of the
568 		 * zone to record superblock. No need to validate in that case.
569 		 */
570 		if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
571 		    BLK_ZONE_TYPE_CONVENTIONAL)
572 			continue;
573 
574 		ret = sb_write_pointer(device->bdev,
575 				       &zone_info->sb_zones[sb_pos], &sb_wp);
576 		if (unlikely(ret != -ENOENT && ret)) {
577 			btrfs_err(device->fs_info,
578 			"zoned: super block log zone corrupted devid %llu zone %u",
579 					 device->devid, sb_zone);
580 			ret = -EUCLEAN;
581 			goto out;
582 		}
583 	}
584 
585 
586 	kvfree(zones);
587 
588 	if (bdev_is_zoned(bdev)) {
589 		model = "host-managed zoned";
590 		emulated = "";
591 	} else {
592 		model = "regular";
593 		emulated = "emulated ";
594 	}
595 
596 	btrfs_info(fs_info,
597 		"%s block device %s, %u %szones of %llu bytes",
598 		model, rcu_dereference(device->name), zone_info->nr_zones,
599 		emulated, zone_info->zone_size);
600 
601 	return 0;
602 
603 out:
604 	kvfree(zones);
605 	btrfs_destroy_dev_zone_info(device);
606 	return ret;
607 }
608 
609 void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
610 {
611 	struct btrfs_zoned_device_info *zone_info = device->zone_info;
612 
613 	if (!zone_info)
614 		return;
615 
616 	bitmap_free(zone_info->active_zones);
617 	bitmap_free(zone_info->seq_zones);
618 	bitmap_free(zone_info->empty_zones);
619 	vfree(zone_info->zone_cache);
620 	kfree(zone_info);
621 	device->zone_info = NULL;
622 }
623 
624 struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
625 {
626 	struct btrfs_zoned_device_info *zone_info;
627 
628 	zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
629 	if (!zone_info)
630 		return NULL;
631 
632 	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
633 	if (!zone_info->seq_zones)
634 		goto out;
635 
636 	bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
637 		    zone_info->nr_zones);
638 
639 	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
640 	if (!zone_info->empty_zones)
641 		goto out;
642 
643 	bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
644 		    zone_info->nr_zones);
645 
646 	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
647 	if (!zone_info->active_zones)
648 		goto out;
649 
650 	bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
651 		    zone_info->nr_zones);
652 	zone_info->zone_cache = NULL;
653 
654 	return zone_info;
655 
656 out:
657 	bitmap_free(zone_info->seq_zones);
658 	bitmap_free(zone_info->empty_zones);
659 	bitmap_free(zone_info->active_zones);
660 	kfree(zone_info);
661 	return NULL;
662 }
663 
664 static int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos, struct blk_zone *zone)
665 {
666 	unsigned int nr_zones = 1;
667 	int ret;
668 
669 	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
670 	if (ret != 0 || !nr_zones)
671 		return ret ? ret : -EIO;
672 
673 	return 0;
674 }
675 
676 static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
677 {
678 	struct btrfs_device *device;
679 
680 	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
681 		if (device->bdev && bdev_is_zoned(device->bdev)) {
682 			btrfs_err(fs_info,
683 				"zoned: mode not enabled but zoned device found: %pg",
684 				device->bdev);
685 			return -EINVAL;
686 		}
687 	}
688 
689 	return 0;
690 }
691 
692 int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
693 {
694 	struct queue_limits *lim = &fs_info->limits;
695 	struct btrfs_device *device;
696 	u64 zone_size = 0;
697 	int ret;
698 
699 	/*
700 	 * Host-managed devices can't be used without the ZONED flag.  With the
701 	 * ZONED flag, all devices can be used, using zone emulation if required.
702 	 */
703 	if (!btrfs_fs_incompat(fs_info, ZONED))
704 		return btrfs_check_for_zoned_device(fs_info);
705 
706 	blk_set_stacking_limits(lim);
707 
708 	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
709 		struct btrfs_zoned_device_info *zone_info = device->zone_info;
710 
711 		if (!device->bdev)
712 			continue;
713 
714 		if (!zone_size) {
715 			zone_size = zone_info->zone_size;
716 		} else if (zone_info->zone_size != zone_size) {
717 			btrfs_err(fs_info,
718 		"zoned: unequal block device zone sizes: have %llu found %llu",
719 				  zone_info->zone_size, zone_size);
720 			return -EINVAL;
721 		}
722 
723 		/*
724 		 * With zoned emulation, we can have a non-zoned device in
725 		 * zoned mode. In this case, we don't have a valid max zone
726 		 * append size.
727 		 */
728 		if (bdev_is_zoned(device->bdev))
729 			blk_stack_limits(lim, bdev_limits(device->bdev), 0);
730 	}
731 
732 	ret = blk_validate_limits(lim);
733 	if (ret) {
734 		btrfs_err(fs_info, "zoned: failed to validate queue limits");
735 		return ret;
736 	}
737 
738 	/*
739 	 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
740 	 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
741 	 * check the alignment here.
742 	 */
743 	if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
744 		btrfs_err(fs_info,
745 			  "zoned: zone size %llu not aligned to stripe %u",
746 			  zone_size, BTRFS_STRIPE_LEN);
747 		return -EINVAL;
748 	}
749 
750 	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
751 		btrfs_err(fs_info, "zoned: mixed block groups not supported");
752 		return -EINVAL;
753 	}
754 
755 	fs_info->zone_size = zone_size;
756 	/*
757 	 * Also limit max_zone_append_size by max_segments * PAGE_SIZE.
758 	 * Technically, we can have multiple pages per segment. But since we
759 	 * add pages to a bio one by one and cannot increase the metadata
760 	 * reservation even if that increases the number of extents, it is
761 	 * safe to stick with this limit.
762 	 */
763 	fs_info->max_zone_append_size = ALIGN_DOWN(
764 		min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT,
765 		     (u64)lim->max_sectors << SECTOR_SHIFT,
766 		     (u64)lim->max_segments << PAGE_SHIFT),
767 		fs_info->sectorsize);
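	/*
	 * Worked example with illustrative limits: max_zone_append_sectors of
	 * 2048 (1 MiB), max_sectors of 2560 (1.25 MiB) and max_segments of 128
	 * (512 KiB with 4 KiB pages) give a max_zone_append_size of 512 KiB,
	 * which is already aligned to a 4 KiB sectorsize.
	 */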
768 	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
769 
770 	fs_info->max_extent_size = min_not_zero(fs_info->max_extent_size,
771 						fs_info->max_zone_append_size);
772 
773 	/*
774 	 * Check mount options here, because we might change fs_info->zoned
775 	 * from fs_info->zone_size.
776 	 */
777 	ret = btrfs_check_mountopts_zoned(fs_info, &fs_info->mount_opt);
778 	if (ret)
779 		return ret;
780 
781 	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
782 	return 0;
783 }
784 
785 int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info,
786 				unsigned long long *mount_opt)
787 {
788 	if (!btrfs_is_zoned(info))
789 		return 0;
790 
791 	/*
792 	 * Space cache writing is not COWed. Disable that to avoid write errors
793 	 * in sequential zones.
794 	 */
795 	if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
796 		btrfs_err(info, "zoned: space cache v1 is not supported");
797 		return -EINVAL;
798 	}
799 
800 	if (btrfs_raw_test_opt(*mount_opt, NODATACOW)) {
801 		btrfs_err(info, "zoned: NODATACOW not supported");
802 		return -EINVAL;
803 	}
804 
805 	if (btrfs_raw_test_opt(*mount_opt, DISCARD_ASYNC)) {
806 		btrfs_info(info,
807 			   "zoned: async discard ignored and disabled for zoned mode");
808 		btrfs_clear_opt(*mount_opt, DISCARD_ASYNC);
809 	}
810 
811 	return 0;
812 }
813 
814 static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
815 			   int rw, u64 *bytenr_ret)
816 {
817 	u64 wp;
818 	int ret;
819 
820 	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
821 		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
822 		return 0;
823 	}
824 
825 	ret = sb_write_pointer(bdev, zones, &wp);
826 	if (ret != -ENOENT && ret < 0)
827 		return ret;
828 
829 	if (rw == WRITE) {
830 		struct blk_zone *reset = NULL;
831 
832 		if (wp == zones[0].start << SECTOR_SHIFT)
833 			reset = &zones[0];
834 		else if (wp == zones[1].start << SECTOR_SHIFT)
835 			reset = &zones[1];
836 
837 		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
838 			unsigned int nofs_flags;
839 
840 			ASSERT(sb_zone_is_full(reset));
841 
842 			nofs_flags = memalloc_nofs_save();
843 			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
844 					       reset->start, reset->len);
845 			memalloc_nofs_restore(nofs_flags);
846 			if (ret)
847 				return ret;
848 
849 			reset->cond = BLK_ZONE_COND_EMPTY;
850 			reset->wp = reset->start;
851 		}
852 	} else if (ret != -ENOENT) {
853 		/*
854 		 * For READ, we want the previous superblock. If the write pointer
855 		 * is at the head of a zone, move it to the end of the other zone.
856 		 */
857 		u64 zone_end = 0;
858 
859 		if (wp == zones[0].start << SECTOR_SHIFT)
860 			zone_end = zones[1].start + zones[1].capacity;
861 		else if (wp == zones[1].start << SECTOR_SHIFT)
862 			zone_end = zones[0].start + zones[0].capacity;
863 		if (zone_end)
864 			wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
865 					BTRFS_SUPER_INFO_SIZE);
866 
867 		wp -= BTRFS_SUPER_INFO_SIZE;
868 	}
869 
870 	*bytenr_ret = wp;
871 	return 0;
872 
873 }
874 
875 int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
876 			       u64 *bytenr_ret)
877 {
878 	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
879 	sector_t zone_sectors;
880 	u32 sb_zone;
881 	int ret;
882 	u8 zone_sectors_shift;
883 	sector_t nr_sectors;
884 	u32 nr_zones;
885 
886 	if (!bdev_is_zoned(bdev)) {
887 		*bytenr_ret = btrfs_sb_offset(mirror);
888 		return 0;
889 	}
890 
891 	ASSERT(rw == READ || rw == WRITE);
892 
893 	zone_sectors = bdev_zone_sectors(bdev);
894 	if (!is_power_of_2(zone_sectors))
895 		return -EINVAL;
896 	zone_sectors_shift = ilog2(zone_sectors);
897 	nr_sectors = bdev_nr_sectors(bdev);
898 	nr_zones = nr_sectors >> zone_sectors_shift;
899 
900 	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
901 	if (sb_zone + 1 >= nr_zones)
902 		return -ENOENT;
903 
904 	ret = blkdev_report_zones_cached(bdev, zone_start_sector(sb_zone, bdev),
905 					 BTRFS_NR_SB_LOG_ZONES,
906 					 copy_zone_info_cb, zones);
907 	if (ret < 0)
908 		return ret;
909 	if (unlikely(ret != BTRFS_NR_SB_LOG_ZONES))
910 		return -EIO;
911 
912 	return sb_log_location(bdev, zones, rw, bytenr_ret);
913 }
914 
915 int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
916 			  u64 *bytenr_ret)
917 {
918 	struct btrfs_zoned_device_info *zinfo = device->zone_info;
919 	u32 zone_num;
920 
921 	/*
922 	 * For a zoned filesystem on a non-zoned block device, use the same
923 	 * super block locations as a regular filesystem. That way, the super
924 	 * block can always be retrieved and the zoned flag of the volume
925 	 * detected from the super block information.
926 	 */
927 	if (!bdev_is_zoned(device->bdev)) {
928 		*bytenr_ret = btrfs_sb_offset(mirror);
929 		return 0;
930 	}
931 
932 	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
933 	if (zone_num + 1 >= zinfo->nr_zones)
934 		return -ENOENT;
935 
936 	return sb_log_location(device->bdev,
937 			       &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
938 			       rw, bytenr_ret);
939 }
940 
941 static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
942 				  int mirror)
943 {
944 	u32 zone_num;
945 
946 	if (!zinfo)
947 		return false;
948 
949 	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
950 	if (zone_num + 1 >= zinfo->nr_zones)
951 		return false;
952 
953 	if (!test_bit(zone_num, zinfo->seq_zones))
954 		return false;
955 
956 	return true;
957 }
958 
959 int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
960 {
961 	struct btrfs_zoned_device_info *zinfo = device->zone_info;
962 	struct blk_zone *zone;
963 	int i;
964 
965 	if (!is_sb_log_zone(zinfo, mirror))
966 		return 0;
967 
968 	zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
969 	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
970 		/* Advance to the next zone */
971 		if (zone->cond == BLK_ZONE_COND_FULL) {
972 			zone++;
973 			continue;
974 		}
975 
976 		if (zone->cond == BLK_ZONE_COND_EMPTY)
977 			zone->cond = BLK_ZONE_COND_IMP_OPEN;
978 
979 		zone->wp += SUPER_INFO_SECTORS;
980 
981 		if (sb_zone_is_full(zone)) {
982 			/*
983 			 * No room left to write a new superblock. Since the
984 			 * superblock is written with REQ_SYNC, it is safe to
985 			 * finish the zone now.
986 			 *
987 			 * If the write pointer is exactly at the capacity,
988 			 * explicit ZONE_FINISH is not necessary.
989 			 */
990 			if (zone->wp != zone->start + zone->capacity) {
991 				unsigned int nofs_flags;
992 				int ret;
993 
994 				nofs_flags = memalloc_nofs_save();
995 				ret = blkdev_zone_mgmt(device->bdev,
996 						REQ_OP_ZONE_FINISH, zone->start,
997 						zone->len);
998 				memalloc_nofs_restore(nofs_flags);
999 				if (ret)
1000 					return ret;
1001 			}
1002 
1003 			zone->wp = zone->start + zone->len;
1004 			zone->cond = BLK_ZONE_COND_FULL;
1005 		}
1006 		return 0;
1007 	}
1008 
1009 	/* All the zones are FULL. Should not reach here. */
1010 	DEBUG_WARN("unexpected state, all zones full");
1011 	return -EIO;
1012 }
1013 
1014 int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
1015 {
1016 	unsigned int nofs_flags;
1017 	sector_t zone_sectors;
1018 	sector_t nr_sectors;
1019 	u8 zone_sectors_shift;
1020 	u32 sb_zone;
1021 	u32 nr_zones;
1022 	int ret;
1023 
1024 	zone_sectors = bdev_zone_sectors(bdev);
1025 	zone_sectors_shift = ilog2(zone_sectors);
1026 	nr_sectors = bdev_nr_sectors(bdev);
1027 	nr_zones = nr_sectors >> zone_sectors_shift;
1028 
1029 	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
1030 	if (sb_zone + 1 >= nr_zones)
1031 		return -ENOENT;
1032 
1033 	nofs_flags = memalloc_nofs_save();
1034 	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1035 			       zone_start_sector(sb_zone, bdev),
1036 			       zone_sectors * BTRFS_NR_SB_LOG_ZONES);
1037 	memalloc_nofs_restore(nofs_flags);
1038 	return ret;
1039 }
1040 
1041 /*
1042  * Find allocatable zones within a given region.
1043  *
1044  * @device:	the device to allocate a region on
1045  * @hole_start: the start of the hole in which to allocate the region
1046  * @num_bytes:	size of the wanted region
1047  * @hole_end:	the end of the hole
1048  * @return:	position of the first allocatable region found
1049  *
1050  * The allocatable region must not contain any superblock locations.
1051  */
1052 u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
1053 				 u64 hole_end, u64 num_bytes)
1054 {
1055 	struct btrfs_zoned_device_info *zinfo = device->zone_info;
1056 	const u8 shift = zinfo->zone_size_shift;
1057 	u64 nzones = num_bytes >> shift;
1058 	u64 pos = hole_start;
1059 	u64 begin, end;
1060 	bool have_sb;
1061 	int i;
1062 
1063 	ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size),
1064 	       "hole_start=%llu zinfo->zone_size=%llu", hole_start, zinfo->zone_size);
1065 	ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size),
1066 	       "num_bytes=%llu zinfo->zone_size=%llu", num_bytes, zinfo->zone_size);
1067 
1068 	while (pos < hole_end) {
1069 		begin = pos >> shift;
1070 		end = begin + nzones;
1071 
1072 		if (end > zinfo->nr_zones)
1073 			return hole_end;
1074 
1075 		/* Check if zones in the region are all empty */
1076 		if (btrfs_dev_is_sequential(device, pos) &&
1077 		    !bitmap_test_range_all_set(zinfo->empty_zones, begin, nzones)) {
1078 			pos += zinfo->zone_size;
1079 			continue;
1080 		}
1081 
1082 		have_sb = false;
1083 		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1084 			u32 sb_zone;
1085 			u64 sb_pos;
1086 
1087 			sb_zone = sb_zone_number(shift, i);
1088 			if (!(end <= sb_zone ||
1089 			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
1090 				have_sb = true;
1091 				pos = zone_start_physical(
1092 					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
1093 				break;
1094 			}
1095 
1096 			/* We also need to exclude regular superblock positions */
1097 			sb_pos = btrfs_sb_offset(i);
1098 			if (!(pos + num_bytes <= sb_pos ||
1099 			      sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
1100 				have_sb = true;
1101 				pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
1102 					    zinfo->zone_size);
1103 				break;
1104 			}
1105 		}
1106 		if (!have_sb)
1107 			break;
1108 	}
1109 
1110 	return pos;
1111 }
1112 
1113 static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
1114 {
1115 	struct btrfs_zoned_device_info *zone_info = device->zone_info;
1116 	unsigned int zno = (pos >> zone_info->zone_size_shift);
1117 
1118 	/* We can use any number of zones */
1119 	if (zone_info->max_active_zones == 0)
1120 		return true;
1121 
1122 	if (!test_bit(zno, zone_info->active_zones)) {
1123 		/* Active zone left? */
1124 		if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
1125 			return false;
1126 		if (test_and_set_bit(zno, zone_info->active_zones)) {
1127 			/* Someone already set the bit */
1128 			atomic_inc(&zone_info->active_zones_left);
1129 		}
1130 	}
1131 
1132 	return true;
1133 }
1134 
1135 static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
1136 {
1137 	struct btrfs_zoned_device_info *zone_info = device->zone_info;
1138 	unsigned int zno = (pos >> zone_info->zone_size_shift);
1139 
1140 	/* We can use any number of zones */
1141 	if (zone_info->max_active_zones == 0)
1142 		return;
1143 
1144 	if (test_and_clear_bit(zno, zone_info->active_zones))
1145 		atomic_inc(&zone_info->active_zones_left);
1146 }
1147 
1148 int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
1149 			    u64 length, u64 *bytes)
1150 {
1151 	unsigned int nofs_flags;
1152 	int ret;
1153 
1154 	*bytes = 0;
1155 	nofs_flags = memalloc_nofs_save();
1156 	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
1157 			       physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT);
1158 	memalloc_nofs_restore(nofs_flags);
1159 	if (ret)
1160 		return ret;
1161 
1162 	*bytes = length;
1163 	while (length) {
1164 		btrfs_dev_set_zone_empty(device, physical);
1165 		btrfs_dev_clear_active_zone(device, physical);
1166 		physical += device->zone_info->zone_size;
1167 		length -= device->zone_info->zone_size;
1168 	}
1169 
1170 	return 0;
1171 }
1172 
1173 int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
1174 {
1175 	struct btrfs_zoned_device_info *zinfo = device->zone_info;
1176 	const u8 shift = zinfo->zone_size_shift;
1177 	unsigned long begin = start >> shift;
1178 	unsigned long nbits = size >> shift;
1179 	u64 pos;
1180 	int ret;
1181 
1182 	ASSERT(IS_ALIGNED(start, zinfo->zone_size),
1183 	       "start=%llu, zinfo->zone_size=%llu", start, zinfo->zone_size);
1184 	ASSERT(IS_ALIGNED(size, zinfo->zone_size),
1185 	       "size=%llu, zinfo->zone_size=%llu", size, zinfo->zone_size);
1186 
1187 	if (begin + nbits > zinfo->nr_zones)
1188 		return -ERANGE;
1189 
1190 	/* All the zones are conventional */
1191 	if (bitmap_test_range_all_zero(zinfo->seq_zones, begin, nbits))
1192 		return 0;
1193 
1194 	/* All the zones are sequential and empty */
1195 	if (bitmap_test_range_all_set(zinfo->seq_zones, begin, nbits) &&
1196 	    bitmap_test_range_all_set(zinfo->empty_zones, begin, nbits))
1197 		return 0;
1198 
1199 	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
1200 		u64 reset_bytes;
1201 
1202 		if (!btrfs_dev_is_sequential(device, pos) ||
1203 		    btrfs_dev_is_empty_zone(device, pos))
1204 			continue;
1205 
1206 		/* Free regions should be empty */
1207 		btrfs_warn(
1208 			device->fs_info,
1209 		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
1210 			rcu_dereference(device->name), device->devid, pos >> shift);
1211 		WARN_ON_ONCE(1);
1212 
1213 		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
1214 					      &reset_bytes);
1215 		if (ret)
1216 			return ret;
1217 	}
1218 
1219 	return 0;
1220 }
1221 
1222 /*
1223  * Calculate an allocation pointer from the extent allocation information
1224  * for a block group consisting of conventional zones. It points to the
1225  * end of the highest addressed extent in the block group as an allocation
1226  * offset.
1227  */
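/*
 * For illustration: if a block group starts at logical address X and its
 * highest addressed extent is [X + 8 MiB, X + 8 MiB + 128 KiB), the returned
 * allocation offset is 8 MiB + 128 KiB.
 */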
1228 static int calculate_alloc_pointer(struct btrfs_block_group *cache,
1229 				   u64 *offset_ret, bool new)
1230 {
1231 	struct btrfs_fs_info *fs_info = cache->fs_info;
1232 	struct btrfs_root *root;
1233 	BTRFS_PATH_AUTO_FREE(path);
1234 	struct btrfs_key key;
1235 	struct btrfs_key found_key;
1236 	const u64 bg_end = btrfs_block_group_end(cache);
1237 	int ret;
1238 	u64 length;
1239 
1240 	/*
1241 	 * Avoid tree lookups for a new block group, there's no use for it.
1242 	 * It must always be 0.
1243 	 *
1244 	 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
1245 	 * For a new block group, this function is called from
1246 	 * btrfs_make_block_group() which is already taking the chunk mutex.
1247 	 * Thus, to avoid a deadlock, we must not do the extent tree lookup
1248 	 * here, as it takes extent buffer locks.
1249 	 */
1250 	if (new) {
1251 		*offset_ret = 0;
1252 		return 0;
1253 	}
1254 
1255 	path = btrfs_alloc_path();
1256 	if (!path)
1257 		return -ENOMEM;
1258 
1259 	key.objectid = bg_end;
1260 	key.type = 0;
1261 	key.offset = 0;
1262 
1263 	root = btrfs_extent_root(fs_info, key.objectid);
1264 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1265 	/* We should not find the exact match */
1266 	if (unlikely(!ret))
1267 		ret = -EUCLEAN;
1268 	if (ret < 0)
1269 		return ret;
1270 
1271 	ret = btrfs_previous_extent_item(root, path, cache->start);
1272 	if (ret) {
1273 		if (ret == 1) {
1274 			ret = 0;
1275 			*offset_ret = 0;
1276 		}
1277 		return ret;
1278 	}
1279 
1280 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
1281 
1282 	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
1283 		length = found_key.offset;
1284 	else
1285 		length = fs_info->nodesize;
1286 
1287 	if (unlikely(!(found_key.objectid >= cache->start &&
1288 		       found_key.objectid + length <= bg_end))) {
1289 		return -EUCLEAN;
1290 	}
1291 	*offset_ret = found_key.objectid + length - cache->start;
1292 	return 0;
1293 }
1294 
1295 struct zone_info {
1296 	u64 physical;
1297 	u64 capacity;
1298 	u64 alloc_offset;
1299 };
1300 
1301 static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
1302 				struct zone_info *info, unsigned long *active,
1303 				struct btrfs_chunk_map *map, bool new)
1304 {
1305 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1306 	struct btrfs_device *device;
1307 	int dev_replace_is_ongoing = 0;
1308 	unsigned int nofs_flag;
1309 	struct blk_zone zone;
1310 	int ret;
1311 
1312 	info->physical = map->stripes[zone_idx].physical;
1313 
1314 	down_read(&dev_replace->rwsem);
1315 	device = map->stripes[zone_idx].dev;
1316 
1317 	if (!device->bdev) {
1318 		up_read(&dev_replace->rwsem);
1319 		info->alloc_offset = WP_MISSING_DEV;
1320 		return 0;
1321 	}
1322 
1323 	/* Consider a zone as active if we can allow any number of active zones. */
1324 	if (!device->zone_info->max_active_zones)
1325 		__set_bit(zone_idx, active);
1326 
1327 	if (!btrfs_dev_is_sequential(device, info->physical)) {
1328 		up_read(&dev_replace->rwsem);
1329 		info->alloc_offset = WP_CONVENTIONAL;
1330 		info->capacity = device->zone_info->zone_size;
1331 		return 0;
1332 	}
1333 
1334 	ASSERT(!new || btrfs_dev_is_empty_zone(device, info->physical));
1335 
1336 	/* This zone will be used for allocation, so mark this zone non-empty. */
1337 	btrfs_dev_clear_zone_empty(device, info->physical);
1338 
1339 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
1340 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
1341 		btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
1342 
1343 	/*
1344 	 * The group is mapped to a sequential zone. Get the zone write pointer
1345 	 * to determine the allocation offset within the zone.
1346 	 */
1347 	WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
1348 
1349 	if (new) {
1350 		sector_t capacity;
1351 
1352 		capacity = bdev_zone_capacity(device->bdev, info->physical >> SECTOR_SHIFT);
1353 		up_read(&dev_replace->rwsem);
1354 		info->alloc_offset = 0;
1355 		info->capacity = capacity << SECTOR_SHIFT;
1356 
1357 		return 0;
1358 	}
1359 
1360 	nofs_flag = memalloc_nofs_save();
1361 	ret = btrfs_get_dev_zone(device, info->physical, &zone);
1362 	memalloc_nofs_restore(nofs_flag);
1363 	if (ret) {
1364 		up_read(&dev_replace->rwsem);
1365 		if (ret != -EIO && ret != -EOPNOTSUPP)
1366 			return ret;
1367 		info->alloc_offset = WP_MISSING_DEV;
1368 		return 0;
1369 	}
1370 
1371 	if (unlikely(zone.type == BLK_ZONE_TYPE_CONVENTIONAL)) {
1372 		btrfs_err(fs_info,
1373 		"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
1374 			zone.start << SECTOR_SHIFT, rcu_dereference(device->name),
1375 			device->devid);
1376 		up_read(&dev_replace->rwsem);
1377 		return -EIO;
1378 	}
1379 
1380 	info->capacity = (zone.capacity << SECTOR_SHIFT);
1381 
1382 	switch (zone.cond) {
1383 	case BLK_ZONE_COND_OFFLINE:
1384 	case BLK_ZONE_COND_READONLY:
1385 		btrfs_err(fs_info,
1386 		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
1387 			  (info->physical >> device->zone_info->zone_size_shift),
1388 			  rcu_dereference(device->name), device->devid);
1389 		info->alloc_offset = WP_MISSING_DEV;
1390 		break;
1391 	case BLK_ZONE_COND_EMPTY:
1392 		info->alloc_offset = 0;
1393 		break;
1394 	case BLK_ZONE_COND_FULL:
1395 		info->alloc_offset = info->capacity;
1396 		break;
1397 	default:
1398 		/* Partially used zone. */
1399 		info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
1400 		__set_bit(zone_idx, active);
1401 		break;
1402 	}
1403 
1404 	up_read(&dev_replace->rwsem);
1405 
1406 	return 0;
1407 }
1408 
1409 static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
1410 					 struct zone_info *info,
1411 					 unsigned long *active)
1412 {
1413 	if (unlikely(info->alloc_offset == WP_MISSING_DEV)) {
1414 		btrfs_err(bg->fs_info,
1415 			"zoned: cannot recover write pointer for zone %llu",
1416 			info->physical);
1417 		return -EIO;
1418 	}
1419 
1420 	bg->alloc_offset = info->alloc_offset;
1421 	bg->zone_capacity = info->capacity;
1422 	if (test_bit(0, active))
1423 		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1424 	return 0;
1425 }
1426 
1427 static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
1428 				      struct btrfs_chunk_map *map,
1429 				      struct zone_info *zone_info,
1430 				      unsigned long *active,
1431 				      u64 last_alloc)
1432 {
1433 	struct btrfs_fs_info *fs_info = bg->fs_info;
1434 
1435 	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1436 		btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree");
1437 		return -EINVAL;
1438 	}
1439 
1440 	bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
1441 
1442 	if (unlikely(zone_info[0].alloc_offset == WP_MISSING_DEV)) {
1443 		btrfs_err(fs_info,
1444 			  "zoned: cannot recover write pointer for zone %llu",
1445 			  zone_info[0].physical);
1446 		return -EIO;
1447 	}
1448 	if (unlikely(zone_info[1].alloc_offset == WP_MISSING_DEV)) {
1449 		btrfs_err(fs_info,
1450 			  "zoned: cannot recover write pointer for zone %llu",
1451 			  zone_info[1].physical);
1452 		return -EIO;
1453 	}
1454 
1455 	/*
1456 	 * When the last extent is removed, last_alloc can be smaller than the other write
1457 	 * pointer. In that case, last_alloc should be moved to the corresponding write
1458 	 * pointer position.
1459 	 */
1460 	for (int i = 0; i < map->num_stripes; i++) {
1461 		if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
1462 			continue;
1463 		if (last_alloc <= zone_info[i].alloc_offset) {
1464 			last_alloc = zone_info[i].alloc_offset;
1465 			break;
1466 		}
1467 	}
1468 
1469 	if (zone_info[0].alloc_offset == WP_CONVENTIONAL)
1470 		zone_info[0].alloc_offset = last_alloc;
1471 
1472 	if (zone_info[1].alloc_offset == WP_CONVENTIONAL)
1473 		zone_info[1].alloc_offset = last_alloc;
1474 
1475 	if (unlikely(zone_info[0].alloc_offset != zone_info[1].alloc_offset)) {
1476 		btrfs_err(fs_info,
1477 			  "zoned: write pointer offset mismatch of zones in DUP profile");
1478 		return -EIO;
1479 	}
1480 
1481 	if (test_bit(0, active) != test_bit(1, active)) {
1482 		if (unlikely(!btrfs_zone_activate(bg)))
1483 			return -EIO;
1484 	} else if (test_bit(0, active)) {
1485 		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1486 	}
1487 
1488 	bg->alloc_offset = zone_info[0].alloc_offset;
1489 	return 0;
1490 }
1491 
1492 static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
1493 					struct btrfs_chunk_map *map,
1494 					struct zone_info *zone_info,
1495 					unsigned long *active,
1496 					u64 last_alloc)
1497 {
1498 	struct btrfs_fs_info *fs_info = bg->fs_info;
1499 	int i;
1500 
1501 	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1502 		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1503 			  btrfs_bg_type_to_raid_name(map->type));
1504 		return -EINVAL;
1505 	}
1506 
1507 	/* In case a device is missing, its capacity is 0, so don't use it. */
1508 	bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
1509 
1510 	/*
1511 	 * When the last extent is removed, last_alloc can be smaller than the other write
1512 	 * pointer. In that case, last_alloc should be moved to the corresponding write
1513 	 * pointer position.
1514 	 */
1515 	for (i = 0; i < map->num_stripes; i++) {
1516 		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1517 		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
1518 			continue;
1519 		if (last_alloc <= zone_info[i].alloc_offset) {
1520 			last_alloc = zone_info[i].alloc_offset;
1521 			break;
1522 		}
1523 	}
1524 
1525 	for (i = 0; i < map->num_stripes; i++) {
1526 		if (zone_info[i].alloc_offset == WP_MISSING_DEV)
1527 			continue;
1528 
1529 		if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
1530 			zone_info[i].alloc_offset = last_alloc;
1531 
1532 		if (unlikely((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
1533 			     !btrfs_test_opt(fs_info, DEGRADED))) {
1534 			btrfs_err(fs_info,
1535 			"zoned: write pointer offset mismatch of zones in %s profile",
1536 				  btrfs_bg_type_to_raid_name(map->type));
1537 			return -EIO;
1538 		}
1539 		if (test_bit(0, active) != test_bit(i, active)) {
1540 			if (unlikely(!btrfs_test_opt(fs_info, DEGRADED) &&
1541 				     !btrfs_zone_activate(bg))) {
1542 				return -EIO;
1543 			}
1544 		} else {
1545 			if (test_bit(0, active))
1546 				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1547 		}
1548 	}
1549 
1550 	if (zone_info[0].alloc_offset != WP_MISSING_DEV)
1551 		bg->alloc_offset = zone_info[0].alloc_offset;
1552 	else
1553 		bg->alloc_offset = zone_info[i - 1].alloc_offset;
1554 
1555 	return 0;
1556 }
1557 
1558 static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
1559 					struct btrfs_chunk_map *map,
1560 					struct zone_info *zone_info,
1561 					unsigned long *active,
1562 					u64 last_alloc)
1563 {
1564 	struct btrfs_fs_info *fs_info = bg->fs_info;
1565 	u64 stripe_nr = 0, stripe_offset = 0;
1566 	u64 prev_offset = 0;
1567 	u32 stripe_index = 0;
1568 	bool has_partial = false, has_conventional = false;
1569 
1570 	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1571 		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1572 			  btrfs_bg_type_to_raid_name(map->type));
1573 		return -EINVAL;
1574 	}
1575 
1576 	/*
1577 	 * When the last extent is removed, last_alloc can be smaller than the other write
1578 	 * pointer. In that case, last_alloc should be moved to the corresponding write
1579 	 * pointer position.
1580 	 */
1581 	for (int i = 0; i < map->num_stripes; i++) {
1582 		u64 alloc;
1583 
1584 		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1585 		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
1586 			continue;
1587 
1588 		stripe_nr = zone_info[i].alloc_offset >> BTRFS_STRIPE_LEN_SHIFT;
1589 		stripe_offset = zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK;
1590 		if (stripe_offset == 0 && stripe_nr > 0) {
1591 			stripe_nr--;
1592 			stripe_offset = BTRFS_STRIPE_LEN;
1593 		}
1594 		alloc = ((stripe_nr * map->num_stripes + i) << BTRFS_STRIPE_LEN_SHIFT) +
1595 			stripe_offset;
1596 		last_alloc = max(last_alloc, alloc);
1597 
1598 		/* Partially written stripe found. It should be last. */
1599 		if (zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK)
1600 			break;
1601 	}
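	/*
	 * Illustration of the math above, assuming a 64 KiB BTRFS_STRIPE_LEN
	 * and two stripes: a per-zone alloc_offset of 196 KiB (3 full stripes
	 * plus 4 KiB) on stripe 1 maps to a block group offset of
	 * ((3 * 2 + 1) << BTRFS_STRIPE_LEN_SHIFT) + 4 KiB = 452 KiB.
	 */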
1602 	stripe_nr = 0;
1603 	stripe_offset = 0;
1604 
1605 	if (last_alloc) {
1606 		u32 factor = map->num_stripes;
1607 
1608 		stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
1609 		stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
1610 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
1611 	}
1612 
1613 	for (int i = 0; i < map->num_stripes; i++) {
1614 		if (zone_info[i].alloc_offset == WP_MISSING_DEV)
1615 			continue;
1616 
1617 		if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
1618 			has_conventional = true;
1619 			zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
1620 
1621 			if (stripe_index > i)
1622 				zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
1623 			else if (stripe_index == i)
1624 				zone_info[i].alloc_offset += stripe_offset;
1625 		}
1626 
1627 		/* Verification */
1628 		if (i != 0) {
1629 			if (unlikely(prev_offset < zone_info[i].alloc_offset)) {
1630 				btrfs_err(fs_info,
1631 				"zoned: stripe position disorder found in block group %llu",
1632 					  bg->start);
1633 				return -EIO;
1634 			}
1635 
1636 			if (unlikely(has_partial &&
1637 				     (zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK))) {
1638 				btrfs_err(fs_info,
1639 				"zoned: multiple partial written stripe found in block group %llu",
1640 					  bg->start);
1641 				return -EIO;
1642 			}
1643 		}
1644 		prev_offset = zone_info[i].alloc_offset;
1645 
1646 		if ((zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK) != 0)
1647 			has_partial = true;
1648 
1649 		if (test_bit(0, active) != test_bit(i, active)) {
1650 			if (unlikely(!btrfs_zone_activate(bg)))
1651 				return -EIO;
1652 		} else {
1653 			if (test_bit(0, active))
1654 				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1655 		}
1656 		bg->zone_capacity += zone_info[i].capacity;
1657 		bg->alloc_offset += zone_info[i].alloc_offset;
1658 	}
1659 
1660 	/* Check if all devices stay in the same stripe row. */
1661 	if (unlikely(zone_info[0].alloc_offset -
1662 		     zone_info[map->num_stripes - 1].alloc_offset > BTRFS_STRIPE_LEN)) {
1663 		btrfs_err(fs_info, "zoned: stripe gap too large in block group %llu", bg->start);
1664 		return -EIO;
1665 	}
1666 
1667 	if (unlikely(has_conventional && bg->alloc_offset < last_alloc)) {
1668 		btrfs_err(fs_info, "zoned: allocated extent stays beyond write pointers %llu %llu",
1669 			  bg->alloc_offset, last_alloc);
1670 		return -EIO;
1671 	}
1672 
1673 	return 0;
1674 }
1675 
1676 static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
1677 					 struct btrfs_chunk_map *map,
1678 					 struct zone_info *zone_info,
1679 					 unsigned long *active,
1680 					 u64 last_alloc)
1681 {
1682 	struct btrfs_fs_info *fs_info = bg->fs_info;
1683 	u64 AUTO_KFREE(raid0_allocs);
1684 	u64 stripe_nr = 0, stripe_offset = 0;
1685 	u32 stripe_index = 0;
1686 	bool has_partial = false, has_conventional = false;
1687 	u64 prev_offset = 0;
1688 
1689 	if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
1690 		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1691 			  btrfs_bg_type_to_raid_name(map->type));
1692 		return -EINVAL;
1693 	}
1694 
1695 	raid0_allocs = kcalloc(map->num_stripes / map->sub_stripes, sizeof(*raid0_allocs),
1696 			       GFP_NOFS);
1697 	if (!raid0_allocs)
1698 		return -ENOMEM;
1699 
1700 	/*
1701 	 * When the last extent is removed, last_alloc can be smaller than the
1702 	 * per-device write pointers. In that case, advance last_alloc to the
1703 	 * position corresponding to the largest device write pointer.
1704 	 */
1705 	for (int i = 0; i < map->num_stripes; i += map->sub_stripes) {
1706 		u64 alloc = zone_info[i].alloc_offset;
1707 
1708 		for (int j = 1; j < map->sub_stripes; j++) {
1709 			int idx = i + j;
1710 
1711 			if (zone_info[idx].alloc_offset == WP_MISSING_DEV ||
1712 			    zone_info[idx].alloc_offset == WP_CONVENTIONAL)
1713 				continue;
1714 			if (alloc == WP_MISSING_DEV || alloc == WP_CONVENTIONAL) {
1715 				alloc = zone_info[idx].alloc_offset;
1716 			} else if (unlikely(zone_info[idx].alloc_offset != alloc)) {
1717 				btrfs_err(fs_info,
1718 				"zoned: write pointer mismatch found in block group %llu",
1719 					  bg->start);
1720 				return -EIO;
1721 			}
1722 		}
1723 
1724 		raid0_allocs[i / map->sub_stripes] = alloc;
1725 		if (alloc == WP_CONVENTIONAL)
1726 			continue;
1727 		if (unlikely(alloc == WP_MISSING_DEV)) {
1728 			btrfs_err(fs_info,
1729 			"zoned: cannot recover write pointer of block group %llu due to missing device",
1730 				  bg->start);
1731 			return -EIO;
1732 		}
1733 
1734 		stripe_nr = alloc >> BTRFS_STRIPE_LEN_SHIFT;
1735 		stripe_offset = alloc & BTRFS_STRIPE_LEN_MASK;
1736 		if (stripe_offset == 0 && stripe_nr > 0) {
1737 			stripe_nr--;
1738 			stripe_offset = BTRFS_STRIPE_LEN;
1739 		}
1740 
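		/*
		 * Convert this RAID0 group's device write pointer into a
		 * logical offset inside the block group: stripe rows are
		 * interleaved across the num_stripes / sub_stripes groups, so
		 * the group at index i / sub_stripes owns one BTRFS_STRIPE_LEN
		 * sized slot per row.
		 */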
1741 		alloc = ((stripe_nr * (map->num_stripes / map->sub_stripes) +
1742 			  (i / map->sub_stripes)) <<
1743 			 BTRFS_STRIPE_LEN_SHIFT) + stripe_offset;
1744 		last_alloc = max(last_alloc, alloc);
1745 	}
1746 	stripe_nr = 0;
1747 	stripe_offset = 0;
1748 
1749 	if (last_alloc) {
1750 		u32 factor = map->num_stripes / map->sub_stripes;
1751 
1752 		stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
1753 		stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
1754 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
1755 	}
1756 
1757 	for (int i = 0; i < map->num_stripes; i++) {
1758 		int idx = i / map->sub_stripes;
1759 
1760 		if (raid0_allocs[idx] == WP_CONVENTIONAL) {
1761 			has_conventional = true;
1762 			raid0_allocs[idx] = btrfs_stripe_nr_to_offset(stripe_nr);
1763 
1764 			if (stripe_index > idx)
1765 				raid0_allocs[idx] += BTRFS_STRIPE_LEN;
1766 			else if (stripe_index == idx)
1767 				raid0_allocs[idx] += stripe_offset;
1768 		}
1769 
1770 		if ((i % map->sub_stripes) == 0) {
1771 			/* Verification */
1772 			if (i != 0) {
1773 				if (unlikely(prev_offset < raid0_allocs[idx])) {
1774 					btrfs_err(fs_info,
1775 					"zoned: stripe position disorder found in block group %llu",
1776 						  bg->start);
1777 					return -EIO;
1778 				}
1779 
1780 				if (unlikely(has_partial &&
1781 					     (raid0_allocs[idx] & BTRFS_STRIPE_LEN_MASK))) {
1782 					btrfs_err(fs_info,
1783 					"zoned: multiple partial written stripe found in block group %llu",
1784 						  bg->start);
1785 					return -EIO;
1786 				}
1787 			}
1788 			prev_offset = raid0_allocs[idx];
1789 
1790 			if ((raid0_allocs[idx] & BTRFS_STRIPE_LEN_MASK) != 0)
1791 				has_partial = true;
1792 		}
1793 
1794 		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
1795 		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
1796 			zone_info[i].alloc_offset = raid0_allocs[idx];
1797 
1798 		if (test_bit(0, active) != test_bit(i, active)) {
1799 			if (unlikely(!btrfs_zone_activate(bg)))
1800 				return -EIO;
1801 		} else if (test_bit(0, active)) {
1802 			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1803 		}
1804 
1805 		if ((i % map->sub_stripes) == 0) {
1806 			bg->zone_capacity += zone_info[i].capacity;
1807 			bg->alloc_offset += zone_info[i].alloc_offset;
1808 		}
1809 	}
1810 
1811 	/* Check if all devices stay in the same stripe row. */
1812 	if (unlikely(zone_info[0].alloc_offset -
1813 		     zone_info[map->num_stripes - 1].alloc_offset > BTRFS_STRIPE_LEN)) {
1814 		btrfs_err(fs_info, "zoned: stripe gap too large in block group %llu",
1815 			  bg->start);
1816 		return -EIO;
1817 	}
1818 
1819 	if (unlikely(has_conventional && bg->alloc_offset < last_alloc)) {
1820 		btrfs_err(fs_info, "zoned: allocated extent stays beyond write pointers %llu %llu",
1821 			  bg->alloc_offset, last_alloc);
1822 		return -EIO;
1823 	}
1824 
1825 	return 0;
1826 }
1827 
1828 EXPORT_FOR_TESTS
btrfs_load_block_group_by_raid_type(struct btrfs_block_group * bg,struct btrfs_chunk_map * map,struct zone_info * zone_info,unsigned long * active,u64 last_alloc)1829 int btrfs_load_block_group_by_raid_type(struct btrfs_block_group *bg,
1830 					struct btrfs_chunk_map *map,
1831 					struct zone_info *zone_info,
1832 					unsigned long *active, u64 last_alloc)
1833 {
1834 	struct btrfs_fs_info *fs_info = bg->fs_info;
1835 	u64 profile;
1836 	int ret;
1837 
1838 	profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
1839 	switch (profile) {
1840 	case 0: /* single */
1841 		ret = btrfs_load_block_group_single(bg, &zone_info[0], active);
1842 		break;
1843 	case BTRFS_BLOCK_GROUP_DUP:
1844 		ret = btrfs_load_block_group_dup(bg, map, zone_info, active, last_alloc);
1845 		break;
1846 	case BTRFS_BLOCK_GROUP_RAID1:
1847 	case BTRFS_BLOCK_GROUP_RAID1C3:
1848 	case BTRFS_BLOCK_GROUP_RAID1C4:
1849 		ret = btrfs_load_block_group_raid1(bg, map, zone_info, active, last_alloc);
1850 		break;
1851 	case BTRFS_BLOCK_GROUP_RAID0:
1852 		ret = btrfs_load_block_group_raid0(bg, map, zone_info, active, last_alloc);
1853 		break;
1854 	case BTRFS_BLOCK_GROUP_RAID10:
1855 		ret = btrfs_load_block_group_raid10(bg, map, zone_info, active, last_alloc);
1856 		break;
1857 	case BTRFS_BLOCK_GROUP_RAID5:
1858 	case BTRFS_BLOCK_GROUP_RAID6:
1859 	default:
1860 		btrfs_err(fs_info, "zoned: profile %s not yet supported",
1861 			  btrfs_bg_type_to_raid_name(map->type));
1862 		return -EINVAL;
1863 	}
1864 
1865 	if (ret == -EIO && profile != 0 && profile != BTRFS_BLOCK_GROUP_RAID0 &&
1866 	    profile != BTRFS_BLOCK_GROUP_RAID10) {
1867 		/*
1868 		 * Detected a broken write pointer.  Make this block group
1869 		 * unallocatable by setting the allocation pointer to the end of
1870 		 * the allocatable region. Relocating this block group will fix
1871 		 * the mismatch.
1872 		 *
1873 		 * Currently, we cannot handle the RAID0 or RAID10 case this way
1874 		 * because we do not have a proper zone_capacity value. But
1875 		 * reading from such a block group would not work anyway due to
1876 		 * a missing stripe.
1877 		 */
1878 		bg->alloc_offset = bg->zone_capacity;
1879 	}
1880 
1881 	return ret;
1882 }
1883 
btrfs_load_block_group_zone_info(struct btrfs_block_group * cache,bool new)1884 int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
1885 {
1886 	struct btrfs_fs_info *fs_info = cache->fs_info;
1887 	struct btrfs_chunk_map *map;
1888 	u64 logical = cache->start;
1889 	u64 length = cache->length;
1890 	struct zone_info AUTO_KFREE(zone_info);
1891 	int ret;
1892 	int i;
1893 	unsigned long *active = NULL;
1894 	u64 last_alloc = 0;
1895 	u32 num_sequential = 0, num_conventional = 0;
1896 
1897 	if (!btrfs_is_zoned(fs_info))
1898 		return 0;
1899 
1900 	/* Sanity check */
1901 	if (unlikely(!IS_ALIGNED(length, fs_info->zone_size))) {
1902 		btrfs_err(fs_info,
1903 		"zoned: block group %llu len %llu unaligned to zone size %llu",
1904 			  logical, length, fs_info->zone_size);
1905 		return -EIO;
1906 	}
1907 
1908 	map = btrfs_find_chunk_map(fs_info, logical, length);
1909 	if (!map)
1910 		return -EINVAL;
1911 
1912 	cache->physical_map = map;
1913 
1914 	zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
1915 	if (!zone_info) {
1916 		ret = -ENOMEM;
1917 		goto out;
1918 	}
1919 
1920 	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
1921 	if (!active) {
1922 		ret = -ENOMEM;
1923 		goto out;
1924 	}
1925 
1926 	for (i = 0; i < map->num_stripes; i++) {
1927 		ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map, new);
1928 		if (ret)
1929 			goto out;
1930 
1931 		if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
1932 			num_conventional++;
1933 		else
1934 			num_sequential++;
1935 	}
1936 
1937 	if (num_sequential > 0)
1938 		set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1939 
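	/*
	 * Conventional zones have no hardware write pointer, so an allocation
	 * offset has to be emulated from the extent information on disk. When
	 * every stripe is conventional, the emulated offset covers the whole
	 * block group, its full length is usable as capacity and the block
	 * group is simply marked active.
	 */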
1940 	if (num_conventional > 0) {
1941 		ret = calculate_alloc_pointer(cache, &last_alloc, new);
1942 		if (ret) {
1943 			btrfs_err(fs_info,
1944 			"zoned: failed to determine allocation offset of bg %llu",
1945 				  cache->start);
1946 			goto out;
1947 		} else if (map->num_stripes == num_conventional) {
1948 			cache->alloc_offset = last_alloc;
1949 			cache->zone_capacity = cache->length;
1950 			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
1951 			goto out;
1952 		}
1953 	}
1954 
1955 	ret = btrfs_load_block_group_by_raid_type(cache, map, zone_info, active, last_alloc);
1956 
1957 out:
1958 	/* Reject non SINGLE data profiles without RST */
1959 	if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
1960 	    (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
1961 	    !fs_info->stripe_root) {
1962 		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1963 			  btrfs_bg_type_to_raid_name(map->type));
1964 		ret = -EINVAL;
1965 	}
1966 
1967 	if (unlikely(cache->alloc_offset > cache->zone_capacity)) {
1968 		btrfs_err(fs_info,
1969 "zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
1970 			  cache->alloc_offset, cache->zone_capacity,
1971 			  cache->start);
1972 		ret = -EIO;
1973 	}
1974 
1975 	/* An extent is allocated after the write pointer */
1976 	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
1977 		btrfs_err(fs_info,
1978 			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
1979 			  logical, last_alloc, cache->alloc_offset);
1980 		ret = -EIO;
1981 	}
1982 
1983 	if (!ret) {
1984 		cache->meta_write_pointer = cache->alloc_offset + cache->start;
1985 		if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
1986 			btrfs_get_block_group(cache);
1987 			spin_lock(&fs_info->zone_active_bgs_lock);
1988 			list_add_tail(&cache->active_bg_list,
1989 				      &fs_info->zone_active_bgs);
1990 			spin_unlock(&fs_info->zone_active_bgs_lock);
1991 		}
1992 	} else {
1993 		btrfs_free_chunk_map(cache->physical_map);
1994 		cache->physical_map = NULL;
1995 	}
1996 	bitmap_free(active);
1997 
1998 	return ret;
1999 }
2000 
btrfs_calc_zone_unusable(struct btrfs_block_group * cache)2001 void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
2002 {
2003 	u64 unusable, free;
2004 
2005 	if (!btrfs_is_zoned(cache->fs_info))
2006 		return;
2007 
2008 	WARN_ON(cache->bytes_super != 0);
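	/*
	 * Two kinds of space are unusable on zoned mode: already written space
	 * that no longer backs live extents (alloc_offset - used) and the gap
	 * between the zone capacity and the zone size (length - zone_capacity).
	 * For example, a 256M block group with 200M zone capacity, 150M
	 * allocated and 100M used ends up with unusable = 50M + 56M = 106M and
	 * free = 50M.
	 */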
2009 	unusable = (cache->alloc_offset - cache->used) +
2010 		   (cache->length - cache->zone_capacity);
2011 	free = cache->zone_capacity - cache->alloc_offset;
2012 
2013 	/* We only need ->free_space in ALLOC_SEQ block groups */
2014 	cache->cached = BTRFS_CACHE_FINISHED;
2015 	cache->free_space_ctl->free_space = free;
2016 	cache->zone_unusable = unusable;
2017 }
2018 
btrfs_use_zone_append(struct btrfs_bio * bbio)2019 bool btrfs_use_zone_append(struct btrfs_bio *bbio)
2020 {
2021 	u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
2022 	struct btrfs_inode *inode = bbio->inode;
2023 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2024 	struct btrfs_block_group *cache;
2025 	bool ret = false;
2026 
2027 	if (!btrfs_is_zoned(fs_info))
2028 		return false;
2029 
2030 	if (!is_data_inode(inode))
2031 		return false;
2032 
2033 	if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
2034 		return false;
2035 
2036 	/*
2037 	 * Using REQ_OP_ZONE_APPEND for relocation can break the assumptions the
2038 	 * relocation code makes about the extent layout.
2039 	 * Furthermore, we have set aside our own block group from which only
2040 	 * the relocation "process" can allocate, and we make sure only one
2041 	 * process at a time can add pages to an extent that gets relocated, so
2042 	 * it is safe to use a regular REQ_OP_WRITE for this special case.
2043 	 */
2044 	if (btrfs_is_data_reloc_root(inode->root))
2045 		return false;
2046 
2047 	cache = btrfs_lookup_block_group(fs_info, start);
2048 	ASSERT(cache);
2049 	if (!cache)
2050 		return false;
2051 
2052 	ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
2053 	btrfs_put_block_group(cache);
2054 
2055 	return ret;
2056 }
2057 
btrfs_record_physical_zoned(struct btrfs_bio * bbio)2058 void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
2059 {
2060 	const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
2061 	struct btrfs_ordered_sum *sum = bbio->sums;
2062 
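	/*
	 * For a zone append write, bi_sector now holds where the data actually
	 * landed. Shift the logical address recorded in the btrfs_ordered_sum
	 * by the same delta from the originally mapped physical address, so
	 * btrfs_finish_ordered_zoned() can later fix up the ordered extent.
	 */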
2063 	if (physical < bbio->orig_physical)
2064 		sum->logical -= bbio->orig_physical - physical;
2065 	else
2066 		sum->logical += physical - bbio->orig_physical;
2067 }
2068 
btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent * ordered,u64 logical)2069 static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
2070 					u64 logical)
2071 {
2072 	struct extent_map_tree *em_tree = &ordered->inode->extent_tree;
2073 	struct extent_map *em;
2074 
2075 	ordered->disk_bytenr = logical;
2076 
2077 	write_lock(&em_tree->lock);
2078 	em = btrfs_search_extent_mapping(em_tree, ordered->file_offset,
2079 					 ordered->num_bytes);
2080 	/* The em should be a new COW extent, thus it should not have an offset. */
2081 	ASSERT(em->offset == 0, "em->offset=%llu", em->offset);
2082 	em->disk_bytenr = logical;
2083 	btrfs_free_extent_map(em);
2084 	write_unlock(&em_tree->lock);
2085 }
2086 
btrfs_zoned_split_ordered(struct btrfs_ordered_extent * ordered,u64 logical,u64 len)2087 static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
2088 				      u64 logical, u64 len)
2089 {
2090 	struct btrfs_ordered_extent *new;
2091 
2092 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
2093 	    btrfs_split_extent_map(ordered->inode, ordered->file_offset,
2094 				   ordered->num_bytes, len, logical))
2095 		return false;
2096 
2097 	new = btrfs_split_ordered_extent(ordered, len);
2098 	if (IS_ERR(new))
2099 		return false;
2100 	new->disk_bytenr = logical;
2101 	btrfs_finish_one_ordered(new);
2102 	return true;
2103 }
2104 
btrfs_finish_ordered_zoned(struct btrfs_ordered_extent * ordered)2105 void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
2106 {
2107 	struct btrfs_inode *inode = ordered->inode;
2108 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2109 	struct btrfs_ordered_sum *sum;
2110 	u64 logical, len;
2111 
2112 	/*
2113 	 * A write to a pre-allocated region is for data relocation, and so it
2114 	 * should use the WRITE operation. No split/rewrite is necessary.
2115 	 */
2116 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
2117 		return;
2118 
2119 	ASSERT(!list_empty(&ordered->list));
2120 	/* The ordered->list could only be empty in the pre-alloc case handled above. */
2121 	sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
2122 	logical = sum->logical;
2123 	len = sum->len;
2124 
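	/*
	 * Walk the recorded sums and merge ranges that ended up physically
	 * contiguous. Whenever the zone append writes landed discontiguously,
	 * split off a new ordered extent for the part written so far.
	 */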
2125 	while (len < ordered->disk_num_bytes) {
2126 		sum = list_next_entry(sum, list);
2127 		if (sum->logical == logical + len) {
2128 			len += sum->len;
2129 			continue;
2130 		}
2131 		if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
2132 			set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
2133 			btrfs_err(fs_info, "failed to split ordered extent");
2134 			goto out;
2135 		}
2136 		logical = sum->logical;
2137 		len = sum->len;
2138 	}
2139 
2140 	if (ordered->disk_bytenr != logical)
2141 		btrfs_rewrite_logical_zoned(ordered, logical);
2142 
2143 out:
2144 	/*
2145 	 * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures
2146 	 * were allocated by btrfs_alloc_dummy_sum only to record the logical
2147 	 * addresses and don't contain actual checksums.  We thus must free them
2148 	 * here so that we don't attempt to log the csums later.
2149 	 */
2150 	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
2151 	    test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state)) {
2152 		while ((sum = list_first_entry_or_null(&ordered->list,
2153 						       typeof(*sum), list))) {
2154 			list_del(&sum->list);
2155 			kfree(sum);
2156 		}
2157 	}
2158 }
2159 
check_bg_is_active(struct btrfs_eb_write_context * ctx,struct btrfs_block_group ** active_bg)2160 static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
2161 			       struct btrfs_block_group **active_bg)
2162 {
2163 	const struct writeback_control *wbc = ctx->wbc;
2164 	struct btrfs_block_group *block_group = ctx->zoned_bg;
2165 	struct btrfs_fs_info *fs_info = block_group->fs_info;
2166 
2167 	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
2168 		return true;
2169 
2170 	if (fs_info->treelog_bg == block_group->start) {
2171 		if (!btrfs_zone_activate(block_group)) {
2172 			int ret_fin = btrfs_zone_finish_one_bg(fs_info);
2173 
2174 			if (ret_fin != 1 || !btrfs_zone_activate(block_group))
2175 				return false;
2176 		}
2177 	} else if (*active_bg != block_group) {
2178 		struct btrfs_block_group *tgt = *active_bg;
2179 
2180 		/* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */
2181 		lockdep_assert_held(&fs_info->zoned_meta_io_lock);
2182 
2183 		if (tgt) {
2184 			/*
2185 			 * If there are unsent IOs left in the allocated area,
2186 			 * we cannot wait for them as that may cause a deadlock.
2187 			 */
2188 			if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
2189 				if (wbc->sync_mode == WB_SYNC_NONE ||
2190 				    (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync))
2191 					return false;
2192 			}
2193 
2194 			/* Pivot active metadata/system block group. */
2195 			btrfs_zoned_meta_io_unlock(fs_info);
2196 			wait_eb_writebacks(tgt);
2197 			do_zone_finish(tgt, true);
2198 			btrfs_zoned_meta_io_lock(fs_info);
2199 			if (*active_bg == tgt) {
2200 				btrfs_put_block_group(tgt);
2201 				*active_bg = NULL;
2202 			}
2203 		}
2204 		if (!btrfs_zone_activate(block_group))
2205 			return false;
2206 		if (*active_bg != block_group) {
2207 			ASSERT(*active_bg == NULL);
2208 			*active_bg = block_group;
2209 			btrfs_get_block_group(block_group);
2210 		}
2211 	}
2212 
2213 	return true;
2214 }
2215 
2216 /*
2217  * Check if @ctx->eb is aligned to the write pointer.
2218  *
2219  * Return:
2220  *   0:        @ctx->eb is at the write pointer. You can write it.
2221  *   -EAGAIN:  There is a hole. The caller should handle the case.
2222  *   -EBUSY:   There is a hole, but the caller can just bail out.
2223  */
btrfs_check_meta_write_pointer(struct btrfs_fs_info * fs_info,struct btrfs_eb_write_context * ctx)2224 int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
2225 				   struct btrfs_eb_write_context *ctx)
2226 {
2227 	const struct writeback_control *wbc = ctx->wbc;
2228 	const struct extent_buffer *eb = ctx->eb;
2229 	struct btrfs_block_group *block_group = ctx->zoned_bg;
2230 
2231 	if (!btrfs_is_zoned(fs_info))
2232 		return 0;
2233 
2234 	if (block_group) {
2235 		if (block_group->start > eb->start ||
2236 		    btrfs_block_group_end(block_group) <= eb->start) {
2237 			btrfs_put_block_group(block_group);
2238 			block_group = NULL;
2239 			ctx->zoned_bg = NULL;
2240 		}
2241 	}
2242 
2243 	if (!block_group) {
2244 		block_group = btrfs_lookup_block_group(fs_info, eb->start);
2245 		if (!block_group)
2246 			return 0;
2247 		ctx->zoned_bg = block_group;
2248 	}
2249 
2250 	if (block_group->meta_write_pointer == eb->start) {
2251 		struct btrfs_block_group **tgt;
2252 
2253 		if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
2254 			return 0;
2255 
2256 		if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)
2257 			tgt = &fs_info->active_system_bg;
2258 		else
2259 			tgt = &fs_info->active_meta_bg;
2260 		if (check_bg_is_active(ctx, tgt))
2261 			return 0;
2262 	}
2263 
2264 	/*
2265 	 * Since we may release fs_info->zoned_meta_io_lock, someone else may
2266 	 * have already started writing this eb. In that case, just bail out.
2267 	 */
2268 	if (block_group->meta_write_pointer > eb->start)
2269 		return -EBUSY;
2270 
2271 	/* If for_sync, this hole will be filled by a transaction commit. */
2272 	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
2273 		return -EAGAIN;
2274 	return -EBUSY;
2275 }
2276 
btrfs_zoned_issue_zeroout(struct btrfs_device * device,u64 physical,u64 length)2277 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
2278 {
2279 	if (!btrfs_dev_is_sequential(device, physical))
2280 		return -EOPNOTSUPP;
2281 
2282 	return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
2283 				    length >> SECTOR_SHIFT, GFP_NOFS, 0);
2284 }
2285 
read_zone_info(struct btrfs_fs_info * fs_info,u64 logical,struct blk_zone * zone)2286 static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
2287 			  struct blk_zone *zone)
2288 {
2289 	struct btrfs_io_context *bioc = NULL;
2290 	u64 mapped_length = PAGE_SIZE;
2291 	unsigned int nofs_flag;
2292 	int nmirrors;
2293 	int i, ret;
2294 
2295 	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2296 			      &mapped_length, &bioc, NULL, NULL);
2297 	if (unlikely(ret || !bioc || mapped_length < PAGE_SIZE)) {
2298 		ret = -EIO;
2299 		goto out_put_bioc;
2300 	}
2301 
2302 	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2303 		ret = -EINVAL;
2304 		goto out_put_bioc;
2305 	}
2306 
2307 	nofs_flag = memalloc_nofs_save();
2308 	nmirrors = (int)bioc->num_stripes;
2309 	for (i = 0; i < nmirrors; i++) {
2310 		u64 physical = bioc->stripes[i].physical;
2311 		struct btrfs_device *dev = bioc->stripes[i].dev;
2312 
2313 		/* Missing device */
2314 		if (!dev->bdev)
2315 			continue;
2316 
2317 		ret = btrfs_get_dev_zone(dev, physical, zone);
2318 		/* Failing device */
2319 		if (ret == -EIO || ret == -EOPNOTSUPP)
2320 			continue;
2321 		break;
2322 	}
2323 	memalloc_nofs_restore(nofs_flag);
2324 out_put_bioc:
2325 	btrfs_put_bioc(bioc);
2326 	return ret;
2327 }
2328 
2329 /*
2330  * Synchronize the write pointer in the zone at @physical_start on @tgt_dev
2331  * by writing zeros from @physical_pos up to the write pointer of the
2332  * dev-replace source device.
2333  */
btrfs_sync_zone_write_pointer(struct btrfs_device * tgt_dev,u64 logical,u64 physical_start,u64 physical_pos)2334 int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
2335 				    u64 physical_start, u64 physical_pos)
2336 {
2337 	struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
2338 	struct blk_zone zone;
2339 	u64 length;
2340 	u64 wp;
2341 	int ret;
2342 
2343 	if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
2344 		return 0;
2345 
2346 	ret = read_zone_info(fs_info, logical, &zone);
2347 	if (ret)
2348 		return ret;
2349 
2350 	wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
2351 
2352 	if (physical_pos == wp)
2353 		return 0;
2354 
2355 	if (unlikely(physical_pos > wp))
2356 		return -EUCLEAN;
2357 
2358 	length = wp - physical_pos;
2359 	return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
2360 }
2361 
2362 /*
2363  * Activate block group and underlying device zones
2364  *
2365  * @block_group: the block group to activate
2366  *
2367  * Return: true on success, false otherwise
2368  */
btrfs_zone_activate(struct btrfs_block_group * block_group)2369 bool btrfs_zone_activate(struct btrfs_block_group *block_group)
2370 {
2371 	struct btrfs_fs_info *fs_info = block_group->fs_info;
2372 	struct btrfs_chunk_map *map;
2373 	struct btrfs_device *device;
2374 	u64 physical;
2375 	const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA);
2376 	bool ret;
2377 	int i;
2378 
2379 	if (!btrfs_is_zoned(block_group->fs_info))
2380 		return true;
2381 
2382 	map = block_group->physical_map;
2383 
2384 	spin_lock(&fs_info->zone_active_bgs_lock);
2385 	spin_lock(&block_group->lock);
2386 	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
2387 		ret = true;
2388 		goto out_unlock;
2389 	}
2390 
2391 	if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) {
2392 		/* The caller should check if the block group is full. */
2393 		if (WARN_ON_ONCE(btrfs_zoned_bg_is_full(block_group))) {
2394 			ret = false;
2395 			goto out_unlock;
2396 		}
2397 	} else {
2398 		/* If anything was already written, the block group should already have been active. */
2399 		WARN_ON_ONCE(block_group->meta_write_pointer != block_group->start);
2400 	}
2401 
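	/*
	 * Take one active zone on every device backing this block group. A
	 * data block group must leave the reserved active zones untouched so
	 * metadata and system block groups can still be activated later, while
	 * a metadata/system block group consumes part of that reservation.
	 */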
2402 	for (i = 0; i < map->num_stripes; i++) {
2403 		struct btrfs_zoned_device_info *zinfo;
2404 		int reserved = 0;
2405 
2406 		device = map->stripes[i].dev;
2407 		physical = map->stripes[i].physical;
2408 		zinfo = device->zone_info;
2409 
2410 		if (!device->bdev)
2411 			continue;
2412 
2413 		if (zinfo->max_active_zones == 0)
2414 			continue;
2415 
2416 		if (is_data)
2417 			reserved = zinfo->reserved_active_zones;
2418 		/*
2419 		 * For the data block group, leave active zones for one
2420 		 * metadata block group and one system block group.
2421 		 */
2422 		if (atomic_read(&zinfo->active_zones_left) <= reserved) {
2423 			ret = false;
2424 			goto out_unlock;
2425 		}
2426 
2427 		if (!btrfs_dev_set_active_zone(device, physical)) {
2428 			/* Cannot activate the zone */
2429 			ret = false;
2430 			goto out_unlock;
2431 		}
2432 		if (!is_data)
2433 			zinfo->reserved_active_zones--;
2434 	}
2435 
2436 	/* Successfully activated all the zones */
2437 	set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2438 	spin_unlock(&block_group->lock);
2439 
2440 	/* For the active block group list */
2441 	btrfs_get_block_group(block_group);
2442 	list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
2443 	spin_unlock(&fs_info->zone_active_bgs_lock);
2444 
2445 	return true;
2446 
2447 out_unlock:
2448 	spin_unlock(&block_group->lock);
2449 	spin_unlock(&fs_info->zone_active_bgs_lock);
2450 	return ret;
2451 }
2452 
wait_eb_writebacks(struct btrfs_block_group * block_group)2453 static void wait_eb_writebacks(struct btrfs_block_group *block_group)
2454 {
2455 	struct btrfs_fs_info *fs_info = block_group->fs_info;
2456 	const u64 end = btrfs_block_group_end(block_group);
2457 	struct extent_buffer *eb;
2458 	unsigned long index, start = (block_group->start >> fs_info->nodesize_bits);
2459 
2460 	rcu_read_lock();
2461 	xa_for_each_start(&fs_info->buffer_tree, index, eb, start) {
2462 		if (eb->start < block_group->start)
2463 			continue;
2464 		if (eb->start >= end)
2465 			break;
2466 		rcu_read_unlock();
2467 		wait_on_extent_buffer_writeback(eb);
2468 		rcu_read_lock();
2469 	}
2470 	rcu_read_unlock();
2471 }
2472 
call_zone_finish(struct btrfs_block_group * block_group,struct btrfs_io_stripe * stripe)2473 static int call_zone_finish(struct btrfs_block_group *block_group,
2474 			    struct btrfs_io_stripe *stripe)
2475 {
2476 	struct btrfs_device *device = stripe->dev;
2477 	const u64 physical = stripe->physical;
2478 	struct btrfs_zoned_device_info *zinfo = device->zone_info;
2479 	int ret;
2480 
2481 	if (!device->bdev)
2482 		return 0;
2483 
2484 	if (zinfo->max_active_zones == 0)
2485 		return 0;
2486 
2487 	if (btrfs_dev_is_sequential(device, physical)) {
2488 		unsigned int nofs_flags;
2489 
2490 		nofs_flags = memalloc_nofs_save();
2491 		ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
2492 				       physical >> SECTOR_SHIFT,
2493 				       zinfo->zone_size >> SECTOR_SHIFT);
2494 		memalloc_nofs_restore(nofs_flags);
2495 
2496 		if (ret)
2497 			return ret;
2498 	}
2499 
2500 	if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
2501 		zinfo->reserved_active_zones++;
2502 	btrfs_dev_clear_active_zone(device, physical);
2503 
2504 	return 0;
2505 }
2506 
do_zone_finish(struct btrfs_block_group * block_group,bool fully_written)2507 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
2508 {
2509 	struct btrfs_fs_info *fs_info = block_group->fs_info;
2510 	struct btrfs_chunk_map *map;
2511 	const bool is_metadata = (block_group->flags &
2512 			(BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
2513 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2514 	int ret = 0;
2515 	int i;
2516 
2517 	spin_lock(&block_group->lock);
2518 	if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
2519 		spin_unlock(&block_group->lock);
2520 		return 0;
2521 	}
2522 
2523 	/* Check if we have unwritten allocated space */
2524 	if (is_metadata &&
2525 	    block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
2526 		spin_unlock(&block_group->lock);
2527 		return -EAGAIN;
2528 	}
2529 
2530 	/*
2531 	 * If we are sure that the block group is full (= no more room left for
2532 	 * new allocation) and the IO for the last usable block is completed, we
2533 	 * don't need to wait for the other IOs. This holds because we ensure
2534 	 * the sequential IO submissions using the ZONE_APPEND command for data
2535 	 * and block_group->meta_write_pointer for metadata.
2536 	 */
2537 	if (!fully_written) {
2538 		if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
2539 			spin_unlock(&block_group->lock);
2540 			return -EAGAIN;
2541 		}
2542 		spin_unlock(&block_group->lock);
2543 
2544 		ret = btrfs_inc_block_group_ro(block_group, false);
2545 		if (ret)
2546 			return ret;
2547 
2548 		/* Ensure all writes in this block group finish */
2549 		btrfs_wait_block_group_reservations(block_group);
2550 		/* No need to wait for NOCOW writers. Zoned mode does not allow that */
2551 		btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group);
2552 		/* Wait for extent buffers to be written. */
2553 		if (is_metadata)
2554 			wait_eb_writebacks(block_group);
2555 
2556 		spin_lock(&block_group->lock);
2557 
2558 		/*
2559 		 * Bail out if someone already deactivated the block group, or if
2560 		 * reserved space or a pending data relocation remains in it.
2561 		 */
2562 		if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2563 			      &block_group->runtime_flags)) {
2564 			spin_unlock(&block_group->lock);
2565 			btrfs_dec_block_group_ro(block_group);
2566 			return 0;
2567 		}
2568 
2569 		if (block_group->reserved ||
2570 		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2571 			     &block_group->runtime_flags)) {
2572 			spin_unlock(&block_group->lock);
2573 			btrfs_dec_block_group_ro(block_group);
2574 			return -EAGAIN;
2575 		}
2576 	}
2577 
2578 	clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2579 	block_group->alloc_offset = block_group->zone_capacity;
2580 	if (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM))
2581 		block_group->meta_write_pointer = block_group->start +
2582 						  block_group->zone_capacity;
2583 	block_group->free_space_ctl->free_space = 0;
2584 	btrfs_clear_treelog_bg(block_group);
2585 	btrfs_clear_data_reloc_bg(block_group);
2586 	spin_unlock(&block_group->lock);
2587 
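	/*
	 * Finish the zone on every device stripe. Hold the dev-replace rwsem
	 * so the stripe targets cannot change underneath us.
	 */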
2588 	down_read(&dev_replace->rwsem);
2589 	map = block_group->physical_map;
2590 	for (i = 0; i < map->num_stripes; i++) {
2591 
2592 		ret = call_zone_finish(block_group, &map->stripes[i]);
2593 		if (ret) {
2594 			up_read(&dev_replace->rwsem);
2595 			return ret;
2596 		}
2597 	}
2598 	up_read(&dev_replace->rwsem);
2599 
2600 	if (!fully_written)
2601 		btrfs_dec_block_group_ro(block_group);
2602 
2603 	spin_lock(&fs_info->zone_active_bgs_lock);
2604 	ASSERT(!list_empty(&block_group->active_bg_list));
2605 	list_del_init(&block_group->active_bg_list);
2606 	spin_unlock(&fs_info->zone_active_bgs_lock);
2607 
2608 	/* For active_bg_list */
2609 	btrfs_put_block_group(block_group);
2610 
2611 	clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2612 
2613 	return 0;
2614 }
2615 
btrfs_zone_finish(struct btrfs_block_group * block_group)2616 int btrfs_zone_finish(struct btrfs_block_group *block_group)
2617 {
2618 	if (!btrfs_is_zoned(block_group->fs_info))
2619 		return 0;
2620 
2621 	return do_zone_finish(block_group, false);
2622 }
2623 
btrfs_can_activate_zone(struct btrfs_fs_devices * fs_devices,u64 flags)2624 bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
2625 {
2626 	struct btrfs_fs_info *fs_info = fs_devices->fs_info;
2627 	struct btrfs_device *device;
2628 	bool ret = false;
2629 
2630 	if (!btrfs_is_zoned(fs_info))
2631 		return true;
2632 
2633 	if (test_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags))
2634 		return false;
2635 
2636 	/* Check if there is a device with active zones left */
2637 	mutex_lock(&fs_info->chunk_mutex);
2638 	spin_lock(&fs_info->zone_active_bgs_lock);
2639 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
2640 		struct btrfs_zoned_device_info *zinfo = device->zone_info;
2641 		int reserved = 0;
2642 
2643 		if (!device->bdev)
2644 			continue;
2645 
2646 		if (!zinfo->max_active_zones) {
2647 			ret = true;
2648 			break;
2649 		}
2650 
2651 		if (flags & BTRFS_BLOCK_GROUP_DATA)
2652 			reserved = zinfo->reserved_active_zones;
2653 
2654 		switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
2655 		case 0: /* single */
2656 			ret = (atomic_read(&zinfo->active_zones_left) >= (1 + reserved));
2657 			break;
2658 		case BTRFS_BLOCK_GROUP_DUP:
2659 			ret = (atomic_read(&zinfo->active_zones_left) >= (2 + reserved));
2660 			break;
2661 		}
2662 		if (ret)
2663 			break;
2664 	}
2665 	spin_unlock(&fs_info->zone_active_bgs_lock);
2666 	mutex_unlock(&fs_info->chunk_mutex);
2667 
2668 	if (!ret)
2669 		set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2670 
2671 	return ret;
2672 }
2673 
btrfs_zone_finish_endio(struct btrfs_fs_info * fs_info,u64 logical,u64 length)2674 int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
2675 {
2676 	struct btrfs_block_group *block_group;
2677 	u64 min_alloc_bytes;
2678 
2679 	if (!btrfs_is_zoned(fs_info))
2680 		return 0;
2681 
2682 	block_group = btrfs_lookup_block_group(fs_info, logical);
2683 	if (WARN_ON_ONCE(!block_group))
2684 		return -ENOENT;
2685 
2686 	/* No MIXED_BG on zoned btrfs. */
2687 	if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
2688 		min_alloc_bytes = fs_info->sectorsize;
2689 	else
2690 		min_alloc_bytes = fs_info->nodesize;
2691 
2692 	/* Bail out if we can allocate more data from this block group. */
2693 	if (logical + length + min_alloc_bytes <=
2694 	    block_group->start + block_group->zone_capacity)
2695 		goto out;
2696 
2697 	do_zone_finish(block_group, true);
2698 
2699 out:
2700 	btrfs_put_block_group(block_group);
2701 	return 0;
2702 }
2703 
btrfs_zone_finish_endio_workfn(struct work_struct * work)2704 static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
2705 {
2706 	int ret;
2707 	struct btrfs_block_group *bg =
2708 		container_of(work, struct btrfs_block_group, zone_finish_work);
2709 
2710 	wait_on_extent_buffer_writeback(bg->last_eb);
2711 	free_extent_buffer(bg->last_eb);
2712 	ret = do_zone_finish(bg, true);
2713 	if (ret)
2714 		btrfs_handle_fs_error(bg->fs_info, ret,
2715 				      "Failed to finish block-group's zone");
2716 	btrfs_put_block_group(bg);
2717 }
2718 
btrfs_schedule_zone_finish_bg(struct btrfs_block_group * bg,struct extent_buffer * eb)2719 void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
2720 				   struct extent_buffer *eb)
2721 {
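	/*
	 * Only schedule a zone finish when no further tree block can fit
	 * behind this extent buffer within the zone capacity; otherwise more
	 * metadata writes may still go to this block group.
	 */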
2722 	if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
2723 	    eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
2724 		return;
2725 
2726 	if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
2727 		btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
2728 			  bg->start);
2729 		return;
2730 	}
2731 
2732 	/* For the work */
2733 	btrfs_get_block_group(bg);
2734 	refcount_inc(&eb->refs);
2735 	bg->last_eb = eb;
2736 	INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
2737 	queue_work(system_dfl_wq, &bg->zone_finish_work);
2738 }
2739 
btrfs_clear_data_reloc_bg(struct btrfs_block_group * bg)2740 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
2741 {
2742 	struct btrfs_fs_info *fs_info = bg->fs_info;
2743 
2744 	spin_lock(&fs_info->relocation_bg_lock);
2745 	if (fs_info->data_reloc_bg == bg->start)
2746 		fs_info->data_reloc_bg = 0;
2747 	spin_unlock(&fs_info->relocation_bg_lock);
2748 }
2749 
btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info * fs_info)2750 void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info)
2751 {
2752 	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
2753 	struct btrfs_space_info *space_info = data_sinfo;
2754 	struct btrfs_trans_handle *trans;
2755 	struct btrfs_block_group *bg;
2756 	struct list_head *bg_list;
2757 	u64 alloc_flags;
2758 	bool first = true;
2759 	bool did_chunk_alloc = false;
2760 	int index;
2761 	int ret;
2762 
2763 	if (!btrfs_is_zoned(fs_info))
2764 		return;
2765 
2766 	if (fs_info->data_reloc_bg)
2767 		return;
2768 
2769 	if (sb_rdonly(fs_info->sb))
2770 		return;
2771 
2772 	alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags);
2773 	index = btrfs_bg_flags_to_raid_index(alloc_flags);
2774 
2775 	/* Scan the data space_info to find empty block groups. Take the second one. */
2776 again:
2777 	bg_list = &space_info->block_groups[index];
2778 	list_for_each_entry(bg, bg_list, list) {
2779 		if (bg->alloc_offset != 0)
2780 			continue;
2781 
2782 		if (first) {
2783 			first = false;
2784 			continue;
2785 		}
2786 
2787 		if (space_info == data_sinfo) {
2788 			/* Migrate the block group to the data relocation space_info. */
2789 			struct btrfs_space_info *reloc_sinfo = data_sinfo->sub_group[0];
2790 			int factor;
2791 
2792 			ASSERT(reloc_sinfo->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC,
2793 			       "reloc_sinfo->subgroup_id=%d", reloc_sinfo->subgroup_id);
2794 			factor = btrfs_bg_type_to_factor(bg->flags);
2795 
2796 			down_write(&space_info->groups_sem);
2797 			list_del_init(&bg->list);
2798 			/* We can assume this as we choose the second empty one. */
2799 			ASSERT(!list_empty(&space_info->block_groups[index]));
2800 			up_write(&space_info->groups_sem);
2801 
2802 			spin_lock(&space_info->lock);
2803 			space_info->total_bytes -= bg->length;
2804 			space_info->disk_total -= bg->length * factor;
2805 			space_info->disk_total -= bg->zone_unusable;
2806 			/* No allocation has ever happened in this block group. */
2807 			ASSERT(bg->used == 0, "bg->used=%llu", bg->used);
2808 			/* No super block in a block group on the zoned setup. */
2809 			ASSERT(bg->bytes_super == 0, "bg->bytes_super=%llu", bg->bytes_super);
2810 			spin_unlock(&space_info->lock);
2811 
2812 			bg->space_info = reloc_sinfo;
2813 			if (reloc_sinfo->block_group_kobjs[index] == NULL)
2814 				btrfs_sysfs_add_block_group_type(bg);
2815 
2816 			btrfs_add_bg_to_space_info(fs_info, bg);
2817 		}
2818 
2819 		fs_info->data_reloc_bg = bg->start;
2820 		set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &bg->runtime_flags);
2821 		btrfs_zone_activate(bg);
2822 
2823 		return;
2824 	}
2825 
2826 	if (did_chunk_alloc)
2827 		return;
2828 
2829 	trans = btrfs_join_transaction(fs_info->tree_root);
2830 	if (IS_ERR(trans))
2831 		return;
2832 
2833 	/* Allocate new BG in the data relocation space_info. */
2834 	space_info = data_sinfo->sub_group[0];
2835 	ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC,
2836 	       "space_info->subgroup_id=%d", space_info->subgroup_id);
2837 	ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE);
2838 	btrfs_end_transaction(trans);
2839 	if (ret == 1) {
2840 		/*
2841 		 * We allocated a new block group in the data relocation space_info. We
2842 		 * can take that one.
2843 		 */
2844 		first = false;
2845 		did_chunk_alloc = true;
2846 		goto again;
2847 	}
2848 }
2849 
btrfs_free_zone_cache(struct btrfs_fs_info * fs_info)2850 void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
2851 {
2852 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2853 	struct btrfs_device *device;
2854 
2855 	if (!btrfs_is_zoned(fs_info))
2856 		return;
2857 
2858 	mutex_lock(&fs_devices->device_list_mutex);
2859 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
2860 		if (device->zone_info) {
2861 			vfree(device->zone_info->zone_cache);
2862 			device->zone_info->zone_cache = NULL;
2863 		}
2864 	}
2865 	mutex_unlock(&fs_devices->device_list_mutex);
2866 }
2867 
btrfs_zoned_should_reclaim(const struct btrfs_fs_info * fs_info)2868 bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info)
2869 {
2870 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2871 	struct btrfs_device *device;
2872 	u64 total = btrfs_super_total_bytes(fs_info->super_copy);
2873 	u64 used = 0;
2874 	u64 factor;
2875 
2876 	ASSERT(btrfs_is_zoned(fs_info));
2877 
2878 	if (fs_info->bg_reclaim_threshold == 0)
2879 		return false;
2880 
2881 	mutex_lock(&fs_devices->device_list_mutex);
2882 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
2883 		if (!device->bdev)
2884 			continue;
2885 
2886 		used += device->bytes_used;
2887 	}
2888 	mutex_unlock(&fs_devices->device_list_mutex);
2889 
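	/*
	 * Reclaim once the used portion of the whole filesystem crosses the
	 * threshold. For example, with 1T of total device bytes, 800G used and
	 * a (hypothetical) bg_reclaim_threshold of 75, factor is 78 and
	 * reclaim kicks in.
	 */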
2890 	factor = div64_u64(used * 100, total);
2891 	return factor >= fs_info->bg_reclaim_threshold;
2892 }
2893 
btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info * fs_info,u64 logical,u64 length)2894 void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
2895 				       u64 length)
2896 {
2897 	struct btrfs_block_group *block_group;
2898 
2899 	if (!btrfs_is_zoned(fs_info))
2900 		return;
2901 
2902 	block_group = btrfs_lookup_block_group(fs_info, logical);
2903 	/* It should be called on a previous data relocation block group. */
2904 	ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
2905 
2906 	spin_lock(&block_group->lock);
2907 	if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
2908 		goto out;
2909 
2910 	/* All relocation extents are written. */
2911 	if (block_group->start + block_group->alloc_offset == logical + length) {
2912 		/*
2913 		 * Now, release this block group for further allocations and
2914 		 * zone finish.
2915 		 */
2916 		clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2917 			  &block_group->runtime_flags);
2918 	}
2919 
2920 out:
2921 	spin_unlock(&block_group->lock);
2922 	btrfs_put_block_group(block_group);
2923 }
2924 
btrfs_zone_finish_one_bg(struct btrfs_fs_info * fs_info)2925 int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
2926 {
2927 	struct btrfs_block_group *block_group;
2928 	struct btrfs_block_group *min_bg = NULL;
2929 	u64 min_avail = U64_MAX;
2930 	int ret;
2931 
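	/*
	 * Pick the active data block group with the least remaining capacity
	 * and finish it, so an active zone is freed while giving up as little
	 * allocatable space as possible.
	 */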
2932 	spin_lock(&fs_info->zone_active_bgs_lock);
2933 	list_for_each_entry(block_group, &fs_info->zone_active_bgs,
2934 			    active_bg_list) {
2935 		u64 avail;
2936 
2937 		spin_lock(&block_group->lock);
2938 		if (block_group->reserved || block_group->alloc_offset == 0 ||
2939 		    !(block_group->flags & BTRFS_BLOCK_GROUP_DATA) ||
2940 		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
2941 			spin_unlock(&block_group->lock);
2942 			continue;
2943 		}
2944 
2945 		avail = block_group->zone_capacity - block_group->alloc_offset;
2946 		if (min_avail > avail) {
2947 			if (min_bg)
2948 				btrfs_put_block_group(min_bg);
2949 			min_bg = block_group;
2950 			min_avail = avail;
2951 			btrfs_get_block_group(min_bg);
2952 		}
2953 		spin_unlock(&block_group->lock);
2954 	}
2955 	spin_unlock(&fs_info->zone_active_bgs_lock);
2956 
2957 	if (!min_bg)
2958 		return 0;
2959 
2960 	ret = btrfs_zone_finish(min_bg);
2961 	btrfs_put_block_group(min_bg);
2962 
2963 	return ret < 0 ? ret : 1;
2964 }
2965 
btrfs_zoned_activate_one_bg(struct btrfs_space_info * space_info,bool do_finish)2966 int btrfs_zoned_activate_one_bg(struct btrfs_space_info *space_info, bool do_finish)
2967 {
2968 	struct btrfs_fs_info *fs_info = space_info->fs_info;
2969 	struct btrfs_block_group *bg;
2970 	int index;
2971 
2972 	if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
2973 		return 0;
2974 
2975 	for (;;) {
2976 		int ret;
2977 		bool need_finish = false;
2978 
2979 		down_read(&space_info->groups_sem);
2980 		for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
2981 			list_for_each_entry(bg, &space_info->block_groups[index],
2982 					    list) {
2983 				if (!spin_trylock(&bg->lock))
2984 					continue;
2985 				if (btrfs_zoned_bg_is_full(bg) ||
2986 				    test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2987 					     &bg->runtime_flags)) {
2988 					spin_unlock(&bg->lock);
2989 					continue;
2990 				}
2991 				spin_unlock(&bg->lock);
2992 
2993 				if (btrfs_zone_activate(bg)) {
2994 					up_read(&space_info->groups_sem);
2995 					return 1;
2996 				}
2997 
2998 				need_finish = true;
2999 			}
3000 		}
3001 		up_read(&space_info->groups_sem);
3002 
3003 		if (!do_finish || !need_finish)
3004 			break;
3005 
3006 		ret = btrfs_zone_finish_one_bg(fs_info);
3007 		if (ret == 0)
3008 			break;
3009 		if (ret < 0)
3010 			return ret;
3011 	}
3012 
3013 	return 0;
3014 }
3015 
3016 /*
3017  * Reserve zones for one metadata block group, one tree-log block group, and one
3018  * system block group.
3019  */
btrfs_check_active_zone_reservation(struct btrfs_fs_info * fs_info)3020 void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
3021 {
3022 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3023 	struct btrfs_block_group *block_group;
3024 	struct btrfs_device *device;
3025 	/* Reserve zones for normal SINGLE metadata and tree-log block group. */
3026 	unsigned int metadata_reserve = 2;
3027 	/* Reserve a zone for SINGLE system block group. */
3028 	unsigned int system_reserve = 1;
3029 
3030 	if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
3031 		return;
3032 
3033 	/*
3034 	 * This function is called from the mount context. So, there is no
3035 	 * parallel process touching the bits. No need for read_seqretry().
3036 	 */
3037 	if (fs_info->avail_metadata_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
3038 		metadata_reserve = 4;
3039 	if (fs_info->avail_system_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
3040 		system_reserve = 2;
3041 
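	/*
	 * For example, with DUP metadata and DUP system chunks, each device
	 * ends up reserving 4 + 2 = 6 active zones; reservations for block
	 * groups that are already active are released again further below.
	 */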
3042 	/* Apply the reservation on all the devices. */
3043 	mutex_lock(&fs_devices->device_list_mutex);
3044 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
3045 		if (!device->bdev)
3046 			continue;
3047 
3048 		device->zone_info->reserved_active_zones =
3049 			metadata_reserve + system_reserve;
3050 	}
3051 	mutex_unlock(&fs_devices->device_list_mutex);
3052 
3053 	/* Release reservation for currently active block groups. */
3054 	spin_lock(&fs_info->zone_active_bgs_lock);
3055 	list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
3056 		struct btrfs_chunk_map *map = block_group->physical_map;
3057 
3058 		if (!(block_group->flags &
3059 		      (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
3060 			continue;
3061 
3062 		for (int i = 0; i < map->num_stripes; i++)
3063 			map->stripes[i].dev->zone_info->reserved_active_zones--;
3064 	}
3065 	spin_unlock(&fs_info->zone_active_bgs_lock);
3066 }
3067 
3068 /*
3069  * Reset the zones of unused block groups from @space_info->bytes_zone_unusable.
3070  *
3071  * @space_info:	the space to work on
3072  * @num_bytes:	targeting reclaim bytes
3073  *
3074  * This one resets the zones of a block group, so we can reuse the region
3075  * without removing the block group. On the other hand, btrfs_delete_unused_bgs()
3076  * just removes a block group and frees up the underlying zones. So, we still
3077  * need to allocate a new block group to reuse the zones.
3078  *
3079  * Resetting is faster than deleting/recreating a block group. It is similar
3080  * to freeing the logical space in regular (non-zoned) mode. However, we
3081  * cannot change the block group's profile with this operation.
3082  */
btrfs_reset_unused_block_groups(struct btrfs_space_info * space_info,u64 num_bytes)3083 int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info, u64 num_bytes)
3084 {
3085 	struct btrfs_fs_info *fs_info = space_info->fs_info;
3086 	const sector_t zone_size_sectors = fs_info->zone_size >> SECTOR_SHIFT;
3087 
3088 	if (!btrfs_is_zoned(fs_info))
3089 		return 0;
3090 
3091 	while (num_bytes > 0) {
3092 		struct btrfs_chunk_map *map;
3093 		struct btrfs_block_group *bg = NULL;
3094 		bool found = false;
3095 		u64 reclaimed = 0;
3096 
3097 		/*
3098 		 * Here, we choose a fully zone_unusable block group. It's
3099 		 * technically possible to reset a partly zone_unusable block
3100 		 * group, which still has some free space left. However,
3101 		 * handling that needs to cope with the allocation side, which
3102 		 * makes the logic more complex. So, let's handle the easy case
3103 		 * for now.
3104 		 */
3105 		spin_lock(&fs_info->unused_bgs_lock);
3106 		list_for_each_entry(bg, &fs_info->unused_bgs, bg_list) {
3107 			if ((bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != space_info->flags)
3108 				continue;
3109 
3110 			/*
3111 			 * Use trylock to avoid locking order violation. In
3112 			 * btrfs_reclaim_bgs_work(), the lock order is
3113 			 * &bg->lock -> &fs_info->unused_bgs_lock. We skip a
3114 			 * block group if we cannot take its lock.
3115 			 */
3116 			if (!spin_trylock(&bg->lock))
3117 				continue;
3118 			if (btrfs_is_block_group_used(bg) || bg->zone_unusable < bg->length) {
3119 				spin_unlock(&bg->lock);
3120 				continue;
3121 			}
3122 			spin_unlock(&bg->lock);
3123 			found = true;
3124 			break;
3125 		}
3126 		if (!found) {
3127 			spin_unlock(&fs_info->unused_bgs_lock);
3128 			return 0;
3129 		}
3130 
3131 		list_del_init(&bg->bg_list);
3132 		btrfs_put_block_group(bg);
3133 		spin_unlock(&fs_info->unused_bgs_lock);
3134 
3135 		/*
3136 		 * Since the block group is fully zone_unusable and we cannot
3137 		 * allocate from this block group anymore, we don't need to set
3138 		 * this block group read-only.
3139 		 */
3140 
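		/*
		 * Reset every zone backing this block group so the region can
		 * be rewritten, then return its zone_unusable bytes to the
		 * free space accounting below.
		 */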
3141 		down_read(&fs_info->dev_replace.rwsem);
3142 		map = bg->physical_map;
3143 		for (int i = 0; i < map->num_stripes; i++) {
3144 			struct btrfs_io_stripe *stripe = &map->stripes[i];
3145 			unsigned int nofs_flags;
3146 			int ret;
3147 
3148 			nofs_flags = memalloc_nofs_save();
3149 			ret = blkdev_zone_mgmt(stripe->dev->bdev, REQ_OP_ZONE_RESET,
3150 					       stripe->physical >> SECTOR_SHIFT,
3151 					       zone_size_sectors);
3152 			memalloc_nofs_restore(nofs_flags);
3153 
3154 			if (ret) {
3155 				up_read(&fs_info->dev_replace.rwsem);
3156 				return ret;
3157 			}
3158 		}
3159 		up_read(&fs_info->dev_replace.rwsem);
3160 
3161 		spin_lock(&space_info->lock);
3162 		spin_lock(&bg->lock);
3163 		ASSERT(!btrfs_is_block_group_used(bg));
3164 		if (bg->ro) {
3165 			spin_unlock(&bg->lock);
3166 			spin_unlock(&space_info->lock);
3167 			continue;
3168 		}
3169 
3170 		reclaimed = bg->alloc_offset;
3171 		bg->zone_unusable = bg->length - bg->zone_capacity;
3172 		bg->alloc_offset = 0;
3173 		/*
3174 		 * This holds because we currently only reset block groups that
3175 		 * were fully used and then freed.
3176 		 */
3177 		ASSERT(reclaimed == bg->zone_capacity,
3178 		       "reclaimed=%llu bg->zone_capacity=%llu", reclaimed, bg->zone_capacity);
3179 		bg->free_space_ctl->free_space += reclaimed;
3180 		space_info->bytes_zone_unusable -= reclaimed;
3181 		spin_unlock(&bg->lock);
3182 		btrfs_return_free_space(space_info, reclaimed);
3183 		spin_unlock(&space_info->lock);
3184 
3185 		if (num_bytes <= reclaimed)
3186 			break;
3187 		num_bytes -= reclaimed;
3188 	}
3189 
3190 	return 0;
3191 }
3192 
btrfs_show_zoned_stats(struct btrfs_fs_info * fs_info,struct seq_file * seq)3193 void btrfs_show_zoned_stats(struct btrfs_fs_info *fs_info, struct seq_file *seq)
3194 {
3195 	struct btrfs_block_group *bg;
3196 	u64 data_reloc_bg;
3197 	u64 treelog_bg;
3198 
3199 	seq_puts(seq, "\n  zoned statistics:\n");
3200 
3201 	spin_lock(&fs_info->zone_active_bgs_lock);
3202 	seq_printf(seq, "\tactive block-groups: %zu\n",
3203 			     list_count_nodes(&fs_info->zone_active_bgs));
3204 	spin_unlock(&fs_info->zone_active_bgs_lock);
3205 
3206 	spin_lock(&fs_info->unused_bgs_lock);
3207 	seq_printf(seq, "\t  reclaimable: %zu\n",
3208 			     list_count_nodes(&fs_info->reclaim_bgs));
3209 	seq_printf(seq, "\t  unused: %zu\n", list_count_nodes(&fs_info->unused_bgs));
3210 	spin_unlock(&fs_info->unused_bgs_lock);
3211 
3212 	seq_printf(seq, "\t  need reclaim: %s\n",
3213 		   str_true_false(btrfs_zoned_should_reclaim(fs_info)));
3214 
3215 	data_reloc_bg = data_race(fs_info->data_reloc_bg);
3216 	if (data_reloc_bg)
3217 		seq_printf(seq, "\tdata relocation block-group: %llu\n",
3218 			   data_reloc_bg);
3219 	treelog_bg = data_race(fs_info->treelog_bg);
3220 	if (treelog_bg)
3221 		seq_printf(seq, "\ttree-log block-group: %llu\n", treelog_bg);
3222 
3223 	spin_lock(&fs_info->zone_active_bgs_lock);
3224 	seq_puts(seq, "\tactive zones:\n");
3225 	list_for_each_entry(bg, &fs_info->zone_active_bgs, active_bg_list) {
3226 		u64 start;
3227 		u64 alloc_offset;
3228 		u64 used;
3229 		u64 reserved;
3230 		u64 zone_unusable;
3231 		const char *typestr = btrfs_space_info_type_str(bg->space_info);
3232 
3233 		spin_lock(&bg->lock);
3234 		start = bg->start;
3235 		alloc_offset = bg->alloc_offset;
3236 		used = bg->used;
3237 		reserved = bg->reserved;
3238 		zone_unusable = bg->zone_unusable;
3239 		spin_unlock(&bg->lock);
3240 
3241 		seq_printf(seq,
3242 			   "\t  start: %llu, wp: %llu used: %llu, reserved: %llu, unusable: %llu (%s)\n",
3243 			   start, alloc_offset, used, reserved, zone_unusable, typestr);
3244 	}
3245 	spin_unlock(&fs_info->zone_active_bgs_lock);
3246 }
3247