/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_ZONED_H
#define BTRFS_ZONED_H

#include <linux/types.h>
#include <linux/blkdev.h>
#include "volumes.h"
#include "disk-io.h"
#include "block-group.h"

/*
 * Block groups with more than this percentage of unusable space will be
 * scheduled for background reclaim.
 */
#define BTRFS_DEFAULT_RECLAIM_THRESH 75

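/*
 * Per-device zone information, attached to btrfs_device::zone_info.  The
 * seq_zones and empty_zones bitmaps carry one bit per zone, indexed by zone
 * number (device byte offset >> zone_size_shift); sb_zones caches the
 * reported zone descriptors for the pair of zones backing each superblock
 * mirror.
 */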
struct btrfs_zoned_device_info {
	/*
	 * Number of zones, zone size and types of zones if bdev is a
	 * zoned block device.
	 */
	u64 zone_size;
	u8  zone_size_shift;
	u64 max_zone_append_size;
	u32 nr_zones;
	unsigned long *seq_zones;
	unsigned long *empty_zones;
	struct blk_zone sb_zones[2 * BTRFS_SUPER_MIRROR_MAX];
};

#ifdef CONFIG_BLK_DEV_ZONED
int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
		       struct blk_zone *zone);
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info);
int btrfs_get_dev_zone_info(struct btrfs_device *device);
void btrfs_destroy_dev_zone_info(struct btrfs_device *device);
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info);
int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret);
int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret);
void btrfs_advance_sb_log(struct btrfs_device *device, int mirror);
int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror);
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes);
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes);
int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size);
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new);
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache);
void btrfs_redirty_list_add(struct btrfs_transaction *trans,
			    struct extent_buffer *eb);
void btrfs_free_redirty_list(struct btrfs_transaction *trans);
bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em);
void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
				 struct bio *bio);
void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered);
bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb,
				    struct btrfs_block_group **cache_ret);
void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
				     struct extent_buffer *eb);
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
				  u64 physical_start, u64 physical_pos);
#else /* CONFIG_BLK_DEV_ZONED */
static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
				     struct blk_zone *zone)
{
	return 0;
}

static inline int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
	return 0;
}

static inline int btrfs_get_dev_zone_info(struct btrfs_device *device)
{
	return 0;
}

static inline void btrfs_destroy_dev_zone_info(struct btrfs_device *device) { }

static inline int btrfs_check_zoned_mode(const struct btrfs_fs_info *fs_info)
{
	if (!btrfs_is_zoned(fs_info))
		return 0;

	btrfs_err(fs_info, "zoned block device support is not enabled");
	return -EOPNOTSUPP;
}

static inline int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
{
	return 0;
}

static inline int btrfs_sb_log_location_bdev(struct block_device *bdev,
					     int mirror, int rw, u64 *bytenr_ret)
{
	*bytenr_ret = btrfs_sb_offset(mirror);
	return 0;
}

static inline int btrfs_sb_log_location(struct btrfs_device *device, int mirror,
					int rw, u64 *bytenr_ret)
{
	*bytenr_ret = btrfs_sb_offset(mirror);
	return 0;
}

static inline void btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{ }

static inline int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
	return 0;
}

static inline u64 btrfs_find_allocatable_zones(struct btrfs_device *device,
					       u64 hole_start, u64 hole_end,
					       u64 num_bytes)
{
	return hole_start;
}

static inline int btrfs_reset_device_zone(struct btrfs_device *device,
					  u64 physical, u64 length, u64 *bytes)
{
	*bytes = 0;
	return 0;
}

static inline int btrfs_ensure_empty_zones(struct btrfs_device *device,
					   u64 start, u64 size)
{
	return 0;
}

static inline int btrfs_load_block_group_zone_info(
		struct btrfs_block_group *cache, bool new)
{
	return 0;
}

static inline void btrfs_calc_zone_unusable(struct btrfs_block_group *cache) { }

static inline void btrfs_redirty_list_add(struct btrfs_transaction *trans,
					  struct extent_buffer *eb) { }
static inline void btrfs_free_redirty_list(struct btrfs_transaction *trans) { }

static inline bool btrfs_use_zone_append(struct btrfs_inode *inode,
					 struct extent_map *em)
{
	return false;
}

static inline void btrfs_record_physical_zoned(struct inode *inode,
					       u64 file_offset, struct bio *bio)
{
}

static inline void btrfs_rewrite_logical_zoned(
				struct btrfs_ordered_extent *ordered) { }

static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
						  struct extent_buffer *eb,
						  struct btrfs_block_group **cache_ret)
{
	return true;
}

static inline void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
						   struct extent_buffer *eb)
{
}

static inline int btrfs_zoned_issue_zeroout(struct btrfs_device *device,
					    u64 physical, u64 length)
{
	return -EOPNOTSUPP;
}

static inline int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev,
						u64 logical, u64 physical_start,
						u64 physical_pos)
{
	return -EOPNOTSUPP;
}

#endif /* CONFIG_BLK_DEV_ZONED */

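/*
 * btrfs_dev_is_sequential() and the other bitmap helpers below translate a
 * device byte offset into a zone number by shifting with zone_size_shift and
 * consult the per-device bitmaps populated by btrfs_get_dev_zone_info().  A
 * NULL zone_info means the device is a regular (non-zoned) block device.
 *
 * Illustrative use only (not lifted from a specific call site): a caller that
 * wants to reclaim a fully written zone would typically do
 *
 *	if (btrfs_can_zone_reset(device, physical, length))
 *		ret = btrfs_reset_device_zone(device, physical, length, &bytes);
 */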
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return false;

	return test_bit(pos >> zone_info->zone_size_shift, zone_info->seq_zones);
}

static inline bool btrfs_dev_is_empty_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return true;

	return test_bit(pos >> zone_info->zone_size_shift, zone_info->empty_zones);
}

static inline void btrfs_dev_set_empty_zone_bit(struct btrfs_device *device,
						u64 pos, bool set)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno;

	if (!zone_info)
		return;

	zno = pos >> zone_info->zone_size_shift;
	if (set)
		set_bit(zno, zone_info->empty_zones);
	else
		clear_bit(zno, zone_info->empty_zones);
}

static inline void btrfs_dev_set_zone_empty(struct btrfs_device *device, u64 pos)
{
	btrfs_dev_set_empty_zone_bit(device, pos, true);
}

static inline void btrfs_dev_clear_zone_empty(struct btrfs_device *device, u64 pos)
{
	btrfs_dev_set_empty_zone_bit(device, pos, false);
}

static inline bool btrfs_check_device_zone_type(const struct btrfs_fs_info *fs_info,
						struct block_device *bdev)
{
	if (btrfs_is_zoned(fs_info)) {
		/*
		 * We can allow a regular device on a zoned filesystem, because
		 * we will emulate the zoned capabilities.
		 */
		if (!bdev_is_zoned(bdev))
			return true;

		return fs_info->zone_size ==
			(bdev_zone_sectors(bdev) << SECTOR_SHIFT);
	}

	/* Do not allow a host-managed zoned device on a non-zoned filesystem. */
	return bdev_zoned_model(bdev) != BLK_ZONED_HM;
}

static inline bool btrfs_check_super_location(struct btrfs_device *device, u64 pos)
{
	/*
	 * On a non-zoned device, any address is OK. On a zoned device, only
	 * non-SEQUENTIAL WRITE REQUIRED zones can hold a superblock.
	 */
	return device->zone_info == NULL || !btrfs_dev_is_sequential(device, pos);
}

static inline bool btrfs_can_zone_reset(struct btrfs_device *device,
					u64 physical, u64 length)
{
	u64 zone_size;

	if (!btrfs_dev_is_sequential(device, physical))
		return false;

	zone_size = device->zone_info->zone_size;
	if (!IS_ALIGNED(physical, zone_size) || !IS_ALIGNED(length, zone_size))
		return false;

	return true;
}

static inline void btrfs_zoned_meta_io_lock(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_is_zoned(fs_info))
		return;
	mutex_lock(&fs_info->zoned_meta_io_lock);
}

static inline void btrfs_zoned_meta_io_unlock(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_is_zoned(fs_info))
		return;
	mutex_unlock(&fs_info->zoned_meta_io_lock);
}

static inline void btrfs_clear_treelog_bg(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	if (!btrfs_is_zoned(fs_info))
		return;

	spin_lock(&fs_info->treelog_bg_lock);
	if (fs_info->treelog_bg == bg->start)
		fs_info->treelog_bg = 0;
	spin_unlock(&fs_info->treelog_bg_lock);
}

#endif /* BTRFS_ZONED_H */