xref: /linux/fs/zonefs/super.c (revision 95298d63c67673c654c08952672d016212b26054)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Simple file system for zoned block devices exposing zones as files.
4  *
5  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
6  */
7 #include <linux/module.h>
8 #include <linux/fs.h>
9 #include <linux/magic.h>
10 #include <linux/iomap.h>
11 #include <linux/init.h>
12 #include <linux/slab.h>
13 #include <linux/blkdev.h>
14 #include <linux/statfs.h>
15 #include <linux/writeback.h>
16 #include <linux/quotaops.h>
17 #include <linux/seq_file.h>
18 #include <linux/parser.h>
19 #include <linux/uio.h>
20 #include <linux/mman.h>
21 #include <linux/sched/mm.h>
22 #include <linux/crc32.h>
23 #include <linux/task_io_accounting_ops.h>
24 
25 #include "zonefs.h"
26 
27 static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
28 			      unsigned int flags, struct iomap *iomap,
29 			      struct iomap *srcmap)
30 {
31 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
32 	struct super_block *sb = inode->i_sb;
33 	loff_t isize;
34 
35 	/* All I/Os should always be within the file maximum size */
36 	if (WARN_ON_ONCE(offset + length > zi->i_max_size))
37 		return -EIO;
38 
39 	/*
40 	 * Sequential zones can only accept direct writes. This is already
41 	 * checked when writes are issued, so warn if we see a page writeback
42 	 * operation.
43 	 */
44 	if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
45 			 (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)))
46 		return -EIO;
47 
48 	/*
49 	 * For conventional zones, all blocks are always mapped. For sequential
50 	 * zones, all blocks after always mapped below the inode size (zone
51 	 * write pointer) and unwriten beyond.
52 	 */
53 	mutex_lock(&zi->i_truncate_mutex);
54 	isize = i_size_read(inode);
55 	if (offset >= isize)
56 		iomap->type = IOMAP_UNWRITTEN;
57 	else
58 		iomap->type = IOMAP_MAPPED;
59 	if (flags & IOMAP_WRITE)
60 		length = zi->i_max_size - offset;
61 	else
62 		length = min(length, isize - offset);
63 	mutex_unlock(&zi->i_truncate_mutex);
64 
65 	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
66 	iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
67 	iomap->bdev = inode->i_sb->s_bdev;
68 	iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
69 
70 	return 0;
71 }
72 
73 static const struct iomap_ops zonefs_iomap_ops = {
74 	.iomap_begin	= zonefs_iomap_begin,
75 };
76 
77 static int zonefs_readpage(struct file *unused, struct page *page)
78 {
79 	return iomap_readpage(page, &zonefs_iomap_ops);
80 }
81 
82 static void zonefs_readahead(struct readahead_control *rac)
83 {
84 	iomap_readahead(rac, &zonefs_iomap_ops);
85 }
86 
87 /*
88  * Map blocks for page writeback. This is used only on conventional zone files,
89  * which implies that the page range can only be within the fixed inode size.
90  */
91 static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
92 			     struct inode *inode, loff_t offset)
93 {
94 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
95 
96 	if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
97 		return -EIO;
98 	if (WARN_ON_ONCE(offset >= i_size_read(inode)))
99 		return -EIO;
100 
101 	/* If the mapping is already OK, nothing needs to be done */
102 	if (offset >= wpc->iomap.offset &&
103 	    offset < wpc->iomap.offset + wpc->iomap.length)
104 		return 0;
105 
106 	return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset,
107 				  IOMAP_WRITE, &wpc->iomap, NULL);
108 }
109 
110 static const struct iomap_writeback_ops zonefs_writeback_ops = {
111 	.map_blocks		= zonefs_map_blocks,
112 };
113 
114 static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
115 {
116 	struct iomap_writepage_ctx wpc = { };
117 
118 	return iomap_writepage(page, wbc, &wpc, &zonefs_writeback_ops);
119 }
120 
121 static int zonefs_writepages(struct address_space *mapping,
122 			     struct writeback_control *wbc)
123 {
124 	struct iomap_writepage_ctx wpc = { };
125 
126 	return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
127 }
128 
129 static const struct address_space_operations zonefs_file_aops = {
130 	.readpage		= zonefs_readpage,
131 	.readahead		= zonefs_readahead,
132 	.writepage		= zonefs_writepage,
133 	.writepages		= zonefs_writepages,
134 	.set_page_dirty		= iomap_set_page_dirty,
135 	.releasepage		= iomap_releasepage,
136 	.invalidatepage		= iomap_invalidatepage,
137 	.migratepage		= iomap_migrate_page,
138 	.is_partially_uptodate	= iomap_is_partially_uptodate,
139 	.error_remove_page	= generic_error_remove_page,
140 	.direct_IO		= noop_direct_IO,
141 };
142 
143 static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
144 {
145 	struct super_block *sb = inode->i_sb;
146 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
147 	loff_t old_isize = i_size_read(inode);
148 	loff_t nr_blocks;
149 
150 	if (new_isize == old_isize)
151 		return;
152 
153 	spin_lock(&sbi->s_lock);
154 
155 	/*
156 	 * This may be called for an update after an IO error.
157 	 * So beware of the values seen.
158 	 */
159 	if (new_isize < old_isize) {
160 		nr_blocks = (old_isize - new_isize) >> sb->s_blocksize_bits;
161 		if (sbi->s_used_blocks > nr_blocks)
162 			sbi->s_used_blocks -= nr_blocks;
163 		else
164 			sbi->s_used_blocks = 0;
165 	} else {
166 		sbi->s_used_blocks +=
167 			(new_isize - old_isize) >> sb->s_blocksize_bits;
168 		if (sbi->s_used_blocks > sbi->s_blocks)
169 			sbi->s_used_blocks = sbi->s_blocks;
170 	}
171 
172 	spin_unlock(&sbi->s_lock);
173 }
174 
175 /*
176  * Check a zone condition and adjust its file inode access permissions for
177  * offline and readonly zones. Return the inode size corresponding to the
178  * amount of readable data in the zone.
179  */
180 static loff_t zonefs_check_zone_condition(struct inode *inode,
181 					  struct blk_zone *zone, bool warn,
182 					  bool mount)
183 {
184 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
185 
186 	switch (zone->cond) {
187 	case BLK_ZONE_COND_OFFLINE:
188 		/*
189 		 * Dead zone: make the inode immutable, disable all accesses
190 		 * and set the file size to 0 (zone wp set to zone start).
191 		 */
192 		if (warn)
193 			zonefs_warn(inode->i_sb, "inode %lu: offline zone\n",
194 				    inode->i_ino);
195 		inode->i_flags |= S_IMMUTABLE;
196 		inode->i_mode &= ~0777;
197 		zone->wp = zone->start;
198 		return 0;
199 	case BLK_ZONE_COND_READONLY:
200 		/*
201 		 * The write pointer of read-only zones is invalid. If such a
202 		 * zone is found during mount, the file size cannot be retrieved
203 		 * so we treat the zone as offline (mount == true case).
204 		 * Otherwise, keep the file size as it was when last updated
205 		 * so that the user can recover data. In both cases, writes are
206 		 * always disabled for the zone.
207 		 */
208 		if (warn)
209 			zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n",
210 				    inode->i_ino);
211 		inode->i_flags |= S_IMMUTABLE;
212 		if (mount) {
213 			zone->cond = BLK_ZONE_COND_OFFLINE;
214 			inode->i_mode &= ~0777;
215 			zone->wp = zone->start;
216 			return 0;
217 		}
218 		inode->i_mode &= ~0222;
219 		return i_size_read(inode);
220 	default:
221 		if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
222 			return zi->i_max_size;
223 		return (zone->wp - zone->start) << SECTOR_SHIFT;
224 	}
225 }
226 
227 struct zonefs_ioerr_data {
228 	struct inode	*inode;
229 	bool		write;
230 };
231 
232 static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
233 			      void *data)
234 {
235 	struct zonefs_ioerr_data *err = data;
236 	struct inode *inode = err->inode;
237 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
238 	struct super_block *sb = inode->i_sb;
239 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
240 	loff_t isize, data_size;
241 
242 	/*
243 	 * Check the zone condition: if the zone is not "bad" (offline or
244 	 * read-only), read errors are simply signaled to the IO issuer as long
245 	 * as there is no inconsistency between the inode size and the amount of
246 	 * data writen in the zone (data_size).
247 	 */
248 	data_size = zonefs_check_zone_condition(inode, zone, true, false);
249 	isize = i_size_read(inode);
250 	if (zone->cond != BLK_ZONE_COND_OFFLINE &&
251 	    zone->cond != BLK_ZONE_COND_READONLY &&
252 	    !err->write && isize == data_size)
253 		return 0;
254 
255 	/*
256 	 * At this point, we detected either a bad zone or an inconsistency
257 	 * between the inode size and the amount of data written in the zone.
258 	 * For the latter case, the cause may be a write IO error or an external
259 	 * action on the device. Two error patterns exist:
260 	 * 1) The inode size is lower than the amount of data in the zone:
261 	 *    a write operation partially failed and data was writen at the end
262 	 *    of the file. This can happen in the case of a large direct IO
263 	 *    needing several BIOs and/or write requests to be processed.
264 	 * 2) The inode size is larger than the amount of data in the zone:
265 	 *    this can happen with a deferred write error with the use of the
266 	 *    device side write cache after getting successful write IO
267 	 *    completions. Other possibilities are (a) an external corruption,
268 	 *    e.g. an application reset the zone directly, or (b) the device
269 	 *    has a serious problem (e.g. firmware bug).
270 	 *
271 	 * In all cases, warn about inode size inconsistency and handle the
272 	 * IO error according to the zone condition and to the mount options.
273 	 */
274 	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && isize != data_size)
275 		zonefs_warn(sb, "inode %lu: invalid size %lld (should be %lld)\n",
276 			    inode->i_ino, isize, data_size);
277 
278 	/*
279 	 * First handle bad zones signaled by hardware. The mount options
280 	 * errors=zone-ro and errors=zone-offline result in changing the
281 	 * zone condition to read-only and offline respectively, as if the
282 	 * condition was signaled by the hardware.
283 	 */
284 	if (zone->cond == BLK_ZONE_COND_OFFLINE ||
285 	    sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL) {
286 		zonefs_warn(sb, "inode %lu: read/write access disabled\n",
287 			    inode->i_ino);
288 		if (zone->cond != BLK_ZONE_COND_OFFLINE) {
289 			zone->cond = BLK_ZONE_COND_OFFLINE;
290 			data_size = zonefs_check_zone_condition(inode, zone,
291 								false, false);
292 		}
293 	} else if (zone->cond == BLK_ZONE_COND_READONLY ||
294 		   sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) {
295 		zonefs_warn(sb, "inode %lu: write access disabled\n",
296 			    inode->i_ino);
297 		if (zone->cond != BLK_ZONE_COND_READONLY) {
298 			zone->cond = BLK_ZONE_COND_READONLY;
299 			data_size = zonefs_check_zone_condition(inode, zone,
300 								false, false);
301 		}
302 	}
303 
304 	/*
305 	 * If error=remount-ro was specified, any error result in remounting
306 	 * the volume as read-only.
307 	 */
308 	if ((sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO) && !sb_rdonly(sb)) {
309 		zonefs_warn(sb, "remounting filesystem read-only\n");
310 		sb->s_flags |= SB_RDONLY;
311 	}
312 
313 	/*
314 	 * Update block usage stats and the inode size  to prevent access to
315 	 * invalid data.
316 	 */
317 	zonefs_update_stats(inode, data_size);
318 	i_size_write(inode, data_size);
319 	zi->i_wpoffset = data_size;
320 
321 	return 0;
322 }
323 
324 /*
325  * When an file IO error occurs, check the file zone to see if there is a change
326  * in the zone condition (e.g. offline or read-only). For a failed write to a
327  * sequential zone, the zone write pointer position must also be checked to
328  * eventually correct the file size and zonefs inode write pointer offset
329  * (which can be out of sync with the drive due to partial write failures).
330  */
331 static void zonefs_io_error(struct inode *inode, bool write)
332 {
333 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
334 	struct super_block *sb = inode->i_sb;
335 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
336 	unsigned int noio_flag;
337 	unsigned int nr_zones =
338 		zi->i_max_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
339 	struct zonefs_ioerr_data err = {
340 		.inode = inode,
341 		.write = write,
342 	};
343 	int ret;
344 
345 	mutex_lock(&zi->i_truncate_mutex);
346 
347 	/*
348 	 * Memory allocations in blkdev_report_zones() can trigger a memory
349 	 * reclaim which may in turn cause a recursion into zonefs as well as
350 	 * struct request allocations for the same device. The former case may
351 	 * end up in a deadlock on the inode truncate mutex, while the latter
352 	 * may prevent IO forward progress. Executing the report zones under
353 	 * the GFP_NOIO context avoids both problems.
354 	 */
355 	noio_flag = memalloc_noio_save();
356 	ret = blkdev_report_zones(sb->s_bdev, zi->i_zsector, nr_zones,
357 				  zonefs_io_error_cb, &err);
358 	if (ret != nr_zones)
359 		zonefs_err(sb, "Get inode %lu zone information failed %d\n",
360 			   inode->i_ino, ret);
361 	memalloc_noio_restore(noio_flag);
362 
363 	mutex_unlock(&zi->i_truncate_mutex);
364 }
365 
366 static int zonefs_file_truncate(struct inode *inode, loff_t isize)
367 {
368 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
369 	loff_t old_isize;
370 	enum req_opf op;
371 	int ret = 0;
372 
373 	/*
374 	 * Only sequential zone files can be truncated and truncation is allowed
375 	 * only down to a 0 size, which is equivalent to a zone reset, and to
376 	 * the maximum file size, which is equivalent to a zone finish.
377 	 */
378 	if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
379 		return -EPERM;
380 
381 	if (!isize)
382 		op = REQ_OP_ZONE_RESET;
383 	else if (isize == zi->i_max_size)
384 		op = REQ_OP_ZONE_FINISH;
385 	else
386 		return -EPERM;
387 
388 	inode_dio_wait(inode);
389 
390 	/* Serialize against page faults */
391 	down_write(&zi->i_mmap_sem);
392 
393 	/* Serialize against zonefs_iomap_begin() */
394 	mutex_lock(&zi->i_truncate_mutex);
395 
396 	old_isize = i_size_read(inode);
397 	if (isize == old_isize)
398 		goto unlock;
399 
400 	ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
401 			       zi->i_max_size >> SECTOR_SHIFT, GFP_NOFS);
402 	if (ret) {
403 		zonefs_err(inode->i_sb,
404 			   "Zone management operation at %llu failed %d",
405 			   zi->i_zsector, ret);
406 		goto unlock;
407 	}
408 
409 	zonefs_update_stats(inode, isize);
410 	truncate_setsize(inode, isize);
411 	zi->i_wpoffset = isize;
412 
413 unlock:
414 	mutex_unlock(&zi->i_truncate_mutex);
415 	up_write(&zi->i_mmap_sem);
416 
417 	return ret;
418 }
419 
420 static int zonefs_inode_setattr(struct dentry *dentry, struct iattr *iattr)
421 {
422 	struct inode *inode = d_inode(dentry);
423 	int ret;
424 
425 	if (unlikely(IS_IMMUTABLE(inode)))
426 		return -EPERM;
427 
428 	ret = setattr_prepare(dentry, iattr);
429 	if (ret)
430 		return ret;
431 
432 	/*
433 	 * Since files and directories cannot be created nor deleted, do not
434 	 * allow setting any write attributes on the sub-directories grouping
435 	 * files by zone type.
436 	 */
437 	if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
438 	    (iattr->ia_mode & 0222))
439 		return -EPERM;
440 
441 	if (((iattr->ia_valid & ATTR_UID) &&
442 	     !uid_eq(iattr->ia_uid, inode->i_uid)) ||
443 	    ((iattr->ia_valid & ATTR_GID) &&
444 	     !gid_eq(iattr->ia_gid, inode->i_gid))) {
445 		ret = dquot_transfer(inode, iattr);
446 		if (ret)
447 			return ret;
448 	}
449 
450 	if (iattr->ia_valid & ATTR_SIZE) {
451 		ret = zonefs_file_truncate(inode, iattr->ia_size);
452 		if (ret)
453 			return ret;
454 	}
455 
456 	setattr_copy(inode, iattr);
457 
458 	return 0;
459 }
460 
461 static const struct inode_operations zonefs_file_inode_operations = {
462 	.setattr	= zonefs_inode_setattr,
463 };
464 
465 static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
466 			     int datasync)
467 {
468 	struct inode *inode = file_inode(file);
469 	int ret = 0;
470 
471 	if (unlikely(IS_IMMUTABLE(inode)))
472 		return -EPERM;
473 
474 	/*
475 	 * Since only direct writes are allowed in sequential files, page cache
476 	 * flush is needed only for conventional zone files.
477 	 */
478 	if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
479 		ret = file_write_and_wait_range(file, start, end);
480 	if (!ret)
481 		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
482 
483 	if (ret)
484 		zonefs_io_error(inode, true);
485 
486 	return ret;
487 }
488 
489 static vm_fault_t zonefs_filemap_fault(struct vm_fault *vmf)
490 {
491 	struct zonefs_inode_info *zi = ZONEFS_I(file_inode(vmf->vma->vm_file));
492 	vm_fault_t ret;
493 
494 	down_read(&zi->i_mmap_sem);
495 	ret = filemap_fault(vmf);
496 	up_read(&zi->i_mmap_sem);
497 
498 	return ret;
499 }
500 
501 static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
502 {
503 	struct inode *inode = file_inode(vmf->vma->vm_file);
504 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
505 	vm_fault_t ret;
506 
507 	if (unlikely(IS_IMMUTABLE(inode)))
508 		return VM_FAULT_SIGBUS;
509 
510 	/*
511 	 * Sanity check: only conventional zone files can have shared
512 	 * writeable mappings.
513 	 */
514 	if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
515 		return VM_FAULT_NOPAGE;
516 
517 	sb_start_pagefault(inode->i_sb);
518 	file_update_time(vmf->vma->vm_file);
519 
520 	/* Serialize against truncates */
521 	down_read(&zi->i_mmap_sem);
522 	ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
523 	up_read(&zi->i_mmap_sem);
524 
525 	sb_end_pagefault(inode->i_sb);
526 	return ret;
527 }
528 
529 static const struct vm_operations_struct zonefs_file_vm_ops = {
530 	.fault		= zonefs_filemap_fault,
531 	.map_pages	= filemap_map_pages,
532 	.page_mkwrite	= zonefs_filemap_page_mkwrite,
533 };
534 
535 static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
536 {
537 	/*
538 	 * Conventional zones accept random writes, so their files can support
539 	 * shared writable mappings. For sequential zone files, only read
540 	 * mappings are possible since there are no guarantees for write
541 	 * ordering between msync() and page cache writeback.
542 	 */
543 	if (ZONEFS_I(file_inode(file))->i_ztype == ZONEFS_ZTYPE_SEQ &&
544 	    (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
545 		return -EINVAL;
546 
547 	file_accessed(file);
548 	vma->vm_ops = &zonefs_file_vm_ops;
549 
550 	return 0;
551 }
552 
553 static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
554 {
555 	loff_t isize = i_size_read(file_inode(file));
556 
557 	/*
558 	 * Seeks are limited to below the zone size for conventional zones
559 	 * and below the zone write pointer for sequential zones. In both
560 	 * cases, this limit is the inode size.
561 	 */
562 	return generic_file_llseek_size(file, offset, whence, isize, isize);
563 }
564 
565 static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
566 					int error, unsigned int flags)
567 {
568 	struct inode *inode = file_inode(iocb->ki_filp);
569 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
570 
571 	if (error) {
572 		zonefs_io_error(inode, true);
573 		return error;
574 	}
575 
576 	if (size && zi->i_ztype != ZONEFS_ZTYPE_CNV) {
577 		/*
578 		 * Note that we may be seeing completions out of order,
579 		 * but that is not a problem since a write completed
580 		 * successfully necessarily means that all preceding writes
581 		 * were also successful. So we can safely increase the inode
582 		 * size to the write end location.
583 		 */
584 		mutex_lock(&zi->i_truncate_mutex);
585 		if (i_size_read(inode) < iocb->ki_pos + size) {
586 			zonefs_update_stats(inode, iocb->ki_pos + size);
587 			i_size_write(inode, iocb->ki_pos + size);
588 		}
589 		mutex_unlock(&zi->i_truncate_mutex);
590 	}
591 
592 	return 0;
593 }
594 
595 static const struct iomap_dio_ops zonefs_write_dio_ops = {
596 	.end_io			= zonefs_file_write_dio_end_io,
597 };
598 
599 static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
600 {
601 	struct inode *inode = file_inode(iocb->ki_filp);
602 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
603 	struct block_device *bdev = inode->i_sb->s_bdev;
604 	unsigned int max;
605 	struct bio *bio;
606 	ssize_t size;
607 	int nr_pages;
608 	ssize_t ret;
609 
610 	nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
611 	if (!nr_pages)
612 		return 0;
613 
614 	max = queue_max_zone_append_sectors(bdev_get_queue(bdev));
615 	max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
616 	iov_iter_truncate(from, max);
617 
618 	bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set);
619 	if (!bio)
620 		return -ENOMEM;
621 
622 	bio_set_dev(bio, bdev);
623 	bio->bi_iter.bi_sector = zi->i_zsector;
624 	bio->bi_write_hint = iocb->ki_hint;
625 	bio->bi_ioprio = iocb->ki_ioprio;
626 	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
627 	if (iocb->ki_flags & IOCB_DSYNC)
628 		bio->bi_opf |= REQ_FUA;
629 
630 	ret = bio_iov_iter_get_pages(bio, from);
631 	if (unlikely(ret)) {
632 		bio_io_error(bio);
633 		return ret;
634 	}
635 	size = bio->bi_iter.bi_size;
636 	task_io_account_write(ret);
637 
638 	if (iocb->ki_flags & IOCB_HIPRI)
639 		bio_set_polled(bio, iocb);
640 
641 	ret = submit_bio_wait(bio);
642 
643 	bio_put(bio);
644 
645 	zonefs_file_write_dio_end_io(iocb, size, ret, 0);
646 	if (ret >= 0) {
647 		iocb->ki_pos += size;
648 		return size;
649 	}
650 
651 	return ret;
652 }
653 
654 /*
655  * Handle direct writes. For sequential zone files, this is the only possible
656  * write path. For these files, check that the user is issuing writes
657  * sequentially from the end of the file. This code assumes that the block layer
658  * delivers write requests to the device in sequential order. This is always the
659  * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
660  * elevator feature is being used (e.g. mq-deadline). The block layer always
661  * automatically select such an elevator for zoned block devices during the
662  * device initialization.
663  */
664 static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
665 {
666 	struct inode *inode = file_inode(iocb->ki_filp);
667 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
668 	struct super_block *sb = inode->i_sb;
669 	bool sync = is_sync_kiocb(iocb);
670 	bool append = false;
671 	size_t count;
672 	ssize_t ret;
673 
674 	/*
675 	 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
676 	 * as this can cause write reordering (e.g. the first aio gets EAGAIN
677 	 * on the inode lock but the second goes through but is now unaligned).
678 	 */
679 	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync &&
680 	    (iocb->ki_flags & IOCB_NOWAIT))
681 		return -EOPNOTSUPP;
682 
683 	if (iocb->ki_flags & IOCB_NOWAIT) {
684 		if (!inode_trylock(inode))
685 			return -EAGAIN;
686 	} else {
687 		inode_lock(inode);
688 	}
689 
690 	ret = generic_write_checks(iocb, from);
691 	if (ret <= 0)
692 		goto inode_unlock;
693 
694 	iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
695 	count = iov_iter_count(from);
696 
697 	if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
698 		ret = -EINVAL;
699 		goto inode_unlock;
700 	}
701 
702 	/* Enforce sequential writes (append only) in sequential zones */
703 	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) {
704 		mutex_lock(&zi->i_truncate_mutex);
705 		if (iocb->ki_pos != zi->i_wpoffset) {
706 			mutex_unlock(&zi->i_truncate_mutex);
707 			ret = -EINVAL;
708 			goto inode_unlock;
709 		}
710 		mutex_unlock(&zi->i_truncate_mutex);
711 		append = sync;
712 	}
713 
714 	if (append)
715 		ret = zonefs_file_dio_append(iocb, from);
716 	else
717 		ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
718 				   &zonefs_write_dio_ops, sync);
719 	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
720 	    (ret > 0 || ret == -EIOCBQUEUED)) {
721 		if (ret > 0)
722 			count = ret;
723 		mutex_lock(&zi->i_truncate_mutex);
724 		zi->i_wpoffset += count;
725 		mutex_unlock(&zi->i_truncate_mutex);
726 	}
727 
728 inode_unlock:
729 	inode_unlock(inode);
730 
731 	return ret;
732 }
733 
734 static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
735 					  struct iov_iter *from)
736 {
737 	struct inode *inode = file_inode(iocb->ki_filp);
738 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
739 	ssize_t ret;
740 
741 	/*
742 	 * Direct IO writes are mandatory for sequential zone files so that the
743 	 * write IO issuing order is preserved.
744 	 */
745 	if (zi->i_ztype != ZONEFS_ZTYPE_CNV)
746 		return -EIO;
747 
748 	if (iocb->ki_flags & IOCB_NOWAIT) {
749 		if (!inode_trylock(inode))
750 			return -EAGAIN;
751 	} else {
752 		inode_lock(inode);
753 	}
754 
755 	ret = generic_write_checks(iocb, from);
756 	if (ret <= 0)
757 		goto inode_unlock;
758 
759 	iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
760 
761 	ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
762 	if (ret > 0)
763 		iocb->ki_pos += ret;
764 	else if (ret == -EIO)
765 		zonefs_io_error(inode, true);
766 
767 inode_unlock:
768 	inode_unlock(inode);
769 	if (ret > 0)
770 		ret = generic_write_sync(iocb, ret);
771 
772 	return ret;
773 }
774 
775 static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
776 {
777 	struct inode *inode = file_inode(iocb->ki_filp);
778 
779 	if (unlikely(IS_IMMUTABLE(inode)))
780 		return -EPERM;
781 
782 	if (sb_rdonly(inode->i_sb))
783 		return -EROFS;
784 
785 	/* Write operations beyond the zone size are not allowed */
786 	if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
787 		return -EFBIG;
788 
789 	if (iocb->ki_flags & IOCB_DIRECT)
790 		return zonefs_file_dio_write(iocb, from);
791 
792 	return zonefs_file_buffered_write(iocb, from);
793 }
794 
795 static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
796 				       int error, unsigned int flags)
797 {
798 	if (error) {
799 		zonefs_io_error(file_inode(iocb->ki_filp), false);
800 		return error;
801 	}
802 
803 	return 0;
804 }
805 
806 static const struct iomap_dio_ops zonefs_read_dio_ops = {
807 	.end_io			= zonefs_file_read_dio_end_io,
808 };
809 
810 static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
811 {
812 	struct inode *inode = file_inode(iocb->ki_filp);
813 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
814 	struct super_block *sb = inode->i_sb;
815 	loff_t isize;
816 	ssize_t ret;
817 
818 	/* Offline zones cannot be read */
819 	if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
820 		return -EPERM;
821 
822 	if (iocb->ki_pos >= zi->i_max_size)
823 		return 0;
824 
825 	if (iocb->ki_flags & IOCB_NOWAIT) {
826 		if (!inode_trylock_shared(inode))
827 			return -EAGAIN;
828 	} else {
829 		inode_lock_shared(inode);
830 	}
831 
832 	/* Limit read operations to written data */
833 	mutex_lock(&zi->i_truncate_mutex);
834 	isize = i_size_read(inode);
835 	if (iocb->ki_pos >= isize) {
836 		mutex_unlock(&zi->i_truncate_mutex);
837 		ret = 0;
838 		goto inode_unlock;
839 	}
840 	iov_iter_truncate(to, isize - iocb->ki_pos);
841 	mutex_unlock(&zi->i_truncate_mutex);
842 
843 	if (iocb->ki_flags & IOCB_DIRECT) {
844 		size_t count = iov_iter_count(to);
845 
846 		if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
847 			ret = -EINVAL;
848 			goto inode_unlock;
849 		}
850 		file_accessed(iocb->ki_filp);
851 		ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
852 				   &zonefs_read_dio_ops, is_sync_kiocb(iocb));
853 	} else {
854 		ret = generic_file_read_iter(iocb, to);
855 		if (ret == -EIO)
856 			zonefs_io_error(inode, false);
857 	}
858 
859 inode_unlock:
860 	inode_unlock_shared(inode);
861 
862 	return ret;
863 }
864 
865 static const struct file_operations zonefs_file_operations = {
866 	.open		= generic_file_open,
867 	.fsync		= zonefs_file_fsync,
868 	.mmap		= zonefs_file_mmap,
869 	.llseek		= zonefs_file_llseek,
870 	.read_iter	= zonefs_file_read_iter,
871 	.write_iter	= zonefs_file_write_iter,
872 	.splice_read	= generic_file_splice_read,
873 	.splice_write	= iter_file_splice_write,
874 	.iopoll		= iomap_dio_iopoll,
875 };
876 
877 static struct kmem_cache *zonefs_inode_cachep;
878 
879 static struct inode *zonefs_alloc_inode(struct super_block *sb)
880 {
881 	struct zonefs_inode_info *zi;
882 
883 	zi = kmem_cache_alloc(zonefs_inode_cachep, GFP_KERNEL);
884 	if (!zi)
885 		return NULL;
886 
887 	inode_init_once(&zi->i_vnode);
888 	mutex_init(&zi->i_truncate_mutex);
889 	init_rwsem(&zi->i_mmap_sem);
890 
891 	return &zi->i_vnode;
892 }
893 
894 static void zonefs_free_inode(struct inode *inode)
895 {
896 	kmem_cache_free(zonefs_inode_cachep, ZONEFS_I(inode));
897 }
898 
899 /*
900  * File system stat.
901  */
902 static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf)
903 {
904 	struct super_block *sb = dentry->d_sb;
905 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
906 	enum zonefs_ztype t;
907 	u64 fsid;
908 
909 	buf->f_type = ZONEFS_MAGIC;
910 	buf->f_bsize = sb->s_blocksize;
911 	buf->f_namelen = ZONEFS_NAME_MAX;
912 
913 	spin_lock(&sbi->s_lock);
914 
915 	buf->f_blocks = sbi->s_blocks;
916 	if (WARN_ON(sbi->s_used_blocks > sbi->s_blocks))
917 		buf->f_bfree = 0;
918 	else
919 		buf->f_bfree = buf->f_blocks - sbi->s_used_blocks;
920 	buf->f_bavail = buf->f_bfree;
921 
922 	for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
923 		if (sbi->s_nr_files[t])
924 			buf->f_files += sbi->s_nr_files[t] + 1;
925 	}
926 	buf->f_ffree = 0;
927 
928 	spin_unlock(&sbi->s_lock);
929 
930 	fsid = le64_to_cpup((void *)sbi->s_uuid.b) ^
931 		le64_to_cpup((void *)sbi->s_uuid.b + sizeof(u64));
932 	buf->f_fsid.val[0] = (u32)fsid;
933 	buf->f_fsid.val[1] = (u32)(fsid >> 32);
934 
935 	return 0;
936 }
937 
938 enum {
939 	Opt_errors_ro, Opt_errors_zro, Opt_errors_zol, Opt_errors_repair,
940 	Opt_err,
941 };
942 
943 static const match_table_t tokens = {
944 	{ Opt_errors_ro,	"errors=remount-ro"},
945 	{ Opt_errors_zro,	"errors=zone-ro"},
946 	{ Opt_errors_zol,	"errors=zone-offline"},
947 	{ Opt_errors_repair,	"errors=repair"},
948 	{ Opt_err,		NULL}
949 };
950 
951 static int zonefs_parse_options(struct super_block *sb, char *options)
952 {
953 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
954 	substring_t args[MAX_OPT_ARGS];
955 	char *p;
956 
957 	if (!options)
958 		return 0;
959 
960 	while ((p = strsep(&options, ",")) != NULL) {
961 		int token;
962 
963 		if (!*p)
964 			continue;
965 
966 		token = match_token(p, tokens, args);
967 		switch (token) {
968 		case Opt_errors_ro:
969 			sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
970 			sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_RO;
971 			break;
972 		case Opt_errors_zro:
973 			sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
974 			sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZRO;
975 			break;
976 		case Opt_errors_zol:
977 			sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
978 			sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZOL;
979 			break;
980 		case Opt_errors_repair:
981 			sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
982 			sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_REPAIR;
983 			break;
984 		default:
985 			return -EINVAL;
986 		}
987 	}
988 
989 	return 0;
990 }
991 
992 static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
993 {
994 	struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
995 
996 	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
997 		seq_puts(seq, ",errors=remount-ro");
998 	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
999 		seq_puts(seq, ",errors=zone-ro");
1000 	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
1001 		seq_puts(seq, ",errors=zone-offline");
1002 	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
1003 		seq_puts(seq, ",errors=repair");
1004 
1005 	return 0;
1006 }
1007 
1008 static int zonefs_remount(struct super_block *sb, int *flags, char *data)
1009 {
1010 	sync_filesystem(sb);
1011 
1012 	return zonefs_parse_options(sb, data);
1013 }
1014 
1015 static const struct super_operations zonefs_sops = {
1016 	.alloc_inode	= zonefs_alloc_inode,
1017 	.free_inode	= zonefs_free_inode,
1018 	.statfs		= zonefs_statfs,
1019 	.remount_fs	= zonefs_remount,
1020 	.show_options	= zonefs_show_options,
1021 };
1022 
1023 static const struct inode_operations zonefs_dir_inode_operations = {
1024 	.lookup		= simple_lookup,
1025 	.setattr	= zonefs_inode_setattr,
1026 };
1027 
1028 static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
1029 				  enum zonefs_ztype type)
1030 {
1031 	struct super_block *sb = parent->i_sb;
1032 
1033 	inode->i_ino = blkdev_nr_zones(sb->s_bdev->bd_disk) + type + 1;
1034 	inode_init_owner(inode, parent, S_IFDIR | 0555);
1035 	inode->i_op = &zonefs_dir_inode_operations;
1036 	inode->i_fop = &simple_dir_operations;
1037 	set_nlink(inode, 2);
1038 	inc_nlink(parent);
1039 }
1040 
1041 static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
1042 				   enum zonefs_ztype type)
1043 {
1044 	struct super_block *sb = inode->i_sb;
1045 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1046 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
1047 
1048 	inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
1049 	inode->i_mode = S_IFREG | sbi->s_perm;
1050 
1051 	zi->i_ztype = type;
1052 	zi->i_zsector = zone->start;
1053 	zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
1054 			       zone->len << SECTOR_SHIFT);
1055 	zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true);
1056 
1057 	inode->i_uid = sbi->s_uid;
1058 	inode->i_gid = sbi->s_gid;
1059 	inode->i_size = zi->i_wpoffset;
1060 	inode->i_blocks = zone->len;
1061 
1062 	inode->i_op = &zonefs_file_inode_operations;
1063 	inode->i_fop = &zonefs_file_operations;
1064 	inode->i_mapping->a_ops = &zonefs_file_aops;
1065 
1066 	sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
1067 	sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
1068 	sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;
1069 }
1070 
1071 static struct dentry *zonefs_create_inode(struct dentry *parent,
1072 					const char *name, struct blk_zone *zone,
1073 					enum zonefs_ztype type)
1074 {
1075 	struct inode *dir = d_inode(parent);
1076 	struct dentry *dentry;
1077 	struct inode *inode;
1078 
1079 	dentry = d_alloc_name(parent, name);
1080 	if (!dentry)
1081 		return NULL;
1082 
1083 	inode = new_inode(parent->d_sb);
1084 	if (!inode)
1085 		goto dput;
1086 
1087 	inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
1088 	if (zone)
1089 		zonefs_init_file_inode(inode, zone, type);
1090 	else
1091 		zonefs_init_dir_inode(dir, inode, type);
1092 	d_add(dentry, inode);
1093 	dir->i_size++;
1094 
1095 	return dentry;
1096 
1097 dput:
1098 	dput(dentry);
1099 
1100 	return NULL;
1101 }
1102 
1103 struct zonefs_zone_data {
1104 	struct super_block	*sb;
1105 	unsigned int		nr_zones[ZONEFS_ZTYPE_MAX];
1106 	struct blk_zone		*zones;
1107 };
1108 
1109 /*
1110  * Create a zone group and populate it with zone files.
1111  */
1112 static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
1113 				enum zonefs_ztype type)
1114 {
1115 	struct super_block *sb = zd->sb;
1116 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1117 	struct blk_zone *zone, *next, *end;
1118 	const char *zgroup_name;
1119 	char *file_name;
1120 	struct dentry *dir;
1121 	unsigned int n = 0;
1122 	int ret = -ENOMEM;
1123 
1124 	/* If the group is empty, there is nothing to do */
1125 	if (!zd->nr_zones[type])
1126 		return 0;
1127 
1128 	file_name = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
1129 	if (!file_name)
1130 		return -ENOMEM;
1131 
1132 	if (type == ZONEFS_ZTYPE_CNV)
1133 		zgroup_name = "cnv";
1134 	else
1135 		zgroup_name = "seq";
1136 
1137 	dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
1138 	if (!dir)
1139 		goto free;
1140 
1141 	/*
1142 	 * The first zone contains the super block: skip it.
1143 	 */
1144 	end = zd->zones + blkdev_nr_zones(sb->s_bdev->bd_disk);
1145 	for (zone = &zd->zones[1]; zone < end; zone = next) {
1146 
1147 		next = zone + 1;
1148 		if (zonefs_zone_type(zone) != type)
1149 			continue;
1150 
1151 		/*
1152 		 * For conventional zones, contiguous zones can be aggregated
1153 		 * together to form larger files. Note that this overwrites the
1154 		 * length of the first zone of the set of contiguous zones
1155 		 * aggregated together. If one offline or read-only zone is
1156 		 * found, assume that all zones aggregated have the same
1157 		 * condition.
1158 		 */
1159 		if (type == ZONEFS_ZTYPE_CNV &&
1160 		    (sbi->s_features & ZONEFS_F_AGGRCNV)) {
1161 			for (; next < end; next++) {
1162 				if (zonefs_zone_type(next) != type)
1163 					break;
1164 				zone->len += next->len;
1165 				if (next->cond == BLK_ZONE_COND_READONLY &&
1166 				    zone->cond != BLK_ZONE_COND_OFFLINE)
1167 					zone->cond = BLK_ZONE_COND_READONLY;
1168 				else if (next->cond == BLK_ZONE_COND_OFFLINE)
1169 					zone->cond = BLK_ZONE_COND_OFFLINE;
1170 			}
1171 		}
1172 
1173 		/*
1174 		 * Use the file number within its group as file name.
1175 		 */
1176 		snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
1177 		if (!zonefs_create_inode(dir, file_name, zone, type))
1178 			goto free;
1179 
1180 		n++;
1181 	}
1182 
1183 	zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
1184 		    zgroup_name, n, n > 1 ? "s" : "");
1185 
1186 	sbi->s_nr_files[type] = n;
1187 	ret = 0;
1188 
1189 free:
1190 	kfree(file_name);
1191 
1192 	return ret;
1193 }
1194 
1195 static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
1196 				   void *data)
1197 {
1198 	struct zonefs_zone_data *zd = data;
1199 
1200 	/*
1201 	 * Count the number of usable zones: the first zone at index 0 contains
1202 	 * the super block and is ignored.
1203 	 */
1204 	switch (zone->type) {
1205 	case BLK_ZONE_TYPE_CONVENTIONAL:
1206 		zone->wp = zone->start + zone->len;
1207 		if (idx)
1208 			zd->nr_zones[ZONEFS_ZTYPE_CNV]++;
1209 		break;
1210 	case BLK_ZONE_TYPE_SEQWRITE_REQ:
1211 	case BLK_ZONE_TYPE_SEQWRITE_PREF:
1212 		if (idx)
1213 			zd->nr_zones[ZONEFS_ZTYPE_SEQ]++;
1214 		break;
1215 	default:
1216 		zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
1217 			   zone->type);
1218 		return -EIO;
1219 	}
1220 
1221 	memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone));
1222 
1223 	return 0;
1224 }
1225 
1226 static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
1227 {
1228 	struct block_device *bdev = zd->sb->s_bdev;
1229 	int ret;
1230 
1231 	zd->zones = kvcalloc(blkdev_nr_zones(bdev->bd_disk),
1232 			     sizeof(struct blk_zone), GFP_KERNEL);
1233 	if (!zd->zones)
1234 		return -ENOMEM;
1235 
1236 	/* Get zones information from the device */
1237 	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
1238 				  zonefs_get_zone_info_cb, zd);
1239 	if (ret < 0) {
1240 		zonefs_err(zd->sb, "Zone report failed %d\n", ret);
1241 		return ret;
1242 	}
1243 
1244 	if (ret != blkdev_nr_zones(bdev->bd_disk)) {
1245 		zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n",
1246 			   ret, blkdev_nr_zones(bdev->bd_disk));
1247 		return -EIO;
1248 	}
1249 
1250 	return 0;
1251 }
1252 
1253 static inline void zonefs_cleanup_zone_info(struct zonefs_zone_data *zd)
1254 {
1255 	kvfree(zd->zones);
1256 }
1257 
1258 /*
1259  * Read super block information from the device.
1260  */
1261 static int zonefs_read_super(struct super_block *sb)
1262 {
1263 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1264 	struct zonefs_super *super;
1265 	u32 crc, stored_crc;
1266 	struct page *page;
1267 	struct bio_vec bio_vec;
1268 	struct bio bio;
1269 	int ret;
1270 
1271 	page = alloc_page(GFP_KERNEL);
1272 	if (!page)
1273 		return -ENOMEM;
1274 
1275 	bio_init(&bio, &bio_vec, 1);
1276 	bio.bi_iter.bi_sector = 0;
1277 	bio.bi_opf = REQ_OP_READ;
1278 	bio_set_dev(&bio, sb->s_bdev);
1279 	bio_add_page(&bio, page, PAGE_SIZE, 0);
1280 
1281 	ret = submit_bio_wait(&bio);
1282 	if (ret)
1283 		goto free_page;
1284 
1285 	super = kmap(page);
1286 
1287 	ret = -EINVAL;
1288 	if (le32_to_cpu(super->s_magic) != ZONEFS_MAGIC)
1289 		goto unmap;
1290 
1291 	stored_crc = le32_to_cpu(super->s_crc);
1292 	super->s_crc = 0;
1293 	crc = crc32(~0U, (unsigned char *)super, sizeof(struct zonefs_super));
1294 	if (crc != stored_crc) {
1295 		zonefs_err(sb, "Invalid checksum (Expected 0x%08x, got 0x%08x)",
1296 			   crc, stored_crc);
1297 		goto unmap;
1298 	}
1299 
1300 	sbi->s_features = le64_to_cpu(super->s_features);
1301 	if (sbi->s_features & ~ZONEFS_F_DEFINED_FEATURES) {
1302 		zonefs_err(sb, "Unknown features set 0x%llx\n",
1303 			   sbi->s_features);
1304 		goto unmap;
1305 	}
1306 
1307 	if (sbi->s_features & ZONEFS_F_UID) {
1308 		sbi->s_uid = make_kuid(current_user_ns(),
1309 				       le32_to_cpu(super->s_uid));
1310 		if (!uid_valid(sbi->s_uid)) {
1311 			zonefs_err(sb, "Invalid UID feature\n");
1312 			goto unmap;
1313 		}
1314 	}
1315 
1316 	if (sbi->s_features & ZONEFS_F_GID) {
1317 		sbi->s_gid = make_kgid(current_user_ns(),
1318 				       le32_to_cpu(super->s_gid));
1319 		if (!gid_valid(sbi->s_gid)) {
1320 			zonefs_err(sb, "Invalid GID feature\n");
1321 			goto unmap;
1322 		}
1323 	}
1324 
1325 	if (sbi->s_features & ZONEFS_F_PERM)
1326 		sbi->s_perm = le32_to_cpu(super->s_perm);
1327 
1328 	if (memchr_inv(super->s_reserved, 0, sizeof(super->s_reserved))) {
1329 		zonefs_err(sb, "Reserved area is being used\n");
1330 		goto unmap;
1331 	}
1332 
1333 	import_uuid(&sbi->s_uuid, super->s_uuid);
1334 	ret = 0;
1335 
1336 unmap:
1337 	kunmap(page);
1338 free_page:
1339 	__free_page(page);
1340 
1341 	return ret;
1342 }
1343 
1344 /*
1345  * Check that the device is zoned. If it is, get the list of zones and create
1346  * sub-directories and files according to the device zone configuration and
1347  * format options.
1348  */
1349 static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
1350 {
1351 	struct zonefs_zone_data zd;
1352 	struct zonefs_sb_info *sbi;
1353 	struct inode *inode;
1354 	enum zonefs_ztype t;
1355 	int ret;
1356 
1357 	if (!bdev_is_zoned(sb->s_bdev)) {
1358 		zonefs_err(sb, "Not a zoned block device\n");
1359 		return -EINVAL;
1360 	}
1361 
1362 	/*
1363 	 * Initialize super block information: the maximum file size is updated
1364 	 * when the zone files are created so that the format option
1365 	 * ZONEFS_F_AGGRCNV which increases the maximum file size of a file
1366 	 * beyond the zone size is taken into account.
1367 	 */
1368 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
1369 	if (!sbi)
1370 		return -ENOMEM;
1371 
1372 	spin_lock_init(&sbi->s_lock);
1373 	sb->s_fs_info = sbi;
1374 	sb->s_magic = ZONEFS_MAGIC;
1375 	sb->s_maxbytes = 0;
1376 	sb->s_op = &zonefs_sops;
1377 	sb->s_time_gran	= 1;
1378 
1379 	/*
1380 	 * The block size is set to the device physical sector size to ensure
1381 	 * that write operations on 512e devices (512B logical block and 4KB
1382 	 * physical block) are always aligned to the device physical blocks,
1383 	 * as mandated by the ZBC/ZAC specifications.
1384 	 */
1385 	sb_set_blocksize(sb, bdev_physical_block_size(sb->s_bdev));
1386 	sbi->s_zone_sectors_shift = ilog2(bdev_zone_sectors(sb->s_bdev));
1387 	sbi->s_uid = GLOBAL_ROOT_UID;
1388 	sbi->s_gid = GLOBAL_ROOT_GID;
1389 	sbi->s_perm = 0640;
1390 	sbi->s_mount_opts = ZONEFS_MNTOPT_ERRORS_RO;
1391 
1392 	ret = zonefs_read_super(sb);
1393 	if (ret)
1394 		return ret;
1395 
1396 	ret = zonefs_parse_options(sb, data);
1397 	if (ret)
1398 		return ret;
1399 
1400 	memset(&zd, 0, sizeof(struct zonefs_zone_data));
1401 	zd.sb = sb;
1402 	ret = zonefs_get_zone_info(&zd);
1403 	if (ret)
1404 		goto cleanup;
1405 
1406 	zonefs_info(sb, "Mounting %u zones",
1407 		    blkdev_nr_zones(sb->s_bdev->bd_disk));
1408 
1409 	/* Create root directory inode */
1410 	ret = -ENOMEM;
1411 	inode = new_inode(sb);
1412 	if (!inode)
1413 		goto cleanup;
1414 
1415 	inode->i_ino = blkdev_nr_zones(sb->s_bdev->bd_disk);
1416 	inode->i_mode = S_IFDIR | 0555;
1417 	inode->i_ctime = inode->i_mtime = inode->i_atime = current_time(inode);
1418 	inode->i_op = &zonefs_dir_inode_operations;
1419 	inode->i_fop = &simple_dir_operations;
1420 	set_nlink(inode, 2);
1421 
1422 	sb->s_root = d_make_root(inode);
1423 	if (!sb->s_root)
1424 		goto cleanup;
1425 
1426 	/* Create and populate files in zone groups directories */
1427 	for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
1428 		ret = zonefs_create_zgroup(&zd, t);
1429 		if (ret)
1430 			break;
1431 	}
1432 
1433 cleanup:
1434 	zonefs_cleanup_zone_info(&zd);
1435 
1436 	return ret;
1437 }
1438 
1439 static struct dentry *zonefs_mount(struct file_system_type *fs_type,
1440 				   int flags, const char *dev_name, void *data)
1441 {
1442 	return mount_bdev(fs_type, flags, dev_name, data, zonefs_fill_super);
1443 }
1444 
1445 static void zonefs_kill_super(struct super_block *sb)
1446 {
1447 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1448 
1449 	if (sb->s_root)
1450 		d_genocide(sb->s_root);
1451 	kill_block_super(sb);
1452 	kfree(sbi);
1453 }
1454 
1455 /*
1456  * File system definition and registration.
1457  */
1458 static struct file_system_type zonefs_type = {
1459 	.owner		= THIS_MODULE,
1460 	.name		= "zonefs",
1461 	.mount		= zonefs_mount,
1462 	.kill_sb	= zonefs_kill_super,
1463 	.fs_flags	= FS_REQUIRES_DEV,
1464 };
1465 
1466 static int __init zonefs_init_inodecache(void)
1467 {
1468 	zonefs_inode_cachep = kmem_cache_create("zonefs_inode_cache",
1469 			sizeof(struct zonefs_inode_info), 0,
1470 			(SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT),
1471 			NULL);
1472 	if (zonefs_inode_cachep == NULL)
1473 		return -ENOMEM;
1474 	return 0;
1475 }
1476 
1477 static void zonefs_destroy_inodecache(void)
1478 {
1479 	/*
1480 	 * Make sure all delayed rcu free inodes are flushed before we
1481 	 * destroy the inode cache.
1482 	 */
1483 	rcu_barrier();
1484 	kmem_cache_destroy(zonefs_inode_cachep);
1485 }
1486 
1487 static int __init zonefs_init(void)
1488 {
1489 	int ret;
1490 
1491 	BUILD_BUG_ON(sizeof(struct zonefs_super) != ZONEFS_SUPER_SIZE);
1492 
1493 	ret = zonefs_init_inodecache();
1494 	if (ret)
1495 		return ret;
1496 
1497 	ret = register_filesystem(&zonefs_type);
1498 	if (ret) {
1499 		zonefs_destroy_inodecache();
1500 		return ret;
1501 	}
1502 
1503 	return 0;
1504 }
1505 
1506 static void __exit zonefs_exit(void)
1507 {
1508 	zonefs_destroy_inodecache();
1509 	unregister_filesystem(&zonefs_type);
1510 }
1511 
1512 MODULE_AUTHOR("Damien Le Moal");
1513 MODULE_DESCRIPTION("Zone file system for zoned block devices");
1514 MODULE_LICENSE("GPL");
1515 module_init(zonefs_init);
1516 module_exit(zonefs_exit);
1517