xref: /linux/fs/nilfs2/sufile.c (revision 55d0969c451159cff86949b38c39171cab962069)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * NILFS segment usage file.
4  *
5  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
6  *
7  * Written by Koji Sato.
8  * Revised by Ryusuke Konishi.
9  */
10 
11 #include <linux/kernel.h>
12 #include <linux/fs.h>
13 #include <linux/string.h>
14 #include <linux/buffer_head.h>
15 #include <linux/errno.h>
16 #include "mdt.h"
17 #include "sufile.h"
18 
19 #include <trace/events/nilfs2.h>
20 
/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 *
 * The fields following @mi are updated while holding @mi.mi_sem for
 * writing (see nilfs_sufile_alloc(), nilfs_sufile_resize(), etc.).
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;/* number of clean segments */
	__u64 allocmin;		/* lower limit of allocatable segment range */
	__u64 allocmax;		/* upper limit of allocatable segment range */
};
34 
/*
 * NILFS_SUI - get the sufile private data attached to @sufile.
 * Valid because struct nilfs_sufile_info embeds struct nilfs_mdt_info
 * as its first member.
 */
static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}
39 
/*
 * nilfs_sufile_segment_usages_per_block - number of segment usage
 * entries stored in a single sufile block.
 */
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}
45 
46 static unsigned long
47 nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
48 {
49 	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
50 
51 	t = div64_ul(t, nilfs_sufile_segment_usages_per_block(sufile));
52 	return (unsigned long)t;
53 }
54 
/*
 * nilfs_sufile_get_offset - index of the usage entry of segment
 * @segnum within the sufile block that contains it.
 */
static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	/* do_div() divides @t in place and yields the remainder */
	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}
62 
63 static unsigned long
64 nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
65 				     __u64 max)
66 {
67 	return min_t(unsigned long,
68 		     nilfs_sufile_segment_usages_per_block(sufile) -
69 		     nilfs_sufile_get_offset(sufile, curr),
70 		     max - curr + 1);
71 }
72 
73 static struct nilfs_segment_usage *
74 nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
75 				     struct buffer_head *bh, void *kaddr)
76 {
77 	return kaddr + bh_offset(bh) +
78 		nilfs_sufile_get_offset(sufile, segnum) *
79 		NILFS_MDT(sufile)->mi_entry_size;
80 }
81 
82 static int nilfs_sufile_get_header_block(struct inode *sufile,
83 					 struct buffer_head **bhp)
84 {
85 	int err = nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
86 
87 	if (unlikely(err == -ENOENT)) {
88 		nilfs_error(sufile->i_sb,
89 			    "missing header block in segment usage metadata");
90 		err = -EIO;
91 	}
92 	return err;
93 }
94 
/*
 * nilfs_sufile_get_segment_usage_block - read in the sufile block that
 * holds the usage entry of segment @segnum.
 * @create: if nonzero, instantiate the block when it is a hole
 */
static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}
103 
/*
 * nilfs_sufile_delete_segment_usage_block - delete the sufile block
 * holding the usage entry of segment @segnum, turning it into a hole.
 */
static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}
110 
/*
 * nilfs_sufile_mod_counter - adjust the clean/dirty segment counters
 * stored in the sufile header block and mark the header buffer dirty.
 *
 * The additions are modular, so callers pass (u64)-1 to decrement a
 * counter.
 */
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_local_page(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_local(kaddr);

	mark_buffer_dirty(header_bh);
}
125 
/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 *
 * Return: Cached in-memory count of clean segments (read without
 * taking mi_sem).
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}
134 
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	/* Validate every segment number before modifying anything */
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			nilfs_warn(sufile->i_sb,
				   "%s: invalid segment number: %llu",
				   __func__, (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		/* Reuse the current buffer while segments share a block */
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	/* Number of segments processed so far, reported via @ndone */
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}
230 
/**
 * nilfs_sufile_update - modify a single segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @create: if nonzero, instantiate the sufile block when it is a hole
 * @dofunc: primitive operation for the update
 *
 * Description: Applies @dofunc to the usage entry of @segnum, passing
 * the buffers of the header block and the sufile block containing the
 * entry, while holding mi_sem for writing.
 *
 * Return Value: 0 on success, %-EINVAL for an out-of-range segment
 * number, or a negative error code from the block lookups.
 */
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		nilfs_warn(sufile->i_sb, "%s: invalid segment number: %llu",
			   __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
261 
262 /**
263  * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
264  * @sufile: inode of segment usage file
265  * @start: minimum segment number of allocatable region (inclusive)
266  * @end: maximum segment number of allocatable region (inclusive)
267  *
268  * Return Value: On success, 0 is returned.  On error, one of the
269  * following negative error codes is returned.
270  *
271  * %-ERANGE - invalid segment region
272  */
273 int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
274 {
275 	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
276 	__u64 nsegs;
277 	int ret = -ERANGE;
278 
279 	down_write(&NILFS_MDT(sufile)->mi_sem);
280 	nsegs = nilfs_sufile_get_nsegments(sufile);
281 
282 	if (start <= end && end < nsegs) {
283 		sui->allocmin = start;
284 		sui->allocmax = end;
285 		ret = 0;
286 	}
287 	up_write(&NILFS_MDT(sufile)->mi_sem);
288 	return ret;
289 }
290 
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.  The
 * search starts at the segment following the last allocation recorded
 * in the header, scans the allocatable range [allocmin, allocmax]
 * first (wrapping around within it), and then falls back to the
 * regions outside that range.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_local_page(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_local(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	/* Start just after the last allocation, clamped to the range */
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			/* Passed the window's upper bound: re-aim the search */
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0)  {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_local_page(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_local(kaddr);

			kaddr = kmap_local_page(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_local(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;

			trace_nilfs2_segment_usage_allocated(sufile, segnum);

			goto out_header;
		}

		kunmap_local(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
408 
/**
 * nilfs_sufile_do_cancel_free - cancel freeing of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the sufile block containing the usage entry
 *
 * Turns the usage entry of @segnum from clean back to dirty and
 * adjusts the clean/dirty counters accordingly.  If the segment is
 * not clean, a warning is issued and nothing is changed.
 */
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_local_page(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean",
			   __func__, (unsigned long long)segnum);
		kunmap_local(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_local(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
433 
/**
 * nilfs_sufile_do_scrap - make a segment garbage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the sufile block containing the usage entry
 *
 * Resets the usage entry of @segnum to a dirty entry with no blocks
 * and no modification time, and adjusts the clean/dirty counters to
 * match the entry's previous state.
 */
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_local_page(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	/* Nothing to do if the entry is already dirty-only and empty */
	if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_local(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
	kunmap_local(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
464 
/**
 * nilfs_sufile_do_free - free a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the sufile block containing the usage entry
 *
 * Marks the usage entry of @segnum clean and updates the clean/dirty
 * counters.  Unexpected prior states (already clean, marked in error,
 * or not dirty) are reported with warnings but do not abort the free.
 */
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_local_page(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean",
			   __func__, (unsigned long long)segnum);
		kunmap_local(kaddr);
		return;
	}
	if (unlikely(nilfs_segment_usage_error(su)))
		nilfs_warn(sufile->i_sb, "free segment %llu marked in error",
			   (unsigned long long)segnum);

	sudirty = nilfs_segment_usage_dirty(su);
	if (unlikely(!sudirty))
		nilfs_warn(sufile->i_sb, "free unallocated segment %llu",
			   (unsigned long long)segnum);

	nilfs_segment_usage_set_clean(su);
	kunmap_local(kaddr);
	mark_buffer_dirty(su_bh);

	/* The dirty counter is only decremented if the entry was dirty */
	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);

	trace_nilfs2_segment_usage_freed(sufile, segnum);
}
501 
/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 *
 * Return: 0 on success, %-EIO if the entry lies in a hole block or the
 * segment is marked erroneous, or another negative error code from the
 * block lookup.
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	void *kaddr;
	struct nilfs_segment_usage *su;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (unlikely(ret)) {
		if (ret == -ENOENT) {
			nilfs_error(sufile->i_sb,
				    "segment usage for segment %llu is unreadable due to a hole block",
				    (unsigned long long)segnum);
			ret = -EIO;
		}
		goto out_sem;
	}

	kaddr = kmap_local_page(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	if (unlikely(nilfs_segment_usage_error(su))) {
		struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;

		kunmap_local(kaddr);
		brelse(bh);
		if (nilfs_segment_is_active(nilfs, segnum)) {
			nilfs_error(sufile->i_sb,
				    "active segment %llu is erroneous",
				    (unsigned long long)segnum);
		} else {
			/*
			 * Segments marked erroneous are never allocated by
			 * nilfs_sufile_alloc(); only active segments, ie,
			 * the segments indexed by ns_segnum or ns_nextnum,
			 * can be erroneous here.
			 */
			WARN_ON_ONCE(1);
		}
		ret = -EIO;
	} else {
		nilfs_segment_usage_set_dirty(su);
		kunmap_local(kaddr);
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
558 
/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option); 0 means the update is a
 *           cancellation and su_lastmod is left unchanged
 *
 * Return: 0 on success, or a negative error code if the sufile block
 * containing the entry could not be read.
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time64_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_local_page(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	if (modtime) {
		/*
		 * Check segusage error and set su_lastmod only when updating
		 * this entry with a valid timestamp, not for cancellation.
		 */
		WARN_ON_ONCE(nilfs_segment_usage_error(su));
		su->su_lastmod = cpu_to_le64(modtime);
	}
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_local(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
600 
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_local_page(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	/* ns_prot_seq is read under ns_last_segment_lock */
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_local(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
648 
/**
 * nilfs_sufile_do_set_error - mark a segment as erroneous
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the sufile block containing the usage entry
 *
 * Sets the error flag on the usage entry of @segnum; if the segment
 * was clean, it is also removed from the clean segment counters.
 * Does nothing if the error flag is already set.
 */
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_local_page(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_local(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_local(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
674 
/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		/* number of in-range entries in the block holding @segnum */
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_local_page(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		/* first pass: refuse if any segment is in use or active */
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_local(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		/* second pass: clear entries that only carry the error flag */
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_local(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
out:
	return ret;
}
779 
/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	/*
	 * Shrinking requires that enough clean segments remain to absorb
	 * the removed region plus the reserved segments of the new size.
	 */
	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;

		/*
		 * If the sufile is successfully truncated, immediately adjust
		 * the segment allocation space while locking the semaphore
		 * "mi_sem" so that nilfs_sufile_alloc() never allocates
		 * segments in the truncated space.
		 */
		sui->allocmax = newnsegs - 1;
		sui->allocmin = 0;
	}

	kaddr = kmap_local_page(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_local(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

out_header:
	brelse(header_bh);
out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
855 
/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf:    array of suinfo
 * @sisz:   byte size of suinfo
 * @nsi:    size of suinfo array
 *
 * Return: Count of segment usage info items stored in the output buffer on
 * success, or the following negative error code on failure.
 * * %-EIO	- I/O error (including metadata corruption).
 * * %-ENOMEM	- Insufficient memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned int sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole: report zeroed-out usage info */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_local_page(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			/*
			 * The active flag is not stored on disk; recompute
			 * it from the current segment state.
			 */
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					BIT(NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_local(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
926 
/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly. Only the fields indicated by the sup_flags
 * are updated.
 *
 * Return Value: On success, 0 is returned. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
				unsigned int supsz, size_t nsup)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh, *bh;
	struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
	struct nilfs_segment_usage *su;
	void *kaddr;
	unsigned long blkoff, prev_blkoff;
	int cleansi, cleansu, dirtysi, dirtysu;
	long ncleaned = 0, ndirtied = 0;
	int ret = 0;

	if (unlikely(nsup == 0))
		return ret;

	/* Validate every record before applying any updates */
	for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
		if (sup->sup_segnum >= nilfs->ns_nsegments
			|| (sup->sup_flags &
				(~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
			|| (nilfs_suinfo_update_nblocks(sup) &&
				sup->sup_sui.sui_nblocks >
				nilfs->ns_blocks_per_segment))
			return -EINVAL;
	}

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	sup = buf;
	blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
	ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		kaddr = kmap_local_page(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, sup->sup_segnum, bh, kaddr);

		if (nilfs_suinfo_update_lastmod(sup))
			su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

		if (nilfs_suinfo_update_nblocks(sup))
			su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

		if (nilfs_suinfo_update_flags(sup)) {
			/*
			 * Active flag is a virtual flag projected by running
			 * nilfs kernel code - drop it not to write it to
			 * disk.
			 */
			sup->sup_sui.sui_flags &=
					~BIT(NILFS_SEGMENT_USAGE_ACTIVE);

			/*
			 * Track clean/dirty state transitions so the
			 * header counters can be adjusted afterwards.
			 */
			cleansi = nilfs_suinfo_clean(&sup->sup_sui);
			cleansu = nilfs_segment_usage_clean(su);
			dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
			dirtysu = nilfs_segment_usage_dirty(su);

			if (cleansi && !cleansu)
				++ncleaned;
			else if (!cleansi && cleansu)
				--ncleaned;

			if (dirtysi && !dirtysu)
				++ndirtied;
			else if (!dirtysi && dirtysu)
				--ndirtied;

			su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
		}

		kunmap_local(kaddr);

		sup = (void *)sup + supsz;
		if (sup >= supend)
			break;

		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		mark_buffer_dirty(bh);
		put_bh(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_mark;
	}
	mark_buffer_dirty(bh);
	put_bh(bh);

 out_mark:
	if (ncleaned || ndirtied) {
		nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
				(u64)ndirtied);
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
	}
	nilfs_mdt_mark_dirty(sufile);
 out_header:
	put_bh(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
1057 
1058 /**
1059  * nilfs_sufile_trim_fs() - trim ioctl handle function
1060  * @sufile: inode of segment usage file
1061  * @range: fstrim_range structure
1062  *
1063  * start:	First Byte to trim
1064  * len:		number of Bytes to trim from start
1065  * minlen:	minimum extent length in Bytes
1066  *
 * Description: nilfs_sufile_trim_fs goes through all segments containing bytes
 * from start to start+len. start is rounded up to the next block boundary
 * and start+len is rounded down. For each clean segment blkdev_issue_discard
 * function is invoked.
 *
 * Return Value: 0 on success, or a negative error code on failure.
1073  */
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
	sector_t seg_start, seg_end, start_block, end_block;
	sector_t start = 0, nblocks = 0;	/* extent currently being built */
	u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
	int ret = 0;
	unsigned int sects_per_block;

	/* Device sectors per filesystem block, for discard unit conversion */
	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
			bdev_logical_block_size(nilfs->ns_bdev);
	/* Convert the byte-based trim request into filesystem blocks */
	len = range->len >> nilfs->ns_blocksize_bits;
	minlen = range->minlen >> nilfs->ns_blocksize_bits;
	max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

	if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
		return -EINVAL;

	/* Round the start offset up to the next block boundary */
	start_block = (range->start + nilfs->ns_blocksize - 1) >>
			nilfs->ns_blocksize_bits;

	/*
	 * range->len can be very large (actually, it is set to
	 * ULLONG_MAX by default) - truncate upper end of the range
	 * carefully so as not to overflow.
	 */
	if (max_blocks - start_block < len)
		end_block = max_blocks - 1;
	else
		end_block = start_block + len - 1;

	segnum = nilfs_get_segnum_of_block(nilfs, start_block);
	segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

	down_read(&NILFS_MDT(sufile)->mi_sem);

	while (segnum <= segnum_end) {
		/* Number of usage entries in this block within the range */
		n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
				segnum_end);

		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_sem;
			/* hole */
			segnum += n;
			continue;
		}

		kaddr = kmap_local_page(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
				su_bh, kaddr);
		for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
			/* Only clean (unused) segments may be discarded */
			if (!nilfs_segment_usage_clean(su))
				continue;

			nilfs_get_segment_range(nilfs, segnum, &seg_start,
						&seg_end);

			if (!nblocks) {
				/* start new extent */
				start = seg_start;
				nblocks = seg_end - seg_start + 1;
				continue;
			}

			if (start + nblocks == seg_start) {
				/* add to previous extent */
				nblocks += seg_end - seg_start + 1;
				continue;
			}

			/* discard previous extent */
			if (start < start_block) {
				/* clip the extent head to the trim range */
				nblocks -= start_block - start;
				start = start_block;
			}

			if (nblocks >= minlen) {
				/*
				 * Unmap the page while the discard is issued
				 * (blkdev_issue_discard waits for completion
				 * and can sleep), then re-map and re-derive
				 * the usage pointer afterwards.
				 */
				kunmap_local(kaddr);

				ret = blkdev_issue_discard(nilfs->ns_bdev,
						start * sects_per_block,
						nblocks * sects_per_block,
						GFP_NOFS);
				if (ret < 0) {
					put_bh(su_bh);
					goto out_sem;
				}

				ndiscarded += nblocks;
				kaddr = kmap_local_page(su_bh->b_page);
				su = nilfs_sufile_block_get_segment_usage(
					sufile, segnum, su_bh, kaddr);
			}

			/* start new extent */
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
		kunmap_local(kaddr);
		put_bh(su_bh);
	}


	if (nblocks) {
		/* discard last extent */
		if (start < start_block) {
			/* clip the extent head to the trim range */
			nblocks -= start_block - start;
			start = start_block;
		}
		/* clip the extent tail to the trim range */
		if (start + nblocks > end_block + 1)
			nblocks = end_block - start + 1;

		if (nblocks >= minlen) {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
					start * sects_per_block,
					nblocks * sects_per_block,
					GFP_NOFS);
			if (!ret)
				ndiscarded += nblocks;
		}
	}

out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);

	/* Report the number of discarded bytes back to the caller */
	range->len = ndiscarded << nilfs->ns_blocksize_bits;
	return ret;
}
1209 
1210 /**
1211  * nilfs_sufile_read - read or get sufile inode
1212  * @sb: super block instance
1213  * @susize: size of a segment usage entry
1214  * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 *
 * Return: 0 on success, or a negative error code on failure.
 */
1217 int nilfs_sufile_read(struct super_block *sb, size_t susize,
1218 		      struct nilfs_inode *raw_inode, struct inode **inodep)
1219 {
1220 	struct inode *sufile;
1221 	struct nilfs_sufile_info *sui;
1222 	struct buffer_head *header_bh;
1223 	struct nilfs_sufile_header *header;
1224 	void *kaddr;
1225 	int err;
1226 
1227 	if (susize > sb->s_blocksize) {
1228 		nilfs_err(sb, "too large segment usage size: %zu bytes",
1229 			  susize);
1230 		return -EINVAL;
1231 	} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
1232 		nilfs_err(sb, "too small segment usage size: %zu bytes",
1233 			  susize);
1234 		return -EINVAL;
1235 	}
1236 
1237 	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
1238 	if (unlikely(!sufile))
1239 		return -ENOMEM;
1240 	if (!(sufile->i_state & I_NEW))
1241 		goto out;
1242 
1243 	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
1244 	if (err)
1245 		goto failed;
1246 
1247 	nilfs_mdt_set_entry_size(sufile, susize,
1248 				 sizeof(struct nilfs_sufile_header));
1249 
1250 	err = nilfs_read_inode_common(sufile, raw_inode);
1251 	if (err)
1252 		goto failed;
1253 
1254 	err = nilfs_mdt_get_block(sufile, 0, 0, NULL, &header_bh);
1255 	if (unlikely(err)) {
1256 		if (err == -ENOENT) {
1257 			nilfs_err(sb,
1258 				  "missing header block in segment usage metadata");
1259 			err = -EINVAL;
1260 		}
1261 		goto failed;
1262 	}
1263 
1264 	sui = NILFS_SUI(sufile);
1265 	kaddr = kmap_local_page(header_bh->b_page);
1266 	header = kaddr + bh_offset(header_bh);
1267 	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
1268 	kunmap_local(kaddr);
1269 	brelse(header_bh);
1270 
1271 	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
1272 	sui->allocmin = 0;
1273 
1274 	unlock_new_inode(sufile);
1275  out:
1276 	*inodep = sufile;
1277 	return 0;
1278  failed:
1279 	iget_failed(sufile);
1280 	return err;
1281 }
1282