xref: /linux/fs/nilfs2/recovery.c (revision d53b8e36925256097a08d7cb749198d85cbf9b2b)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * NILFS recovery logic
4  *
5  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
6  *
7  * Written by Ryusuke Konishi.
8  */
9 
10 #include <linux/buffer_head.h>
11 #include <linux/blkdev.h>
12 #include <linux/swap.h>
13 #include <linux/slab.h>
14 #include <linux/crc32.h>
15 #include "nilfs.h"
16 #include "segment.h"
17 #include "sufile.h"
18 #include "page.h"
19 #include "segbuf.h"
20 
21 /*
22  * Segment check result
23  */
24 enum {
25 	NILFS_SEG_VALID,
26 	NILFS_SEG_NO_SUPER_ROOT,
27 	NILFS_SEG_FAIL_IO,
28 	NILFS_SEG_FAIL_MAGIC,
29 	NILFS_SEG_FAIL_SEQ,
30 	NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
31 	NILFS_SEG_FAIL_CHECKSUM_FULL,
32 	NILFS_SEG_FAIL_CONSISTENCY,
33 };
34 
35 /* work structure for recovery */
36 struct nilfs_recovery_block {
37 	ino_t ino;		/*
38 				 * Inode number of the file that this block
39 				 * belongs to
40 				 */
41 	sector_t blocknr;	/* block number */
42 	__u64 vblocknr;		/* virtual block number */
43 	unsigned long blkoff;	/* File offset of the data block (in block units) */
44 	struct list_head list;
45 };
46 
47 
48 static int nilfs_warn_segment_error(struct super_block *sb, int err)
49 {
50 	const char *msg = NULL;
51 
52 	switch (err) {
53 	case NILFS_SEG_FAIL_IO:
54 		nilfs_err(sb, "I/O error reading segment");
55 		return -EIO;
56 	case NILFS_SEG_FAIL_MAGIC:
57 		msg = "Magic number mismatch";
58 		break;
59 	case NILFS_SEG_FAIL_SEQ:
60 		msg = "Sequence number mismatch";
61 		break;
62 	case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
63 		msg = "Checksum error in super root";
64 		break;
65 	case NILFS_SEG_FAIL_CHECKSUM_FULL:
66 		msg = "Checksum error in segment payload";
67 		break;
68 	case NILFS_SEG_FAIL_CONSISTENCY:
69 		msg = "Inconsistency found";
70 		break;
71 	case NILFS_SEG_NO_SUPER_ROOT:
72 		msg = "No super root in the last segment";
73 		break;
74 	default:
75 		nilfs_err(sb, "unrecognized segment error %d", err);
76 		return -EINVAL;
77 	}
78 	nilfs_warn(sb, "invalid segment: %s", msg);
79 	return -EINVAL;
80 }
81 
82 /**
83  * nilfs_compute_checksum - compute checksum over consecutive blocks
84  * @nilfs: nilfs object
85  * @bhs: buffer head of start block
86  * @sum: place to store result
87  * @offset: offset bytes in the first block
88  * @check_bytes: number of bytes to be checked
89  * @start: DBN of start block
90  * @nblock: number of blocks to be checked
91  */
92 static int nilfs_compute_checksum(struct the_nilfs *nilfs,
93 				  struct buffer_head *bhs, u32 *sum,
94 				  unsigned long offset, u64 check_bytes,
95 				  sector_t start, unsigned long nblock)
96 {
97 	unsigned int blocksize = nilfs->ns_blocksize;
98 	unsigned long size;
99 	u32 crc;
100 
101 	BUG_ON(offset >= blocksize);
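	/*
	 * The first block is already in memory (@bhs): checksum it from
	 * @offset, then read each following block from disk and fold it into
	 * the running CRC until @check_bytes bytes over @nblock blocks have
	 * been covered.
	 */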
102 	check_bytes -= offset;
103 	size = min_t(u64, check_bytes, blocksize - offset);
104 	crc = crc32_le(nilfs->ns_crc_seed,
105 		       (unsigned char *)bhs->b_data + offset, size);
106 	if (--nblock > 0) {
107 		do {
108 			struct buffer_head *bh;
109 
110 			bh = __bread(nilfs->ns_bdev, ++start, blocksize);
111 			if (!bh)
112 				return -EIO;
113 			check_bytes -= size;
114 			size = min_t(u64, check_bytes, blocksize);
115 			crc = crc32_le(crc, bh->b_data, size);
116 			brelse(bh);
117 		} while (--nblock > 0);
118 	}
119 	*sum = crc;
120 	return 0;
121 }
122 
123 /**
124  * nilfs_read_super_root_block - read super root block
125  * @nilfs: nilfs object
126  * @sr_block: disk block number of the super root block
127  * @pbh: address of a buffer_head pointer to return super root buffer
128  * @check: CRC check flag
129  */
130 int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
131 				struct buffer_head **pbh, int check)
132 {
133 	struct buffer_head *bh_sr;
134 	struct nilfs_super_root *sr;
135 	u32 crc;
136 	int ret;
137 
138 	*pbh = NULL;
139 	bh_sr = __bread(nilfs->ns_bdev, sr_block, nilfs->ns_blocksize);
140 	if (unlikely(!bh_sr)) {
141 		ret = NILFS_SEG_FAIL_IO;
142 		goto failed;
143 	}
144 
145 	sr = (struct nilfs_super_root *)bh_sr->b_data;
146 	if (check) {
147 		unsigned int bytes = le16_to_cpu(sr->sr_bytes);
148 
149 		if (bytes == 0 || bytes > nilfs->ns_blocksize) {
150 			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
151 			goto failed_bh;
152 		}
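		/*
		 * The stored checksum (sr_sum) covers the first sr_bytes
		 * bytes of the super root except the sr_sum field itself;
		 * recompute the CRC over that range and compare.
		 */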
153 		if (nilfs_compute_checksum(
154 			    nilfs, bh_sr, &crc, sizeof(sr->sr_sum), bytes,
155 			    sr_block, 1)) {
156 			ret = NILFS_SEG_FAIL_IO;
157 			goto failed_bh;
158 		}
159 		if (crc != le32_to_cpu(sr->sr_sum)) {
160 			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
161 			goto failed_bh;
162 		}
163 	}
164 	*pbh = bh_sr;
165 	return 0;
166 
167  failed_bh:
168 	brelse(bh_sr);
169 
170  failed:
171 	return nilfs_warn_segment_error(nilfs->ns_sb, ret);
172 }
173 
174 /**
175  * nilfs_read_log_header - read summary header of the specified log
176  * @nilfs: nilfs object
177  * @start_blocknr: start block number of the log
178  * @sum: pointer to return segment summary structure
179  */
180 static struct buffer_head *
181 nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
182 		      struct nilfs_segment_summary **sum)
183 {
184 	struct buffer_head *bh_sum;
185 
186 	bh_sum = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
187 	if (bh_sum)
188 		*sum = (struct nilfs_segment_summary *)bh_sum->b_data;
189 	return bh_sum;
190 }
191 
192 /**
193  * nilfs_validate_log - verify consistency of log
194  * @nilfs: nilfs object
195  * @seg_seq: sequence number of segment
196  * @bh_sum: buffer head of summary block
197  * @sum: segment summary struct
198  */
199 static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
200 			      struct buffer_head *bh_sum,
201 			      struct nilfs_segment_summary *sum)
202 {
203 	unsigned long nblock;
204 	u32 crc;
205 	int ret;
206 
207 	ret = NILFS_SEG_FAIL_MAGIC;
208 	if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC)
209 		goto out;
210 
211 	ret = NILFS_SEG_FAIL_SEQ;
212 	if (le64_to_cpu(sum->ss_seq) != seg_seq)
213 		goto out;
214 
215 	nblock = le32_to_cpu(sum->ss_nblocks);
216 	ret = NILFS_SEG_FAIL_CONSISTENCY;
217 	if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment))
218 		/* This limits the number of blocks read in the CRC check */
219 		goto out;
220 
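	/*
	 * ss_datasum covers the entire log (all ss_nblocks blocks), skipping
	 * only the ss_datasum field itself at the head of the summary.
	 */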
221 	ret = NILFS_SEG_FAIL_IO;
222 	if (nilfs_compute_checksum(nilfs, bh_sum, &crc, sizeof(sum->ss_datasum),
223 				   ((u64)nblock << nilfs->ns_blocksize_bits),
224 				   bh_sum->b_blocknr, nblock))
225 		goto out;
226 
227 	ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
228 	if (crc != le32_to_cpu(sum->ss_datasum))
229 		goto out;
230 	ret = 0;
231 out:
232 	return ret;
233 }
234 
235 /**
236  * nilfs_read_summary_info - read an item on summary blocks of a log
237  * @nilfs: nilfs object
238  * @pbh: the current buffer head on summary blocks [in, out]
239  * @offset: the current byte offset on summary blocks [in, out]
240  * @bytes: byte size of the item to be read
241  */
242 static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
243 				     struct buffer_head **pbh,
244 				     unsigned int *offset, unsigned int bytes)
245 {
246 	void *ptr;
247 	sector_t blocknr;
248 
249 	BUG_ON((*pbh)->b_size < *offset);
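	/*
	 * Summary items do not straddle block boundaries: if the next item
	 * does not fit in what is left of the current block, it starts at
	 * the beginning of the following block.
	 */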
250 	if (bytes > (*pbh)->b_size - *offset) {
251 		blocknr = (*pbh)->b_blocknr;
252 		brelse(*pbh);
253 		*pbh = __bread(nilfs->ns_bdev, blocknr + 1,
254 			       nilfs->ns_blocksize);
255 		if (unlikely(!*pbh))
256 			return NULL;
257 		*offset = 0;
258 	}
259 	ptr = (*pbh)->b_data + *offset;
260 	*offset += bytes;
261 	return ptr;
262 }
263 
264 /**
265  * nilfs_skip_summary_info - skip items on summary blocks of a log
266  * @nilfs: nilfs object
267  * @pbh: the current buffer head on summary blocks [in, out]
268  * @offset: the current byte offset on summary blocks [in, out]
269  * @bytes: byte size of the item to be skipped
270  * @count: number of items to be skipped
271  */
272 static void nilfs_skip_summary_info(struct the_nilfs *nilfs,
273 				    struct buffer_head **pbh,
274 				    unsigned int *offset, unsigned int bytes,
275 				    unsigned long count)
276 {
277 	unsigned int rest_item_in_current_block
278 		= ((*pbh)->b_size - *offset) / bytes;
279 
280 	if (count <= rest_item_in_current_block) {
281 		*offset += bytes * count;
282 	} else {
283 		sector_t blocknr = (*pbh)->b_blocknr;
284 		unsigned int nitem_per_block = (*pbh)->b_size / bytes;
285 		unsigned int bcnt;
286 
287 		count -= rest_item_in_current_block;
288 		bcnt = DIV_ROUND_UP(count, nitem_per_block);
289 		*offset = bytes * (count - (bcnt - 1) * nitem_per_block);
290 
291 		brelse(*pbh);
292 		*pbh = __bread(nilfs->ns_bdev, blocknr + bcnt,
293 			       nilfs->ns_blocksize);
294 	}
295 }
296 
297 /**
298  * nilfs_scan_dsync_log - get block information of a log written for data sync
299  * @nilfs: nilfs object
300  * @start_blocknr: start block number of the log
301  * @sum: log summary information
302  * @head: list head to add nilfs_recovery_block struct
303  */
304 static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
305 				struct nilfs_segment_summary *sum,
306 				struct list_head *head)
307 {
308 	struct buffer_head *bh;
309 	unsigned int offset;
310 	u32 nfinfo, sumbytes;
311 	sector_t blocknr;
312 	ino_t ino;
313 	int err = -EIO;
314 
315 	nfinfo = le32_to_cpu(sum->ss_nfinfo);
316 	if (!nfinfo)
317 		return 0;
318 
319 	sumbytes = le32_to_cpu(sum->ss_sumbytes);
320 	blocknr = start_blocknr + DIV_ROUND_UP(sumbytes, nilfs->ns_blocksize);
321 	bh = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
322 	if (unlikely(!bh))
323 		goto out;
324 
325 	offset = le16_to_cpu(sum->ss_bytes);
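
	/*
	 * Walk the finfo entries in the summary area.  Each finfo is
	 * followed by per-block entries: data blocks first (each described
	 * by a nilfs_binfo_v and queued for later recovery), then node
	 * blocks, which are skipped (normally absent in dsync logs).
	 */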
326 	for (;;) {
327 		unsigned long nblocks, ndatablk, nnodeblk;
328 		struct nilfs_finfo *finfo;
329 
330 		finfo = nilfs_read_summary_info(nilfs, &bh, &offset,
331 						sizeof(*finfo));
332 		if (unlikely(!finfo))
333 			goto out;
334 
335 		ino = le64_to_cpu(finfo->fi_ino);
336 		nblocks = le32_to_cpu(finfo->fi_nblocks);
337 		ndatablk = le32_to_cpu(finfo->fi_ndatablk);
338 		nnodeblk = nblocks - ndatablk;
339 
340 		while (ndatablk-- > 0) {
341 			struct nilfs_recovery_block *rb;
342 			struct nilfs_binfo_v *binfo;
343 
344 			binfo = nilfs_read_summary_info(nilfs, &bh, &offset,
345 							sizeof(*binfo));
346 			if (unlikely(!binfo))
347 				goto out;
348 
349 			rb = kmalloc(sizeof(*rb), GFP_NOFS);
350 			if (unlikely(!rb)) {
351 				err = -ENOMEM;
352 				goto out;
353 			}
354 			rb->ino = ino;
355 			rb->blocknr = blocknr++;
356 			rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
357 			rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
358 			/* INIT_LIST_HEAD(&rb->list); */
359 			list_add_tail(&rb->list, head);
360 		}
361 		if (--nfinfo == 0)
362 			break;
363 		blocknr += nnodeblk; /* always 0 for data sync logs */
364 		nilfs_skip_summary_info(nilfs, &bh, &offset, sizeof(__le64),
365 					nnodeblk);
366 		if (unlikely(!bh))
367 			goto out;
368 	}
369 	err = 0;
370  out:
371 	brelse(bh);   /* brelse(NULL) is just ignored */
372 	return err;
373 }
374 
375 static void dispose_recovery_list(struct list_head *head)
376 {
377 	while (!list_empty(head)) {
378 		struct nilfs_recovery_block *rb;
379 
380 		rb = list_first_entry(head, struct nilfs_recovery_block, list);
381 		list_del(&rb->list);
382 		kfree(rb);
383 	}
384 }
385 
386 struct nilfs_segment_entry {
387 	struct list_head	list;
388 	__u64			segnum;
389 };
390 
391 static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
392 {
393 	struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS);
394 
395 	if (unlikely(!ent))
396 		return -ENOMEM;
397 
398 	ent->segnum = segnum;
399 	INIT_LIST_HEAD(&ent->list);
400 	list_add_tail(&ent->list, head);
401 	return 0;
402 }
403 
404 void nilfs_dispose_segment_list(struct list_head *head)
405 {
406 	while (!list_empty(head)) {
407 		struct nilfs_segment_entry *ent;
408 
409 		ent = list_first_entry(head, struct nilfs_segment_entry, list);
410 		list_del(&ent->list);
411 		kfree(ent);
412 	}
413 }
414 
415 static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
416 					      struct super_block *sb,
417 					      struct nilfs_recovery_info *ri)
418 {
419 	struct list_head *head = &ri->ri_used_segments;
420 	struct nilfs_segment_entry *ent, *n;
421 	struct inode *sufile = nilfs->ns_sufile;
422 	__u64 segnum[4];
423 	int err;
424 	int i;
425 
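	/*
	 * segnum[0] is the segment holding the latest super root; the other
	 * three (its successor, and the segment and successor reached by the
	 * roll-forward scan) are queued to be freed or scrapped below.
	 */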
426 	segnum[0] = nilfs->ns_segnum;
427 	segnum[1] = nilfs->ns_nextnum;
428 	segnum[2] = ri->ri_segnum;
429 	segnum[3] = ri->ri_nextnum;
430 
431 	/*
432 	 * Releasing the next segment of the latest super root.
433 	 * The next segment is invalidated by this recovery.
434 	 */
435 	err = nilfs_sufile_free(sufile, segnum[1]);
436 	if (unlikely(err))
437 		goto failed;
438 
439 	for (i = 1; i < 4; i++) {
440 		err = nilfs_segment_list_add(head, segnum[i]);
441 		if (unlikely(err))
442 			goto failed;
443 	}
444 
445 	/*
446 	 * Collecting segments written after the latest super root.
447 	 * These are marked dirty to avoid being reallocated in the next write.
448 	 */
449 	list_for_each_entry_safe(ent, n, head, list) {
450 		if (ent->segnum != segnum[0]) {
451 			err = nilfs_sufile_scrap(sufile, ent->segnum);
452 			if (unlikely(err))
453 				goto failed;
454 		}
455 		list_del(&ent->list);
456 		kfree(ent);
457 	}
458 
459 	/* Allocate new segments for recovery */
460 	err = nilfs_sufile_alloc(sufile, &segnum[0]);
461 	if (unlikely(err))
462 		goto failed;
463 
464 	nilfs->ns_pseg_offset = 0;
465 	nilfs->ns_seg_seq = ri->ri_seq + 2;
466 	nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];
467 
468  failed:
469 	/* No need to recover sufile because it will be destroyed on error */
470 	return err;
471 }
472 
473 static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
474 				     struct nilfs_recovery_block *rb,
475 				     loff_t pos, struct page *page)
476 {
477 	struct buffer_head *bh_org;
478 	size_t from = pos & ~PAGE_MASK;
479 	void *kaddr;
480 
481 	bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
482 	if (unlikely(!bh_org))
483 		return -EIO;
484 
485 	kaddr = kmap_local_page(page);
486 	memcpy(kaddr + from, bh_org->b_data, bh_org->b_size);
487 	kunmap_local(kaddr);
488 	brelse(bh_org);
489 	return 0;
490 }
491 
492 static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
493 				      struct super_block *sb,
494 				      struct nilfs_root *root,
495 				      struct list_head *head,
496 				      unsigned long *nr_salvaged_blocks)
497 {
498 	struct inode *inode;
499 	struct nilfs_recovery_block *rb, *n;
500 	unsigned int blocksize = nilfs->ns_blocksize;
501 	struct page *page;
502 	loff_t pos;
503 	int err = 0, err2 = 0;
504 
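	/*
	 * Re-insert each salvaged block into the page cache of its inode
	 * with a write_begin/write_end pair and mark the file dirty, so
	 * that the following segment construction writes the block out to
	 * a fresh log.
	 */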
505 	list_for_each_entry_safe(rb, n, head, list) {
506 		inode = nilfs_iget(sb, root, rb->ino);
507 		if (IS_ERR(inode)) {
508 			err = PTR_ERR(inode);
509 			inode = NULL;
510 			goto failed_inode;
511 		}
512 
513 		pos = rb->blkoff << inode->i_blkbits;
514 		err = block_write_begin(inode->i_mapping, pos, blocksize,
515 					&page, nilfs_get_block);
516 		if (unlikely(err)) {
517 			loff_t isize = inode->i_size;
518 
519 			if (pos + blocksize > isize)
520 				nilfs_write_failed(inode->i_mapping,
521 							pos + blocksize);
522 			goto failed_inode;
523 		}
524 
525 		err = nilfs_recovery_copy_block(nilfs, rb, pos, page);
526 		if (unlikely(err))
527 			goto failed_page;
528 
529 		err = nilfs_set_file_dirty(inode, 1);
530 		if (unlikely(err))
531 			goto failed_page;
532 
533 		block_write_end(NULL, inode->i_mapping, pos, blocksize,
534 				blocksize, page, NULL);
535 
536 		unlock_page(page);
537 		put_page(page);
538 
539 		(*nr_salvaged_blocks)++;
540 		goto next;
541 
542  failed_page:
543 		unlock_page(page);
544 		put_page(page);
545 
546  failed_inode:
547 		nilfs_warn(sb,
548 			   "error %d recovering data block (ino=%lu, block-offset=%llu)",
549 			   err, (unsigned long)rb->ino,
550 			   (unsigned long long)rb->blkoff);
551 		if (!err2)
552 			err2 = err;
553  next:
554 		iput(inode); /* iput(NULL) is just ignored */
555 		list_del_init(&rb->list);
556 		kfree(rb);
557 	}
558 	return err2;
559 }
560 
561 /**
562  * nilfs_do_roll_forward - salvage logical segments newer than the latest
563  * checkpoint
564  * @nilfs: nilfs object
565  * @sb: super block instance
566  * @root: NILFS root instance
567  * @ri: pointer to a nilfs_recovery_info
568  */
569 static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
570 				 struct super_block *sb,
571 				 struct nilfs_root *root,
572 				 struct nilfs_recovery_info *ri)
573 {
574 	struct buffer_head *bh_sum = NULL;
575 	struct nilfs_segment_summary *sum = NULL;
576 	sector_t pseg_start;
577 	sector_t seg_start, seg_end;  /* Starting/ending DBN of full segment */
578 	unsigned long nsalvaged_blocks = 0;
579 	unsigned int flags;
580 	u64 seg_seq;
581 	__u64 segnum, nextnum = 0;
582 	int empty_seg = 0;
583 	int err = 0, ret;
584 	LIST_HEAD(dsync_blocks);  /* list of data blocks to be recovered */
585 	enum {
586 		RF_INIT_ST,
587 		RF_DSYNC_ST,   /* scanning data-sync segments */
588 	};
589 	int state = RF_INIT_ST;
590 
591 	pseg_start = ri->ri_lsegs_start;
592 	seg_seq = ri->ri_lsegs_start_seq;
593 	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
594 	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
595 
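	/*
	 * Scan the logs that follow the latest super root, starting at the
	 * first log of an uncommitted data-sync transaction.  While in
	 * RF_DSYNC_ST, data blocks of the transaction are collected on
	 * dsync_blocks; when a log carrying NILFS_SS_LOGEND is reached,
	 * the collected blocks are written back and the state resets.
	 */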
596 	while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
597 		brelse(bh_sum);
598 		bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
599 		if (!bh_sum) {
600 			err = -EIO;
601 			goto failed;
602 		}
603 
604 		ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
605 		if (ret) {
606 			if (ret == NILFS_SEG_FAIL_IO) {
607 				err = -EIO;
608 				goto failed;
609 			}
610 			goto strayed;
611 		}
612 
613 		flags = le16_to_cpu(sum->ss_flags);
614 		if (flags & NILFS_SS_SR)
615 			goto confused;
616 
617 		/* Found a valid partial segment; do recovery actions */
618 		nextnum = nilfs_get_segnum_of_block(nilfs,
619 						    le64_to_cpu(sum->ss_next));
620 		empty_seg = 0;
621 		nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
622 		if (!(flags & NILFS_SS_GC))
623 			nilfs->ns_nongc_ctime = nilfs->ns_ctime;
624 
625 		switch (state) {
626 		case RF_INIT_ST:
627 			if (!(flags & NILFS_SS_LOGBGN) ||
628 			    !(flags & NILFS_SS_SYNDT))
629 				goto try_next_pseg;
630 			state = RF_DSYNC_ST;
631 			fallthrough;
632 		case RF_DSYNC_ST:
633 			if (!(flags & NILFS_SS_SYNDT))
634 				goto confused;
635 
636 			err = nilfs_scan_dsync_log(nilfs, pseg_start, sum,
637 						   &dsync_blocks);
638 			if (unlikely(err))
639 				goto failed;
640 			if (flags & NILFS_SS_LOGEND) {
641 				err = nilfs_recover_dsync_blocks(
642 					nilfs, sb, root, &dsync_blocks,
643 					&nsalvaged_blocks);
644 				if (unlikely(err))
645 					goto failed;
646 				state = RF_INIT_ST;
647 			}
648 			break; /* Continue below at try_next_pseg */
649 		}
650 
651  try_next_pseg:
652 		if (pseg_start == ri->ri_lsegs_end)
653 			break;
654 		pseg_start += le32_to_cpu(sum->ss_nblocks);
655 		if (pseg_start < seg_end)
656 			continue;
657 		goto feed_segment;
658 
659  strayed:
660 		if (pseg_start == ri->ri_lsegs_end)
661 			break;
662 
663  feed_segment:
664 		/* Move on to the next full segment */
665 		if (empty_seg++)
666 			break;
667 		seg_seq++;
668 		segnum = nextnum;
669 		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
670 		pseg_start = seg_start;
671 	}
672 
673 	if (nsalvaged_blocks) {
674 		nilfs_info(sb, "salvaged %lu blocks", nsalvaged_blocks);
675 		ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
676 	}
677  out:
678 	brelse(bh_sum);
679 	dispose_recovery_list(&dsync_blocks);
680 	return err;
681 
682  confused:
683 	err = -EINVAL;
684  failed:
685 	nilfs_err(sb,
686 		  "error %d roll-forwarding partial segment at blocknr = %llu",
687 		  err, (unsigned long long)pseg_start);
688 	goto out;
689 }
690 
691 static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
692 				      struct nilfs_recovery_info *ri)
693 {
694 	struct buffer_head *bh;
695 	int err;
696 
697 	if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
698 	    nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
699 		return;
700 
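	/*
	 * The first salvaged log shares its full segment with the super
	 * root, so that segment was not scrapped during recovery.  Zero out
	 * the log's summary header and flush it so the stale log can no
	 * longer be recognized as a valid log.
	 */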
701 	bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize);
702 	if (WARN_ON(!bh))
703 		return;  /* should never happen */
704 
705 	lock_buffer(bh);
706 	memset(bh->b_data, 0, bh->b_size);
707 	set_buffer_uptodate(bh);
708 	set_buffer_dirty(bh);
709 	unlock_buffer(bh);
710 
711 	err = sync_dirty_buffer(bh);
712 	if (unlikely(err))
713 		nilfs_warn(nilfs->ns_sb,
714 			   "buffer sync write failed during post-cleaning of recovery.");
715 	brelse(bh);
716 }
717 
718 /**
719  * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
720  * @nilfs: nilfs object
721  * @sb: super block instance
722  * @ri: pointer to a nilfs_recovery_info struct to store search results.
723  *
724  * Return Value: On success, 0 is returned.  On error, one of the following
725  * negative error codes is returned.
726  *
727  * %-EINVAL - Inconsistent filesystem state.
728  *
729  * %-EIO - I/O error
730  *
731  * %-ENOSPC - No space left on device (only in a panic state).
732  *
733  * %-ERESTARTSYS - Interrupted.
734  *
735  * %-ENOMEM - Insufficient memory available.
736  */
737 int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
738 			      struct super_block *sb,
739 			      struct nilfs_recovery_info *ri)
740 {
741 	struct nilfs_root *root;
742 	int err;
743 
744 	if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
745 		return 0;
746 
747 	err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root);
748 	if (unlikely(err)) {
749 		nilfs_err(sb, "error %d loading the latest checkpoint", err);
750 		return err;
751 	}
752 
753 	err = nilfs_do_roll_forward(nilfs, sb, root, ri);
754 	if (unlikely(err))
755 		goto failed;
756 
757 	if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
758 		err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri);
759 		if (unlikely(err)) {
760 			nilfs_err(sb, "error %d preparing segment for recovery",
761 				  err);
762 			goto failed;
763 		}
764 
765 		err = nilfs_attach_log_writer(sb, root);
766 		if (unlikely(err))
767 			goto failed;
768 
769 		set_nilfs_discontinued(nilfs);
770 		err = nilfs_construct_segment(sb);
771 		nilfs_detach_log_writer(sb);
772 
773 		if (unlikely(err)) {
774 			nilfs_err(sb, "error %d writing segment for recovery",
775 				  err);
776 			goto failed;
777 		}
778 
779 		nilfs_finish_roll_forward(nilfs, ri);
780 	}
781 
782  failed:
783 	nilfs_put_root(root);
784 	return err;
785 }
786 
787 /**
788  * nilfs_search_super_root - search the latest valid super root
789  * @nilfs: the_nilfs
790  * @ri: pointer to a nilfs_recovery_info struct to store search results.
791  *
792  * nilfs_search_super_root() looks for the latest super-root from a partial
793  * segment pointed to by the superblock.  It sets up struct the_nilfs through
794  * this search.  It fills the nilfs_recovery_info (ri) required for recovery.
795  *
796  * Return Value: On success, 0 is returned.  On error, one of the following
797  * negative error codes is returned.
798  *
799  * %-EINVAL - No valid segment found
800  *
801  * %-EIO - I/O error
802  *
803  * %-ENOMEM - Insufficient memory available.
804  */
805 int nilfs_search_super_root(struct the_nilfs *nilfs,
806 			    struct nilfs_recovery_info *ri)
807 {
808 	struct buffer_head *bh_sum = NULL;
809 	struct nilfs_segment_summary *sum = NULL;
810 	sector_t pseg_start, pseg_end, sr_pseg_start = 0;
811 	sector_t seg_start, seg_end; /* range of full segment (block number) */
812 	sector_t b, end;
813 	unsigned long nblocks;
814 	unsigned int flags;
815 	u64 seg_seq;
816 	__u64 segnum, nextnum = 0;
817 	__u64 cno;
818 	LIST_HEAD(segments);
819 	int empty_seg = 0, scan_newer = 0;
820 	int ret;
821 
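	/*
	 * Scan forward from the last position recorded in the super block.
	 * If the file system was not cleanly unmounted, keep scanning past
	 * the recorded super root (scan_newer) to pick up a newer super
	 * root and note any uncommitted data-sync logs that follow it.
	 */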
822 	pseg_start = nilfs->ns_last_pseg;
823 	seg_seq = nilfs->ns_last_seq;
824 	cno = nilfs->ns_last_cno;
825 	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
826 
827 	/* Calculate range of segment */
828 	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
829 
830 	/* Read ahead segment */
831 	b = seg_start;
832 	while (b <= seg_end)
833 		__breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize);
834 
835 	for (;;) {
836 		brelse(bh_sum);
837 		ret = NILFS_SEG_FAIL_IO;
838 		bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
839 		if (!bh_sum)
840 			goto failed;
841 
842 		ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
843 		if (ret) {
844 			if (ret == NILFS_SEG_FAIL_IO)
845 				goto failed;
846 			goto strayed;
847 		}
848 
849 		nblocks = le32_to_cpu(sum->ss_nblocks);
850 		pseg_end = pseg_start + nblocks - 1;
851 		if (unlikely(pseg_end > seg_end)) {
852 			ret = NILFS_SEG_FAIL_CONSISTENCY;
853 			goto strayed;
854 		}
855 
856 		/* A valid partial segment */
857 		ri->ri_pseg_start = pseg_start;
858 		ri->ri_seq = seg_seq;
859 		ri->ri_segnum = segnum;
860 		nextnum = nilfs_get_segnum_of_block(nilfs,
861 						    le64_to_cpu(sum->ss_next));
862 		ri->ri_nextnum = nextnum;
863 		empty_seg = 0;
864 
865 		flags = le16_to_cpu(sum->ss_flags);
866 		if (!(flags & NILFS_SS_SR) && !scan_newer) {
867 			/*
868 			 * This will never happen because a superblock
869 			 * (last_segment) always points to a pseg with
870 			 * a super root.
871 			 */
872 			ret = NILFS_SEG_FAIL_CONSISTENCY;
873 			goto failed;
874 		}
875 
876 		if (pseg_start == seg_start) {
877 			nilfs_get_segment_range(nilfs, nextnum, &b, &end);
878 			while (b <= end)
879 				__breadahead(nilfs->ns_bdev, b++,
880 					     nilfs->ns_blocksize);
881 		}
882 		if (!(flags & NILFS_SS_SR)) {
883 			if (!ri->ri_lsegs_start && (flags & NILFS_SS_LOGBGN)) {
884 				ri->ri_lsegs_start = pseg_start;
885 				ri->ri_lsegs_start_seq = seg_seq;
886 			}
887 			if (flags & NILFS_SS_LOGEND)
888 				ri->ri_lsegs_end = pseg_start;
889 			goto try_next_pseg;
890 		}
891 
892 		/* A valid super root was found. */
893 		ri->ri_cno = cno++;
894 		ri->ri_super_root = pseg_end;
895 		ri->ri_lsegs_start = ri->ri_lsegs_end = 0;
896 
897 		nilfs_dispose_segment_list(&segments);
898 		sr_pseg_start = pseg_start;
899 		nilfs->ns_pseg_offset = pseg_start + nblocks - seg_start;
900 		nilfs->ns_seg_seq = seg_seq;
901 		nilfs->ns_segnum = segnum;
902 		nilfs->ns_cno = cno;  /* nilfs->ns_cno = ri->ri_cno + 1 */
903 		nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
904 		nilfs->ns_nextnum = nextnum;
905 
906 		if (scan_newer)
907 			ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
908 		else {
909 			if (nilfs->ns_mount_state & NILFS_VALID_FS)
910 				goto super_root_found;
911 			scan_newer = 1;
912 		}
913 
914  try_next_pseg:
915 		/* Still on course, or an inconsistent state was found */
916 		pseg_start += nblocks;
917 		if (pseg_start < seg_end)
918 			continue;
919 		goto feed_segment;
920 
921  strayed:
922 		/* Off the trail */
923 		if (!scan_newer)
924 			/*
925 			 * This can happen if a checkpoint was written without
926 			 * barriers, or as a result of an I/O failure.
927 			 */
928 			goto failed;
929 
930  feed_segment:
931 		/* Move on to the next full segment */
932 		if (empty_seg++)
933 			goto super_root_found; /* found a valid super root */
934 
935 		ret = nilfs_segment_list_add(&segments, segnum);
936 		if (unlikely(ret))
937 			goto failed;
938 
939 		seg_seq++;
940 		segnum = nextnum;
941 		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
942 		pseg_start = seg_start;
943 	}
944 
945  super_root_found:
946 	/* Updating pointers relating to the latest checkpoint */
947 	brelse(bh_sum);
948 	list_splice_tail(&segments, &ri->ri_used_segments);
949 	nilfs->ns_last_pseg = sr_pseg_start;
950 	nilfs->ns_last_seq = nilfs->ns_seg_seq;
951 	nilfs->ns_last_cno = ri->ri_cno;
952 	return 0;
953 
954  failed:
955 	brelse(bh_sum);
956 	nilfs_dispose_segment_list(&segments);
957 	return ret < 0 ? ret : nilfs_warn_segment_error(nilfs->ns_sb, ret);
958 }
959