xref: /linux/fs/xfs/xfs_log_recover.c (revision 511bd85485c676744a4c3a22f26965926891b131)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_inode.h"
17 #include "xfs_trans.h"
18 #include "xfs_log.h"
19 #include "xfs_log_priv.h"
20 #include "xfs_log_recover.h"
21 #include "xfs_trans_priv.h"
22 #include "xfs_alloc.h"
23 #include "xfs_ialloc.h"
24 #include "xfs_trace.h"
25 #include "xfs_icache.h"
26 #include "xfs_error.h"
27 #include "xfs_buf_item.h"
28 
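/* Integer average of two block numbers, used by the binary search in xlog_find_cycle_start(). */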
29 #define BLK_AVG(blk1, blk2)	(((blk1) + (blk2)) >> 1)
30 
31 STATIC int
32 xlog_find_zeroed(
33 	struct xlog	*,
34 	xfs_daddr_t	*);
35 STATIC int
36 xlog_clear_stale_blocks(
37 	struct xlog	*,
38 	xfs_lsn_t);
39 #if defined(DEBUG)
40 STATIC void
41 xlog_recover_check_summary(
42 	struct xlog *);
43 #else
44 #define	xlog_recover_check_summary(log)
45 #endif
46 STATIC int
47 xlog_do_recovery_pass(
48 	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
49 
50 /*
51  * Sector aligned buffer routines for buffer create/read/write/access
52  */
53 
54 /*
55  * Verify the log-relative block number and length in basic blocks are valid for
56  * an operation involving the given XFS log buffer. Returns true if the fields
57  * are valid, false otherwise.
58  */
59 static inline bool
60 xlog_verify_bno(
61 	struct xlog	*log,
62 	xfs_daddr_t	blk_no,
63 	int		bbcount)
64 {
65 	if (blk_no < 0 || blk_no >= log->l_logBBsize)
66 		return false;
67 	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
68 		return false;
69 	return true;
70 }
71 
72 /*
73  * Allocate a buffer to hold log data.  The buffer needs to be able to map to
74  * a range of nbblks basic blocks at any valid offset within the log.
75  */
76 static char *
77 xlog_alloc_buffer(
78 	struct xlog	*log,
79 	int		nbblks)
80 {
81 	int align_mask = xfs_buftarg_dma_alignment(log->l_targ);
82 
83 	/*
84 	 * Pass log block 0 since we don't have an addr yet; the buffer will be
85 	 * verified on read.
86 	 */
87 	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
88 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
89 			nbblks);
90 		return NULL;
91 	}
92 
93 	/*
94 	 * We do log I/O in units of log sectors (a power-of-2 multiple of the
95 	 * basic block size), so we round up the requested size to accommodate
96 	 * the basic blocks required for complete log sectors.
97 	 *
98 	 * In addition, the buffer may be used for a non-sector-aligned block
99 	 * offset, in which case an I/O of the requested size could extend
100 	 * beyond the end of the buffer.  If the requested size is only 1 basic
101 	 * block it will never straddle a sector boundary, so this won't be an
102 	 * issue.  Nor will this be a problem if the log I/O is done in basic
103 	 * blocks (sector size 1).  But otherwise we extend the buffer by one
104 	 * extra log sector to ensure there's space to accommodate this
105 	 * possibility.
106 	 */
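	/*
	 * For example, with 4k log sectors (l_sectBBsize == 8) a request for
	 * 2 basic blocks becomes 2 + 8 = 10 and is rounded up to 16 basic
	 * blocks, which also leaves room for the sector alignment applied in
	 * xlog_do_io().
	 */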
107 	if (nbblks > 1 && log->l_sectBBsize > 1)
108 		nbblks += log->l_sectBBsize;
109 	nbblks = round_up(nbblks, log->l_sectBBsize);
110 	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
111 }
112 
113 /*
114  * Return the address of the start of the given block number's data
115  * in a log buffer.  The buffer covers a log sector-aligned region.
116  */
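/* e.g. with l_sectBBsize == 8, blk_no 11 maps to byte offset BBTOB(3) = 1536. */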
117 static inline unsigned int
118 xlog_align(
119 	struct xlog	*log,
120 	xfs_daddr_t	blk_no)
121 {
122 	return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
123 }
124 
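/*
 * Issue a synchronous read or write of nbblks basic blocks at blk_no.  The
 * range is widened to log sector alignment before the I/O is issued, so the
 * data buffer must have been allocated with xlog_alloc_buffer().  Returns 0
 * or a negative errno; I/O failures are reported unless the filesystem has
 * already been shut down.
 */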
125 static int
126 xlog_do_io(
127 	struct xlog		*log,
128 	xfs_daddr_t		blk_no,
129 	unsigned int		nbblks,
130 	char			*data,
131 	unsigned int		op)
132 {
133 	int			error;
134 
135 	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
136 		xfs_warn(log->l_mp,
137 			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
138 			 blk_no, nbblks);
139 		return -EFSCORRUPTED;
140 	}
141 
142 	blk_no = round_down(blk_no, log->l_sectBBsize);
143 	nbblks = round_up(nbblks, log->l_sectBBsize);
144 	ASSERT(nbblks > 0);
145 
146 	error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
147 			BBTOB(nbblks), data, op);
148 	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
149 		xfs_alert(log->l_mp,
150 			  "log recovery %s I/O error at daddr 0x%llx len %d error %d",
151 			  op == REQ_OP_WRITE ? "write" : "read",
152 			  blk_no, nbblks, error);
153 	}
154 	return error;
155 }
156 
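/*
 * Thin wrappers around xlog_do_io().  xlog_bread() also returns, via *offset,
 * a pointer to the start of blk_no's data within the sector-aligned buffer.
 */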
157 STATIC int
158 xlog_bread_noalign(
159 	struct xlog	*log,
160 	xfs_daddr_t	blk_no,
161 	int		nbblks,
162 	char		*data)
163 {
164 	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
165 }
166 
167 STATIC int
168 xlog_bread(
169 	struct xlog	*log,
170 	xfs_daddr_t	blk_no,
171 	int		nbblks,
172 	char		*data,
173 	char		**offset)
174 {
175 	int		error;
176 
177 	error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
178 	if (!error)
179 		*offset = data + xlog_align(log, blk_no);
180 	return error;
181 }
182 
183 STATIC int
184 xlog_bwrite(
185 	struct xlog	*log,
186 	xfs_daddr_t	blk_no,
187 	int		nbblks,
188 	char		*data)
189 {
190 	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
191 }
192 
193 #ifdef DEBUG
194 /*
195  * dump debug superblock and log record information
196  */
197 STATIC void
198 xlog_header_check_dump(
199 	xfs_mount_t		*mp,
200 	xlog_rec_header_t	*head)
201 {
202 	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
203 		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
204 	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
205 		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
206 }
207 #else
208 #define xlog_header_check_dump(mp, head)
209 #endif
210 
211 /*
212  * check log record header for recovery
213  */
214 STATIC int
215 xlog_header_check_recover(
216 	xfs_mount_t		*mp,
217 	xlog_rec_header_t	*head)
218 {
219 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
220 
221 	/*
222 	 * IRIX doesn't write the h_fmt field and leaves it zeroed
223 	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
224 	 * a dirty log created in IRIX.
225 	 */
226 	if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
227 		xfs_warn(mp,
228 	"dirty log written in incompatible format - can't recover");
229 		xlog_header_check_dump(mp, head);
230 		return -EFSCORRUPTED;
231 	}
232 	if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
233 					   &head->h_fs_uuid))) {
234 		xfs_warn(mp,
235 	"dirty log entry has mismatched uuid - can't recover");
236 		xlog_header_check_dump(mp, head);
237 		return -EFSCORRUPTED;
238 	}
239 	return 0;
240 }
241 
242 /*
243  * read the head block of the log and check the header
244  */
245 STATIC int
246 xlog_header_check_mount(
247 	xfs_mount_t		*mp,
248 	xlog_rec_header_t	*head)
249 {
250 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
251 
252 	if (uuid_is_null(&head->h_fs_uuid)) {
253 		/*
254 		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
255 		 * h_fs_uuid is null, we assume this log was last mounted
256 		 * by IRIX and continue.
257 		 */
258 		xfs_warn(mp, "null uuid in log - IRIX style log");
259 	} else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
260 						  &head->h_fs_uuid))) {
261 		xfs_warn(mp, "log has mismatched uuid - can't recover");
262 		xlog_header_check_dump(mp, head);
263 		return -EFSCORRUPTED;
264 	}
265 	return 0;
266 }
267 
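/*
 * I/O completion handler for buffers written during recovery.  Errors are not
 * retried; they shut the filesystem down.  Any buf log item attached to update
 * the metadata LSN is released before the buffer I/O is finished.
 */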
268 void
269 xlog_recover_iodone(
270 	struct xfs_buf	*bp)
271 {
272 	if (bp->b_error) {
273 		/*
274 		 * We're not going to bother about retrying
275 		 * this during recovery. One strike!
276 		 */
277 		if (!XFS_FORCED_SHUTDOWN(bp->b_mount)) {
278 			xfs_buf_ioerror_alert(bp, __this_address);
279 			xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
280 		}
281 	}
282 
283 	/*
284 	 * On v5 supers, a bli could be attached to update the metadata LSN.
285 	 * Clean it up.
286 	 */
287 	if (bp->b_log_item)
288 		xfs_buf_item_relse(bp);
289 	ASSERT(bp->b_log_item == NULL);
290 	bp->b_flags &= ~_XBF_LOGRECOVERY;
291 	xfs_buf_ioend_finish(bp);
292 }
293 
294 /*
295  * This routine finds (to an approximation) the first block in the physical
296  * log which contains the given cycle.  It uses a binary search algorithm.
297  * Note that the algorithm cannot be exact because the on-disk log is not
298  * guaranteed to be consistent.
299  */
300 STATIC int
301 xlog_find_cycle_start(
302 	struct xlog	*log,
303 	char		*buffer,
304 	xfs_daddr_t	first_blk,
305 	xfs_daddr_t	*last_blk,
306 	uint		cycle)
307 {
308 	char		*offset;
309 	xfs_daddr_t	mid_blk;
310 	xfs_daddr_t	end_blk;
311 	uint		mid_cycle;
312 	int		error;
313 
314 	end_blk = *last_blk;
315 	mid_blk = BLK_AVG(first_blk, end_blk);
316 	while (mid_blk != first_blk && mid_blk != end_blk) {
317 		error = xlog_bread(log, mid_blk, 1, buffer, &offset);
318 		if (error)
319 			return error;
320 		mid_cycle = xlog_get_cycle(offset);
321 		if (mid_cycle == cycle)
322 			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
323 		else
324 			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
325 		mid_blk = BLK_AVG(first_blk, end_blk);
326 	}
327 	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
328 	       (mid_blk == end_blk && mid_blk-1 == first_blk));
329 
330 	*last_blk = end_blk;
331 
332 	return 0;
333 }
334 
335 /*
336  * Check that a range of blocks does not contain stop_on_cycle_no.
337  * Fill in *new_blk with the block offset where such a block is
338  * found, or with -1 (an invalid block number) if there is no such
339  * block in the range.  The scan needs to occur from front to back
340  * and the pointer into the region must be updated since a later
341  * routine will need to perform another test.
342  */
343 STATIC int
344 xlog_find_verify_cycle(
345 	struct xlog	*log,
346 	xfs_daddr_t	start_blk,
347 	int		nbblks,
348 	uint		stop_on_cycle_no,
349 	xfs_daddr_t	*new_blk)
350 {
351 	xfs_daddr_t	i, j;
352 	uint		cycle;
353 	char		*buffer;
354 	xfs_daddr_t	bufblks;
355 	char		*buf = NULL;
356 	int		error = 0;
357 
358 	/*
359 	 * Greedily allocate a buffer big enough to handle the full
360 	 * range of basic blocks we'll be examining.  If that fails,
361 	 * try a smaller size.  We need to be able to read at least
362 	 * a log sector, or we're out of luck.
363 	 */
364 	bufblks = 1 << ffs(nbblks);
365 	while (bufblks > log->l_logBBsize)
366 		bufblks >>= 1;
367 	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
368 		bufblks >>= 1;
369 		if (bufblks < log->l_sectBBsize)
370 			return -ENOMEM;
371 	}
372 
373 	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
374 		int	bcount;
375 
376 		bcount = min(bufblks, (start_blk + nbblks - i));
377 
378 		error = xlog_bread(log, i, bcount, buffer, &buf);
379 		if (error)
380 			goto out;
381 
382 		for (j = 0; j < bcount; j++) {
383 			cycle = xlog_get_cycle(buf);
384 			if (cycle == stop_on_cycle_no) {
385 				*new_blk = i+j;
386 				goto out;
387 			}
388 
389 			buf += BBSIZE;
390 		}
391 	}
392 
393 	*new_blk = -1;
394 
395 out:
396 	kmem_free(buffer);
397 	return error;
398 }
399 
400 /*
401  * Potentially backup over partial log record write.
402  *
403  * In the typical case, last_blk is the number of the block directly after
404  * a good log record.  Therefore, we subtract one to get the block number
405  * of the last block in the given buffer.  extra_bblks contains the number
406  * of blocks we would have read on a previous read.  This happens when the
407  * last log record is split over the end of the physical log.
408  *
409  * extra_bblks is the number of blocks potentially verified on a previous
410  * call to this routine.
411  */
412 STATIC int
413 xlog_find_verify_log_record(
414 	struct xlog		*log,
415 	xfs_daddr_t		start_blk,
416 	xfs_daddr_t		*last_blk,
417 	int			extra_bblks)
418 {
419 	xfs_daddr_t		i;
420 	char			*buffer;
421 	char			*offset = NULL;
422 	xlog_rec_header_t	*head = NULL;
423 	int			error = 0;
424 	int			smallmem = 0;
425 	int			num_blks = *last_blk - start_blk;
426 	int			xhdrs;
427 
428 	ASSERT(start_blk != 0 || *last_blk != start_blk);
429 
430 	buffer = xlog_alloc_buffer(log, num_blks);
431 	if (!buffer) {
432 		buffer = xlog_alloc_buffer(log, 1);
433 		if (!buffer)
434 			return -ENOMEM;
435 		smallmem = 1;
436 	} else {
437 		error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
438 		if (error)
439 			goto out;
440 		offset += ((num_blks - 1) << BBSHIFT);
441 	}
442 
443 	for (i = (*last_blk) - 1; i >= 0; i--) {
444 		if (i < start_blk) {
445 			/* valid log record not found */
446 			xfs_warn(log->l_mp,
447 		"Log inconsistent (didn't find previous header)");
448 			ASSERT(0);
449 			error = -EFSCORRUPTED;
450 			goto out;
451 		}
452 
453 		if (smallmem) {
454 			error = xlog_bread(log, i, 1, buffer, &offset);
455 			if (error)
456 				goto out;
457 		}
458 
459 		head = (xlog_rec_header_t *)offset;
460 
461 		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
462 			break;
463 
464 		if (!smallmem)
465 			offset -= BBSIZE;
466 	}
467 
468 	/*
469 	 * We hit the beginning of the physical log & still no header.  Return
470 	 * to caller.  If the caller can handle a return of 1, then this routine
471 	 * will be called again for the end of the physical log.
472 	 */
473 	if (i == -1) {
474 		error = 1;
475 		goto out;
476 	}
477 
478 	/*
479 	 * We have the final block of the good log (the first block
480 	 * of the log record _before_ the head).  So we check the uuid.
481 	 */
482 	if ((error = xlog_header_check_mount(log->l_mp, head)))
483 		goto out;
484 
485 	/*
486 	 * We may have found a log record header before we expected one.
487 	 * last_blk will be the 1st block # with a given cycle #.  We may end
488 	 * up reading an entire log record.  In this case, we don't want to
489 	 * reset last_blk.  Only when last_blk points in the middle of a log
490 	 * record do we update last_blk.
491 	 */
492 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
493 		uint	h_size = be32_to_cpu(head->h_size);
494 
495 		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
496 		if (h_size % XLOG_HEADER_CYCLE_SIZE)
497 			xhdrs++;
498 	} else {
499 		xhdrs = 1;
500 	}
501 
502 	if (*last_blk - i + extra_bblks !=
503 	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
504 		*last_blk = i;
505 
506 out:
507 	kmem_free(buffer);
508 	return error;
509 }
510 
511 /*
512  * Head is defined to be the point of the log where the next log write
513  * could go.  This means that incomplete LR writes at the end are
514  * eliminated when calculating the head.  We aren't guaranteed that previous
515  * LRs have complete transactions.  We only know that a cycle number of the
516  * current cycle number - 1 won't be present in the log if we start writing
517  * from our current block number.
518  *
519  * last_blk contains the block number of the first block with a given
520  * cycle number.
521  *
522  * Return: zero if normal, non-zero if error.
523  */
524 STATIC int
525 xlog_find_head(
526 	struct xlog	*log,
527 	xfs_daddr_t	*return_head_blk)
528 {
529 	char		*buffer;
530 	char		*offset;
531 	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
532 	int		num_scan_bblks;
533 	uint		first_half_cycle, last_half_cycle;
534 	uint		stop_on_cycle;
535 	int		error, log_bbnum = log->l_logBBsize;
536 
537 	/* Is the end of the log device zeroed? */
538 	error = xlog_find_zeroed(log, &first_blk);
539 	if (error < 0) {
540 		xfs_warn(log->l_mp, "empty log check failed");
541 		return error;
542 	}
543 	if (error == 1) {
544 		*return_head_blk = first_blk;
545 
546 		/* Is the whole lot zeroed? */
547 		if (!first_blk) {
548 			/* Linux XFS shouldn't generate totally zeroed logs -
549 			 * mkfs etc write a dummy unmount record to a fresh
550 			 * log so we can store the uuid in there
551 			 */
552 			xfs_warn(log->l_mp, "totally zeroed log");
553 		}
554 
555 		return 0;
556 	}
557 
558 	first_blk = 0;			/* get cycle # of 1st block */
559 	buffer = xlog_alloc_buffer(log, 1);
560 	if (!buffer)
561 		return -ENOMEM;
562 
563 	error = xlog_bread(log, 0, 1, buffer, &offset);
564 	if (error)
565 		goto out_free_buffer;
566 
567 	first_half_cycle = xlog_get_cycle(offset);
568 
569 	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
570 	error = xlog_bread(log, last_blk, 1, buffer, &offset);
571 	if (error)
572 		goto out_free_buffer;
573 
574 	last_half_cycle = xlog_get_cycle(offset);
575 	ASSERT(last_half_cycle != 0);
576 
577 	/*
578 	 * If the 1st half cycle number is equal to the last half cycle number,
579 	 * then the entire log is stamped with the same cycle number.  In this
580 	 * case, head_blk can't be set to zero (which makes sense).  The below
581 	 * math doesn't work out properly with head_blk equal to zero.  Instead,
582 	 * we set it to log_bbnum which is an invalid block number, but this
583 	 * value makes the math correct.  If head_blk doesn't change through
584 	 * all the tests below, *head_blk is set to zero at the very end rather
585 	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
586 	 * in a circular file.
587 	 */
588 	if (first_half_cycle == last_half_cycle) {
589 		/*
590 		 * In this case we believe that the entire log should have
591 		 * cycle number last_half_cycle.  We need to scan backwards
592 		 * from the end verifying that there are no holes still
593 		 * containing last_half_cycle - 1.  If we find such a hole,
594 		 * then the start of that hole will be the new head.  The
595 		 * simple case looks like
596 		 *        x | x ... | x - 1 | x
597 		 * Another case that fits this picture would be
598 		 *        x | x + 1 | x ... | x
599 		 * In this case the head really is somewhere at the end of the
600 		 * log, as one of the latest writes at the beginning was
601 		 * incomplete.
602 		 * One more case is
603 		 *        x | x + 1 | x ... | x - 1 | x
604 		 * This is really the combination of the above two cases, and
605 		 * the head has to end up at the start of the x-1 hole at the
606 		 * end of the log.
607 		 *
608 		 * In the 256k log case, we will read from the beginning to the
609 		 * end of the log and search for cycle numbers equal to x-1.
610 		 * We don't worry about the x+1 blocks that we encounter,
611 		 * because we know that they cannot be the head since the log
612 		 * started with x.
613 		 */
614 		head_blk = log_bbnum;
615 		stop_on_cycle = last_half_cycle - 1;
616 	} else {
617 		/*
618 		 * In this case we want to find the first block with cycle
619 		 * number matching last_half_cycle.  We expect the log to be
620 		 * some variation on
621 		 *        x + 1 ... | x ... | x
622 		 * The first block with cycle number x (last_half_cycle) will
623 		 * be where the new head belongs.  First we do a binary search
624 		 * for the first occurrence of last_half_cycle.  The binary
625 		 * search may not be totally accurate, so then we scan back
626 		 * from there looking for occurrences of last_half_cycle before
627 		 * us.  If that backwards scan wraps around the beginning of
628 		 * the log, then we look for occurrences of last_half_cycle - 1
629 		 * at the end of the log.  The cases we're looking for look
630 		 * like
631 		 *                               v binary search stopped here
632 		 *        x + 1 ... | x | x + 1 | x ... | x
633 		 *                   ^ but we want to locate this spot
634 		 * or
635 		 *        <---------> less than scan distance
636 		 *        x + 1 ... | x ... | x - 1 | x
637 		 *                           ^ we want to locate this spot
638 		 */
639 		stop_on_cycle = last_half_cycle;
640 		error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
641 				last_half_cycle);
642 		if (error)
643 			goto out_free_buffer;
644 	}
645 
646 	/*
647 	 * Now validate the answer.  Scan back some number of maximum possible
648 	 * blocks and make sure each one has the expected cycle number.  The
649 	 * maximum is determined by the total possible amount of buffering
650 	 * in the in-core log.  The following number can be made tighter if
651 	 * we actually look at the block size of the filesystem.
652 	 */
653 	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
654 	if (head_blk >= num_scan_bblks) {
655 		/*
656 		 * We are guaranteed that the entire check can be performed
657 		 * in one buffer.
658 		 */
659 		start_blk = head_blk - num_scan_bblks;
660 		if ((error = xlog_find_verify_cycle(log,
661 						start_blk, num_scan_bblks,
662 						stop_on_cycle, &new_blk)))
663 			goto out_free_buffer;
664 		if (new_blk != -1)
665 			head_blk = new_blk;
666 	} else {		/* need to read 2 parts of log */
667 		/*
668 		 * We are going to scan backwards in the log in two parts.
669 		 * First we scan the physical end of the log.  In this part
670 		 * of the log, we are looking for blocks with cycle number
671 		 * last_half_cycle - 1.
672 		 * If we find one, then we know that the log starts there, as
673 		 * we've found a hole that didn't get written in going around
674 		 * the end of the physical log.  The simple case for this is
675 		 *        x + 1 ... | x ... | x - 1 | x
676 		 *        <---------> less than scan distance
677 		 * If all of the blocks at the end of the log have cycle number
678 		 * last_half_cycle, then we check the blocks at the start of
679 		 * the log looking for occurrences of last_half_cycle.  If we
680 		 * find one, then our current estimate for the location of the
681 		 * first occurrence of last_half_cycle is wrong and we move
682 		 * back to the hole we've found.  This case looks like
683 		 *        x + 1 ... | x | x + 1 | x ...
684 		 *                               ^ binary search stopped here
685 		 * Another case we need to handle that only occurs in 256k
686 		 * logs is
687 		 *        x + 1 ... | x ... | x+1 | x ...
688 		 *                   ^ binary search stops here
689 		 * In a 256k log, the scan at the end of the log will see the
690 		 * x + 1 blocks.  We need to skip past those since that is
691 		 * certainly not the head of the log.  By searching for
692 		 * last_half_cycle-1 we accomplish that.
693 		 */
694 		ASSERT(head_blk <= INT_MAX &&
695 			(xfs_daddr_t) num_scan_bblks >= head_blk);
696 		start_blk = log_bbnum - (num_scan_bblks - head_blk);
697 		if ((error = xlog_find_verify_cycle(log, start_blk,
698 					num_scan_bblks - (int)head_blk,
699 					(stop_on_cycle - 1), &new_blk)))
700 			goto out_free_buffer;
701 		if (new_blk != -1) {
702 			head_blk = new_blk;
703 			goto validate_head;
704 		}
705 
706 		/*
707 		 * Scan beginning of log now.  The last part of the physical
708 		 * log is good.  This scan needs to verify that it doesn't find
709 		 * the last_half_cycle.
710 		 */
711 		start_blk = 0;
712 		ASSERT(head_blk <= INT_MAX);
713 		if ((error = xlog_find_verify_cycle(log,
714 					start_blk, (int)head_blk,
715 					stop_on_cycle, &new_blk)))
716 			goto out_free_buffer;
717 		if (new_blk != -1)
718 			head_blk = new_blk;
719 	}
720 
721 validate_head:
722 	/*
723 	 * Now we need to make sure head_blk is not pointing to a block in
724 	 * the middle of a log record.
725 	 */
726 	num_scan_bblks = XLOG_REC_SHIFT(log);
727 	if (head_blk >= num_scan_bblks) {
728 		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
729 
730 		/* start ptr at last block ptr before head_blk */
731 		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
732 		if (error == 1)
733 			error = -EIO;
734 		if (error)
735 			goto out_free_buffer;
736 	} else {
737 		start_blk = 0;
738 		ASSERT(head_blk <= INT_MAX);
739 		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
740 		if (error < 0)
741 			goto out_free_buffer;
742 		if (error == 1) {
743 			/* We hit the beginning of the log during our search */
744 			start_blk = log_bbnum - (num_scan_bblks - head_blk);
745 			new_blk = log_bbnum;
746 			ASSERT(start_blk <= INT_MAX &&
747 				(xfs_daddr_t) log_bbnum-start_blk >= 0);
748 			ASSERT(head_blk <= INT_MAX);
749 			error = xlog_find_verify_log_record(log, start_blk,
750 							&new_blk, (int)head_blk);
751 			if (error == 1)
752 				error = -EIO;
753 			if (error)
754 				goto out_free_buffer;
755 			if (new_blk != log_bbnum)
756 				head_blk = new_blk;
757 		} else if (error)
758 			goto out_free_buffer;
759 	}
760 
761 	kmem_free(buffer);
762 	if (head_blk == log_bbnum)
763 		*return_head_blk = 0;
764 	else
765 		*return_head_blk = head_blk;
766 	/*
767 	 * When returning here, we have a good block number.  A bad block
768 	 * means that during a previous crash, we didn't have a clean break
769 	 * from cycle number N to cycle number N-1.  In this case, we need
770 	 * to find the first block with cycle number N-1.
771 	 */
772 	return 0;
773 
774 out_free_buffer:
775 	kmem_free(buffer);
776 	if (error)
777 		xfs_warn(log->l_mp, "failed to find log head");
778 	return error;
779 }
780 
781 /*
782  * Seek backwards in the log for log record headers.
783  *
784  * Given a starting log block, walk backwards until we find the provided number
785  * of records or hit the provided tail block. The return value is the number of
786  * records encountered or a negative error code. The log block and buffer
787  * pointer of the last record seen are returned in rblk and rhead respectively.
788  */
789 STATIC int
790 xlog_rseek_logrec_hdr(
791 	struct xlog		*log,
792 	xfs_daddr_t		head_blk,
793 	xfs_daddr_t		tail_blk,
794 	int			count,
795 	char			*buffer,
796 	xfs_daddr_t		*rblk,
797 	struct xlog_rec_header	**rhead,
798 	bool			*wrapped)
799 {
800 	int			i;
801 	int			error;
802 	int			found = 0;
803 	char			*offset = NULL;
804 	xfs_daddr_t		end_blk;
805 
806 	*wrapped = false;
807 
808 	/*
809 	 * Walk backwards from the head block until we hit the tail or the first
810 	 * block in the log.
811 	 */
812 	end_blk = head_blk > tail_blk ? tail_blk : 0;
813 	for (i = (int) head_blk - 1; i >= end_blk; i--) {
814 		error = xlog_bread(log, i, 1, buffer, &offset);
815 		if (error)
816 			goto out_error;
817 
818 		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
819 			*rblk = i;
820 			*rhead = (struct xlog_rec_header *) offset;
821 			if (++found == count)
822 				break;
823 		}
824 	}
825 
826 	/*
827 	 * If we haven't hit the tail block or the log record header count,
828 	 * start looking again from the end of the physical log. Note that
829 	 * callers can pass head == tail if the tail is not yet known.
830 	 */
831 	if (tail_blk >= head_blk && found != count) {
832 		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
833 			error = xlog_bread(log, i, 1, buffer, &offset);
834 			if (error)
835 				goto out_error;
836 
837 			if (*(__be32 *)offset ==
838 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
839 				*wrapped = true;
840 				*rblk = i;
841 				*rhead = (struct xlog_rec_header *) offset;
842 				if (++found == count)
843 					break;
844 			}
845 		}
846 	}
847 
848 	return found;
849 
850 out_error:
851 	return error;
852 }
853 
854 /*
855  * Seek forward in the log for log record headers.
856  *
857  * Given head and tail blocks, walk forward from the tail block until we find
858  * the provided number of records or hit the head block. The return value is the
859  * number of records encountered or a negative error code. The log block and
860  * buffer pointer of the last record seen are returned in rblk and rhead
861  * respectively.
862  */
863 STATIC int
864 xlog_seek_logrec_hdr(
865 	struct xlog		*log,
866 	xfs_daddr_t		head_blk,
867 	xfs_daddr_t		tail_blk,
868 	int			count,
869 	char			*buffer,
870 	xfs_daddr_t		*rblk,
871 	struct xlog_rec_header	**rhead,
872 	bool			*wrapped)
873 {
874 	int			i;
875 	int			error;
876 	int			found = 0;
877 	char			*offset = NULL;
878 	xfs_daddr_t		end_blk;
879 
880 	*wrapped = false;
881 
882 	/*
883 	 * Walk forward from the tail block until we hit the head or the last
884 	 * block in the log.
885 	 */
886 	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
887 	for (i = (int) tail_blk; i <= end_blk; i++) {
888 		error = xlog_bread(log, i, 1, buffer, &offset);
889 		if (error)
890 			goto out_error;
891 
892 		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
893 			*rblk = i;
894 			*rhead = (struct xlog_rec_header *) offset;
895 			if (++found == count)
896 				break;
897 		}
898 	}
899 
900 	/*
901 	 * If we haven't hit the head block or the log record header count,
902 	 * start looking again from the start of the physical log.
903 	 */
904 	if (tail_blk > head_blk && found != count) {
905 		for (i = 0; i < (int) head_blk; i++) {
906 			error = xlog_bread(log, i, 1, buffer, &offset);
907 			if (error)
908 				goto out_error;
909 
910 			if (*(__be32 *)offset ==
911 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
912 				*wrapped = true;
913 				*rblk = i;
914 				*rhead = (struct xlog_rec_header *) offset;
915 				if (++found == count)
916 					break;
917 			}
918 		}
919 	}
920 
921 	return found;
922 
923 out_error:
924 	return error;
925 }
926 
927 /*
928  * Calculate distance from head to tail (i.e., unused space in the log).
929  */
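/*
 * For example, with l_logBBsize == 100, head_blk == 90 and tail_blk == 10,
 * the tail is 10 + (100 - 90) = 20 blocks in front of the head.
 */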
930 static inline int
931 xlog_tail_distance(
932 	struct xlog	*log,
933 	xfs_daddr_t	head_blk,
934 	xfs_daddr_t	tail_blk)
935 {
936 	if (head_blk < tail_blk)
937 		return tail_blk - head_blk;
938 
939 	return tail_blk + (log->l_logBBsize - head_blk);
940 }
941 
942 /*
943  * Verify the log tail. This is particularly important when torn or incomplete
944  * writes have been detected near the front of the log and the head has been
945  * walked back accordingly.
946  *
947  * We also have to handle the case where the tail was pinned and the head
948  * blocked behind the tail right before a crash. If the tail had been pushed
949  * immediately prior to the crash and the subsequent checkpoint was only
950  * partially written, it's possible it overwrote the last referenced tail in the
951  * log with garbage. This is not a coherency problem because the tail must have
952  * been pushed before it can be overwritten, but appears as log corruption to
953  * recovery because we have no way to know the tail was updated if the
954  * subsequent checkpoint didn't write successfully.
955  *
956  * Therefore, CRC check the log from tail to head. If a failure occurs and the
957  * offending record is within max iclog bufs from the head, walk the tail
958  * forward and retry until a valid tail is found or corruption is detected out
959  * of the range of a possible overwrite.
960  */
961 STATIC int
962 xlog_verify_tail(
963 	struct xlog		*log,
964 	xfs_daddr_t		head_blk,
965 	xfs_daddr_t		*tail_blk,
966 	int			hsize)
967 {
968 	struct xlog_rec_header	*thead;
969 	char			*buffer;
970 	xfs_daddr_t		first_bad;
971 	int			error = 0;
972 	bool			wrapped;
973 	xfs_daddr_t		tmp_tail;
974 	xfs_daddr_t		orig_tail = *tail_blk;
975 
976 	buffer = xlog_alloc_buffer(log, 1);
977 	if (!buffer)
978 		return -ENOMEM;
979 
980 	/*
981 	 * Make sure the tail points to a record (returns positive count on
982 	 * success).
983 	 */
984 	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
985 			&tmp_tail, &thead, &wrapped);
986 	if (error < 0)
987 		goto out;
988 	if (*tail_blk != tmp_tail)
989 		*tail_blk = tmp_tail;
990 
991 	/*
992 	 * Run a CRC check from the tail to the head. We can't just check
993 	 * MAX_ICLOGS records past the tail because the tail may point to stale
994 	 * blocks cleared during the search for the head/tail. These blocks are
995 	 * overwritten with zero-length records and thus record count is not a
996 	 * reliable indicator of the iclog state before a crash.
997 	 */
998 	first_bad = 0;
999 	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1000 				      XLOG_RECOVER_CRCPASS, &first_bad);
1001 	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1002 		int	tail_distance;
1003 
1004 		/*
1005 		 * Is corruption within range of the head? If so, retry from
1006 		 * the next record. Otherwise return an error.
1007 		 */
1008 		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
1009 		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
1010 			break;
1011 
1012 		/* skip to the next record; returns positive count on success */
1013 		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
1014 				buffer, &tmp_tail, &thead, &wrapped);
1015 		if (error < 0)
1016 			goto out;
1017 
1018 		*tail_blk = tmp_tail;
1019 		first_bad = 0;
1020 		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1021 					      XLOG_RECOVER_CRCPASS, &first_bad);
1022 	}
1023 
1024 	if (!error && *tail_blk != orig_tail)
1025 		xfs_warn(log->l_mp,
1026 		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
1027 			 orig_tail, *tail_blk);
1028 out:
1029 	kmem_free(buffer);
1030 	return error;
1031 }
1032 
1033 /*
1034  * Detect and trim torn writes from the head of the log.
1035  *
1036  * Storage without sector atomicity guarantees can result in torn writes in the
1037  * log in the event of a crash. Our only means to detect this scenario is via
1038  * CRC verification. While we can't always be certain that CRC verification
1039  * failure is due to a torn write vs. an unrelated corruption, we do know that
1040  * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1041  * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
1042  * the log and treat failures in this range as torn writes as a matter of
1043  * policy. In the event of CRC failure, the head is walked back to the last good
1044  * record in the log and the tail is updated from that record and verified.
1045  */
1046 STATIC int
1047 xlog_verify_head(
1048 	struct xlog		*log,
1049 	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
1050 	xfs_daddr_t		*tail_blk,	/* out: tail block */
1051 	char			*buffer,
1052 	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
1053 	struct xlog_rec_header	**rhead,	/* ptr to last record */
1054 	bool			*wrapped)	/* last rec. wraps phys. log */
1055 {
1056 	struct xlog_rec_header	*tmp_rhead;
1057 	char			*tmp_buffer;
1058 	xfs_daddr_t		first_bad;
1059 	xfs_daddr_t		tmp_rhead_blk;
1060 	int			found;
1061 	int			error;
1062 	bool			tmp_wrapped;
1063 
1064 	/*
1065 	 * Check the head of the log for torn writes. Search backwards from the
1066 	 * head until we hit the tail or the maximum number of log record I/Os
1067 	 * that could have been in flight at one time. Use a temporary buffer so
1068 	 * we don't trash the rhead/buffer pointers from the caller.
1069 	 */
1070 	tmp_buffer = xlog_alloc_buffer(log, 1);
1071 	if (!tmp_buffer)
1072 		return -ENOMEM;
1073 	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
1074 				      XLOG_MAX_ICLOGS, tmp_buffer,
1075 				      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
1076 	kmem_free(tmp_buffer);
1077 	if (error < 0)
1078 		return error;
1079 
1080 	/*
1081 	 * Now run a CRC verification pass over the records starting at the
1082 	 * block found above to the current head. If a CRC failure occurs, the
1083 	 * log block of the first bad record is saved in first_bad.
1084 	 */
1085 	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
1086 				      XLOG_RECOVER_CRCPASS, &first_bad);
1087 	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1088 		/*
1089 		 * We've hit a potential torn write. Reset the error and warn
1090 		 * about it.
1091 		 */
1092 		error = 0;
1093 		xfs_warn(log->l_mp,
1094 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
1095 			 first_bad, *head_blk);
1096 
1097 		/*
1098 		 * Get the header block and buffer pointer for the last good
1099 		 * record before the bad record.
1100 		 *
1101 		 * Note that xlog_find_tail() clears the blocks at the new head
1102 		 * (i.e., the records with invalid CRC) if the cycle number
1103 		 * matches the current cycle.
1104 		 */
1105 		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
1106 				buffer, rhead_blk, rhead, wrapped);
1107 		if (found < 0)
1108 			return found;
1109 		if (found == 0)		/* XXX: right thing to do here? */
1110 			return -EIO;
1111 
1112 		/*
1113 		 * Reset the head block to the starting block of the first bad
1114 		 * log record and set the tail block based on the last good
1115 		 * record.
1116 		 *
1117 		 * Bail out if the updated head/tail match as this indicates
1118 		 * possible corruption outside of the acceptable
1119 		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
1120 		 */
1121 		*head_blk = first_bad;
1122 		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1123 		if (*head_blk == *tail_blk) {
1124 			ASSERT(0);
1125 			return 0;
1126 		}
1127 	}
1128 	if (error)
1129 		return error;
1130 
1131 	return xlog_verify_tail(log, *head_blk, tail_blk,
1132 				be32_to_cpu((*rhead)->h_size));
1133 }
1134 
1135 /*
1136  * We need to make sure we handle log wrapping properly, so we can't use the
1137  * calculated logbno directly. Make sure it wraps to the correct bno inside the
1138  * log.
1139  *
1140  * The log is limited to 32 bit sizes, so we use the appropriate modulus
1141  * operation here and cast it back to a 64 bit daddr on return.
1142  */
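/* For example, with l_logBBsize == 1000 a computed bno of 1005 wraps to 5. */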
1143 static inline xfs_daddr_t
1144 xlog_wrap_logbno(
1145 	struct xlog		*log,
1146 	xfs_daddr_t		bno)
1147 {
1148 	int			mod;
1149 
1150 	div_s64_rem(bno, log->l_logBBsize, &mod);
1151 	return mod;
1152 }
1153 
1154 /*
1155  * Check whether the head of the log points to an unmount record. In other
1156  * words, determine whether the log is clean. If so, update the in-core state
1157  * appropriately.
1158  */
1159 static int
1160 xlog_check_unmount_rec(
1161 	struct xlog		*log,
1162 	xfs_daddr_t		*head_blk,
1163 	xfs_daddr_t		*tail_blk,
1164 	struct xlog_rec_header	*rhead,
1165 	xfs_daddr_t		rhead_blk,
1166 	char			*buffer,
1167 	bool			*clean)
1168 {
1169 	struct xlog_op_header	*op_head;
1170 	xfs_daddr_t		umount_data_blk;
1171 	xfs_daddr_t		after_umount_blk;
1172 	int			hblks;
1173 	int			error;
1174 	char			*offset;
1175 
1176 	*clean = false;
1177 
1178 	/*
1179 	 * Look for unmount record. If we find it, then we know there was a
1180 	 * clean unmount. Since the record found could end at the last block of
1181 	 * the physical log, wrap the computed block before comparing to head_blk.
1182 	 *
1183 	 * Save the current tail lsn to pass to xlog_clear_stale_blocks() below.
1184 	 * We don't want to clear the unmount record if there is one, so we pass
1185 	 * the lsn of the unmount record rather than the block after it.
1186 	 */
1187 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1188 		int	h_size = be32_to_cpu(rhead->h_size);
1189 		int	h_version = be32_to_cpu(rhead->h_version);
1190 
1191 		if ((h_version & XLOG_VERSION_2) &&
1192 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1193 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1194 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
1195 				hblks++;
1196 		} else {
1197 			hblks = 1;
1198 		}
1199 	} else {
1200 		hblks = 1;
1201 	}
1202 
1203 	after_umount_blk = xlog_wrap_logbno(log,
1204 			rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1205 
1206 	if (*head_blk == after_umount_blk &&
1207 	    be32_to_cpu(rhead->h_num_logops) == 1) {
1208 		umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
1209 		error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
1210 		if (error)
1211 			return error;
1212 
1213 		op_head = (struct xlog_op_header *)offset;
1214 		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1215 			/*
1216 			 * Set tail and last sync so that newly written log
1217 			 * records will point recovery to after the current
1218 			 * unmount record.
1219 			 */
1220 			xlog_assign_atomic_lsn(&log->l_tail_lsn,
1221 					log->l_curr_cycle, after_umount_blk);
1222 			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1223 					log->l_curr_cycle, after_umount_blk);
1224 			*tail_blk = after_umount_blk;
1225 
1226 			*clean = true;
1227 		}
1228 	}
1229 
1230 	return 0;
1231 }
1232 
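/*
 * Update the in-core log state (previous/current block, current cycle, tail
 * lsn and grant heads) from the last good record header found at rhead_blk.
 */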
1233 static void
1234 xlog_set_state(
1235 	struct xlog		*log,
1236 	xfs_daddr_t		head_blk,
1237 	struct xlog_rec_header	*rhead,
1238 	xfs_daddr_t		rhead_blk,
1239 	bool			bump_cycle)
1240 {
1241 	/*
1242 	 * Reset log values according to the state of the log when we
1243 	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
1244 	 * one because the next write starts a new cycle rather than
1245 	 * continuing the cycle of the last good log record.  At this
1246 	 * point we have guaranteed that all partial log records have been
1247 	 * accounted for.  Therefore, we know that the last good log record
1248 	 * written was complete and ended exactly on the end boundary
1249 	 * of the physical log.
1250 	 */
1251 	log->l_prev_block = rhead_blk;
1252 	log->l_curr_block = (int)head_blk;
1253 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1254 	if (bump_cycle)
1255 		log->l_curr_cycle++;
1256 	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1257 	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1258 	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1259 					BBTOB(log->l_curr_block));
1260 	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1261 					BBTOB(log->l_curr_block));
1262 }
1263 
1264 /*
1265  * Find the sync block number or the tail of the log.
1266  *
1267  * This will be the block number of the last record to have its
1268  * associated buffers synced to disk.  Every log record header has
1269  * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
1270  * to get a sync block number.  The only concern is to figure out which
1271  * log record header to believe.
1272  *
1273  * The following algorithm uses the log record header with the largest
1274  * lsn.  The entire log record does not need to be valid.  We only care
1275  * that the header is valid.
1276  *
1277  * We could speed up the search by using the current head_blk buffer, but
1278  * it is not available.
1279  */
1280 STATIC int
1281 xlog_find_tail(
1282 	struct xlog		*log,
1283 	xfs_daddr_t		*head_blk,
1284 	xfs_daddr_t		*tail_blk)
1285 {
1286 	xlog_rec_header_t	*rhead;
1287 	char			*offset = NULL;
1288 	char			*buffer;
1289 	int			error;
1290 	xfs_daddr_t		rhead_blk;
1291 	xfs_lsn_t		tail_lsn;
1292 	bool			wrapped = false;
1293 	bool			clean = false;
1294 
1295 	/*
1296 	 * Find previous log record
1297 	 */
1298 	if ((error = xlog_find_head(log, head_blk)))
1299 		return error;
1300 	ASSERT(*head_blk < INT_MAX);
1301 
1302 	buffer = xlog_alloc_buffer(log, 1);
1303 	if (!buffer)
1304 		return -ENOMEM;
1305 	if (*head_blk == 0) {				/* special case */
1306 		error = xlog_bread(log, 0, 1, buffer, &offset);
1307 		if (error)
1308 			goto done;
1309 
1310 		if (xlog_get_cycle(offset) == 0) {
1311 			*tail_blk = 0;
1312 			/* leave all other log inited values alone */
1313 			goto done;
1314 		}
1315 	}
1316 
1317 	/*
1318 	 * Search backwards through the log looking for the log record header
1319 	 * block. This wraps all the way back around to the head so something is
1320 	 * seriously wrong if we can't find it.
1321 	 */
1322 	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
1323 				      &rhead_blk, &rhead, &wrapped);
1324 	if (error < 0)
1325 		goto done;
1326 	if (!error) {
1327 		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1328 		error = -EFSCORRUPTED;
1329 		goto done;
1330 	}
1331 	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1332 
1333 	/*
1334 	 * Set the log state based on the current head record.
1335 	 */
1336 	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1337 	tail_lsn = atomic64_read(&log->l_tail_lsn);
1338 
1339 	/*
1340 	 * Look for an unmount record at the head of the log. This sets the log
1341 	 * state to determine whether recovery is necessary.
1342 	 */
1343 	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1344 				       rhead_blk, buffer, &clean);
1345 	if (error)
1346 		goto done;
1347 
1348 	/*
1349 	 * Verify the log head if the log is not clean (e.g., we have anything
1350 	 * but an unmount record at the head). This uses CRC verification to
1351 	 * detect and trim torn writes. If discovered, CRC failures are
1352 	 * considered torn writes and the log head is trimmed accordingly.
1353 	 *
1354 	 * Note that we can only run CRC verification when the log is dirty
1355 	 * because there's no guarantee that the log data behind an unmount
1356 	 * record is compatible with the current architecture.
1357 	 */
1358 	if (!clean) {
1359 		xfs_daddr_t	orig_head = *head_blk;
1360 
1361 		error = xlog_verify_head(log, head_blk, tail_blk, buffer,
1362 					 &rhead_blk, &rhead, &wrapped);
1363 		if (error)
1364 			goto done;
1365 
1366 		/* update in-core state again if the head changed */
1367 		if (*head_blk != orig_head) {
1368 			xlog_set_state(log, *head_blk, rhead, rhead_blk,
1369 				       wrapped);
1370 			tail_lsn = atomic64_read(&log->l_tail_lsn);
1371 			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1372 						       rhead, rhead_blk, buffer,
1373 						       &clean);
1374 			if (error)
1375 				goto done;
1376 		}
1377 	}
1378 
1379 	/*
1380 	 * Note that the unmount was clean. If the unmount was not clean, we
1381 	 * need to know this to rebuild the superblock counters from the perag
1382 	 * headers if we have a filesystem using non-persistent counters.
1383 	 */
1384 	if (clean)
1385 		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1386 
1387 	/*
1388 	 * Make sure that there are no blocks in front of the head
1389 	 * with the same cycle number as the head.  This can happen
1390 	 * because we allow multiple outstanding log writes concurrently,
1391 	 * and the later writes might make it out before earlier ones.
1392 	 *
1393 	 * We use the lsn from before modifying it so that we'll never
1394 	 * overwrite the unmount record after a clean unmount.
1395 	 *
1396 	 * Do this only if we are going to recover the filesystem
1397 	 *
1398 	 * NOTE: This used to say "if (!readonly)"
1399 	 * However on Linux, we can & do recover a read-only filesystem.
1400 	 * We only skip recovery if NORECOVERY is specified on mount,
1401 	 * in which case we would not be here.
1402 	 *
1403 	 * But... if the -device- itself is readonly, just skip this.
1404 	 * We can't recover this device anyway, so it won't matter.
1405 	 */
1406 	if (!xfs_readonly_buftarg(log->l_targ))
1407 		error = xlog_clear_stale_blocks(log, tail_lsn);
1408 
1409 done:
1410 	kmem_free(buffer);
1411 
1412 	if (error)
1413 		xfs_warn(log->l_mp, "failed to locate log tail");
1414 	return error;
1415 }
1416 
1417 /*
1418  * Is the log zeroed at all?
1419  *
1420  * The last binary search should be changed to perform an X block read
1421  * once X becomes small enough.  You can then search linearly through
1422  * the X blocks.  This will cut down on the number of reads we need to do.
1423  *
1424  * If the log is partially zeroed, this routine will pass back the blkno
1425  * of the first block with cycle number 0.  It won't have a complete LR
1426  * preceding it.
1427  *
1428  * Return:
1429  *	0  => the log is completely written to
1430  *	1 => use *blk_no as the first block of the log
1431  *	<0 => error has occurred
1432  */
1433 STATIC int
1434 xlog_find_zeroed(
1435 	struct xlog	*log,
1436 	xfs_daddr_t	*blk_no)
1437 {
1438 	char		*buffer;
1439 	char		*offset;
1440 	uint	        first_cycle, last_cycle;
1441 	xfs_daddr_t	new_blk, last_blk, start_blk;
1442 	xfs_daddr_t     num_scan_bblks;
1443 	int	        error, log_bbnum = log->l_logBBsize;
1444 
1445 	*blk_no = 0;
1446 
1447 	/* check totally zeroed log */
1448 	buffer = xlog_alloc_buffer(log, 1);
1449 	if (!buffer)
1450 		return -ENOMEM;
1451 	error = xlog_bread(log, 0, 1, buffer, &offset);
1452 	if (error)
1453 		goto out_free_buffer;
1454 
1455 	first_cycle = xlog_get_cycle(offset);
1456 	if (first_cycle == 0) {		/* completely zeroed log */
1457 		*blk_no = 0;
1458 		kmem_free(buffer);
1459 		return 1;
1460 	}
1461 
1462 	/* check partially zeroed log */
1463 	error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
1464 	if (error)
1465 		goto out_free_buffer;
1466 
1467 	last_cycle = xlog_get_cycle(offset);
1468 	if (last_cycle != 0) {		/* log completely written to */
1469 		kmem_free(buffer);
1470 		return 0;
1471 	}
1472 
1473 	/* we have a partially zeroed log */
1474 	last_blk = log_bbnum-1;
1475 	error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1476 	if (error)
1477 		goto out_free_buffer;
1478 
1479 	/*
1480 	 * Validate the answer.  Because there is no way to guarantee that
1481 	 * the entire log is made up of log records which are the same size,
1482 	 * we scan over the defined maximum blocks.  At this point, the maximum
1483 	 * is not chosen to mean anything special.   XXXmiken
1484 	 */
1485 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1486 	ASSERT(num_scan_bblks <= INT_MAX);
1487 
1488 	if (last_blk < num_scan_bblks)
1489 		num_scan_bblks = last_blk;
1490 	start_blk = last_blk - num_scan_bblks;
1491 
1492 	/*
1493 	 * We search for any instances of cycle number 0 that occur before
1494 	 * our current estimate of the head.  What we're trying to detect is
1495 	 *        1 ... | 0 | 1 | 0...
1496 	 *                       ^ binary search ends here
1497 	 */
1498 	if ((error = xlog_find_verify_cycle(log, start_blk,
1499 					 (int)num_scan_bblks, 0, &new_blk)))
1500 		goto out_free_buffer;
1501 	if (new_blk != -1)
1502 		last_blk = new_blk;
1503 
1504 	/*
1505 	 * Potentially backup over partial log record write.  We don't need
1506 	 * to search the end of the log because we know it is zero.
1507 	 */
1508 	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1509 	if (error == 1)
1510 		error = -EIO;
1511 	if (error)
1512 		goto out_free_buffer;
1513 
1514 	*blk_no = last_blk;
1515 out_free_buffer:
1516 	kmem_free(buffer);
1517 	if (error)
1518 		return error;
1519 	return 1;
1520 }
1521 
1522 /*
1523  * These are simple subroutines used by xlog_clear_stale_blocks() below
1524  * to initialize a buffer full of empty log record headers and write
1525  * them into the log.
1526  */
1527 STATIC void
1528 xlog_add_record(
1529 	struct xlog		*log,
1530 	char			*buf,
1531 	int			cycle,
1532 	int			block,
1533 	int			tail_cycle,
1534 	int			tail_block)
1535 {
1536 	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
1537 
1538 	memset(buf, 0, BBSIZE);
1539 	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1540 	recp->h_cycle = cpu_to_be32(cycle);
1541 	recp->h_version = cpu_to_be32(
1542 			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1543 	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1544 	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1545 	recp->h_fmt = cpu_to_be32(XLOG_FMT);
1546 	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1547 }
1548 
1549 STATIC int
1550 xlog_write_log_records(
1551 	struct xlog	*log,
1552 	int		cycle,
1553 	int		start_block,
1554 	int		blocks,
1555 	int		tail_cycle,
1556 	int		tail_block)
1557 {
1558 	char		*offset;
1559 	char		*buffer;
1560 	int		balign, ealign;
1561 	int		sectbb = log->l_sectBBsize;
1562 	int		end_block = start_block + blocks;
1563 	int		bufblks;
1564 	int		error = 0;
1565 	int		i, j = 0;
1566 
1567 	/*
1568 	 * Greedily allocate a buffer big enough to handle the full
1569 	 * range of basic blocks to be written.  If that fails, try
1570 	 * a smaller size.  We need to be able to write at least a
1571 	 * log sector, or we're out of luck.
1572 	 */
1573 	bufblks = 1 << ffs(blocks);
1574 	while (bufblks > log->l_logBBsize)
1575 		bufblks >>= 1;
1576 	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1577 		bufblks >>= 1;
1578 		if (bufblks < sectbb)
1579 			return -ENOMEM;
1580 	}
1581 
1582 	/* We may need to do a read at the start to fill in part of
1583 	 * the buffer in the starting sector not covered by the first
1584 	 * write below.
1585 	 */
1586 	balign = round_down(start_block, sectbb);
1587 	if (balign != start_block) {
1588 		error = xlog_bread_noalign(log, start_block, 1, buffer);
1589 		if (error)
1590 			goto out_free_buffer;
1591 
1592 		j = start_block - balign;
1593 	}
1594 
1595 	for (i = start_block; i < end_block; i += bufblks) {
1596 		int		bcount, endcount;
1597 
1598 		bcount = min(bufblks, end_block - start_block);
1599 		endcount = bcount - j;
1600 
1601 		/* We may need to do a read at the end to fill in part of
1602 		 * the buffer in the final sector not covered by the write.
1603 		 * If this is the same sector as the above read, skip it.
1604 		 */
1605 		ealign = round_down(end_block, sectbb);
1606 		if (j == 0 && (start_block + endcount > ealign)) {
1607 			error = xlog_bread_noalign(log, ealign, sectbb,
1608 					buffer + BBTOB(ealign - start_block));
1609 			if (error)
1610 				break;
1611 
1612 		}
1613 
1614 		offset = buffer + xlog_align(log, start_block);
1615 		for (; j < endcount; j++) {
1616 			xlog_add_record(log, offset, cycle, i+j,
1617 					tail_cycle, tail_block);
1618 			offset += BBSIZE;
1619 		}
1620 		error = xlog_bwrite(log, start_block, endcount, buffer);
1621 		if (error)
1622 			break;
1623 		start_block += endcount;
1624 		j = 0;
1625 	}
1626 
1627 out_free_buffer:
1628 	kmem_free(buffer);
1629 	return error;
1630 }
1631 
1632 /*
1633  * This routine is called to blow away any incomplete log writes out
1634  * in front of the log head.  We do this so that we won't become confused
1635  * if we come up, write only a little bit more, and then crash again.
1636  * If we leave the partial log records out there, this situation could
1637  * cause us to think those partial writes are valid blocks since they
1638  * have the current cycle number.  We get rid of them by overwriting them
1639  * with empty log records with the old cycle number rather than the
1640  * current one.
1641  *
1642  * The tail lsn is passed in rather than taken from
1643  * the log so that we will not write over the unmount record after a
1644  * clean unmount in a 512 block log.  Doing so would leave the log without
1645  * any valid log records in it until a new one was written.  If we crashed
1646  * during that time we would not be able to recover.
1647  */
1648 STATIC int
1649 xlog_clear_stale_blocks(
1650 	struct xlog	*log,
1651 	xfs_lsn_t	tail_lsn)
1652 {
1653 	int		tail_cycle, head_cycle;
1654 	int		tail_block, head_block;
1655 	int		tail_distance, max_distance;
1656 	int		distance;
1657 	int		error;
1658 
1659 	tail_cycle = CYCLE_LSN(tail_lsn);
1660 	tail_block = BLOCK_LSN(tail_lsn);
1661 	head_cycle = log->l_curr_cycle;
1662 	head_block = log->l_curr_block;
1663 
1664 	/*
1665 	 * Figure out the distance between the new head of the log
1666 	 * and the tail.  We want to write over any blocks beyond the
1667 	 * head that we may have written just before the crash, but
1668 	 * we don't want to overwrite the tail of the log.
1669 	 */
1670 	if (head_cycle == tail_cycle) {
1671 		/*
1672 		 * The tail is behind the head in the physical log,
1673 		 * so the distance from the head to the tail is the
1674 		 * distance from the head to the end of the log plus
1675 		 * the distance from the beginning of the log to the
1676 		 * tail.
1677 		 */
1678 		if (XFS_IS_CORRUPT(log->l_mp,
1679 				   head_block < tail_block ||
1680 				   head_block >= log->l_logBBsize))
1681 			return -EFSCORRUPTED;
1682 		tail_distance = tail_block + (log->l_logBBsize - head_block);
1683 	} else {
1684 		/*
1685 		 * The head is behind the tail in the physical log,
1686 		 * so the distance from the head to the tail is just
1687 		 * the tail block minus the head block.
1688 		 */
1689 		if (XFS_IS_CORRUPT(log->l_mp,
1690 				   head_block >= tail_block ||
1691 				   head_cycle != tail_cycle + 1))
1692 			return -EFSCORRUPTED;
1693 		tail_distance = tail_block - head_block;
1694 	}
1695 
1696 	/*
1697 	 * If the head is right up against the tail, we can't clear
1698 	 * anything.
1699 	 */
1700 	if (tail_distance <= 0) {
1701 		ASSERT(tail_distance == 0);
1702 		return 0;
1703 	}
1704 
1705 	max_distance = XLOG_TOTAL_REC_SHIFT(log);
1706 	/*
1707 	 * Take the smaller of the maximum amount of outstanding I/O
1708 	 * we could have and the distance to the tail to clear out.
1709 	 * We take the smaller so that we don't overwrite the tail and
1710 	 * we don't waste all day writing from the head to the tail
1711 	 * for no reason.
1712 	 */
1713 	max_distance = min(max_distance, tail_distance);
1714 
1715 	if ((head_block + max_distance) <= log->l_logBBsize) {
1716 		/*
1717 		 * We can stomp all the blocks we need to without
1718 		 * wrapping around the end of the log.  Just do it
1719 		 * in a single write.  Use the cycle number of the
1720 		 * current cycle minus one so that the log will look like:
1721 		 *     n ... | n - 1 ...
1722 		 */
1723 		error = xlog_write_log_records(log, (head_cycle - 1),
1724 				head_block, max_distance, tail_cycle,
1725 				tail_block);
1726 		if (error)
1727 			return error;
1728 	} else {
1729 		/*
1730 		 * We need to wrap around the end of the physical log in
1731 		 * order to clear all the blocks.  Do it in two separate
1732 		 * I/Os.  The first write should be from the head to the
1733 		 * end of the physical log, and it should use the current
1734 		 * cycle number minus one just like above.
1735 		 */
1736 		distance = log->l_logBBsize - head_block;
1737 		error = xlog_write_log_records(log, (head_cycle - 1),
1738 				head_block, distance, tail_cycle,
1739 				tail_block);
1740 
1741 		if (error)
1742 			return error;
1743 
1744 		/*
1745 		 * Now write the blocks at the start of the physical log.
1746 		 * This writes the remainder of the blocks we want to clear.
1747 		 * It uses the current cycle number since we're now on the
1748 		 * same cycle as the head so that we get:
1749 		 *    n ... n ... | n - 1 ...
1750 		 *    ^^^^^ blocks we're writing
1751 		 */
1752 		distance = max_distance - (log->l_logBBsize - head_block);
1753 		error = xlog_write_log_records(log, head_cycle, 0, distance,
1754 				tail_cycle, tail_block);
1755 		if (error)
1756 			return error;
1757 	}
1758 
1759 	return 0;
1760 }
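
/*
 * The distance arithmetic above can be illustrated with a standalone
 * sketch in plain C.  The example_* names and the range structure below
 * are hypothetical and exist only for illustration; the arithmetic
 * mirrors xlog_clear_stale_blocks() and splits the clearing range in
 * two when it would wrap past the physical end of the log.
 */
struct example_clear_range {
	int	start;		/* first block to stamp */
	int	len;		/* number of blocks to stamp */
	int	cycle;		/* cycle number to stamp them with */
};

static inline int			/* returns the number of ranges: 0-2 */
example_stale_clear_ranges(
	int				log_size,
	int				head_cycle,
	int				head_block,
	int				tail_cycle,
	int				tail_block,
	int				max_io,
	struct example_clear_range	out[2])
{
	int	tail_distance;
	int	distance;
	int	n = 0;

	if (head_cycle == tail_cycle)	/* tail physically behind the head */
		tail_distance = tail_block + (log_size - head_block);
	else				/* head physically behind the tail */
		tail_distance = tail_block - head_block;

	distance = tail_distance < max_io ? tail_distance : max_io;
	if (distance <= 0)
		return 0;

	if (head_block + distance <= log_size) {
		/* no wrap: a single range stamped with the previous cycle */
		out[n].start = head_block;
		out[n].len = distance;
		out[n].cycle = head_cycle - 1;
		n++;
	} else {
		/* wrap: the log end gets cycle - 1, the log start gets cycle */
		out[n].start = head_block;
		out[n].len = log_size - head_block;
		out[n].cycle = head_cycle - 1;
		n++;
		out[n].start = 0;
		out[n].len = distance - (log_size - head_block);
		out[n].cycle = head_cycle;
		n++;
	}
	return n;
}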
1761 
1762 /*
1763  * Release the recovered intent item in the AIL that matches the given intent
1764  * type and intent id.
1765  */
1766 void
1767 xlog_recover_release_intent(
1768 	struct xlog		*log,
1769 	unsigned short		intent_type,
1770 	uint64_t		intent_id)
1771 {
1772 	struct xfs_ail_cursor	cur;
1773 	struct xfs_log_item	*lip;
1774 	struct xfs_ail		*ailp = log->l_ailp;
1775 
1776 	spin_lock(&ailp->ail_lock);
1777 	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
1778 	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
1779 		if (lip->li_type != intent_type)
1780 			continue;
1781 		if (!lip->li_ops->iop_match(lip, intent_id))
1782 			continue;
1783 
1784 		spin_unlock(&ailp->ail_lock);
1785 		lip->li_ops->iop_release(lip);
1786 		spin_lock(&ailp->ail_lock);
1787 		break;
1788 	}
1789 
1790 	xfs_trans_ail_cursor_done(&cur);
1791 	spin_unlock(&ailp->ail_lock);
1792 }
1793 
1794 /******************************************************************************
1795  *
1796  *		Log recover routines
1797  *
1798  ******************************************************************************
1799  */
1800 static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
1801 	&xlog_buf_item_ops,
1802 	&xlog_inode_item_ops,
1803 	&xlog_dquot_item_ops,
1804 	&xlog_quotaoff_item_ops,
1805 	&xlog_icreate_item_ops,
1806 	&xlog_efi_item_ops,
1807 	&xlog_efd_item_ops,
1808 	&xlog_rui_item_ops,
1809 	&xlog_rud_item_ops,
1810 	&xlog_cui_item_ops,
1811 	&xlog_cud_item_ops,
1812 	&xlog_bui_item_ops,
1813 	&xlog_bud_item_ops,
1814 };
1815 
1816 static const struct xlog_recover_item_ops *
1817 xlog_find_item_ops(
1818 	struct xlog_recover_item		*item)
1819 {
1820 	unsigned int				i;
1821 
1822 	for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
1823 		if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
1824 			return xlog_recover_item_ops[i];
1825 
1826 	return NULL;
1827 }
1828 
1829 /*
1830  * Sort the log items in the transaction.
1831  *
1832  * The ordering constraints are defined by the inode allocation and unlink
1833  * behaviour. The rules are:
1834  *
1835  *	1. Every item is only logged once in a given transaction. Hence it
1836  *	   represents the last logged state of the item, and ordering depends
1837  *	   on the order in which operations need to be performed so that the
1838  *	   required initial conditions are always met.
1839  *
1840  *	2. Cancelled buffers are recorded in pass 1 in a separate table and
1841  *	   there's nothing to replay from them so we can simply cull them
1842  *	   from the transaction. However, we can't do that until after we've
1843  *	   replayed all the other items because they may be dependent on the
1844  *	   cancelled buffer and replaying the cancelled buffer can remove it
1845  *	   from the cancelled buffer table. Hence they have to be done last.
1846  *
1847  *	3. Inode allocation buffers must be replayed before inode items that
1848  *	   read the buffer and replay changes into it. For filesystems using the
1849  *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1850  *	   treated the same as inode allocation buffers as they create and
1851  *	   initialise the buffers directly.
1852  *
1853  *	4. Inode unlink buffers must be replayed after inode items are replayed.
1854  *	   This ensures that inodes are completely flushed to the inode buffer
1855  *	   in a "free" state before we remove the unlinked inode list pointer.
1856  *
1857  * Hence the ordering needs to be inode allocation buffers first, inode items
1858  * second, inode unlink buffers third and cancelled buffers last.
1859  *
1860  * But there's a problem with that - we can't tell an inode allocation buffer
1861  * apart from a regular buffer, so we can't separate them. We can, however,
1862  * tell an inode unlink buffer from the others, and so we can separate them out
1863  * from all the other buffers and move them to last.
1864  *
1865  * Hence, 4 lists, in order from head to tail:
1866  *	- buffer_list for all buffers except cancelled/inode unlink buffers
1867  *	- item_list for all non-buffer items
1868  *	- inode_buffer_list for inode unlink buffers
1869  *	- cancel_list for the cancelled buffers
1870  *
1871  * Note that we add objects to the tail of the lists so that first-to-last
1872  * ordering is preserved within the lists. Adding objects to the head of the
1873  * list means when we traverse from the head we walk them in last-to-first
1874  * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1875  * but for all other items there may be specific ordering that we need to
1876  * preserve.
1877  */
1878 STATIC int
1879 xlog_recover_reorder_trans(
1880 	struct xlog		*log,
1881 	struct xlog_recover	*trans,
1882 	int			pass)
1883 {
1884 	struct xlog_recover_item *item, *n;
1885 	int			error = 0;
1886 	LIST_HEAD(sort_list);
1887 	LIST_HEAD(cancel_list);
1888 	LIST_HEAD(buffer_list);
1889 	LIST_HEAD(inode_buffer_list);
1890 	LIST_HEAD(item_list);
1891 
1892 	list_splice_init(&trans->r_itemq, &sort_list);
1893 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1894 		enum xlog_recover_reorder	fate = XLOG_REORDER_ITEM_LIST;
1895 
1896 		item->ri_ops = xlog_find_item_ops(item);
1897 		if (!item->ri_ops) {
1898 			xfs_warn(log->l_mp,
1899 				"%s: unrecognized type of log operation (%d)",
1900 				__func__, ITEM_TYPE(item));
1901 			ASSERT(0);
1902 			/*
1903 			 * Return the remaining items to the transaction item
1904 			 * list so they can be freed by the caller.
1905 			 */
1906 			if (!list_empty(&sort_list))
1907 				list_splice_init(&sort_list, &trans->r_itemq);
1908 			error = -EFSCORRUPTED;
1909 			break;
1910 		}
1911 
1912 		if (item->ri_ops->reorder)
1913 			fate = item->ri_ops->reorder(item);
1914 
1915 		switch (fate) {
1916 		case XLOG_REORDER_BUFFER_LIST:
1917 			list_move_tail(&item->ri_list, &buffer_list);
1918 			break;
1919 		case XLOG_REORDER_CANCEL_LIST:
1920 			trace_xfs_log_recover_item_reorder_head(log,
1921 					trans, item, pass);
1922 			list_move(&item->ri_list, &cancel_list);
1923 			break;
1924 		case XLOG_REORDER_INODE_BUFFER_LIST:
1925 			list_move(&item->ri_list, &inode_buffer_list);
1926 			break;
1927 		case XLOG_REORDER_ITEM_LIST:
1928 			trace_xfs_log_recover_item_reorder_tail(log,
1929 							trans, item, pass);
1930 			list_move_tail(&item->ri_list, &item_list);
1931 			break;
1932 		}
1933 	}
1934 
1935 	ASSERT(list_empty(&sort_list));
1936 	if (!list_empty(&buffer_list))
1937 		list_splice(&buffer_list, &trans->r_itemq);
1938 	if (!list_empty(&item_list))
1939 		list_splice_tail(&item_list, &trans->r_itemq);
1940 	if (!list_empty(&inode_buffer_list))
1941 		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1942 	if (!list_empty(&cancel_list))
1943 		list_splice_tail(&cancel_list, &trans->r_itemq);
1944 	return error;
1945 }
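
/*
 * The net effect of the splices above can be sketched without the list
 * machinery: if each item is given a rank for its reorder fate, a stable
 * sort by that rank yields the same buffer / item / inode-unlink-buffer /
 * cancelled-buffer ordering.  (The example_* name is hypothetical; the
 * within-class reversal of the cancel and inode buffer lists noted above
 * is irrelevant and is not modelled here.)
 */
static inline int
example_reorder_rank(
	enum xlog_recover_reorder	fate)
{
	switch (fate) {
	case XLOG_REORDER_BUFFER_LIST:		return 0;
	case XLOG_REORDER_ITEM_LIST:		return 1;
	case XLOG_REORDER_INODE_BUFFER_LIST:	return 2;
	case XLOG_REORDER_CANCEL_LIST:		return 3;
	}
	return 1;				/* unreachable */
}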
1946 
1947 void
1948 xlog_buf_readahead(
1949 	struct xlog		*log,
1950 	xfs_daddr_t		blkno,
1951 	uint			len,
1952 	const struct xfs_buf_ops *ops)
1953 {
1954 	if (!xlog_is_buffer_cancelled(log, blkno, len))
1955 		xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
1956 }
1957 
1958 STATIC int
1959 xlog_recover_items_pass2(
1960 	struct xlog                     *log,
1961 	struct xlog_recover             *trans,
1962 	struct list_head                *buffer_list,
1963 	struct list_head                *item_list)
1964 {
1965 	struct xlog_recover_item	*item;
1966 	int				error = 0;
1967 
1968 	list_for_each_entry(item, item_list, ri_list) {
1969 		trace_xfs_log_recover_item_recover(log, trans, item,
1970 				XLOG_RECOVER_PASS2);
1971 
1972 		if (item->ri_ops->commit_pass2)
1973 			error = item->ri_ops->commit_pass2(log, buffer_list,
1974 					item, trans->r_lsn);
1975 		if (error)
1976 			return error;
1977 	}
1978 
1979 	return error;
1980 }
1981 
1982 /*
1983  * Perform the transaction.
1984  *
1985  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
1986  * EFIs and EFDs get queued up by adding entries into the AIL for them.
1987  */
1988 STATIC int
1989 xlog_recover_commit_trans(
1990 	struct xlog		*log,
1991 	struct xlog_recover	*trans,
1992 	int			pass,
1993 	struct list_head	*buffer_list)
1994 {
1995 	int				error = 0;
1996 	int				items_queued = 0;
1997 	struct xlog_recover_item	*item;
1998 	struct xlog_recover_item	*next;
1999 	LIST_HEAD			(ra_list);
2000 	LIST_HEAD			(done_list);
2001 
2002 	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
2003 
2004 	hlist_del_init(&trans->r_list);
2005 
2006 	error = xlog_recover_reorder_trans(log, trans, pass);
2007 	if (error)
2008 		return error;
2009 
2010 	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
2011 		trace_xfs_log_recover_item_recover(log, trans, item, pass);
2012 
2013 		switch (pass) {
2014 		case XLOG_RECOVER_PASS1:
2015 			if (item->ri_ops->commit_pass1)
2016 				error = item->ri_ops->commit_pass1(log, item);
2017 			break;
2018 		case XLOG_RECOVER_PASS2:
2019 			if (item->ri_ops->ra_pass2)
2020 				item->ri_ops->ra_pass2(log, item);
2021 			list_move_tail(&item->ri_list, &ra_list);
2022 			items_queued++;
2023 			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
2024 				error = xlog_recover_items_pass2(log, trans,
2025 						buffer_list, &ra_list);
2026 				list_splice_tail_init(&ra_list, &done_list);
2027 				items_queued = 0;
2028 			}
2029 
2030 			break;
2031 		default:
2032 			ASSERT(0);
2033 		}
2034 
2035 		if (error)
2036 			goto out;
2037 	}
2038 
2039 out:
2040 	if (!list_empty(&ra_list)) {
2041 		if (!error)
2042 			error = xlog_recover_items_pass2(log, trans,
2043 					buffer_list, &ra_list);
2044 		list_splice_tail_init(&ra_list, &done_list);
2045 	}
2046 
2047 	if (!list_empty(&done_list))
2048 		list_splice_init(&done_list, &trans->r_itemq);
2049 
2050 	return error;
2051 }
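
/*
 * The pass 2 readahead batching above is a plain count-and-flush loop.
 * A standalone sketch of the pattern (the example_* name and the flush
 * callback are hypothetical and exist only for illustration):
 */
static inline int
example_batched_flush(
	int	nitems,
	int	batch_size,		/* XLOG_RECOVER_COMMIT_QUEUE_MAX */
	int	(*flush)(int first, int count, void *priv),
	void	*priv)
{
	int	first = 0;
	int	queued = 0;
	int	i, error;

	for (i = 0; i < nitems; i++) {
		if (++queued < batch_size)
			continue;
		error = flush(first, queued, priv);
		if (error)
			return error;
		first = i + 1;
		queued = 0;
	}
	/* flush the partial batch left over, as the out: label does above */
	if (queued)
		return flush(first, queued, priv);
	return 0;
}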
2052 
2053 STATIC void
2054 xlog_recover_add_item(
2055 	struct list_head	*head)
2056 {
2057 	struct xlog_recover_item *item;
2058 
2059 	item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
2060 	INIT_LIST_HEAD(&item->ri_list);
2061 	list_add_tail(&item->ri_list, head);
2062 }
2063 
2064 STATIC int
2065 xlog_recover_add_to_cont_trans(
2066 	struct xlog		*log,
2067 	struct xlog_recover	*trans,
2068 	char			*dp,
2069 	int			len)
2070 {
2071 	struct xlog_recover_item *item;
2072 	char			*ptr, *old_ptr;
2073 	int			old_len;
2074 
2075 	/*
2076 	 * If the transaction is empty, the header was split across this and the
2077 	 * previous record. Copy the rest of the header.
2078 	 */
2079 	if (list_empty(&trans->r_itemq)) {
2080 		ASSERT(len <= sizeof(struct xfs_trans_header));
2081 		if (len > sizeof(struct xfs_trans_header)) {
2082 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
2083 			return -EFSCORRUPTED;
2084 		}
2085 
2086 		xlog_recover_add_item(&trans->r_itemq);
2087 		ptr = (char *)&trans->r_theader +
2088 				sizeof(struct xfs_trans_header) - len;
2089 		memcpy(ptr, dp, len);
2090 		return 0;
2091 	}
2092 
2093 	/* take the tail entry */
2094 	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2095 			  ri_list);
2096 
2097 	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
2098 	old_len = item->ri_buf[item->ri_cnt-1].i_len;
2099 
2100 	ptr = kmem_realloc(old_ptr, len + old_len, 0);
2101 	memcpy(&ptr[old_len], dp, len);
2102 	item->ri_buf[item->ri_cnt-1].i_len += len;
2103 	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
2104 	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
2105 	return 0;
2106 }
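
/*
 * Continuation handling amounts to "grow the last region and append the
 * new fragment".  A minimal sketch of that step, using the same
 * kmem_realloc()/memcpy() calls as above (the example_* name is
 * hypothetical; as in the code above, the allocation result is used
 * without a NULL check):
 */
static inline char *
example_append_fragment(
	char		*old_ptr,
	int		*old_len,
	const char	*dp,
	int		len)
{
	char		*ptr;

	ptr = kmem_realloc(old_ptr, *old_len + len, 0);
	memcpy(&ptr[*old_len], dp, len);
	*old_len += len;
	return ptr;
}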
2107 
2108 /*
2109  * The next region to add is the start of a new region.  It could be
2110  * a whole region or just the first part of one.  Because
2111  * of this, the assumption here is that the type and size fields of all
2112  * format structures fit into the first 32 bits of the structure.
2113  *
2114  * This works because all regions must be 32 bit aligned.  Therefore, we
2115  * either have both fields or we have neither field.  In the case we have
2116  * either have both fields or we have neither field.  If we have
2117  * a log_op_header and can throw away the header since a new one will appear
2118  * later.  If we have at least 4 bytes, then we can determine how many regions
2119  * will appear in the current log item.
2120  */
2121 STATIC int
2122 xlog_recover_add_to_trans(
2123 	struct xlog		*log,
2124 	struct xlog_recover	*trans,
2125 	char			*dp,
2126 	int			len)
2127 {
2128 	struct xfs_inode_log_format	*in_f;			/* any will do */
2129 	struct xlog_recover_item *item;
2130 	char			*ptr;
2131 
2132 	if (!len)
2133 		return 0;
2134 	if (list_empty(&trans->r_itemq)) {
2135 		/* we need to catch log corruptions here */
2136 		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
2137 			xfs_warn(log->l_mp, "%s: bad header magic number",
2138 				__func__);
2139 			ASSERT(0);
2140 			return -EFSCORRUPTED;
2141 		}
2142 
2143 		if (len > sizeof(struct xfs_trans_header)) {
2144 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
2145 			ASSERT(0);
2146 			return -EFSCORRUPTED;
2147 		}
2148 
2149 		/*
2150 		 * The transaction header can be arbitrarily split across op
2151 		 * records. If we don't have the whole thing here, copy what we
2152 		 * do have and handle the rest in the next record.
2153 		 */
2154 		if (len == sizeof(struct xfs_trans_header))
2155 			xlog_recover_add_item(&trans->r_itemq);
2156 		memcpy(&trans->r_theader, dp, len);
2157 		return 0;
2158 	}
2159 
2160 	ptr = kmem_alloc(len, 0);
2161 	memcpy(ptr, dp, len);
2162 	in_f = (struct xfs_inode_log_format *)ptr;
2163 
2164 	/* take the tail entry */
2165 	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2166 			  ri_list);
2167 	if (item->ri_total != 0 &&
2168 	     item->ri_total == item->ri_cnt) {
2169 		/* tail item is in use, get a new one */
2170 		xlog_recover_add_item(&trans->r_itemq);
2171 		item = list_entry(trans->r_itemq.prev,
2172 					struct xlog_recover_item, ri_list);
2173 	}
2174 
2175 	if (item->ri_total == 0) {		/* first region to be added */
2176 		if (in_f->ilf_size == 0 ||
2177 		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
2178 			xfs_warn(log->l_mp,
2179 		"bad number of regions (%d) in inode log format",
2180 				  in_f->ilf_size);
2181 			ASSERT(0);
2182 			kmem_free(ptr);
2183 			return -EFSCORRUPTED;
2184 		}
2185 
2186 		item->ri_total = in_f->ilf_size;
2187 		item->ri_buf =
2188 			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
2189 				    0);
2190 	}
2191 
2192 	if (item->ri_total <= item->ri_cnt) {
2193 		xfs_warn(log->l_mp,
2194 	"log item region count (%d) overflowed size (%d)",
2195 				item->ri_cnt, item->ri_total);
2196 		ASSERT(0);
2197 		kmem_free(ptr);
2198 		return -EFSCORRUPTED;
2199 	}
2200 
2201 	/* Description region is ri_buf[0] */
2202 	item->ri_buf[item->ri_cnt].i_addr = ptr;
2203 	item->ri_buf[item->ri_cnt].i_len  = len;
2204 	item->ri_cnt++;
2205 	trace_xfs_log_recover_item_add(log, trans, item, 0);
2206 	return 0;
2207 }
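
/*
 * The region accounting above reduces to two rules: the first region of
 * an item declares how many regions will follow (via the size field that
 * every log format structure keeps in its first 32 bits), and each
 * subsequent region must fit within that declared total.  Sketch of the
 * bookkeeping check (the example_* name is hypothetical):
 */
static inline bool
example_region_fits(
	int	ri_total,	/* declared region count, 0 if not yet seen */
	int	ri_cnt,		/* regions added so far */
	int	declared)	/* size field carried by the first region */
{
	if (ri_total == 0)
		ri_total = declared;
	return ri_cnt < ri_total;
}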
2208 
2209 /*
2210  * Free up any resources allocated by the transaction
2211  *
2212  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2213  */
2214 STATIC void
2215 xlog_recover_free_trans(
2216 	struct xlog_recover	*trans)
2217 {
2218 	struct xlog_recover_item *item, *n;
2219 	int			i;
2220 
2221 	hlist_del_init(&trans->r_list);
2222 
2223 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2224 		/* Free the regions in the item. */
2225 		list_del(&item->ri_list);
2226 		for (i = 0; i < item->ri_cnt; i++)
2227 			kmem_free(item->ri_buf[i].i_addr);
2228 		/* Free the item itself */
2229 		kmem_free(item->ri_buf);
2230 		kmem_free(item);
2231 	}
2232 	/* Free the transaction recover structure */
2233 	kmem_free(trans);
2234 }
2235 
2236 /*
2237  * On error or completion, trans is freed.
2238  */
2239 STATIC int
2240 xlog_recovery_process_trans(
2241 	struct xlog		*log,
2242 	struct xlog_recover	*trans,
2243 	char			*dp,
2244 	unsigned int		len,
2245 	unsigned int		flags,
2246 	int			pass,
2247 	struct list_head	*buffer_list)
2248 {
2249 	int			error = 0;
2250 	bool			freeit = false;
2251 
2252 	/* mask off ophdr transaction container flags */
2253 	flags &= ~XLOG_END_TRANS;
2254 	if (flags & XLOG_WAS_CONT_TRANS)
2255 		flags &= ~XLOG_CONTINUE_TRANS;
2256 
2257 	/*
2258 	 * Callees must not free the trans structure. We'll decide if we need to
2259 	 * free it or not based on the operation being done and its result.
2260 	 */
2261 	switch (flags) {
2262 	/* expected flag values */
2263 	case 0:
2264 	case XLOG_CONTINUE_TRANS:
2265 		error = xlog_recover_add_to_trans(log, trans, dp, len);
2266 		break;
2267 	case XLOG_WAS_CONT_TRANS:
2268 		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
2269 		break;
2270 	case XLOG_COMMIT_TRANS:
2271 		error = xlog_recover_commit_trans(log, trans, pass,
2272 						  buffer_list);
2273 		/* success or fail, we are now done with this transaction. */
2274 		freeit = true;
2275 		break;
2276 
2277 	/* unexpected flag values */
2278 	case XLOG_UNMOUNT_TRANS:
2279 		/* just skip trans */
2280 		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2281 		freeit = true;
2282 		break;
2283 	case XLOG_START_TRANS:
2284 	default:
2285 		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
2286 		ASSERT(0);
2287 		error = -EFSCORRUPTED;
2288 		break;
2289 	}
2290 	if (error || freeit)
2291 		xlog_recover_free_trans(trans);
2292 	return error;
2293 }
2294 
2295 /*
2296  * Look up the transaction recovery structure associated with the ID in the
2297  * current ophdr. If the transaction doesn't exist and the start flag is set in
2298  * the ophdr, then allocate a new transaction for future ID matches to find.
2299  * Either way, return what we found during the lookup - an existing transaction
2300  * or nothing.
2301  */
2302 STATIC struct xlog_recover *
2303 xlog_recover_ophdr_to_trans(
2304 	struct hlist_head	rhash[],
2305 	struct xlog_rec_header	*rhead,
2306 	struct xlog_op_header	*ohead)
2307 {
2308 	struct xlog_recover	*trans;
2309 	xlog_tid_t		tid;
2310 	struct hlist_head	*rhp;
2311 
2312 	tid = be32_to_cpu(ohead->oh_tid);
2313 	rhp = &rhash[XLOG_RHASH(tid)];
2314 	hlist_for_each_entry(trans, rhp, r_list) {
2315 		if (trans->r_log_tid == tid)
2316 			return trans;
2317 	}
2318 
2319 	/*
2320 	 * skip over non-start transaction headers - we could be
2321 	 * processing slack space before the next transaction starts
2322 	 */
2323 	if (!(ohead->oh_flags & XLOG_START_TRANS))
2324 		return NULL;
2325 
2326 	ASSERT(be32_to_cpu(ohead->oh_len) == 0);
2327 
2328 	/*
2329 	 * This is a new transaction so allocate a new recovery container to
2330 	 * hold the recovery ops that will follow.
2331 	 */
2332 	trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
2333 	trans->r_log_tid = tid;
2334 	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2335 	INIT_LIST_HEAD(&trans->r_itemq);
2336 	INIT_HLIST_NODE(&trans->r_list);
2337 	hlist_add_head(&trans->r_list, rhp);
2338 
2339 	/*
2340 	 * Nothing more to do for this ophdr. Items to be added to this new
2341 	 * transaction will be in subsequent ophdr containers.
2342 	 */
2343 	return NULL;
2344 }
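
/*
 * Stripped of the hash walk, the lookup-or-create policy above is: an
 * existing transaction with a matching tid always wins; otherwise only
 * an ophdr carrying XLOG_START_TRANS may create a new container, and
 * even then the caller is handed NULL because the start record itself
 * carries no payload to add.  Sketch of the decision (the example_*
 * name and the alloc callback are hypothetical):
 */
static inline struct xlog_recover *
example_ophdr_decision(
	struct xlog_recover	*found,		/* result of the tid lookup */
	unsigned int		oh_flags,
	void			(*alloc)(void))	/* creates and hashes a trans */
{
	if (found)
		return found;
	if (oh_flags & XLOG_START_TRANS)
		alloc();
	return NULL;
}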
2345 
2346 STATIC int
2347 xlog_recover_process_ophdr(
2348 	struct xlog		*log,
2349 	struct hlist_head	rhash[],
2350 	struct xlog_rec_header	*rhead,
2351 	struct xlog_op_header	*ohead,
2352 	char			*dp,
2353 	char			*end,
2354 	int			pass,
2355 	struct list_head	*buffer_list)
2356 {
2357 	struct xlog_recover	*trans;
2358 	unsigned int		len;
2359 	int			error;
2360 
2361 	/* Do we understand who wrote this op? */
2362 	if (ohead->oh_clientid != XFS_TRANSACTION &&
2363 	    ohead->oh_clientid != XFS_LOG) {
2364 		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2365 			__func__, ohead->oh_clientid);
2366 		ASSERT(0);
2367 		return -EFSCORRUPTED;
2368 	}
2369 
2370 	/*
2371 	 * Check the ophdr contains all the data it is supposed to contain.
2372 	 */
2373 	len = be32_to_cpu(ohead->oh_len);
2374 	if (dp + len > end) {
2375 		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
2376 		WARN_ON(1);
2377 		return -EFSCORRUPTED;
2378 	}
2379 
2380 	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
2381 	if (!trans) {
2382 		/* nothing to do, so skip over this ophdr */
2383 		return 0;
2384 	}
2385 
2386 	/*
2387 	 * The recovered buffer queue is drained only once we know that all
2388 	 * recovery items for the current LSN have been processed. This is
2389 	 * required because:
2390 	 *
2391 	 * - Buffer write submission updates the metadata LSN of the buffer.
2392 	 * - Log recovery skips items with a metadata LSN >= the current LSN of
2393 	 *   the recovery item.
2394 	 * - Separate recovery items against the same metadata buffer can share
2395 	 *   a current LSN. I.e., consider that the LSN of a recovery item is
2396 	 *   defined as the starting LSN of the first record in which its
2397 	 *   transaction appears, that a record can hold multiple transactions,
2398 	 *   and/or that a transaction can span multiple records.
2399 	 *
2400 	 * In other words, we are allowed to submit a buffer from log recovery
2401 	 * once per current LSN. Otherwise, we may incorrectly skip recovery
2402 	 * items and cause corruption.
2403 	 *
2404 	 * We don't know up front whether buffers are updated multiple times per
2405 	 * LSN. Therefore, track the current LSN of each commit log record as it
2406 	 * is processed and drain the queue when it changes. Use commit records
2407 	 * because they are ordered correctly by the logging code.
2408 	 */
2409 	if (log->l_recovery_lsn != trans->r_lsn &&
2410 	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
2411 		error = xfs_buf_delwri_submit(buffer_list);
2412 		if (error)
2413 			return error;
2414 		log->l_recovery_lsn = trans->r_lsn;
2415 	}
2416 
2417 	return xlog_recovery_process_trans(log, trans, dp, len,
2418 					   ohead->oh_flags, pass, buffer_list);
2419 }
2420 
2421 /*
2422  * There are two valid states of the r_state field.  0 indicates that the
2423  * transaction structure is in a normal state.  We have either seen the
2424  * start of the transaction or the last operation we added was not a partial
2425  * operation.  If the last operation we added to the transaction was a
2426  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2427  *
2428  * NOTE: skip LRs with 0 data length.
2429  */
2430 STATIC int
2431 xlog_recover_process_data(
2432 	struct xlog		*log,
2433 	struct hlist_head	rhash[],
2434 	struct xlog_rec_header	*rhead,
2435 	char			*dp,
2436 	int			pass,
2437 	struct list_head	*buffer_list)
2438 {
2439 	struct xlog_op_header	*ohead;
2440 	char			*end;
2441 	int			num_logops;
2442 	int			error;
2443 
2444 	end = dp + be32_to_cpu(rhead->h_len);
2445 	num_logops = be32_to_cpu(rhead->h_num_logops);
2446 
2447 	/* check the log format matches our own - else we can't recover */
2448 	if (xlog_header_check_recover(log->l_mp, rhead))
2449 		return -EIO;
2450 
2451 	trace_xfs_log_recover_record(log, rhead, pass);
2452 	while ((dp < end) && num_logops) {
2453 
2454 		ohead = (struct xlog_op_header *)dp;
2455 		dp += sizeof(*ohead);
2456 		ASSERT(dp <= end);
2457 
2458 		/* errors will abort recovery */
2459 		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
2460 						   dp, end, pass, buffer_list);
2461 		if (error)
2462 			return error;
2463 
2464 		dp += be32_to_cpu(ohead->oh_len);
2465 		num_logops--;
2466 	}
2467 	return 0;
2468 }
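
/*
 * The record walk above steps through back-to-back op headers; each
 * iteration consumes the fixed-size header plus the payload length that
 * header declares.  The cursor arithmetic in isolation (the example_*
 * name is hypothetical):
 */
static inline char *
example_next_ophdr(
	char			*dp)		/* points at the current ophdr */
{
	struct xlog_op_header	*ohead = (struct xlog_op_header *)dp;

	return dp + sizeof(*ohead) + be32_to_cpu(ohead->oh_len);
}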
2469 
2470 /* Take all the collected deferred ops and finish them in order. */
2471 static int
2472 xlog_finish_defer_ops(
2473 	struct xfs_trans	*parent_tp)
2474 {
2475 	struct xfs_mount	*mp = parent_tp->t_mountp;
2476 	struct xfs_trans	*tp;
2477 	int64_t			freeblks;
2478 	uint			resblks;
2479 	int			error;
2480 
2481 	/*
2482 	 * We're finishing the defer_ops that accumulated as a result of
2483 	 * recovering unfinished intent items during log recovery.  We
2484 	 * reserve an itruncate transaction because it is the largest
2485 	 * permanent transaction type.  Since we're the only user of the fs
2486 	 * right now, take 93% (15/16) of the available free blocks.  Use
2487 	 * weird math to avoid a 64-bit division.
2488 	 */
2489 	freeblks = percpu_counter_sum(&mp->m_fdblocks);
2490 	if (freeblks <= 0)
2491 		return -ENOSPC;
2492 	resblks = min_t(int64_t, UINT_MAX, freeblks);
2493 	resblks = (resblks * 15) >> 4;
2494 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
2495 			0, XFS_TRANS_RESERVE, &tp);
2496 	if (error)
2497 		return error;
2498 	/* transfer all collected dfops to this transaction */
2499 	xfs_defer_move(tp, parent_tp);
2500 
2501 	return xfs_trans_commit(tp);
2502 }
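
/*
 * Worked example of the sizing above: with 1,000,000 free blocks the
 * reservation is min(UINT_MAX, 1000000) = 1000000, then
 * (1000000 * 15) >> 4 = 937500 blocks, i.e. 15/16 of the free space
 * computed with a multiply and a shift rather than a 64-bit division.
 */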
2503 
2504 /* Is this log item a deferred action intent? */
2505 static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
2506 {
2507 	return lip->li_ops->iop_recover != NULL &&
2508 	       lip->li_ops->iop_match != NULL;
2509 }
2510 
2511 /*
2512  * When this is called, all of the log intent items which did not have
2513  * corresponding log done items should be in the AIL.  What we do now
2514  * is update the data structures associated with each one.
2515  *
2516  * Since we process the log intent items in normal transactions, they
2517  * will be removed at some point after the commit.  This prevents us
2518  * from just walking down the list processing each one.  We'll use a
2519  * flag in the intent item to skip those that we've already processed
2520  * and use the AIL iteration mechanism's generation count to try to
2521  * speed this up at least a bit.
2522  *
2523  * When we start, we know that the intents are the only things in the
2524  * AIL.  As we process them, however, other items are added to the
2525  * AIL.
2526  */
2527 STATIC int
2528 xlog_recover_process_intents(
2529 	struct xlog		*log)
2530 {
2531 	struct xfs_trans	*parent_tp;
2532 	struct xfs_ail_cursor	cur;
2533 	struct xfs_log_item	*lip;
2534 	struct xfs_ail		*ailp;
2535 	int			error;
2536 #if defined(DEBUG) || defined(XFS_WARN)
2537 	xfs_lsn_t		last_lsn;
2538 #endif
2539 
2540 	/*
2541 	 * The intent recovery handlers commit transactions to complete recovery
2542 	 * for individual intents, but any new deferred operations that are
2543 	 * queued during that process are held off until the very end. The
2544 	 * purpose of this transaction is to serve as a container for deferred
2545 	 * operations. Each intent recovery handler must transfer dfops here
2546 	 * before its local transaction commits, and we'll finish the entire
2547 	 * list below.
2548 	 */
2549 	error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
2550 	if (error)
2551 		return error;
2552 
2553 	ailp = log->l_ailp;
2554 	spin_lock(&ailp->ail_lock);
2555 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2556 #if defined(DEBUG) || defined(XFS_WARN)
2557 	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
2558 #endif
2559 	while (lip != NULL) {
2560 		/*
2561 		 * We're done when we see something other than an intent.
2562 		 * There should be no intents left in the AIL now.
2563 		 */
2564 		if (!xlog_item_is_intent(lip)) {
2565 #ifdef DEBUG
2566 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
2567 				ASSERT(!xlog_item_is_intent(lip));
2568 #endif
2569 			break;
2570 		}
2571 
2572 		/*
2573 		 * We should never see a redo item with a LSN higher than
2574 		 * the last transaction we found in the log at the start
2575 		 * of recovery.
2576 		 */
2577 		ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
2578 
2579 		/*
2580 		 * NOTE: If your intent processing routine can create more
2581 		 * deferred ops, you /must/ attach them to the transaction in
2582 		 * this routine or else those subsequent intents will get
2583 		 * replayed in the wrong order!
2584 		 */
2585 		if (!test_and_set_bit(XFS_LI_RECOVERED, &lip->li_flags)) {
2586 			spin_unlock(&ailp->ail_lock);
2587 			error = lip->li_ops->iop_recover(lip, parent_tp);
2588 			spin_lock(&ailp->ail_lock);
2589 		}
2590 		if (error)
2591 			goto out;
2592 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
2593 	}
2594 out:
2595 	xfs_trans_ail_cursor_done(&cur);
2596 	spin_unlock(&ailp->ail_lock);
2597 	if (!error)
2598 		error = xlog_finish_defer_ops(parent_tp);
2599 	xfs_trans_cancel(parent_tp);
2600 
2601 	return error;
2602 }
2603 
2604 /*
2605  * A cancel occurs when the mount has failed and we're bailing out.
2606  * Release all pending log intent items so they don't pin the AIL.
2607  */
2608 STATIC void
2609 xlog_recover_cancel_intents(
2610 	struct xlog		*log)
2611 {
2612 	struct xfs_log_item	*lip;
2613 	struct xfs_ail_cursor	cur;
2614 	struct xfs_ail		*ailp;
2615 
2616 	ailp = log->l_ailp;
2617 	spin_lock(&ailp->ail_lock);
2618 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2619 	while (lip != NULL) {
2620 		/*
2621 		 * We're done when we see something other than an intent.
2622 		 * There should be no intents left in the AIL now.
2623 		 */
2624 		if (!xlog_item_is_intent(lip)) {
2625 #ifdef DEBUG
2626 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
2627 				ASSERT(!xlog_item_is_intent(lip));
2628 #endif
2629 			break;
2630 		}
2631 
2632 		spin_unlock(&ailp->ail_lock);
2633 		lip->li_ops->iop_release(lip);
2634 		spin_lock(&ailp->ail_lock);
2635 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
2636 	}
2637 
2638 	xfs_trans_ail_cursor_done(&cur);
2639 	spin_unlock(&ailp->ail_lock);
2640 }
2641 
2642 /*
2643  * This routine performs a transaction to null out a bad inode pointer
2644  * in an agi unlinked inode hash bucket.
2645  */
2646 STATIC void
2647 xlog_recover_clear_agi_bucket(
2648 	xfs_mount_t	*mp,
2649 	xfs_agnumber_t	agno,
2650 	int		bucket)
2651 {
2652 	xfs_trans_t	*tp;
2653 	xfs_agi_t	*agi;
2654 	xfs_buf_t	*agibp;
2655 	int		offset;
2656 	int		error;
2657 
2658 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
2659 	if (error)
2660 		goto out_error;
2661 
2662 	error = xfs_read_agi(mp, tp, agno, &agibp);
2663 	if (error)
2664 		goto out_abort;
2665 
2666 	agi = agibp->b_addr;
2667 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
2668 	offset = offsetof(xfs_agi_t, agi_unlinked) +
2669 		 (sizeof(xfs_agino_t) * bucket);
2670 	xfs_trans_log_buf(tp, agibp, offset,
2671 			  (offset + sizeof(xfs_agino_t) - 1));
2672 
2673 	error = xfs_trans_commit(tp);
2674 	if (error)
2675 		goto out_error;
2676 	return;
2677 
2678 out_abort:
2679 	xfs_trans_cancel(tp);
2680 out_error:
2681 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
2682 	return;
2683 }
2684 
2685 STATIC xfs_agino_t
2686 xlog_recover_process_one_iunlink(
2687 	struct xfs_mount		*mp,
2688 	xfs_agnumber_t			agno,
2689 	xfs_agino_t			agino,
2690 	int				bucket)
2691 {
2692 	struct xfs_buf			*ibp;
2693 	struct xfs_dinode		*dip;
2694 	struct xfs_inode		*ip;
2695 	xfs_ino_t			ino;
2696 	int				error;
2697 
2698 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
2699 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
2700 	if (error)
2701 		goto fail;
2702 
2703 	/*
2704 	 * Get the on disk inode to find the next inode in the bucket.
2705 	 */
2706 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0);
2707 	if (error)
2708 		goto fail_iput;
2709 
2710 	xfs_iflags_clear(ip, XFS_IRECOVERY);
2711 	ASSERT(VFS_I(ip)->i_nlink == 0);
2712 	ASSERT(VFS_I(ip)->i_mode != 0);
2713 
2714 	/* setup for the next pass */
2715 	agino = be32_to_cpu(dip->di_next_unlinked);
2716 	xfs_buf_relse(ibp);
2717 
2718 	/*
2719 	 * Prevent any DMAPI event from being sent when the reference on
2720 	 * the inode is dropped.
2721 	 */
2722 	ip->i_d.di_dmevmask = 0;
2723 
2724 	xfs_irele(ip);
2725 	return agino;
2726 
2727  fail_iput:
2728 	xfs_irele(ip);
2729  fail:
2730 	/*
2731 	 * We can't read in the inode this bucket points to, or this inode
2732 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
2733 	 * some inodes and space, but at least we won't hang.
2734 	 *
2735 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
2736 	 * clear the inode pointer in the bucket.
2737 	 */
2738 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
2739 	return NULLAGINO;
2740 }
2741 
2742 /*
2743  * Recover AGI unlinked lists
2744  *
2745  * This is called during recovery to process any inodes which we unlinked but
2746  * not freed when the system crashed.  These inodes will be on the lists in the
2747  * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
2748  * any inodes found on the lists. Each inode is removed from the lists when it
2749  * has been fully truncated and is freed. The freeing of the inode and its
2750  * removal from the list must be atomic.
2751  *
2752  * If everything we touch in the agi processing loop is already in memory, this
2753  * loop can hold the cpu for a long time. It runs without lock contention,
2754  * memory allocation contention, or the need to wait for I/O, and so will
2755  * run until we either run out of inodes to process, run low on memory or
2756  * run out of log space.
2757  *
2758  * This behaviour is bad for latency on single CPU and non-preemptible kernels,
2759  * and can prevent other filesystem work (such as CIL pushes) from running. This
2760  * can lead to deadlocks if the recovery process runs out of log reservation
2761  * space. Hence we need to yield the CPU when there is other kernel work
2762  * scheduled on this CPU to ensure other scheduled work can run without undue
2763  * latency.
2764  */
2765 STATIC void
2766 xlog_recover_process_iunlinks(
2767 	struct xlog	*log)
2768 {
2769 	xfs_mount_t	*mp;
2770 	xfs_agnumber_t	agno;
2771 	xfs_agi_t	*agi;
2772 	xfs_buf_t	*agibp;
2773 	xfs_agino_t	agino;
2774 	int		bucket;
2775 	int		error;
2776 
2777 	mp = log->l_mp;
2778 
2779 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
2780 		/*
2781 		 * Find the agi for this ag.
2782 		 */
2783 		error = xfs_read_agi(mp, NULL, agno, &agibp);
2784 		if (error) {
2785 			/*
2786 			 * The AGI is corrupt. Don't process it.
2787 			 *
2788 			 * We should probably mark the filesystem as corrupt
2789 			 * after we've recovered all the AGs we can....
2790 			 */
2791 			continue;
2792 		}
2793 		/*
2794 		 * Unlock the buffer so that it can be acquired in the normal
2795 		 * course of the transaction to truncate and free each inode.
2796 		 * Because we are not racing with anyone else here for the AGI
2797 		 * buffer, we don't even need to hold it locked to read the
2798 		 * initial unlinked bucket entries out of the buffer. We keep
2799 		 * buffer reference though, so that it stays pinned in memory
2800 		 * while we need the buffer.
2801 		 */
2802 		agi = agibp->b_addr;
2803 		xfs_buf_unlock(agibp);
2804 
2805 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
2806 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
2807 			while (agino != NULLAGINO) {
2808 				agino = xlog_recover_process_one_iunlink(mp,
2809 							agno, agino, bucket);
2810 				cond_resched();
2811 			}
2812 		}
2813 		xfs_buf_rele(agibp);
2814 	}
2815 }
2816 
2817 STATIC void
2818 xlog_unpack_data(
2819 	struct xlog_rec_header	*rhead,
2820 	char			*dp,
2821 	struct xlog		*log)
2822 {
2823 	int			i, j, k;
2824 
2825 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
2826 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
2827 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
2828 		dp += BBSIZE;
2829 	}
2830 
2831 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
2832 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
2833 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
2834 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2835 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2836 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
2837 			dp += BBSIZE;
2838 		}
2839 	}
2840 }
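
/*
 * Background for the loops above: when a record is written, the first
 * word of every 512-byte basic block is overwritten with the cycle
 * number, and the displaced words are stashed in h_cycle_data (and, for
 * v2 logs, in the extended headers that follow).  Picking the stashed
 * word for block i of a record is just index arithmetic (the example_*
 * name is hypothetical):
 */
static inline __be32
example_saved_cycle_word(
	struct xlog_rec_header	*rhead,
	int			i)	/* basic block index in the record */
{
	int			per_hdr = XLOG_HEADER_CYCLE_SIZE / BBSIZE;
	xlog_in_core_2_t	*xhdr = (xlog_in_core_2_t *)rhead;

	if (i < per_hdr)
		return rhead->h_cycle_data[i];
	return xhdr[i / per_hdr].hic_xheader.xh_cycle_data[i % per_hdr];
}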
2841 
2842 /*
2843  * CRC check, unpack and process a log record.
2844  */
2845 STATIC int
2846 xlog_recover_process(
2847 	struct xlog		*log,
2848 	struct hlist_head	rhash[],
2849 	struct xlog_rec_header	*rhead,
2850 	char			*dp,
2851 	int			pass,
2852 	struct list_head	*buffer_list)
2853 {
2854 	__le32			old_crc = rhead->h_crc;
2855 	__le32			crc;
2856 
2857 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
2858 
2859 	/*
2860 	 * Nothing else to do if this is a CRC verification pass. Just return
2861 	 * if this a record with a non-zero crc. Unfortunately, mkfs always
2862 	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
2863 	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
2864 	 * know precisely what failed.
2865 	 */
2866 	if (pass == XLOG_RECOVER_CRCPASS) {
2867 		if (old_crc && crc != old_crc)
2868 			return -EFSBADCRC;
2869 		return 0;
2870 	}
2871 
2872 	/*
2873 	 * We're in the normal recovery path. Warn on a mismatch if the CRC in
2874 	 * the header is non-zero or the filesystem has CRCs enabled. The zero
2875 	 * CRC check prevents warnings from being emitted when upgrading the
2876 	 * kernel from one that did not add CRCs by default.
2877 	 */
2878 	if (crc != old_crc) {
2879 		if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
2880 			xfs_alert(log->l_mp,
2881 		"log record CRC mismatch: found 0x%x, expected 0x%x.",
2882 					le32_to_cpu(old_crc),
2883 					le32_to_cpu(crc));
2884 			xfs_hex_dump(dp, 32);
2885 		}
2886 
2887 		/*
2888 		 * If the filesystem is CRC enabled, this mismatch becomes a
2889 		 * fatal log corruption failure.
2890 		 */
2891 		if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
2892 			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
2893 			return -EFSCORRUPTED;
2894 		}
2895 	}
2896 
2897 	xlog_unpack_data(rhead, dp, log);
2898 
2899 	return xlog_recover_process_data(log, rhash, rhead, dp, pass,
2900 					 buffer_list);
2901 }
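
/*
 * The CRC handling in the normal recovery path above distills to three
 * outcomes: a mismatch is fatal only on a CRC-enabled (v5) filesystem, a
 * mismatch against a non-zero on-disk CRC on an older filesystem is
 * advisory, and a zero on-disk CRC (as written by old mkfs) is accepted
 * silently.  Sketch of the decision (the example_* names are
 * hypothetical):
 */
enum example_crc_action {
	EXAMPLE_CRC_OK,			/* proceed silently */
	EXAMPLE_CRC_WARN,		/* proceed, but warn */
	EXAMPLE_CRC_FATAL,		/* warn, then fail with -EFSCORRUPTED */
};

static inline enum example_crc_action
example_crc_policy(
	bool	match,			/* computed CRC equals on-disk CRC */
	bool	disk_crc_zero,		/* on-disk CRC is zero (old mkfs) */
	bool	crc_enabled)		/* v5 superblock */
{
	if (match)
		return EXAMPLE_CRC_OK;
	if (crc_enabled)
		return EXAMPLE_CRC_FATAL;
	return disk_crc_zero ? EXAMPLE_CRC_OK : EXAMPLE_CRC_WARN;
}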
2902 
2903 STATIC int
2904 xlog_valid_rec_header(
2905 	struct xlog		*log,
2906 	struct xlog_rec_header	*rhead,
2907 	xfs_daddr_t		blkno)
2908 {
2909 	int			hlen;
2910 
2911 	if (XFS_IS_CORRUPT(log->l_mp,
2912 			   rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
2913 		return -EFSCORRUPTED;
2914 	if (XFS_IS_CORRUPT(log->l_mp,
2915 			   (!rhead->h_version ||
2916 			   (be32_to_cpu(rhead->h_version) &
2917 			    (~XLOG_VERSION_OKBITS))))) {
2918 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
2919 			__func__, be32_to_cpu(rhead->h_version));
2920 		return -EFSCORRUPTED;
2921 	}
2922 
2923 	/* LR body must have data or it wouldn't have been written */
2924 	hlen = be32_to_cpu(rhead->h_len);
2925 	if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > INT_MAX))
2926 		return -EFSCORRUPTED;
2927 	if (XFS_IS_CORRUPT(log->l_mp,
2928 			   blkno > log->l_logBBsize || blkno > INT_MAX))
2929 		return -EFSCORRUPTED;
2930 	return 0;
2931 }
2932 
2933 /*
2934  * Read the log from tail to head and process the log records found.
2935  * Handle the two cases where the tail and head are in the same cycle
2936  * and where the active portion of the log wraps around the end of
2937  * the physical log separately.  The pass parameter is passed through
2938  * to the routines called to process the data and is not looked at
2939  * here.
2940  */
2941 STATIC int
2942 xlog_do_recovery_pass(
2943 	struct xlog		*log,
2944 	xfs_daddr_t		head_blk,
2945 	xfs_daddr_t		tail_blk,
2946 	int			pass,
2947 	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
2948 {
2949 	xlog_rec_header_t	*rhead;
2950 	xfs_daddr_t		blk_no, rblk_no;
2951 	xfs_daddr_t		rhead_blk;
2952 	char			*offset;
2953 	char			*hbp, *dbp;
2954 	int			error = 0, h_size, h_len;
2955 	int			error2 = 0;
2956 	int			bblks, split_bblks;
2957 	int			hblks, split_hblks, wrapped_hblks;
2958 	int			i;
2959 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
2960 	LIST_HEAD		(buffer_list);
2961 
2962 	ASSERT(head_blk != tail_blk);
2963 	blk_no = rhead_blk = tail_blk;
2964 
2965 	for (i = 0; i < XLOG_RHASH_SIZE; i++)
2966 		INIT_HLIST_HEAD(&rhash[i]);
2967 
2968 	/*
2969 	 * Read the header of the tail block and get the iclog buffer size from
2970 	 * h_size.  Use this to tell how many sectors make up the log header.
2971 	 */
2972 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
2973 		/*
2974 		 * When using variable length iclogs, read first sector of
2975 		 * iclog header and extract the header size from it.  Get a
2976 		 * new hbp that is the correct size.
2977 		 */
2978 		hbp = xlog_alloc_buffer(log, 1);
2979 		if (!hbp)
2980 			return -ENOMEM;
2981 
2982 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
2983 		if (error)
2984 			goto bread_err1;
2985 
2986 		rhead = (xlog_rec_header_t *)offset;
2987 		error = xlog_valid_rec_header(log, rhead, tail_blk);
2988 		if (error)
2989 			goto bread_err1;
2990 
2991 		/*
2992 		 * xfsprogs has a bug where record length is based on lsunit but
2993 		 * h_size (iclog size) is hardcoded to 32k. Now that we
2994 		 * unconditionally CRC verify the unmount record, this means the
2995 		 * log buffer can be too small for the record and cause an
2996 		 * overrun.
2997 		 *
2998 		 * Detect this condition here. Use lsunit for the buffer size as
2999 		 * long as this looks like the mkfs case. Otherwise, return an
3000 		 * error to avoid a buffer overrun.
3001 		 */
3002 		h_size = be32_to_cpu(rhead->h_size);
3003 		h_len = be32_to_cpu(rhead->h_len);
3004 		if (h_len > h_size) {
3005 			if (h_len <= log->l_mp->m_logbsize &&
3006 			    be32_to_cpu(rhead->h_num_logops) == 1) {
3007 				xfs_warn(log->l_mp,
3008 		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
3009 					 h_size, log->l_mp->m_logbsize);
3010 				h_size = log->l_mp->m_logbsize;
3011 			} else {
3012 				XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
3013 						log->l_mp);
3014 				error = -EFSCORRUPTED;
3015 				goto bread_err1;
3016 			}
3017 		}
3018 
3019 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3020 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3021 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3022 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
3023 				hblks++;
3024 			kmem_free(hbp);
3025 			hbp = xlog_alloc_buffer(log, hblks);
3026 		} else {
3027 			hblks = 1;
3028 		}
3029 	} else {
3030 		ASSERT(log->l_sectBBsize == 1);
3031 		hblks = 1;
3032 		hbp = xlog_alloc_buffer(log, 1);
3033 		h_size = XLOG_BIG_RECORD_BSIZE;
3034 	}
3035 
3036 	if (!hbp)
3037 		return -ENOMEM;
3038 	dbp = xlog_alloc_buffer(log, BTOBB(h_size));
3039 	if (!dbp) {
3040 		kmem_free(hbp);
3041 		return -ENOMEM;
3042 	}
3043 
3044 	memset(rhash, 0, sizeof(rhash));
3045 	if (tail_blk > head_blk) {
3046 		/*
3047 		 * Perform recovery around the end of the physical log.
3048 		 * When the head is not on the same cycle number as the tail,
3049 		 * we can't do a sequential recovery.
3050 		 */
3051 		while (blk_no < log->l_logBBsize) {
3052 			/*
3053 			 * Check for header wrapping around physical end-of-log
3054 			 */
3055 			offset = hbp;
3056 			split_hblks = 0;
3057 			wrapped_hblks = 0;
3058 			if (blk_no + hblks <= log->l_logBBsize) {
3059 				/* Read header in one read */
3060 				error = xlog_bread(log, blk_no, hblks, hbp,
3061 						   &offset);
3062 				if (error)
3063 					goto bread_err2;
3064 			} else {
3065 				/* This LR is split across physical log end */
3066 				if (blk_no != log->l_logBBsize) {
3067 					/* some data before physical log end */
3068 					ASSERT(blk_no <= INT_MAX);
3069 					split_hblks = log->l_logBBsize - (int)blk_no;
3070 					ASSERT(split_hblks > 0);
3071 					error = xlog_bread(log, blk_no,
3072 							   split_hblks, hbp,
3073 							   &offset);
3074 					if (error)
3075 						goto bread_err2;
3076 				}
3077 
3078 				/*
3079 				 * Note: this black magic still works with
3080 				 * large sector sizes (non-512) only because:
3081 				 * - we increased the buffer size originally
3082 				 *   by 1 sector giving us enough extra space
3083 				 *   for the second read;
3084 				 * - the log start is guaranteed to be sector
3085 				 *   aligned;
3086 				 * - we read the log end (LR header start)
3087 				 *   _first_, then the log start (LR header end)
3088 				 *   - order is important.
3089 				 */
3090 				wrapped_hblks = hblks - split_hblks;
3091 				error = xlog_bread_noalign(log, 0,
3092 						wrapped_hblks,
3093 						offset + BBTOB(split_hblks));
3094 				if (error)
3095 					goto bread_err2;
3096 			}
3097 			rhead = (xlog_rec_header_t *)offset;
3098 			error = xlog_valid_rec_header(log, rhead,
3099 						split_hblks ? blk_no : 0);
3100 			if (error)
3101 				goto bread_err2;
3102 
3103 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3104 			blk_no += hblks;
3105 
3106 			/*
3107 			 * Read the log record data in multiple reads if it
3108 			 * wraps around the end of the log. Note that if the
3109 			 * header already wrapped, blk_no could point past the
3110 			 * end of the log. The record data is contiguous in
3111 			 * that case.
3112 			 */
3113 			if (blk_no + bblks <= log->l_logBBsize ||
3114 			    blk_no >= log->l_logBBsize) {
3115 				rblk_no = xlog_wrap_logbno(log, blk_no);
3116 				error = xlog_bread(log, rblk_no, bblks, dbp,
3117 						   &offset);
3118 				if (error)
3119 					goto bread_err2;
3120 			} else {
3121 				/* This log record is split across the
3122 				 * physical end of log */
3123 				offset = dbp;
3124 				split_bblks = 0;
3125 				if (blk_no != log->l_logBBsize) {
3126 					/* some data is before the physical
3127 					 * end of log */
3128 					ASSERT(!wrapped_hblks);
3129 					ASSERT(blk_no <= INT_MAX);
3130 					split_bblks =
3131 						log->l_logBBsize - (int)blk_no;
3132 					ASSERT(split_bblks > 0);
3133 					error = xlog_bread(log, blk_no,
3134 							split_bblks, dbp,
3135 							&offset);
3136 					if (error)
3137 						goto bread_err2;
3138 				}
3139 
3140 				/*
3141 				 * Note: this black magic still works with
3142 				 * large sector sizes (non-512) only because:
3143 				 * - we increased the buffer size originally
3144 				 *   by 1 sector giving us enough extra space
3145 				 *   for the second read;
3146 				 * - the log start is guaranteed to be sector
3147 				 *   aligned;
3148 				 * - we read the log end (LR header start)
3149 				 *   _first_, then the log start (LR header end)
3150 				 *   - order is important.
3151 				 */
3152 				error = xlog_bread_noalign(log, 0,
3153 						bblks - split_bblks,
3154 						offset + BBTOB(split_bblks));
3155 				if (error)
3156 					goto bread_err2;
3157 			}
3158 
3159 			error = xlog_recover_process(log, rhash, rhead, offset,
3160 						     pass, &buffer_list);
3161 			if (error)
3162 				goto bread_err2;
3163 
3164 			blk_no += bblks;
3165 			rhead_blk = blk_no;
3166 		}
3167 
3168 		ASSERT(blk_no >= log->l_logBBsize);
3169 		blk_no -= log->l_logBBsize;
3170 		rhead_blk = blk_no;
3171 	}
3172 
3173 	/* read first part of physical log */
3174 	while (blk_no < head_blk) {
3175 		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3176 		if (error)
3177 			goto bread_err2;
3178 
3179 		rhead = (xlog_rec_header_t *)offset;
3180 		error = xlog_valid_rec_header(log, rhead, blk_no);
3181 		if (error)
3182 			goto bread_err2;
3183 
3184 		/* blocks in data section */
3185 		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3186 		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3187 				   &offset);
3188 		if (error)
3189 			goto bread_err2;
3190 
3191 		error = xlog_recover_process(log, rhash, rhead, offset, pass,
3192 					     &buffer_list);
3193 		if (error)
3194 			goto bread_err2;
3195 
3196 		blk_no += bblks + hblks;
3197 		rhead_blk = blk_no;
3198 	}
3199 
3200  bread_err2:
3201 	kmem_free(dbp);
3202  bread_err1:
3203 	kmem_free(hbp);
3204 
3205 	/*
3206 	 * Submit buffers that have been added from the last record processed,
3207 	 * regardless of error status.
3208 	 */
3209 	if (!list_empty(&buffer_list))
3210 		error2 = xfs_buf_delwri_submit(&buffer_list);
3211 
3212 	if (error && first_bad)
3213 		*first_bad = rhead_blk;
3214 
3215 	/*
3216 	 * Transactions are freed at commit time but transactions without commit
3217 	 * records on disk are never committed. Free any that may be left in the
3218 	 * hash table.
3219 	 */
3220 	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
3221 		struct hlist_node	*tmp;
3222 		struct xlog_recover	*trans;
3223 
3224 		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
3225 			xlog_recover_free_trans(trans);
3226 	}
3227 
3228 	return error ? error : error2;
3229 }
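
/*
 * The wrapped data reads above rely on the block cursor running past the
 * physical end of the log by less than one full log length; the
 * adjustment back into range is then a single subtraction.  (The real
 * helper used above is xlog_wrap_logbno(); the example_* name below is
 * hypothetical and assumes the cursor has wrapped at most once.)
 */
static inline xfs_daddr_t
example_wrap_logbno(
	xfs_daddr_t	bno,
	xfs_daddr_t	log_size)
{
	return bno >= log_size ? bno - log_size : bno;
}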
3230 
3231 /*
3232  * Do the recovery of the log.  We actually do this in two phases.
3233  * The two passes are necessary in order to implement the function
3234  * of cancelling a record written into the log.  The first pass
3235  * determines those things which have been cancelled, and the
3236  * second pass replays log items normally except for those which
3237  * have been cancelled.  The handling of the replay and cancellations
3238  * takes place in the log item type specific routines.
3239  *
3240  * The table of items which have cancel records in the log is allocated
3241  * and freed at this level, since only here do we know when all of
3242  * the log recovery has been completed.
3243  */
3244 STATIC int
3245 xlog_do_log_recovery(
3246 	struct xlog	*log,
3247 	xfs_daddr_t	head_blk,
3248 	xfs_daddr_t	tail_blk)
3249 {
3250 	int		error, i;
3251 
3252 	ASSERT(head_blk != tail_blk);
3253 
3254 	/*
3255 	 * First do a pass to find all of the cancelled buf log items.
3256 	 * Store them in the buf_cancel_table for use in the second pass.
3257 	 */
3258 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3259 						 sizeof(struct list_head),
3260 						 0);
3261 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3262 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3263 
3264 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3265 				      XLOG_RECOVER_PASS1, NULL);
3266 	if (error != 0) {
3267 		kmem_free(log->l_buf_cancel_table);
3268 		log->l_buf_cancel_table = NULL;
3269 		return error;
3270 	}
3271 	/*
3272 	 * Then do a second pass to actually recover the items in the log.
3273 	 * When it is complete free the table of buf cancel items.
3274 	 */
3275 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3276 				      XLOG_RECOVER_PASS2, NULL);
3277 #ifdef DEBUG
3278 	if (!error) {
3279 		int	i;
3280 
3281 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3282 			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3283 	}
3284 #endif	/* DEBUG */
3285 
3286 	kmem_free(log->l_buf_cancel_table);
3287 	log->l_buf_cancel_table = NULL;
3288 
3289 	return error;
3290 }
3291 
3292 /*
3293  * Do the actual recovery
3294  */
3295 STATIC int
3296 xlog_do_recover(
3297 	struct xlog	*log,
3298 	xfs_daddr_t	head_blk,
3299 	xfs_daddr_t	tail_blk)
3300 {
3301 	struct xfs_mount *mp = log->l_mp;
3302 	int		error;
3303 	xfs_buf_t	*bp;
3304 	xfs_sb_t	*sbp;
3305 
3306 	trace_xfs_log_recover(log, head_blk, tail_blk);
3307 
3308 	/*
3309 	 * First replay the images in the log.
3310 	 */
3311 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
3312 	if (error)
3313 		return error;
3314 
3315 	/*
3316 	 * If IO errors happened during recovery, bail out.
3317 	 */
3318 	if (XFS_FORCED_SHUTDOWN(mp)) {
3319 		return -EIO;
3320 	}
3321 
3322 	/*
3323 	 * We now update the tail_lsn since much of the recovery has completed
3324 	 * and there may be space available to use.  If there were no extent
3325 	 * frees or iunlinks, we can free up the entire log and set the tail_lsn to
3326 	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
3327 	 * lsn of the last known good LR on disk.  If there are extent frees
3328 	 * or iunlinks they will have some entries in the AIL; so we look at
3329 	 * the AIL to determine how to set the tail_lsn.
3330 	 */
3331 	xlog_assign_tail_lsn(mp);
3332 
3333 	/*
3334 	 * Now that we've finished replaying all buffer and inode
3335 	 * updates, re-read in the superblock and reverify it.
3336 	 */
3337 	bp = xfs_getsb(mp);
3338 	bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
3339 	ASSERT(!(bp->b_flags & XBF_WRITE));
3340 	bp->b_flags |= XBF_READ;
3341 	bp->b_ops = &xfs_sb_buf_ops;
3342 
3343 	error = xfs_buf_submit(bp);
3344 	if (error) {
3345 		if (!XFS_FORCED_SHUTDOWN(mp)) {
3346 			xfs_buf_ioerror_alert(bp, __this_address);
3347 			ASSERT(0);
3348 		}
3349 		xfs_buf_relse(bp);
3350 		return error;
3351 	}
3352 
3353 	/* Convert superblock from on-disk format */
3354 	sbp = &mp->m_sb;
3355 	xfs_sb_from_disk(sbp, bp->b_addr);
3356 	xfs_buf_relse(bp);
3357 
3358 	/* re-initialise in-core superblock and geometry structures */
3359 	xfs_reinit_percpu_counters(mp);
3360 	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
3361 	if (error) {
3362 		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
3363 		return error;
3364 	}
3365 	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
3366 
3367 	xlog_recover_check_summary(log);
3368 
3369 	/* Normal transactions can now occur */
3370 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3371 	return 0;
3372 }
3373 
3374 /*
3375  * Perform recovery and re-initialize some log variables in xlog_find_tail.
3376  *
3377  * Return error or zero.
3378  */
3379 int
3380 xlog_recover(
3381 	struct xlog	*log)
3382 {
3383 	xfs_daddr_t	head_blk, tail_blk;
3384 	int		error;
3385 
3386 	/* find the tail of the log */
3387 	error = xlog_find_tail(log, &head_blk, &tail_blk);
3388 	if (error)
3389 		return error;
3390 
3391 	/*
3392 	 * The superblock was read before the log was available and thus the LSN
3393 	 * could not be verified. Check the superblock LSN against the current
3394 	 * LSN now that it's known.
3395 	 */
3396 	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
3397 	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
3398 		return -EINVAL;
3399 
3400 	if (tail_blk != head_blk) {
3401 		/* There used to be a comment here:
3402 		 *
3403 		 * disallow recovery on read-only mounts.  note -- mount
3404 		 * checks for ENOSPC and turns it into an intelligent
3405 		 * error message.
3406 		 * ...but this is no longer true.  Now, unless you specify
3407 		 * NORECOVERY (in which case this function would never be
3408 		 * called), we just go ahead and recover.  We do this all
3409 		 * under the vfs layer, so we can get away with it unless
3410 		 * the device itself is read-only, in which case we fail.
3411 		 */
3412 		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3413 			return error;
3414 		}
3415 
3416 		/*
3417 		 * Version 5 superblock log feature mask validation. We know the
3418 		 * log is dirty so check if there are any unknown log features
3419 		 * in what we need to recover. If there are unknown features
3420 		 * (e.g. unsupported transactions), then simply reject the
3421 		 * attempt at recovery before touching anything.
3422 		 */
3423 		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
3424 		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
3425 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
3426 			xfs_warn(log->l_mp,
3427 "Superblock has unknown incompatible log features (0x%x) enabled.",
3428 				(log->l_mp->m_sb.sb_features_log_incompat &
3429 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
3430 			xfs_warn(log->l_mp,
3431 "The log can not be fully and/or safely recovered by this kernel.");
3432 			xfs_warn(log->l_mp,
3433 "Please recover the log on a kernel that supports the unknown features.");
3434 			return -EINVAL;
3435 		}
3436 
3437 		/*
3438 		 * Delay log recovery if the debug hook is set. This is debug
3439 		 * instrumentation to coordinate simulation of I/O failures with
3440 		 * log recovery.
3441 		 */
3442 		if (xfs_globals.log_recovery_delay) {
3443 			xfs_notice(log->l_mp,
3444 				"Delaying log recovery for %d seconds.",
3445 				xfs_globals.log_recovery_delay);
3446 			msleep(xfs_globals.log_recovery_delay * 1000);
3447 		}
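		/*
		 * The delay is normally armed from userspace before mount;
		 * assuming the usual sysfs location of the XFS debug knobs,
		 * something like:
		 *
		 *	echo 30 > /sys/fs/xfs/debug/log_recovery_delay
		 */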
3448 
3449 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3450 				log->l_mp->m_logname ? log->l_mp->m_logname
3451 						     : "internal");
3452 
3453 		error = xlog_do_recover(log, head_blk, tail_blk);
3454 		log->l_flags |= XLOG_RECOVERY_NEEDED;
3455 	}
3456 	return error;
3457 }
3458 
3459 /*
3460  * In the first part of recovery we replay inodes and buffers and build
3461  * up the list of intent items (extent frees and the like) which need to
3462  * be processed.  Here we process those intent items and clean up the
3463  * on-disk unlinked inode lists.  This is separated from the first part
3464  * of recovery so that the root and real-time bitmap inodes can be read
3465  * in from disk in between the two stages.  This is necessary so that we
3466  * can free space in the real-time portion of the file system.
3467  */
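/*
 * For reference, the mount path is expected to drive these entry points
 * roughly as sketched below (a simplified outline, not the exact call
 * chain):
 *
 *	xfs_log_mount()
 *	    xlog_recover()		<- replay phase, if the log is dirty
 *	xfs_mountfs() reads the root and realtime inodes
 *	xfs_log_mount_finish()
 *	    xlog_recover_finish()	<- intents and unlinked inode lists
 *	(on a failed mount: xlog_recover_cancel())
 */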
3468 int
3469 xlog_recover_finish(
3470 	struct xlog	*log)
3471 {
3472 	/*
3473 	 * Now we're ready to do the transactions needed for the
3474 	 * rest of recovery.  Start with completing all the recovered
3475 	 * intent records (extent frees and the like) and then process
3476 	 * the unlinked inode lists.  At this point, we essentially run
3477 	 * in normal mode except that we're still performing recovery
3478 	 * actions rather than accepting new requests.
3479 	 */
3480 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3481 		int	error;
3482 		error = xlog_recover_process_intents(log);
3483 		if (error) {
3484 			xfs_alert(log->l_mp, "Failed to recover intents");
3485 			return error;
3486 		}
3487 
3488 		/*
3489 		 * Sync the log to get all the intents out of the AIL.
3490 		 * This isn't absolutely necessary, but it helps in
3491 		 * case the unlink transactions would have problems
3492 		 * pushing the intents out of the way.
3493 		 */
3494 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3495 
3496 		xlog_recover_process_iunlinks(log);
3497 
3498 		xlog_recover_check_summary(log);
3499 
3500 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
3501 				log->l_mp->m_logname ? log->l_mp->m_logname
3502 						     : "internal");
3503 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3504 	} else {
3505 		xfs_info(log->l_mp, "Ending clean mount");
3506 	}
3507 	return 0;
3508 }
3509 
3510 void
3511 xlog_recover_cancel(
3512 	struct xlog	*log)
3513 {
3514 	if (log->l_flags & XLOG_RECOVERY_NEEDED)
3515 		xlog_recover_cancel_intents(log);
3516 }
3517 
3518 #if defined(DEBUG)
3519 /*
3520  * Read all of the agf and agi counters and accumulate the free block and
3521  * inode totals; note that nothing is compared against the superblock here.
3522  */
3523 STATIC void
3524 xlog_recover_check_summary(
3525 	struct xlog	*log)
3526 {
3527 	struct xfs_mount	*mp;
3528 	struct xfs_buf		*agfbp;
3529 	struct xfs_buf		*agibp;
3530 	xfs_agnumber_t	agno;
3531 	uint64_t	freeblks;
3532 	uint64_t	itotal;
3533 	uint64_t	ifree;
3534 	int		error;
3535 
3536 	mp = log->l_mp;
3537 
3538 	freeblks = 0LL;
3539 	itotal = 0LL;
3540 	ifree = 0LL;
3541 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3542 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
3543 		if (error) {
3544 			xfs_alert(mp, "%s agf read failed agno %d error %d",
3545 						__func__, agno, error);
3546 		} else {
3547 			struct xfs_agf	*agfp = agfbp->b_addr;
3548 
3549 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
3550 				    be32_to_cpu(agfp->agf_flcount);
3551 			xfs_buf_relse(agfbp);
3552 		}
3553 
3554 		error = xfs_read_agi(mp, NULL, agno, &agibp);
3555 		if (error) {
3556 			xfs_alert(mp, "%s agi read failed agno %d error %d",
3557 						__func__, agno, error);
3558 		} else {
3559 			struct xfs_agi	*agi = agibp->b_addr;
3560 
3561 			itotal += be32_to_cpu(agi->agi_count);
3562 			ifree += be32_to_cpu(agi->agi_freecount);
3563 			xfs_buf_relse(agibp);
3564 		}
3565 	}
3566 }
3567 #endif /* DEBUG */
3568