/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_dir2.h"

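/* Average of two block numbers, used as the binary search midpoint below. */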
#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify that the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */

static inline int
xlog_buf_bbcount_valid(
	struct xlog	*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
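	/*
	 * Worked example (illustrative numbers): with 4k log sectors
	 * (l_sectBBsize == 8), a request for nbblks == 3 first grows to
	 * 11 to cover a misaligned starting block, then rounds up to 16
	 * basic blocks, i.e. two full log sectors.
	 */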

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}

STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
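/*
 * Illustrative example: with l_sectBBsize == 8 and blk_no == 21, the
 * enclosing read started at block 16, so block 21's data begins
 * BBTOB(21 & 7) == BBTOB(5) bytes into the buffer.
 */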
STATIC char *
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_READ(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_buf_submit_wait(bp);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}

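/*
 * Read nbblks basic blocks at blk_no and return, via *offset, a pointer
 * to the start of that block's data within the sector-aligned buffer.
 */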
STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	char		**offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}

/*
 * Read at an offset into the buffer. Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	char		*offset)
{
	char		*orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * Check a log record header against this mount: a nil uuid is assumed
 * to be an IRIX log, otherwise the header's uuid must match the
 * superblock's.
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_force_shutdown(bp->b_target->bt_mount,
						SHUTDOWN_META_IO_ERROR);
		}
	}
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the on-disk contents
 * will not necessarily be perfect.
 */
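/*
 * Illustrative trace (made-up cycle numbers): searching a region stamped
 * "1 1 2 2 2" for the first block of cycle 2 repeatedly halves the
 * first_blk/end_blk interval until the two bounds are adjacent, and
 * *last_blk lands on the leftmost block stamped 2.
 */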
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}
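	/*
	 * For instance (illustrative): nbblks == 8 gives ffs(8) == 4, so
	 * bufblks starts at 16 and is halved on each failed allocation.
	 * Correctness does not require bufblks >= nbblks, because the
	 * loop below reads in bufblks-sized chunks.
	 */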

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks is the number of
 * blocks potentially verified on a previous call to this routine; it is
 * nonzero when the last log record is split over the end of the physical
 * log, in which case those blocks were read on an earlier pass.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EIO;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block of the
	 * log record _before_ the head), so we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}
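	/*
	 * Example (illustrative sizes): a v2 log with h_size == 64k and a
	 * 32k cycle-data window per header needs xhdrs == 2, so a fully
	 * written record spans BTOBB(h_len) + 2 blocks on disk, which is
	 * what the check below compares against.
	 */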

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number equal
 * to the current cycle number minus 1 won't be present in the log if we
 * start writing from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto bp_err;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  A "bad" break
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In that case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Check the log tail for torn writes. This is required when torn writes are
 * detected at the head and the head had to be walked back to a previous record.
 * The tail of the previous record must now be verified to ensure the torn
 * writes didn't corrupt the previous tail.
 *
 * Return an error if CRC verification fails as recovery cannot proceed.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk)
{
	struct xlog_rec_header	*thead;
	struct xfs_buf		*bp;
	xfs_daddr_t		first_bad;
	int			count;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_head;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	/*
	 * Seek XLOG_MAX_ICLOGS + 1 records past the current tail record to get
	 * a temporary head block that points after the last possible
	 * concurrently written record of the tail.
	 */
	count = xlog_seek_logrec_hdr(log, head_blk, tail_blk,
				     XLOG_MAX_ICLOGS + 1, bp, &tmp_head, &thead,
				     &wrapped);
	if (count < 0) {
		error = count;
		goto out;
	}

	/*
	 * If the call above didn't find XLOG_MAX_ICLOGS + 1 records, we ran
	 * into the actual log head. tmp_head points to the start of the record
	 * so update it to the actual head block.
	 */
	if (count < XLOG_MAX_ICLOGS + 1)
		tmp_head = head_blk;

	/*
	 * We now have a tail and temporary head block that covers at least
	 * XLOG_MAX_ICLOGS records from the tail. We need to verify that these
	 * records were completely written. Run a CRC verification pass from
	 * tail to head and return the result.
	 */
	error = xlog_do_recovery_pass(log, tmp_head, tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	struct xfs_buf		*bp,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	struct xfs_buf		*tmp_bp;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/bp pointers from the caller.
	 */
	tmp_bp = xlog_get_bp(log, 1);
	if (!tmp_bp)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_bp, &tmp_rhead_blk,
				      &tmp_rhead, &tmp_wrapped);
	xlog_put_bp(tmp_bp);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if (error == -EFSBADCRC) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, bp,
					      rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}

		/*
		 * Now verify the tail based on the updated head. This is
		 * required because the torn writes trimmed from the head could
		 * have been written over the tail of a previous record. Return
		 * any errors since recovery cannot proceed if the tail is
		 * corrupt.
		 *
		 * XXX: This leaves a gap in truly robust protection from torn
		 * writes in the log. If the head is behind the tail, the tail
		 * pushes forward to create some space and then a crash occurs
		 * causing the writes into the previous record's tail region to
		 * tear, log recovery isn't able to recover.
		 *
		 * How likely is this to occur? If possible, can we do something
		 * more intelligent here? Is it safe to push the tail forward if
		 * we can determine that the tail is within the range of the
		 * torn write (e.g., the kernel can only overwrite the tail if
		 * it has actually been pushed forward)? Alternatively, could we
		 * somehow prevent this condition at runtime?
		 */
		error = xlog_verify_tail(log, *head_blk, *tail_blk);
	}

	return error;
}

/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	struct xfs_buf		*bp,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since the record could end past the last block in
	 * the physical log, we wrap the computed block numbers back into the
	 * log before comparing to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one, so
	 * we pass the lsn of the unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
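	/*
	 * Example (illustrative sizes): with h_size == 64k and a 32k
	 * cycle-data window per header, hblks == 2, so the block after the
	 * unmount record's transaction is rhead_blk + 2 + BTOBB(h_len),
	 * wrapped to the physical log size.
	 */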
	after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
	after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = rhead_blk + hblks;
		umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}

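/*
 * Reset the in-core log state (current/previous block, cycle, tail and
 * last-sync LSNs, grant heads) from the last record found at the head.
 */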
static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle by
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	xfs_buf_t		*bp;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		goto done;	/* release bp on the error path too */
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		error = -EIO;
		goto done;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, bp, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, bp,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, bp,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	char		*offset;
	uint	        first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t     num_scan_bblks;
	int	        error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		error = -EINVAL;
		goto bp_err;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return 1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

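/*
 * Write @blocks stub log record headers starting at @start_block, all
 * stamped with @cycle and pointing back at the given tail, preserving the
 * contents of partially overwritten log sectors via read-modify-write.
 */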
STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
							bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

 out_put_bp:
	xlog_put_bp(bp);
	return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block - head_block;
	}
1752 
1753 	/*
1754 	 * If the head is right up against the tail, we can't clear
1755 	 * anything.
1756 	 */
1757 	if (tail_distance <= 0) {
1758 		ASSERT(tail_distance == 0);
1759 		return 0;
1760 	}
1761 
1762 	max_distance = XLOG_TOTAL_REC_SHIFT(log);
1763 	/*
1764 	 * Take the smaller of the maximum amount of outstanding I/O
1765 	 * we could have and the distance to the tail to clear out.
1766 	 * We take the smaller so that we don't overwrite the tail and
1767 	 * we don't waste all day writing from the head to the tail
1768 	 * for no reason.
1769 	 */
1770 	max_distance = MIN(max_distance, tail_distance);
1771 
1772 	if ((head_block + max_distance) <= log->l_logBBsize) {
1773 		/*
1774 		 * We can stomp all the blocks we need to without
1775 		 * wrapping around the end of the log.  Just do it
1776 		 * in a single write.  Use the cycle number of the
1777 		 * current cycle minus one so that the log will look like:
1778 		 *     n ... | n - 1 ...
1779 		 */
1780 		error = xlog_write_log_records(log, (head_cycle - 1),
1781 				head_block, max_distance, tail_cycle,
1782 				tail_block);
1783 		if (error)
1784 			return error;
1785 	} else {
1786 		/*
1787 		 * We need to wrap around the end of the physical log in
1788 		 * order to clear all the blocks.  Do it in two separate
1789 		 * I/Os.  The first write should be from the head to the
1790 		 * end of the physical log, and it should use the current
1791 		 * cycle number minus one just like above.
1792 		 */
1793 		distance = log->l_logBBsize - head_block;
1794 		error = xlog_write_log_records(log, (head_cycle - 1),
1795 				head_block, distance, tail_cycle,
1796 				tail_block);
1797 
1798 		if (error)
1799 			return error;
1800 
1801 		/*
1802 		 * Now write the blocks at the start of the physical log.
1803 		 * This writes the remainder of the blocks we want to clear.
1804 		 * It uses the current cycle number since we're now on the
1805 		 * same cycle as the head so that we get:
1806 		 *    n ... n ... | n - 1 ...
1807 		 *    ^^^^^ blocks we're writing
1808 		 */
1809 		distance = max_distance - (log->l_logBBsize - head_block);
1810 		error = xlog_write_log_records(log, head_cycle, 0, distance,
1811 				tail_cycle, tail_block);
1812 		if (error)
1813 			return error;
1814 	}
1815 
1816 	return 0;
1817 }
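
/*
 * For illustration (hypothetical numbers, not taken from any particular log):
 * with l_logBBsize = 1000, head_cycle = 5, head_block = 900 and
 * max_distance = 200, head_block + max_distance exceeds the log size, so the
 * wrap case above issues two writes: blocks 900..999 stamped with cycle 4
 * (head_cycle - 1), then blocks 0..99 stamped with cycle 5, giving the
 *     n ... n ... | n - 1 ...
 * layout described in the comments above.
 */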
1818 
1819 /******************************************************************************
1820  *
1821  *		Log recover routines
1822  *
1823  ******************************************************************************
1824  */
1825 
1826 /*
1827  * Sort the log items in the transaction.
1828  *
1829  * The ordering constraints are defined by the inode allocation and unlink
1830  * behaviour. The rules are:
1831  *
1832  *	1. Every item is only logged once in a given transaction. Hence it
1833  *	   represents the last logged state of the item. Hence ordering is
1834  *	   dependent on the order in which operations need to be performed so
1835  *	   required initial conditions are always met.
1836  *
1837  *	2. Cancelled buffers are recorded in pass 1 in a separate table and
1838  *	   there's nothing to replay from them so we can simply cull them
1839  *	   from the transaction. However, we can't do that until after we've
1840  *	   replayed all the other items because they may be dependent on the
1841  *	   cancelled buffer and replaying the cancelled buffer can remove it
1842  *	   from the cancelled buffer table. Hence they have to be done last.
1843  *
1844  *	3. Inode allocation buffers must be replayed before inode items that
1845  *	   read the buffer and replay changes into it. For filesystems using the
1846  *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1847  *	   treated the same as inode allocation buffers as they create and
1848  *	   initialise the buffers directly.
1849  *
1850  *	4. Inode unlink buffers must be replayed after inode items are replayed.
1851  *	   This ensures that inodes are completely flushed to the inode buffer
1852  *	   in a "free" state before we remove the unlinked inode list pointer.
1853  *
1854  * Hence the ordering needs to be inode allocation buffers first, inode items
1855  * second, inode unlink buffers third and cancelled buffers last.
1856  *
1857  * But there's a problem with that - we can't tell an inode allocation buffer
1858  * apart from a regular buffer, so we can't separate them. We can, however,
1859  * tell an inode unlink buffer from the others, and so we can separate them out
1860  * from all the other buffers and move them to last.
1861  *
1862  * Hence, 4 lists, in order from head to tail:
1863  *	- buffer_list for all buffers except cancelled/inode unlink buffers
1864  *	- item_list for all non-buffer items
1865  *	- inode_buffer_list for inode unlink buffers
1866  *	- cancel_list for the cancelled buffers
1867  *
1868  * Note that we add objects to the tail of the lists so that first-to-last
1869  * ordering is preserved within the lists. Adding objects to the head of the
1870  * list means when we traverse from the head we walk them in last-to-first
1871  * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1872  * but for all other items there may be specific ordering that we need to
1873  * preserve.
1874  */
1875 STATIC int
1876 xlog_recover_reorder_trans(
1877 	struct xlog		*log,
1878 	struct xlog_recover	*trans,
1879 	int			pass)
1880 {
1881 	xlog_recover_item_t	*item, *n;
1882 	int			error = 0;
1883 	LIST_HEAD(sort_list);
1884 	LIST_HEAD(cancel_list);
1885 	LIST_HEAD(buffer_list);
1886 	LIST_HEAD(inode_buffer_list);
1887 	LIST_HEAD(inode_list);
1888 
1889 	list_splice_init(&trans->r_itemq, &sort_list);
1890 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1891 		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1892 
1893 		switch (ITEM_TYPE(item)) {
1894 		case XFS_LI_ICREATE:
1895 			list_move_tail(&item->ri_list, &buffer_list);
1896 			break;
1897 		case XFS_LI_BUF:
1898 			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1899 				trace_xfs_log_recover_item_reorder_head(log,
1900 							trans, item, pass);
1901 				list_move(&item->ri_list, &cancel_list);
1902 				break;
1903 			}
1904 			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1905 				list_move(&item->ri_list, &inode_buffer_list);
1906 				break;
1907 			}
1908 			list_move_tail(&item->ri_list, &buffer_list);
1909 			break;
1910 		case XFS_LI_INODE:
1911 		case XFS_LI_DQUOT:
1912 		case XFS_LI_QUOTAOFF:
1913 		case XFS_LI_EFD:
1914 		case XFS_LI_EFI:
1915 			trace_xfs_log_recover_item_reorder_tail(log,
1916 							trans, item, pass);
1917 			list_move_tail(&item->ri_list, &inode_list);
1918 			break;
1919 		default:
1920 			xfs_warn(log->l_mp,
1921 				"%s: unrecognized type of log operation",
1922 				__func__);
1923 			ASSERT(0);
1924 			/*
1925 			 * return the remaining items back to the transaction
1926 			 * item list so they can be freed in caller.
1927 			 */
1928 			if (!list_empty(&sort_list))
1929 				list_splice_init(&sort_list, &trans->r_itemq);
1930 			error = -EIO;
1931 			goto out;
1932 		}
1933 	}
1934 out:
1935 	ASSERT(list_empty(&sort_list));
1936 	if (!list_empty(&buffer_list))
1937 		list_splice(&buffer_list, &trans->r_itemq);
1938 	if (!list_empty(&inode_list))
1939 		list_splice_tail(&inode_list, &trans->r_itemq);
1940 	if (!list_empty(&inode_buffer_list))
1941 		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1942 	if (!list_empty(&cancel_list))
1943 		list_splice_tail(&cancel_list, &trans->r_itemq);
1944 	return error;
1945 }
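
/*
 * A sketch of the resulting r_itemq ordering for a hypothetical transaction
 * containing items A (regular buffer), B (inode item), C (inode unlink
 * buffer) and D (cancelled buffer): after reordering, the list reads
 * A, B, C, D - buffers first, then non-buffer items, then inode unlink
 * buffers, with cancelled buffers last - exactly as the list splicing
 * above arranges.
 */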
1946 
1947 /*
1948  * Build up the table of buf cancel records so that we don't replay
1949  * cancelled data in the second pass.  For buffer records that are
1950  * not cancel records, there is nothing to do here so we just return.
1951  *
1952  * If we get a cancel record which is already in the table, this indicates
1953  * that the buffer was cancelled multiple times.  In order to ensure
1954  * that during pass 2 we keep the record in the table until we reach its
1955  * last occurrence in the log, we keep a reference count in the cancel
1956  * record in the table to tell us how many times we expect to see this
1957  * record during the second pass.
1958  */
1959 STATIC int
1960 xlog_recover_buffer_pass1(
1961 	struct xlog			*log,
1962 	struct xlog_recover_item	*item)
1963 {
1964 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1965 	struct list_head	*bucket;
1966 	struct xfs_buf_cancel	*bcp;
1967 
1968 	/*
1969 	 * If this isn't a cancel buffer item, then just return.
1970 	 */
1971 	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1972 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1973 		return 0;
1974 	}
1975 
1976 	/*
1977 	 * Insert an xfs_buf_cancel record into the hash table of them.
1978 	 * If there is already an identical record, bump its reference count.
1979 	 */
1980 	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1981 	list_for_each_entry(bcp, bucket, bc_list) {
1982 		if (bcp->bc_blkno == buf_f->blf_blkno &&
1983 		    bcp->bc_len == buf_f->blf_len) {
1984 			bcp->bc_refcount++;
1985 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1986 			return 0;
1987 		}
1988 	}
1989 
1990 	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1991 	bcp->bc_blkno = buf_f->blf_blkno;
1992 	bcp->bc_len = buf_f->blf_len;
1993 	bcp->bc_refcount = 1;
1994 	list_add_tail(&bcp->bc_list, bucket);
1995 
1996 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1997 	return 0;
1998 }
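
/*
 * For example, if the same buffer (say blkno 64, len 8 - hypothetical
 * values) is cancelled twice in the log, the first pass 1 call above
 * allocates a record with bc_refcount = 1 and the second bumps it to 2,
 * so pass 2 knows to keep the record until the second (last) occurrence
 * is seen.
 */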
1999 
2000 /*
2001  * Check to see whether the buffer being recovered has a corresponding
2002  * entry in the buffer cancel record table. If it does, return the cancel
2003  * buffer structure to the caller.
2004  */
2005 STATIC struct xfs_buf_cancel *
2006 xlog_peek_buffer_cancelled(
2007 	struct xlog		*log,
2008 	xfs_daddr_t		blkno,
2009 	uint			len,
2010 	ushort			flags)
2011 {
2012 	struct list_head	*bucket;
2013 	struct xfs_buf_cancel	*bcp;
2014 
2015 	if (!log->l_buf_cancel_table) {
2016 		/* empty table means no cancelled buffers in the log */
2017 		ASSERT(!(flags & XFS_BLF_CANCEL));
2018 		return NULL;
2019 	}
2020 
2021 	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
2022 	list_for_each_entry(bcp, bucket, bc_list) {
2023 		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
2024 			return bcp;
2025 	}
2026 
2027 	/*
2028 	 * We didn't find a corresponding entry in the table, so return NULL
2029 	 * so that the buffer is NOT cancelled.
2030 	 */
2031 	ASSERT(!(flags & XFS_BLF_CANCEL));
2032 	return NULL;
2033 }
2034 
2035 /*
2036  * If the buffer is being cancelled then return 1 so that it will be cancelled,
2037  * otherwise return 0.  If the buffer is actually a buffer cancel item
2038  * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
2039  * table and remove it from the table if this is the last reference.
2040  *
2041  * We remove the cancel record from the table when we encounter its last
2042  * occurrence in the log so that if the same buffer is re-used again after its
2043  * last cancellation we actually replay the changes made at that point.
2044  */
2045 STATIC int
2046 xlog_check_buffer_cancelled(
2047 	struct xlog		*log,
2048 	xfs_daddr_t		blkno,
2049 	uint			len,
2050 	ushort			flags)
2051 {
2052 	struct xfs_buf_cancel	*bcp;
2053 
2054 	bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
2055 	if (!bcp)
2056 		return 0;
2057 
2058 	/*
2059 	 * We've got a match, so return 1 so that the recovery of this buffer
2060 	 * is cancelled.  If this buffer is actually a buffer cancel log
2061 	 * item, then decrement the refcount on the one in the table and
2062 	 * remove it if this is the last reference.
2063 	 */
2064 	if (flags & XFS_BLF_CANCEL) {
2065 		if (--bcp->bc_refcount == 0) {
2066 			list_del(&bcp->bc_list);
2067 			kmem_free(bcp);
2068 		}
2069 	}
2070 	return 1;
2071 }
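
/*
 * Continuing the pass 1 example above: each time pass 2 meets the cancel
 * item itself (XFS_BLF_CANCEL set), bc_refcount drops by one; when it
 * reaches zero the record is freed, so a later reuse of the same blocks
 * is replayed normally.
 */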
2072 
2073 /*
2074  * Perform recovery for a buffer full of inodes.  In these buffers, the only
2075  * data which should be recovered is that which corresponds to the
2076  * di_next_unlinked pointers in the on disk inode structures.  The rest of the
2077  * data for the inodes is always logged through the inodes themselves rather
2078  * than the inode buffer and is recovered in xlog_recover_inode_pass2().
2079  *
2080  * The only time when buffers full of inodes are fully recovered is when the
2081  * buffer is full of newly allocated inodes.  In this case the buffer will
2082  * not be marked as an inode buffer and so will be sent to
2083  * xlog_recover_do_reg_buffer() below during recovery.
2084  */
2085 STATIC int
2086 xlog_recover_do_inode_buffer(
2087 	struct xfs_mount	*mp,
2088 	xlog_recover_item_t	*item,
2089 	struct xfs_buf		*bp,
2090 	xfs_buf_log_format_t	*buf_f)
2091 {
2092 	int			i;
2093 	int			item_index = 0;
2094 	int			bit = 0;
2095 	int			nbits = 0;
2096 	int			reg_buf_offset = 0;
2097 	int			reg_buf_bytes = 0;
2098 	int			next_unlinked_offset;
2099 	int			inodes_per_buf;
2100 	xfs_agino_t		*logged_nextp;
2101 	xfs_agino_t		*buffer_nextp;
2102 
2103 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
2104 
2105 	/*
2106 	 * Post recovery validation only works properly on CRC enabled
2107 	 * filesystems.
2108 	 */
2109 	if (xfs_sb_version_hascrc(&mp->m_sb))
2110 		bp->b_ops = &xfs_inode_buf_ops;
2111 
2112 	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
2113 	for (i = 0; i < inodes_per_buf; i++) {
2114 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
2115 			offsetof(xfs_dinode_t, di_next_unlinked);
2116 
2117 		while (next_unlinked_offset >=
2118 		       (reg_buf_offset + reg_buf_bytes)) {
2119 			/*
2120 			 * The next di_next_unlinked field is beyond
2121 			 * the current logged region.  Find the next
2122 			 * logged region that contains or is beyond
2123 			 * the current di_next_unlinked field.
2124 			 */
2125 			bit += nbits;
2126 			bit = xfs_next_bit(buf_f->blf_data_map,
2127 					   buf_f->blf_map_size, bit);
2128 
2129 			/*
2130 			 * If there are no more logged regions in the
2131 			 * buffer, then we're done.
2132 			 */
2133 			if (bit == -1)
2134 				return 0;
2135 
2136 			nbits = xfs_contig_bits(buf_f->blf_data_map,
2137 						buf_f->blf_map_size, bit);
2138 			ASSERT(nbits > 0);
2139 			reg_buf_offset = bit << XFS_BLF_SHIFT;
2140 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
2141 			item_index++;
2142 		}
2143 
2144 		/*
2145 		 * If the current logged region starts after the current
2146 		 * di_next_unlinked field, then move on to the next
2147 		 * di_next_unlinked field.
2148 		 */
2149 		if (next_unlinked_offset < reg_buf_offset)
2150 			continue;
2151 
2152 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
2153 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
2154 		ASSERT((reg_buf_offset + reg_buf_bytes) <=
2155 							BBTOB(bp->b_io_length));
2156 
2157 		/*
2158 		 * The current logged region contains a copy of the
2159 		 * current di_next_unlinked field.  Extract its value
2160 		 * and copy it to the buffer copy.
2161 		 */
2162 		logged_nextp = item->ri_buf[item_index].i_addr +
2163 				next_unlinked_offset - reg_buf_offset;
2164 		if (unlikely(*logged_nextp == 0)) {
2165 			xfs_alert(mp,
2166 		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
2167 		"Trying to replay bad (0) inode di_next_unlinked field.",
2168 				item, bp);
2169 			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
2170 					 XFS_ERRLEVEL_LOW, mp);
2171 			return -EFSCORRUPTED;
2172 		}
2173 
2174 		buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
2175 		*buffer_nextp = *logged_nextp;
2176 
2177 		/*
2178 		 * If necessary, recalculate the CRC in the on-disk inode. We
2179 		 * have to leave the inode in a consistent state for whoever
2180 		 * reads it next....
2181 		 */
2182 		xfs_dinode_calc_crc(mp,
2183 				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
2184 
2185 	}
2186 
2187 	return 0;
2188 }
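
/*
 * A worked example of the offset arithmetic above, assuming a hypothetical
 * 256 byte inode size (sb_inodelog = 8): for the third inode in the buffer
 * (i = 2), next_unlinked_offset = 2 * 256 + offsetof(xfs_dinode_t,
 * di_next_unlinked), and only the logged region covering that offset is
 * consulted - the rest of the inode core is recovered from inode log items.
 */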
2189 
2190 /*
2191  * V5 filesystems know the age of the buffer on disk being recovered. We can
2192  * have newer objects on disk than we are replaying, and so for these cases we
2193  * don't want to replay the current change as that will make the buffer contents
2194  * temporarily invalid on disk.
2195  *
2196  * The magic number might not match the buffer type we are going to recover
2197  * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
2198  * extract the LSN of the existing object in the buffer based on its current
2199  * magic number.  If we don't recognise the magic number in the buffer, then
2200  * return an LSN of -1 so that the caller knows it was an unrecognised block and
2201  * so can recover the buffer.
2202  *
2203  * Note: we cannot rely solely on magic number matches to determine that the
2204  * buffer has a valid LSN - we also need to verify that it belongs to this
2205  * filesystem, so we need to extract the object's LSN and compare it to that
2206  * which we read from the superblock. If the UUIDs don't match, then we've got a
2207  * stale metadata block from an old filesystem instance that we need to recover
2208  * over the top of.
2209  */
2210 static xfs_lsn_t
2211 xlog_recover_get_buf_lsn(
2212 	struct xfs_mount	*mp,
2213 	struct xfs_buf		*bp)
2214 {
2215 	__uint32_t		magic32;
2216 	__uint16_t		magic16;
2217 	__uint16_t		magicda;
2218 	void			*blk = bp->b_addr;
2219 	uuid_t			*uuid;
2220 	xfs_lsn_t		lsn = -1;
2221 
2222 	/* v4 filesystems always recover immediately */
2223 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2224 		goto recover_immediately;
2225 
2226 	magic32 = be32_to_cpu(*(__be32 *)blk);
2227 	switch (magic32) {
2228 	case XFS_ABTB_CRC_MAGIC:
2229 	case XFS_ABTC_CRC_MAGIC:
2230 	case XFS_ABTB_MAGIC:
2231 	case XFS_ABTC_MAGIC:
2232 	case XFS_IBT_CRC_MAGIC:
2233 	case XFS_IBT_MAGIC: {
2234 		struct xfs_btree_block *btb = blk;
2235 
2236 		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2237 		uuid = &btb->bb_u.s.bb_uuid;
2238 		break;
2239 	}
2240 	case XFS_BMAP_CRC_MAGIC:
2241 	case XFS_BMAP_MAGIC: {
2242 		struct xfs_btree_block *btb = blk;
2243 
2244 		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2245 		uuid = &btb->bb_u.l.bb_uuid;
2246 		break;
2247 	}
2248 	case XFS_AGF_MAGIC:
2249 		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2250 		uuid = &((struct xfs_agf *)blk)->agf_uuid;
2251 		break;
2252 	case XFS_AGFL_MAGIC:
2253 		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2254 		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2255 		break;
2256 	case XFS_AGI_MAGIC:
2257 		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2258 		uuid = &((struct xfs_agi *)blk)->agi_uuid;
2259 		break;
2260 	case XFS_SYMLINK_MAGIC:
2261 		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2262 		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2263 		break;
2264 	case XFS_DIR3_BLOCK_MAGIC:
2265 	case XFS_DIR3_DATA_MAGIC:
2266 	case XFS_DIR3_FREE_MAGIC:
2267 		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2268 		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2269 		break;
2270 	case XFS_ATTR3_RMT_MAGIC:
2271 		/*
2272 		 * Remote attr blocks are written synchronously, rather than
2273 		 * being logged. That means they do not contain a valid LSN
2274 		 * (i.e. transactionally ordered) in them, and hence any time we
2275 		 * see a buffer to replay over the top of a remote attribute
2276 		 * block we should simply do so.
2277 		 */
2278 		goto recover_immediately;
2279 	case XFS_SB_MAGIC:
2280 		/*
2281 		 * superblock uuids are magic. We may or may not have a
2282 		 * sb_meta_uuid on disk, but it will be set in the in-core
2283 		 * superblock. We set the uuid pointer for verification
2284 		 * according to the superblock feature mask to ensure we check
2285 		 * the relevant UUID in the superblock.
2286 		 */
2287 		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2288 		if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2289 			uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2290 		else
2291 			uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2292 		break;
2293 	default:
2294 		break;
2295 	}
2296 
2297 	if (lsn != (xfs_lsn_t)-1) {
2298 		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2299 			goto recover_immediately;
2300 		return lsn;
2301 	}
2302 
2303 	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2304 	switch (magicda) {
2305 	case XFS_DIR3_LEAF1_MAGIC:
2306 	case XFS_DIR3_LEAFN_MAGIC:
2307 	case XFS_DA3_NODE_MAGIC:
2308 		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2309 		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2310 		break;
2311 	default:
2312 		break;
2313 	}
2314 
2315 	if (lsn != (xfs_lsn_t)-1) {
2316 		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2317 			goto recover_immediately;
2318 		return lsn;
2319 	}
2320 
2321 	/*
2322 	 * We do individual object checks on dquot and inode buffers as they
2323 	 * have their own individual LSN records. Also, we could have a stale
2324 	 * buffer here, so we have to at least recognise these buffer types.
2325 	 *
2326 	 * A noted complexity here is inode unlinked list processing - it logs
2327 	 * the inode directly in the buffer, but we don't know which inodes have
2328 	 * been modified, and there is no global buffer LSN. Hence we need to
2329 	 * recover all inode buffer types immediately. This problem will be
2330 	 * fixed by logical logging of the unlinked list modifications.
2331 	 */
2332 	magic16 = be16_to_cpu(*(__be16 *)blk);
2333 	switch (magic16) {
2334 	case XFS_DQUOT_MAGIC:
2335 	case XFS_DINODE_MAGIC:
2336 		goto recover_immediately;
2337 	default:
2338 		break;
2339 	}
2340 
2341 	/* unknown buffer contents, recover immediately */
2342 
2343 recover_immediately:
2344 	return (xfs_lsn_t)-1;
2345 
2346 }
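
/*
 * The callers use the returned LSN like this (see xlog_recover_buffer_pass2
 * below): if the LSN is valid and not older than the LSN of the transaction
 * being replayed, the buffer on disk is already up to date and replay is
 * skipped; a returned -1 always forces immediate recovery.
 */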
2347 
2348 /*
2349  * Validate the recovered buffer is of the correct type and attach the
2350  * appropriate buffer operations to them for writeback. Magic numbers are in a
2351  * few places:
2352  *	the first 16 bits of the buffer (inode buffer, dquot buffer),
2353  *	the first 32 bits of the buffer (most blocks),
2354  *	inside a struct xfs_da_blkinfo at the start of the buffer.
2355  */
2356 static void
2357 xlog_recover_validate_buf_type(
2358 	struct xfs_mount	*mp,
2359 	struct xfs_buf		*bp,
2360 	xfs_buf_log_format_t	*buf_f)
2361 {
2362 	struct xfs_da_blkinfo	*info = bp->b_addr;
2363 	__uint32_t		magic32;
2364 	__uint16_t		magic16;
2365 	__uint16_t		magicda;
2366 
2367 	/*
2368 	 * We can only do post recovery validation on items on CRC enabled
2369 	 * filesystems as we need to know when the buffer was written to be able
2370 	 * to determine if we should have replayed the item. If we replay old
2371 	 * metadata over a newer buffer, then it will enter a temporarily
2372 	 * inconsistent state resulting in verification failures. Hence for now
2373 	 * just avoid the verification stage for non-crc filesystems.
2374 	 */
2375 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2376 		return;
2377 
2378 	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2379 	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
2380 	magicda = be16_to_cpu(info->magic);
2381 	switch (xfs_blft_from_flags(buf_f)) {
2382 	case XFS_BLFT_BTREE_BUF:
2383 		switch (magic32) {
2384 		case XFS_ABTB_CRC_MAGIC:
2385 		case XFS_ABTC_CRC_MAGIC:
2386 		case XFS_ABTB_MAGIC:
2387 		case XFS_ABTC_MAGIC:
2388 			bp->b_ops = &xfs_allocbt_buf_ops;
2389 			break;
2390 		case XFS_IBT_CRC_MAGIC:
2391 		case XFS_FIBT_CRC_MAGIC:
2392 		case XFS_IBT_MAGIC:
2393 		case XFS_FIBT_MAGIC:
2394 			bp->b_ops = &xfs_inobt_buf_ops;
2395 			break;
2396 		case XFS_BMAP_CRC_MAGIC:
2397 		case XFS_BMAP_MAGIC:
2398 			bp->b_ops = &xfs_bmbt_buf_ops;
2399 			break;
2400 		default:
2401 			xfs_warn(mp, "Bad btree block magic!");
2402 			ASSERT(0);
2403 			break;
2404 		}
2405 		break;
2406 	case XFS_BLFT_AGF_BUF:
2407 		if (magic32 != XFS_AGF_MAGIC) {
2408 			xfs_warn(mp, "Bad AGF block magic!");
2409 			ASSERT(0);
2410 			break;
2411 		}
2412 		bp->b_ops = &xfs_agf_buf_ops;
2413 		break;
2414 	case XFS_BLFT_AGFL_BUF:
2415 		if (magic32 != XFS_AGFL_MAGIC) {
2416 			xfs_warn(mp, "Bad AGFL block magic!");
2417 			ASSERT(0);
2418 			break;
2419 		}
2420 		bp->b_ops = &xfs_agfl_buf_ops;
2421 		break;
2422 	case XFS_BLFT_AGI_BUF:
2423 		if (magic32 != XFS_AGI_MAGIC) {
2424 			xfs_warn(mp, "Bad AGI block magic!");
2425 			ASSERT(0);
2426 			break;
2427 		}
2428 		bp->b_ops = &xfs_agi_buf_ops;
2429 		break;
2430 	case XFS_BLFT_UDQUOT_BUF:
2431 	case XFS_BLFT_PDQUOT_BUF:
2432 	case XFS_BLFT_GDQUOT_BUF:
2433 #ifdef CONFIG_XFS_QUOTA
2434 		if (magic16 != XFS_DQUOT_MAGIC) {
2435 			xfs_warn(mp, "Bad DQUOT block magic!");
2436 			ASSERT(0);
2437 			break;
2438 		}
2439 		bp->b_ops = &xfs_dquot_buf_ops;
2440 #else
2441 		xfs_alert(mp,
2442 	"Trying to recover dquots without QUOTA support built in!");
2443 		ASSERT(0);
2444 #endif
2445 		break;
2446 	case XFS_BLFT_DINO_BUF:
2447 		if (magic16 != XFS_DINODE_MAGIC) {
2448 			xfs_warn(mp, "Bad INODE block magic!");
2449 			ASSERT(0);
2450 			break;
2451 		}
2452 		bp->b_ops = &xfs_inode_buf_ops;
2453 		break;
2454 	case XFS_BLFT_SYMLINK_BUF:
2455 		if (magic32 != XFS_SYMLINK_MAGIC) {
2456 			xfs_warn(mp, "Bad symlink block magic!");
2457 			ASSERT(0);
2458 			break;
2459 		}
2460 		bp->b_ops = &xfs_symlink_buf_ops;
2461 		break;
2462 	case XFS_BLFT_DIR_BLOCK_BUF:
2463 		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2464 		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
2465 			xfs_warn(mp, "Bad dir block magic!");
2466 			ASSERT(0);
2467 			break;
2468 		}
2469 		bp->b_ops = &xfs_dir3_block_buf_ops;
2470 		break;
2471 	case XFS_BLFT_DIR_DATA_BUF:
2472 		if (magic32 != XFS_DIR2_DATA_MAGIC &&
2473 		    magic32 != XFS_DIR3_DATA_MAGIC) {
2474 			xfs_warn(mp, "Bad dir data magic!");
2475 			ASSERT(0);
2476 			break;
2477 		}
2478 		bp->b_ops = &xfs_dir3_data_buf_ops;
2479 		break;
2480 	case XFS_BLFT_DIR_FREE_BUF:
2481 		if (magic32 != XFS_DIR2_FREE_MAGIC &&
2482 		    magic32 != XFS_DIR3_FREE_MAGIC) {
2483 			xfs_warn(mp, "Bad dir3 free magic!");
2484 			ASSERT(0);
2485 			break;
2486 		}
2487 		bp->b_ops = &xfs_dir3_free_buf_ops;
2488 		break;
2489 	case XFS_BLFT_DIR_LEAF1_BUF:
2490 		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2491 		    magicda != XFS_DIR3_LEAF1_MAGIC) {
2492 			xfs_warn(mp, "Bad dir leaf1 magic!");
2493 			ASSERT(0);
2494 			break;
2495 		}
2496 		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2497 		break;
2498 	case XFS_BLFT_DIR_LEAFN_BUF:
2499 		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2500 		    magicda != XFS_DIR3_LEAFN_MAGIC) {
2501 			xfs_warn(mp, "Bad dir leafn magic!");
2502 			ASSERT(0);
2503 			break;
2504 		}
2505 		bp->b_ops = &xfs_dir3_leafn_buf_ops;
2506 		break;
2507 	case XFS_BLFT_DA_NODE_BUF:
2508 		if (magicda != XFS_DA_NODE_MAGIC &&
2509 		    magicda != XFS_DA3_NODE_MAGIC) {
2510 			xfs_warn(mp, "Bad da node magic!");
2511 			ASSERT(0);
2512 			break;
2513 		}
2514 		bp->b_ops = &xfs_da3_node_buf_ops;
2515 		break;
2516 	case XFS_BLFT_ATTR_LEAF_BUF:
2517 		if (magicda != XFS_ATTR_LEAF_MAGIC &&
2518 		    magicda != XFS_ATTR3_LEAF_MAGIC) {
2519 			xfs_warn(mp, "Bad attr leaf magic!");
2520 			ASSERT(0);
2521 			break;
2522 		}
2523 		bp->b_ops = &xfs_attr3_leaf_buf_ops;
2524 		break;
2525 	case XFS_BLFT_ATTR_RMT_BUF:
2526 		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2527 			xfs_warn(mp, "Bad attr remote magic!");
2528 			ASSERT(0);
2529 			break;
2530 		}
2531 		bp->b_ops = &xfs_attr3_rmt_buf_ops;
2532 		break;
2533 	case XFS_BLFT_SB_BUF:
2534 		if (magic32 != XFS_SB_MAGIC) {
2535 			xfs_warn(mp, "Bad SB block magic!");
2536 			ASSERT(0);
2537 			break;
2538 		}
2539 		bp->b_ops = &xfs_sb_buf_ops;
2540 		break;
2541 	default:
2542 		xfs_warn(mp, "Unknown buffer type %d!",
2543 			 xfs_blft_from_flags(buf_f));
2544 		break;
2545 	}
2546 }
2547 
2548 /*
2549  * Perform a 'normal' buffer recovery.  Each logged region of the
2550  * buffer should be copied over the corresponding region in the
2551  * given buffer.  The bitmap in the buf log format structure indicates
2552  * where to place the logged data.
2553  */
2554 STATIC void
2555 xlog_recover_do_reg_buffer(
2556 	struct xfs_mount	*mp,
2557 	xlog_recover_item_t	*item,
2558 	struct xfs_buf		*bp,
2559 	xfs_buf_log_format_t	*buf_f)
2560 {
2561 	int			i;
2562 	int			bit;
2563 	int			nbits;
2564 	int                     error;
2565 
2566 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2567 
2568 	bit = 0;
2569 	i = 1;  /* 0 is the buf format structure */
2570 	while (1) {
2571 		bit = xfs_next_bit(buf_f->blf_data_map,
2572 				   buf_f->blf_map_size, bit);
2573 		if (bit == -1)
2574 			break;
2575 		nbits = xfs_contig_bits(buf_f->blf_data_map,
2576 					buf_f->blf_map_size, bit);
2577 		ASSERT(nbits > 0);
2578 		ASSERT(item->ri_buf[i].i_addr != NULL);
2579 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2580 		ASSERT(BBTOB(bp->b_io_length) >=
2581 		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2582 
2583 		/*
2584 		 * The dirty regions logged in the buffer, even though
2585 		 * contiguous, may span multiple chunks. This is because the
2586 		 * dirty region may span a physical page boundary in a buffer
2587 		 * and hence be split into two separate vectors for writing into
2588 		 * the log. Hence we need to trim nbits back to the length of
2589 		 * the current region being copied out of the log.
2590 		 */
2591 		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2592 			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2593 
2594 		/*
2595 		 * Do a sanity check if this is a dquot buffer. Just checking
2596 		 * the first dquot in the buffer should do. XXX: This is
2597 		 * probably a good thing to do for other buf types also.
2598 		 */
2599 		error = 0;
2600 		if (buf_f->blf_flags &
2601 		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2602 			if (item->ri_buf[i].i_addr == NULL) {
2603 				xfs_alert(mp,
2604 					"XFS: NULL dquot in %s.", __func__);
2605 				goto next;
2606 			}
2607 			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2608 				xfs_alert(mp,
2609 					"XFS: dquot too small (%d) in %s.",
2610 					item->ri_buf[i].i_len, __func__);
2611 				goto next;
2612 			}
2613 			error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
2614 					       -1, 0, XFS_QMOPT_DOWARN,
2615 					       "dquot_buf_recover");
2616 			if (error)
2617 				goto next;
2618 		}
2619 
2620 		memcpy(xfs_buf_offset(bp,
2621 			(uint)bit << XFS_BLF_SHIFT),	/* dest */
2622 			item->ri_buf[i].i_addr,		/* source */
2623 			nbits<<XFS_BLF_SHIFT);		/* length */
2624  next:
2625 		i++;
2626 		bit += nbits;
2627 	}
2628 
2629 	/* Shouldn't be any more regions */
2630 	ASSERT(i == item->ri_total);
2631 
2632 	xlog_recover_validate_buf_type(mp, bp, buf_f);
2633 }
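
/*
 * To illustrate the bitmap arithmetic above: each map bit covers one
 * XFS_BLF_CHUNK (128 bytes per the buf log item definitions, i.e. an
 * XFS_BLF_SHIFT of 7). A region starting at bit 3 with nbits = 2 therefore
 * copies 256 bytes into the buffer at byte offset 3 << 7 = 384.
 */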
2634 
2635 /*
2636  * Perform a dquot buffer recovery.
2637  * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2638  * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2639  * Else, treat it as a regular buffer and do recovery.
2640  *
2641  * Return false if the buffer was tossed and true if we recovered the buffer,
2642  * to indicate to the caller whether the buffer needs writing.
2643  */
2644 STATIC bool
2645 xlog_recover_do_dquot_buffer(
2646 	struct xfs_mount		*mp,
2647 	struct xlog			*log,
2648 	struct xlog_recover_item	*item,
2649 	struct xfs_buf			*bp,
2650 	struct xfs_buf_log_format	*buf_f)
2651 {
2652 	uint			type;
2653 
2654 	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2655 
2656 	/*
2657 	 * Filesystems are required to send in quota flags at mount time.
2658 	 */
2659 	if (!mp->m_qflags)
2660 		return false;
2661 
2662 	type = 0;
2663 	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2664 		type |= XFS_DQ_USER;
2665 	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2666 		type |= XFS_DQ_PROJ;
2667 	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2668 		type |= XFS_DQ_GROUP;
2669 	/*
2670 	 * This type of quotas was turned off, so ignore this buffer
2671 	 */
2672 	if (log->l_quotaoffs_flag & type)
2673 		return false;
2674 
2675 	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2676 	return true;
2677 }
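
/*
 * For example, if a user quotaoff was seen in pass 1 then
 * log->l_quotaoffs_flag has XFS_DQ_USER set, so a buffer logged with
 * XFS_BLF_UDQUOT_BUF matches the type mask above and is tossed (false is
 * returned and the caller skips writing it back).
 */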
2678 
2679 /*
2680  * This routine replays a modification made to a buffer at runtime.
2681  * There are actually two types of buffer, regular and inode, which
2682  * are handled differently.  Inode buffers are handled differently
2683  * in that we only recover a specific set of data from them, namely
2684  * the inode di_next_unlinked fields.  This is because all other inode
2685  * data is actually logged via inode records and any data we replay
2686  * here which overlaps that may be stale.
2687  *
2688  * When meta-data buffers are freed at run time we log a buffer item
2689  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2690  * of the buffer in the log should not be replayed at recovery time.
2691  * This is so that if the blocks covered by the buffer are reused for
2692  * file data before we crash we don't end up replaying old, freed
2693  * meta-data into a user's file.
2694  *
2695  * To handle the cancellation of buffer log items, we make two passes
2696  * over the log during recovery.  During the first we build a table of
2697  * those buffers which have been cancelled, and during the second we
2698  * only replay those buffers which do not have corresponding cancel
2699  * records in the table.  See xlog_recover_buffer_pass[1,2] above
2700  * for more details on the implementation of the table of cancel records.
2701  */
2702 STATIC int
2703 xlog_recover_buffer_pass2(
2704 	struct xlog			*log,
2705 	struct list_head		*buffer_list,
2706 	struct xlog_recover_item	*item,
2707 	xfs_lsn_t			current_lsn)
2708 {
2709 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2710 	xfs_mount_t		*mp = log->l_mp;
2711 	xfs_buf_t		*bp;
2712 	int			error;
2713 	uint			buf_flags;
2714 	xfs_lsn_t		lsn;
2715 
2716 	/*
2717 	 * In this pass we only want to recover the buffers which have
2718 	 * not been cancelled and are not cancellation buffers themselves.
2719 	 */
2720 	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2721 			buf_f->blf_len, buf_f->blf_flags)) {
2722 		trace_xfs_log_recover_buf_cancel(log, buf_f);
2723 		return 0;
2724 	}
2725 
2726 	trace_xfs_log_recover_buf_recover(log, buf_f);
2727 
2728 	buf_flags = 0;
2729 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2730 		buf_flags |= XBF_UNMAPPED;
2731 
2732 	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2733 			  buf_flags, NULL);
2734 	if (!bp)
2735 		return -ENOMEM;
2736 	error = bp->b_error;
2737 	if (error) {
2738 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2739 		goto out_release;
2740 	}
2741 
2742 	/*
2743 	 * Recover the buffer only if we get an LSN from it and it's less than
2744 	 * the lsn of the transaction we are replaying.
2745 	 *
2746 	 * Note that we have to be extremely careful of readahead here.
2747 	 * Readahead does not attach verifiers to the buffers, so if we don't
2748 	 * actually do any replay after readahead because the LSN we found
2749 	 * in the buffer is more recent than the current transaction, then we
2750 	 * need to attach the verifier directly. Failure to do so means that
2751 	 * future recovery actions (e.g. EFI and unlinked list recovery) can
2752 	 * operate on the buffers without the verifier attached. This
2753 	 * can lead to blocks on disk having the correct content but a stale
2754 	 * CRC.
2755 	 *
2756 	 * It is safe to assume these clean buffers are currently up to date.
2757 	 * If the buffer is dirtied by a later transaction being replayed, then
2758 	 * the verifier will be reset to match whatever recover turns that
2759 	 * buffer into.
2760 	 */
2761 	lsn = xlog_recover_get_buf_lsn(mp, bp);
2762 	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2763 		xlog_recover_validate_buf_type(mp, bp, buf_f);
2764 		goto out_release;
2765 	}
2766 
2767 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2768 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2769 		if (error)
2770 			goto out_release;
2771 	} else if (buf_f->blf_flags &
2772 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2773 		bool	dirty;
2774 
2775 		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2776 		if (!dirty)
2777 			goto out_release;
2778 	} else {
2779 		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2780 	}
2781 
2782 	/*
2783 	 * Perform delayed write on the buffer.  Asynchronous writes will be
2784 	 * slower when taking into account all the buffers to be flushed.
2785 	 *
2786 	 * Also make sure that only inode buffers with good sizes stay in
2787 	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
2788 	 * or mp->m_inode_cluster_size bytes, whichever is bigger.  The inode
2789 	 * buffers in the log can be a different size if the log was generated
2790 	 * by an older kernel using unclustered inode buffers or a newer kernel
2791 	 * running with a different inode cluster size.  Regardless, if the
2792 	 * running with a different inode cluster size.  Regardless, if
2793 	 * for *our* value of mp->m_inode_cluster_size, then we need to keep
2794 	 * the buffer out of the buffer cache so that the buffer won't
2795 	 * overlap with future reads of those inodes.
2796 	 */
2797 	if (XFS_DINODE_MAGIC ==
2798 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2799 	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2800 			(__uint32_t)log->l_mp->m_inode_cluster_size))) {
2801 		xfs_buf_stale(bp);
2802 		error = xfs_bwrite(bp);
2803 	} else {
2804 		ASSERT(bp->b_target->bt_mount == mp);
2805 		bp->b_iodone = xlog_recover_iodone;
2806 		xfs_buf_delwri_queue(bp, buffer_list);
2807 	}
2808 
2809 out_release:
2810 	xfs_buf_relse(bp);
2811 	return error;
2812 }
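
/*
 * An example of the inode buffer size check above (hypothetical sizes):
 * with a 4096 byte block size and a 16384 byte inode cluster size, a
 * replayed inode buffer of 8192 bytes from an older log doesn't match
 * MAX(blocksize, m_inode_cluster_size) = 16384, so it is marked stale and
 * written synchronously rather than left in the buffer cache.
 */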
2813 
2814 /*
2815  * Inode fork owner changes
2816  *
2817  * If we have been told that we have to reparent the inode fork, it's because an
2818  * extent swap operation on a CRC enabled filesystem has been done and we are
2819  * replaying it. We need to walk the BMBT of the appropriate fork and change the
2820  * owners of it.
2821  *
2822  * The complexity here is that we don't have an inode context to work with, so
2823  * after we've replayed the inode we need to instantiate one.  This is where the
2824  * fun begins.
2825  *
2826  * We are in the middle of log recovery, so we can't run transactions. That
2827  * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2828  * that will result in the corresponding iput() running the inode through
2829  * xfs_inactive(). If we've just replayed an inode core that changes the link
2830  * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2831  * transactions (bad!).
2832  *
2833  * So, to avoid this, we instantiate an inode directly from the inode core we've
2834  * just recovered. We have the buffer still locked, and all we really need to
2835  * instantiate is the inode core and the forks being modified. We can do this
2836  * manually, then run the inode btree owner change, and then tear down the
2837  * xfs_inode without having to run any transactions at all.
2838  *
2839  * Also, because we don't have a transaction context available here but need
2840  * to gather all the buffers we modify for writeback, we pass the buffer_list
2841  * for the operation to use instead.
2842  */
2843 
2844 STATIC int
2845 xfs_recover_inode_owner_change(
2846 	struct xfs_mount	*mp,
2847 	struct xfs_dinode	*dip,
2848 	struct xfs_inode_log_format *in_f,
2849 	struct list_head	*buffer_list)
2850 {
2851 	struct xfs_inode	*ip;
2852 	int			error;
2853 
2854 	ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2855 
2856 	ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2857 	if (!ip)
2858 		return -ENOMEM;
2859 
2860 	/* instantiate the inode */
2861 	xfs_dinode_from_disk(&ip->i_d, dip);
2862 	ASSERT(ip->i_d.di_version >= 3);
2863 
2864 	error = xfs_iformat_fork(ip, dip);
2865 	if (error)
2866 		goto out_free_ip;
2867 
2868 
2869 	if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2870 		ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2871 		error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2872 					      ip->i_ino, buffer_list);
2873 		if (error)
2874 			goto out_free_ip;
2875 	}
2876 
2877 	if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2878 		ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2879 		error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2880 					      ip->i_ino, buffer_list);
2881 		if (error)
2882 			goto out_free_ip;
2883 	}
2884 
2885 out_free_ip:
2886 	xfs_inode_free(ip);
2887 	return error;
2888 }
2889 
2890 STATIC int
2891 xlog_recover_inode_pass2(
2892 	struct xlog			*log,
2893 	struct list_head		*buffer_list,
2894 	struct xlog_recover_item	*item,
2895 	xfs_lsn_t			current_lsn)
2896 {
2897 	xfs_inode_log_format_t	*in_f;
2898 	xfs_mount_t		*mp = log->l_mp;
2899 	xfs_buf_t		*bp;
2900 	xfs_dinode_t		*dip;
2901 	int			len;
2902 	char			*src;
2903 	char			*dest;
2904 	int			error;
2905 	int			attr_index;
2906 	uint			fields;
2907 	xfs_icdinode_t		*dicp;
2908 	uint			isize;
2909 	int			need_free = 0;
2910 
2911 	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2912 		in_f = item->ri_buf[0].i_addr;
2913 	} else {
2914 		in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2915 		need_free = 1;
2916 		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2917 		if (error)
2918 			goto error;
2919 	}
2920 
2921 	/*
2922 	 * Inode buffers can be freed; look out for that case
2923 	 * and do not replay the inode.
2924 	 */
2925 	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2926 					in_f->ilf_len, 0)) {
2927 		error = 0;
2928 		trace_xfs_log_recover_inode_cancel(log, in_f);
2929 		goto error;
2930 	}
2931 	trace_xfs_log_recover_inode_recover(log, in_f);
2932 
2933 	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2934 			  &xfs_inode_buf_ops);
2935 	if (!bp) {
2936 		error = -ENOMEM;
2937 		goto error;
2938 	}
2939 	error = bp->b_error;
2940 	if (error) {
2941 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2942 		goto out_release;
2943 	}
2944 	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2945 	dip = xfs_buf_offset(bp, in_f->ilf_boffset);
2946 
2947 	/*
2948 	 * Make sure the place we're flushing out to really looks
2949 	 * like an inode!
2950 	 */
2951 	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2952 		xfs_alert(mp,
2953 	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2954 			__func__, dip, bp, in_f->ilf_ino);
2955 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2956 				 XFS_ERRLEVEL_LOW, mp);
2957 		error = -EFSCORRUPTED;
2958 		goto out_release;
2959 	}
2960 	dicp = item->ri_buf[1].i_addr;
2961 	if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2962 		xfs_alert(mp,
2963 			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2964 			__func__, item, in_f->ilf_ino);
2965 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2966 				 XFS_ERRLEVEL_LOW, mp);
2967 		error = -EFSCORRUPTED;
2968 		goto out_release;
2969 	}
2970 
2971 	/*
2972 	 * If the inode has an LSN in it, recover the inode only if it's less
2973 	 * than the lsn of the transaction we are replaying. Note: we still
2974 	 * need to replay an owner change even though the inode is more recent
2975 	 * than the transaction as there is no guarantee that all the btree
2976 	 * blocks are more recent than this transaction, too.
2977 	 */
2978 	if (dip->di_version >= 3) {
2979 		xfs_lsn_t	lsn = be64_to_cpu(dip->di_lsn);
2980 
2981 		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2982 			trace_xfs_log_recover_inode_skip(log, in_f);
2983 			error = 0;
2984 			goto out_owner_change;
2985 		}
2986 	}
2987 
2988 	/*
2989 	 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
2990 	 * are transactional and if ordering is necessary we can determine that
2991 	 * more accurately by the LSN field in the V3 inode core. Don't trust
2992 	 * the inode versions, as we might be changing them here - use the
2993 	 * superblock flag to determine whether we need to look at di_flushiter
2994 	 * to skip replay when the on disk inode is newer than the log one.
2995 	 */
2996 	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
2997 	    dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2998 		/*
2999 		 * Deal with the wrap case: di_flushiter wraps past
3000 		 * DI_MAX_FLUSH, so DI_MAX_FLUSH is older than small numbers
3001 		 */
3002 		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
3003 		    dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
3004 			/* do nothing */
3005 		} else {
3006 			trace_xfs_log_recover_inode_skip(log, in_f);
3007 			error = 0;
3008 			goto out_release;
3009 		}
3010 	}
3011 
3012 	/* Take the opportunity to reset the flush iteration count */
3013 	dicp->di_flushiter = 0;
3014 
3015 	if (unlikely(S_ISREG(dicp->di_mode))) {
3016 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
3017 		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
3018 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
3019 					 XFS_ERRLEVEL_LOW, mp, dicp);
3020 			xfs_alert(mp,
3021 		"%s: Bad regular inode log record, rec ptr 0x%p, "
3022 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
3023 				__func__, item, dip, bp, in_f->ilf_ino);
3024 			error = -EFSCORRUPTED;
3025 			goto out_release;
3026 		}
3027 	} else if (unlikely(S_ISDIR(dicp->di_mode))) {
3028 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
3029 		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
3030 		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
3031 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
3032 					     XFS_ERRLEVEL_LOW, mp, dicp);
3033 			xfs_alert(mp,
3034 		"%s: Bad dir inode log record, rec ptr 0x%p, "
3035 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
3036 				__func__, item, dip, bp, in_f->ilf_ino);
3037 			error = -EFSCORRUPTED;
3038 			goto out_release;
3039 		}
3040 	}
3041 	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
3042 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
3043 				     XFS_ERRLEVEL_LOW, mp, dicp);
3044 		xfs_alert(mp,
3045 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
3046 	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
3047 			__func__, item, dip, bp, in_f->ilf_ino,
3048 			dicp->di_nextents + dicp->di_anextents,
3049 			dicp->di_nblocks);
3050 		error = -EFSCORRUPTED;
3051 		goto out_release;
3052 	}
3053 	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
3054 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
3055 				     XFS_ERRLEVEL_LOW, mp, dicp);
3056 		xfs_alert(mp,
3057 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
3058 	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
3059 			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
3060 		error = -EFSCORRUPTED;
3061 		goto out_release;
3062 	}
3063 	isize = xfs_icdinode_size(dicp->di_version);
3064 	if (unlikely(item->ri_buf[1].i_len > isize)) {
3065 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3066 				     XFS_ERRLEVEL_LOW, mp, dicp);
3067 		xfs_alert(mp,
3068 			"%s: Bad inode log record length %d, rec ptr 0x%p",
3069 			__func__, item->ri_buf[1].i_len, item);
3070 		error = -EFSCORRUPTED;
3071 		goto out_release;
3072 	}
3073 
3074 	/* The core is in in-core format */
3075 	xfs_dinode_to_disk(dip, dicp);
3076 
3077 	/* the rest is in on-disk format */
3078 	if (item->ri_buf[1].i_len > isize) {
3079 		memcpy((char *)dip + isize,
3080 			item->ri_buf[1].i_addr + isize,
3081 			item->ri_buf[1].i_len - isize);
3082 	}
3083 
3084 	fields = in_f->ilf_fields;
3085 	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
3086 	case XFS_ILOG_DEV:
3087 		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3088 		break;
3089 	case XFS_ILOG_UUID:
3090 		memcpy(XFS_DFORK_DPTR(dip),
3091 		       &in_f->ilf_u.ilfu_uuid,
3092 		       sizeof(uuid_t));
3093 		break;
3094 	}
3095 
3096 	if (in_f->ilf_size == 2)
3097 		goto out_owner_change;
3098 	len = item->ri_buf[2].i_len;
3099 	src = item->ri_buf[2].i_addr;
3100 	ASSERT(in_f->ilf_size <= 4);
3101 	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3102 	ASSERT(!(fields & XFS_ILOG_DFORK) ||
3103 	       (len == in_f->ilf_dsize));
3104 
3105 	switch (fields & XFS_ILOG_DFORK) {
3106 	case XFS_ILOG_DDATA:
3107 	case XFS_ILOG_DEXT:
3108 		memcpy(XFS_DFORK_DPTR(dip), src, len);
3109 		break;
3110 
3111 	case XFS_ILOG_DBROOT:
3112 		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3113 				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3114 				 XFS_DFORK_DSIZE(dip, mp));
3115 		break;
3116 
3117 	default:
3118 		/*
3119 		 * There are no data fork flags set.
3120 		 */
3121 		ASSERT((fields & XFS_ILOG_DFORK) == 0);
3122 		break;
3123 	}
3124 
3125 	/*
3126 	 * If we logged any attribute data, recover it.  There may or
3127 	 * may not have been any other non-core data logged in this
3128 	 * transaction.
3129 	 */
3130 	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3131 		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3132 			attr_index = 3;
3133 		} else {
3134 			attr_index = 2;
3135 		}
3136 		len = item->ri_buf[attr_index].i_len;
3137 		src = item->ri_buf[attr_index].i_addr;
3138 		ASSERT(len == in_f->ilf_asize);
3139 
3140 		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3141 		case XFS_ILOG_ADATA:
3142 		case XFS_ILOG_AEXT:
3143 			dest = XFS_DFORK_APTR(dip);
3144 			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3145 			memcpy(dest, src, len);
3146 			break;
3147 
3148 		case XFS_ILOG_ABROOT:
3149 			dest = XFS_DFORK_APTR(dip);
3150 			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3151 					 len, (xfs_bmdr_block_t*)dest,
3152 					 XFS_DFORK_ASIZE(dip, mp));
3153 			break;
3154 
3155 		default:
3156 			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3157 			ASSERT(0);
3158 			error = -EIO;
3159 			goto out_release;
3160 		}
3161 	}
3162 
3163 out_owner_change:
3164 	if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
3165 		error = xfs_recover_inode_owner_change(mp, dip, in_f,
3166 						       buffer_list);
3167 	/* re-generate the checksum. */
3168 	xfs_dinode_calc_crc(log->l_mp, dip);
3169 
3170 	ASSERT(bp->b_target->bt_mount == mp);
3171 	bp->b_iodone = xlog_recover_iodone;
3172 	xfs_buf_delwri_queue(bp, buffer_list);
3173 
3174 out_release:
3175 	xfs_buf_relse(bp);
3176 error:
3177 	if (need_free)
3178 		kmem_free(in_f);
3179 	return error;
3180 }
3181 
3182 /*
3183  * Recover QUOTAOFF records. We simply make a note of it in the xlog
3184  * structure, so that we know not to do any dquot item or dquot buffer
3185  * recovery of that type.
3186  */
3187 STATIC int
3188 xlog_recover_quotaoff_pass1(
3189 	struct xlog			*log,
3190 	struct xlog_recover_item	*item)
3191 {
3192 	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
3193 	ASSERT(qoff_f);
3194 
3195 	/*
3196 	 * The logitem format's flag tells us if this was user quotaoff,
3197 	 * group/project quotaoff or both.
3198 	 */
3199 	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3200 		log->l_quotaoffs_flag |= XFS_DQ_USER;
3201 	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3202 		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3203 	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3204 		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3205 
3206 	return 0;
3207 }
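
/*
 * For instance, a quotaoff item with XFS_UQUOTA_ACCT set in qf_flags causes
 * XFS_DQ_USER to be set in l_quotaoffs_flag, which in turn makes
 * xlog_recover_dquot_pass2() below skip every user dquot record.
 */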
3208 
3209 /*
3210  * Recover a dquot record
3211  */
3212 STATIC int
3213 xlog_recover_dquot_pass2(
3214 	struct xlog			*log,
3215 	struct list_head		*buffer_list,
3216 	struct xlog_recover_item	*item,
3217 	xfs_lsn_t			current_lsn)
3218 {
3219 	xfs_mount_t		*mp = log->l_mp;
3220 	xfs_buf_t		*bp;
3221 	struct xfs_disk_dquot	*ddq, *recddq;
3222 	int			error;
3223 	xfs_dq_logformat_t	*dq_f;
3224 	uint			type;
3225 
3226 
3227 	/*
3228 	 * Filesystems are required to send in quota flags at mount time.
3229 	 */
3230 	if (mp->m_qflags == 0)
3231 		return 0;
3232 
3233 	recddq = item->ri_buf[1].i_addr;
3234 	if (recddq == NULL) {
3235 		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3236 		return -EIO;
3237 	}
3238 	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3239 		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3240 			item->ri_buf[1].i_len, __func__);
3241 		return -EIO;
3242 	}
3243 
3244 	/*
3245 	 * This type of quotas was turned off, so ignore this record.
3246 	 */
3247 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3248 	ASSERT(type);
3249 	if (log->l_quotaoffs_flag & type)
3250 		return 0;
3251 
3252 	/*
3253 	 * At this point we know that quota was _not_ turned off.
3254 	 * Since the mount flags are not indicating to us otherwise, this
3255 	 * must mean that quota is on, and the dquot needs to be replayed.
3256 	 * Remember that we may not have fully recovered the superblock yet,
3257 	 * so we can't do the usual trick of looking at the SB quota bits.
3258 	 *
3259 	 * The other possibility, of course, is that the quota subsystem was
3260 	 * removed since the last mount - ENOSYS.
3261 	 */
3262 	dq_f = item->ri_buf[0].i_addr;
3263 	ASSERT(dq_f);
3264 	error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3265 			   "xlog_recover_dquot_pass2 (log copy)");
3266 	if (error)
3267 		return -EIO;
3268 	ASSERT(dq_f->qlf_len == 1);
3269 
3270 	/*
3271 	 * At this point we are assuming that the dquots have been allocated
3272 	 * and hence the buffer has valid dquots stamped in it. It should,
3273 	 * therefore, pass verifier validation. If the dquot is bad, then
3274 	 * we'll return an error here, so we don't need to specifically check
3275 	 * the dquot in the buffer after the verifier has run.
3276 	 */
3277 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3278 				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3279 				   &xfs_dquot_buf_ops);
3280 	if (error)
3281 		return error;
3282 
3283 	ASSERT(bp);
3284 	ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3285 
3286 	/*
3287 	 * If the dquot has an LSN in it, recover the dquot only if it's less
3288 	 * than the lsn of the transaction we are replaying.
3289 	 */
3290 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
3291 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3292 		xfs_lsn_t	lsn = be64_to_cpu(dqb->dd_lsn);
3293 
3294 		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3295 			goto out_release;
3296 		}
3297 	}
3298 
3299 	memcpy(ddq, recddq, item->ri_buf[1].i_len);
3300 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
3301 		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3302 				 XFS_DQUOT_CRC_OFF);
3303 	}
3304 
3305 	ASSERT(dq_f->qlf_size == 2);
3306 	ASSERT(bp->b_target->bt_mount == mp);
3307 	bp->b_iodone = xlog_recover_iodone;
3308 	xfs_buf_delwri_queue(bp, buffer_list);
3309 
3310 out_release:
3311 	xfs_buf_relse(bp);
3312 	return 0;
3313 }
3314 
3315 /*
3316  * This routine is called to create an in-core extent free intent
3317  * item from the efi format structure which was logged on disk.
3318  * It allocates an in-core efi, copies the extents from the format
3319  * structure into it, and adds the efi to the AIL with the given
3320  * LSN.
3321  */
3322 STATIC int
3323 xlog_recover_efi_pass2(
3324 	struct xlog			*log,
3325 	struct xlog_recover_item	*item,
3326 	xfs_lsn_t			lsn)
3327 {
3328 	int				error;
3329 	struct xfs_mount		*mp = log->l_mp;
3330 	struct xfs_efi_log_item		*efip;
3331 	struct xfs_efi_log_format	*efi_formatp;
3332 
3333 	efi_formatp = item->ri_buf[0].i_addr;
3334 
3335 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3336 	error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3337 	if (error) {
3338 		xfs_efi_item_free(efip);
3339 		return error;
3340 	}
3341 	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3342 
3343 	spin_lock(&log->l_ailp->xa_lock);
3344 	/*
3345 	 * The EFI has two references: one for the EFD and one for the EFI to ensure
3346 	 * it makes it into the AIL. Insert the EFI into the AIL directly and
3347 	 * drop the EFI reference. Note that xfs_trans_ail_update() drops the
3348 	 * AIL lock.
3349 	 */
3350 	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3351 	xfs_efi_release(efip);
3352 	return 0;
3353 }
3354 
3355 
3356 /*
3357  * This routine is called when an EFD format structure is found in a committed
3358  * transaction in the log. Its purpose is to cancel the corresponding EFI if it
3359  * was still in the log. To do this it searches the AIL for the EFI with an id
3360  * equal to that in the EFD format structure. If we find it we drop the EFD
3361  * reference, which removes the EFI from the AIL and frees it.
3362  */
3363 STATIC int
3364 xlog_recover_efd_pass2(
3365 	struct xlog			*log,
3366 	struct xlog_recover_item	*item)
3367 {
3368 	xfs_efd_log_format_t	*efd_formatp;
3369 	xfs_efi_log_item_t	*efip = NULL;
3370 	xfs_log_item_t		*lip;
3371 	__uint64_t		efi_id;
3372 	struct xfs_ail_cursor	cur;
3373 	struct xfs_ail		*ailp = log->l_ailp;
3374 
3375 	efd_formatp = item->ri_buf[0].i_addr;
3376 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3377 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3378 	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3379 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3380 	efi_id = efd_formatp->efd_efi_id;
3381 
3382 	/*
3383 	 * Search for the EFI with the id in the EFD format structure in the
3384 	 * AIL.
3385 	 */
3386 	spin_lock(&ailp->xa_lock);
3387 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3388 	while (lip != NULL) {
3389 		if (lip->li_type == XFS_LI_EFI) {
3390 			efip = (xfs_efi_log_item_t *)lip;
3391 			if (efip->efi_format.efi_id == efi_id) {
3392 				/*
3393 				 * Drop the EFD reference to the EFI. This
3394 				 * removes the EFI from the AIL and frees it.
3395 				 */
3396 				spin_unlock(&ailp->xa_lock);
3397 				xfs_efi_release(efip);
3398 				spin_lock(&ailp->xa_lock);
3399 				break;
3400 			}
3401 		}
3402 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3403 	}
3404 
3405 	xfs_trans_ail_cursor_done(&cur);
3406 	spin_unlock(&ailp->xa_lock);
3407 
3408 	return 0;
3409 }
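
/*
 * A standalone sketch of the size check asserted in
 * xlog_recover_efd_pass2() above.  EFI/EFD format structures end in a
 * one-element extent array, so a record carrying n extents occupies
 * sizeof(struct) + (n - 1) extra extents.  The layout below is a
 * simplified stand-in, not the real xfs_efd_log_format definition.
 */
#include <stddef.h>
#include <stdint.h>

struct model_extent {
	uint64_t		start;
	uint32_t		len;
};

struct model_efd_format {
	uint16_t		type;
	uint16_t		size;
	uint32_t		nextents;
	uint64_t		efi_id;
	struct model_extent	extents[1];	/* really nextents entries */
};

static size_t
model_efd_format_bytes(uint32_t nextents)
{
	return sizeof(struct model_efd_format) +
	       (nextents - 1) * sizeof(struct model_extent);
}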
3410 
3411 /*
3412  * This routine is called when an inode create format structure is found in a
3413  * committed transaction in the log.  Its purpose is to initialise the inodes
3414  * being allocated on disk. This requires us to get inode cluster buffers that
3415  * match the range to be initialised, stamp them with inode templates and
3416  * write them out by delayed write so that subsequent modifications will hit
3417  * the cached buffer and only need writing out at the end of recovery.
3418  */
3419 STATIC int
3420 xlog_recover_do_icreate_pass2(
3421 	struct xlog		*log,
3422 	struct list_head	*buffer_list,
3423 	xlog_recover_item_t	*item)
3424 {
3425 	struct xfs_mount	*mp = log->l_mp;
3426 	struct xfs_icreate_log	*icl;
3427 	xfs_agnumber_t		agno;
3428 	xfs_agblock_t		agbno;
3429 	unsigned int		count;
3430 	unsigned int		isize;
3431 	xfs_agblock_t		length;
3432 	int			blks_per_cluster;
3433 	int			bb_per_cluster;
3434 	int			cancel_count;
3435 	int			nbufs;
3436 	int			i;
3437 
3438 	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3439 	if (icl->icl_type != XFS_LI_ICREATE) {
3440 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3441 		return -EINVAL;
3442 	}
3443 
3444 	if (icl->icl_size != 1) {
3445 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3446 		return -EINVAL;
3447 	}
3448 
3449 	agno = be32_to_cpu(icl->icl_ag);
3450 	if (agno >= mp->m_sb.sb_agcount) {
3451 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3452 		return -EINVAL;
3453 	}
3454 	agbno = be32_to_cpu(icl->icl_agbno);
3455 	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3456 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3457 		return -EINVAL;
3458 	}
3459 	isize = be32_to_cpu(icl->icl_isize);
3460 	if (isize != mp->m_sb.sb_inodesize) {
3461 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3462 		return -EINVAL;
3463 	}
3464 	count = be32_to_cpu(icl->icl_count);
3465 	if (!count) {
3466 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3467 		return -EINVAL;
3468 	}
3469 	length = be32_to_cpu(icl->icl_length);
3470 	if (!length || length >= mp->m_sb.sb_agblocks) {
3471 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3472 		return -EINVAL;
3473 	}
3474 
3475 	/*
3476 	 * The inode chunk is either full or sparse and we only support
3477 	 * m_ialloc_min_blks sized sparse allocations at this time.
3478 	 */
3479 	if (length != mp->m_ialloc_blks &&
3480 	    length != mp->m_ialloc_min_blks) {
3481 		xfs_warn(log->l_mp,
3482 			 "%s: unsupported chunk length", __func__);
3483 		return -EINVAL;
3484 	}
3485 
3486 	/* verify inode count is consistent with extent length */
3487 	if ((count >> mp->m_sb.sb_inopblog) != length) {
3488 		xfs_warn(log->l_mp,
3489 			 "%s: inconsistent inode count and chunk length",
3490 			 __func__);
3491 		return -EINVAL;
3492 	}
3493 
3494 	/*
3495 	 * The icreate transaction can cover multiple cluster buffers and these
3496 	 * buffers could have been freed and reused. Check the individual
3497 	 * buffers for cancellation so we don't overwrite anything written after
3498 	 * a cancellation.
3499 	 */
3500 	blks_per_cluster = xfs_icluster_size_fsb(mp);
3501 	bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
3502 	nbufs = length / blks_per_cluster;
3503 	for (i = 0, cancel_count = 0; i < nbufs; i++) {
3504 		xfs_daddr_t	daddr;
3505 
3506 		daddr = XFS_AGB_TO_DADDR(mp, agno,
3507 					 agbno + i * blks_per_cluster);
3508 		if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3509 			cancel_count++;
3510 	}
3511 
3512 	/*
3513 	 * We currently only use icreate for a single allocation at a time. This
3514 	 * means we should expect either all or none of the buffers to be
3515 	 * cancelled. Be conservative and skip replay if at least one buffer is
3516 	 * cancelled, but warn the user that something is awry if the buffers
3517 	 * are not consistent.
3518 	 *
3519 	 * XXX: This must be refined to only skip cancelled clusters once we use
3520 	 * icreate for multiple chunk allocations.
3521 	 */
3522 	ASSERT(!cancel_count || cancel_count == nbufs);
3523 	if (cancel_count) {
3524 		if (cancel_count != nbufs)
3525 			xfs_warn(mp,
3526 	"WARNING: partial inode chunk cancellation, skipped icreate.");
3527 		trace_xfs_log_recover_icreate_cancel(log, icl);
3528 		return 0;
3529 	}
3530 
3531 	trace_xfs_log_recover_icreate_recover(log, icl);
3532 	return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3533 				     length, be32_to_cpu(icl->icl_gen));
3534 }
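
/*
 * The count/length consistency check above, as a worked standalone
 * example (names hypothetical).  sb_inopblog is log2(inodes per
 * block), so the inode count shifted right by it must equal the chunk
 * length in blocks: with 4096-byte blocks and 512-byte inodes there
 * are 8 inodes per block (inopblog = 3), so a 64-inode chunk must
 * cover 64 >> 3 = 8 blocks.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
model_icreate_geometry_ok(uint32_t count, uint32_t length_blocks,
			  uint32_t inopblog)
{
	return count != 0 && (count >> inopblog) == length_blocks;
}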
3535 
3536 STATIC void
3537 xlog_recover_buffer_ra_pass2(
3538 	struct xlog                     *log,
3539 	struct xlog_recover_item        *item)
3540 {
3541 	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
3542 	struct xfs_mount		*mp = log->l_mp;
3543 
3544 	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3545 			buf_f->blf_len, buf_f->blf_flags)) {
3546 		return;
3547 	}
3548 
3549 	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3550 				buf_f->blf_len, NULL);
3551 }
3552 
3553 STATIC void
3554 xlog_recover_inode_ra_pass2(
3555 	struct xlog                     *log,
3556 	struct xlog_recover_item        *item)
3557 {
3558 	struct xfs_inode_log_format	ilf_buf;
3559 	struct xfs_inode_log_format	*ilfp;
3560 	struct xfs_mount		*mp = log->l_mp;
3561 	int			error;
3562 
3563 	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3564 		ilfp = item->ri_buf[0].i_addr;
3565 	} else {
3566 		ilfp = &ilf_buf;
3567 		memset(ilfp, 0, sizeof(*ilfp));
3568 		error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3569 		if (error)
3570 			return;
3571 	}
3572 
3573 	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3574 		return;
3575 
3576 	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3577 				ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3578 }
3579 
3580 STATIC void
3581 xlog_recover_dquot_ra_pass2(
3582 	struct xlog			*log,
3583 	struct xlog_recover_item	*item)
3584 {
3585 	struct xfs_mount	*mp = log->l_mp;
3586 	struct xfs_disk_dquot	*recddq;
3587 	struct xfs_dq_logformat	*dq_f;
3588 	uint			type;
3589 	int			len;
3590 
3591 
3592 	if (mp->m_qflags == 0)
3593 		return;
3594 
3595 	recddq = item->ri_buf[1].i_addr;
3596 	if (recddq == NULL)
3597 		return;
3598 	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3599 		return;
3600 
3601 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3602 	ASSERT(type);
3603 	if (log->l_quotaoffs_flag & type)
3604 		return;
3605 
3606 	dq_f = item->ri_buf[0].i_addr;
3607 	ASSERT(dq_f);
3608 	ASSERT(dq_f->qlf_len == 1);
3609 
3610 	len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
3611 	if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
3612 		return;
3613 
3614 	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
3615 			  &xfs_dquot_buf_ra_ops);
3616 }
3617 
3618 STATIC void
3619 xlog_recover_ra_pass2(
3620 	struct xlog			*log,
3621 	struct xlog_recover_item	*item)
3622 {
3623 	switch (ITEM_TYPE(item)) {
3624 	case XFS_LI_BUF:
3625 		xlog_recover_buffer_ra_pass2(log, item);
3626 		break;
3627 	case XFS_LI_INODE:
3628 		xlog_recover_inode_ra_pass2(log, item);
3629 		break;
3630 	case XFS_LI_DQUOT:
3631 		xlog_recover_dquot_ra_pass2(log, item);
3632 		break;
3633 	case XFS_LI_EFI:
3634 	case XFS_LI_EFD:
3635 	case XFS_LI_QUOTAOFF:
3636 	default:
3637 		break;
3638 	}
3639 }
3640 
3641 STATIC int
3642 xlog_recover_commit_pass1(
3643 	struct xlog			*log,
3644 	struct xlog_recover		*trans,
3645 	struct xlog_recover_item	*item)
3646 {
3647 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3648 
3649 	switch (ITEM_TYPE(item)) {
3650 	case XFS_LI_BUF:
3651 		return xlog_recover_buffer_pass1(log, item);
3652 	case XFS_LI_QUOTAOFF:
3653 		return xlog_recover_quotaoff_pass1(log, item);
3654 	case XFS_LI_INODE:
3655 	case XFS_LI_EFI:
3656 	case XFS_LI_EFD:
3657 	case XFS_LI_DQUOT:
3658 	case XFS_LI_ICREATE:
3659 		/* nothing to do in pass 1 */
3660 		return 0;
3661 	default:
3662 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3663 			__func__, ITEM_TYPE(item));
3664 		ASSERT(0);
3665 		return -EIO;
3666 	}
3667 }
3668 
3669 STATIC int
3670 xlog_recover_commit_pass2(
3671 	struct xlog			*log,
3672 	struct xlog_recover		*trans,
3673 	struct list_head		*buffer_list,
3674 	struct xlog_recover_item	*item)
3675 {
3676 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3677 
3678 	switch (ITEM_TYPE(item)) {
3679 	case XFS_LI_BUF:
3680 		return xlog_recover_buffer_pass2(log, buffer_list, item,
3681 						 trans->r_lsn);
3682 	case XFS_LI_INODE:
3683 		return xlog_recover_inode_pass2(log, buffer_list, item,
3684 						 trans->r_lsn);
3685 	case XFS_LI_EFI:
3686 		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3687 	case XFS_LI_EFD:
3688 		return xlog_recover_efd_pass2(log, item);
3689 	case XFS_LI_DQUOT:
3690 		return xlog_recover_dquot_pass2(log, buffer_list, item,
3691 						trans->r_lsn);
3692 	case XFS_LI_ICREATE:
3693 		return xlog_recover_do_icreate_pass2(log, buffer_list, item);
3694 	case XFS_LI_QUOTAOFF:
3695 		/* nothing to do in pass2 */
3696 		return 0;
3697 	default:
3698 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3699 			__func__, ITEM_TYPE(item));
3700 		ASSERT(0);
3701 		return -EIO;
3702 	}
3703 }
3704 
3705 STATIC int
3706 xlog_recover_items_pass2(
3707 	struct xlog                     *log,
3708 	struct xlog_recover             *trans,
3709 	struct list_head                *buffer_list,
3710 	struct list_head                *item_list)
3711 {
3712 	struct xlog_recover_item	*item;
3713 	int				error = 0;
3714 
3715 	list_for_each_entry(item, item_list, ri_list) {
3716 		error = xlog_recover_commit_pass2(log, trans,
3717 					  buffer_list, item);
3718 		if (error)
3719 			return error;
3720 	}
3721 
3722 	return error;
3723 }
3724 
3725 /*
3726  * Perform the transaction.
3727  *
3728  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
3729  * EFIs and EFDs get queued up by adding entries into the AIL for them.
3730  */
3731 STATIC int
3732 xlog_recover_commit_trans(
3733 	struct xlog		*log,
3734 	struct xlog_recover	*trans,
3735 	int			pass)
3736 {
3737 	int				error = 0;
3738 	int				error2;
3739 	int				items_queued = 0;
3740 	struct xlog_recover_item	*item;
3741 	struct xlog_recover_item	*next;
3742 	LIST_HEAD			(buffer_list);
3743 	LIST_HEAD			(ra_list);
3744 	LIST_HEAD			(done_list);
3745 
3746 	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
3747 
3748 	hlist_del(&trans->r_list);
3749 
3750 	error = xlog_recover_reorder_trans(log, trans, pass);
3751 	if (error)
3752 		return error;
3753 
3754 	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
3755 		switch (pass) {
3756 		case XLOG_RECOVER_PASS1:
3757 			error = xlog_recover_commit_pass1(log, trans, item);
3758 			break;
3759 		case XLOG_RECOVER_PASS2:
3760 			xlog_recover_ra_pass2(log, item);
3761 			list_move_tail(&item->ri_list, &ra_list);
3762 			items_queued++;
3763 			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
3764 				error = xlog_recover_items_pass2(log, trans,
3765 						&buffer_list, &ra_list);
3766 				list_splice_tail_init(&ra_list, &done_list);
3767 				items_queued = 0;
3768 			}
3769 
3770 			break;
3771 		default:
3772 			ASSERT(0);
3773 		}
3774 
3775 		if (error)
3776 			goto out;
3777 	}
3778 
3779 out:
3780 	if (!list_empty(&ra_list)) {
3781 		if (!error)
3782 			error = xlog_recover_items_pass2(log, trans,
3783 					&buffer_list, &ra_list);
3784 		list_splice_tail_init(&ra_list, &done_list);
3785 	}
3786 
3787 	if (!list_empty(&done_list))
3788 		list_splice_init(&done_list, &trans->r_itemq);
3789 
3790 	error2 = xfs_buf_delwri_submit(&buffer_list);
3791 	return error ? error : error2;
3792 }
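
/*
 * The shape of the batching done above, sketched with plain C arrays:
 * queue up to a fixed number of items, issuing read-ahead for each as
 * it is queued, then replay the whole batch so each replay finds its
 * buffer already in flight or cached.  model_item and the callbacks
 * are hypothetical stand-ins for the xlog_recover_item machinery.
 */
#include <stddef.h>

#define MODEL_BATCH_MAX	100

struct model_item {
	int	payload;
};

static void model_readahead(struct model_item *item) { (void)item; }
static int  model_replay(struct model_item *item)    { (void)item; return 0; }

static int
model_commit_items(struct model_item *items, size_t nitems)
{
	size_t	i, batch_start = 0;
	int	error = 0;

	for (i = 0; i < nitems; i++) {
		model_readahead(&items[i]);
		if (i - batch_start + 1 < MODEL_BATCH_MAX && i + 1 < nitems)
			continue;
		/* batch full or input exhausted: replay everything queued */
		for (; batch_start <= i; batch_start++) {
			error = model_replay(&items[batch_start]);
			if (error)
				return error;
		}
	}
	return error;
}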
3793 
3794 STATIC void
3795 xlog_recover_add_item(
3796 	struct list_head	*head)
3797 {
3798 	xlog_recover_item_t	*item;
3799 
3800 	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
3801 	INIT_LIST_HEAD(&item->ri_list);
3802 	list_add_tail(&item->ri_list, head);
3803 }
3804 
3805 STATIC int
3806 xlog_recover_add_to_cont_trans(
3807 	struct xlog		*log,
3808 	struct xlog_recover	*trans,
3809 	char			*dp,
3810 	int			len)
3811 {
3812 	xlog_recover_item_t	*item;
3813 	char			*ptr, *old_ptr;
3814 	int			old_len;
3815 
3816 	/*
3817 	 * If the transaction is empty, the header was split across this and the
3818 	 * previous record. Copy the rest of the header.
3819 	 */
3820 	if (list_empty(&trans->r_itemq)) {
3821 		ASSERT(len <= sizeof(struct xfs_trans_header));
3822 		if (len > sizeof(struct xfs_trans_header)) {
3823 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
3824 			return -EIO;
3825 		}
3826 
3827 		xlog_recover_add_item(&trans->r_itemq);
3828 		ptr = (char *)&trans->r_theader +
3829 				sizeof(struct xfs_trans_header) - len;
3830 		memcpy(ptr, dp, len);
3831 		return 0;
3832 	}
3833 
3834 	/* take the tail entry */
3835 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
3836 
3837 	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
3838 	old_len = item->ri_buf[item->ri_cnt-1].i_len;
3839 
3840 	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
3841 	memcpy(&ptr[old_len], dp, len);
3842 	item->ri_buf[item->ri_cnt-1].i_len += len;
3843 	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
3844 	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
3845 	return 0;
3846 }
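
/*
 * The continuation path above grows the last region in place.  A
 * self-contained equivalent using the standard allocator, assuming
 * kmem_realloc(..., KM_SLEEP) behaves like a realloc that sleeps
 * instead of failing:
 */
#include <stdlib.h>
#include <string.h>

static int
model_append_region(char **bufp, size_t *lenp, const char *dp, size_t len)
{
	char	*ptr = realloc(*bufp, *lenp + len);

	if (!ptr)		/* the kernel KM_SLEEP variant cannot fail */
		return -1;
	memcpy(ptr + *lenp, dp, len);
	*bufp = ptr;
	*lenp += len;
	return 0;
}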
3847 
3848 /*
3849  * The next region to add is the start of a new region.  It could be
3850  * The next region to add is the start of a new region.  It could be
3851  * a whole region or just the first part of one.  Because
3852  * format structures fit into the first 32 bits of the structure.
3853  *
3854  * This works because all regions must be 32 bit aligned.  Therefore, we
3855  * either have both fields or we have neither field.  In the case we have
3856  * neither field, the data part of the region is zero length.  We only have
3857  * a log_op_header and can throw away the header since a new one will appear
3858  * later.  If we have at least 4 bytes, then we can determine how many regions
3859  * will appear in the current log item.
3860  */
3861 STATIC int
3862 xlog_recover_add_to_trans(
3863 	struct xlog		*log,
3864 	struct xlog_recover	*trans,
3865 	char			*dp,
3866 	int			len)
3867 {
3868 	xfs_inode_log_format_t	*in_f;			/* any will do */
3869 	xlog_recover_item_t	*item;
3870 	char			*ptr;
3871 
3872 	if (!len)
3873 		return 0;
3874 	if (list_empty(&trans->r_itemq)) {
3875 		/* we need to catch log corruptions here */
3876 		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
3877 			xfs_warn(log->l_mp, "%s: bad header magic number",
3878 				__func__);
3879 			ASSERT(0);
3880 			return -EIO;
3881 		}
3882 
3883 		if (len > sizeof(struct xfs_trans_header)) {
3884 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
3885 			ASSERT(0);
3886 			return -EIO;
3887 		}
3888 
3889 		/*
3890 		 * The transaction header can be arbitrarily split across op
3891 		 * records. If we don't have the whole thing here, copy what we
3892 		 * do have and handle the rest in the next record.
3893 		 */
3894 		if (len == sizeof(struct xfs_trans_header))
3895 			xlog_recover_add_item(&trans->r_itemq);
3896 		memcpy(&trans->r_theader, dp, len);
3897 		return 0;
3898 	}
3899 
3900 	ptr = kmem_alloc(len, KM_SLEEP);
3901 	memcpy(ptr, dp, len);
3902 	in_f = (xfs_inode_log_format_t *)ptr;
3903 
3904 	/* take the tail entry */
3905 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
3906 	if (item->ri_total != 0 &&
3907 	     item->ri_total == item->ri_cnt) {
3908 		/* tail item is in use, get a new one */
3909 		xlog_recover_add_item(&trans->r_itemq);
3910 		item = list_entry(trans->r_itemq.prev,
3911 					xlog_recover_item_t, ri_list);
3912 	}
3913 
3914 	if (item->ri_total == 0) {		/* first region to be added */
3915 		if (in_f->ilf_size == 0 ||
3916 		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
3917 			xfs_warn(log->l_mp,
3918 		"bad number of regions (%d) in inode log format",
3919 				  in_f->ilf_size);
3920 			ASSERT(0);
3921 			kmem_free(ptr);
3922 			return -EIO;
3923 		}
3924 
3925 		item->ri_total = in_f->ilf_size;
3926 		item->ri_buf =
3927 			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
3928 				    KM_SLEEP);
3929 	}
3930 	ASSERT(item->ri_total > item->ri_cnt);
3931 	/* Description region is ri_buf[0] */
3932 	item->ri_buf[item->ri_cnt].i_addr = ptr;
3933 	item->ri_buf[item->ri_cnt].i_len  = len;
3934 	item->ri_cnt++;
3935 	trace_xfs_log_recover_item_add(log, trans, item, 0);
3936 	return 0;
3937 }
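
/*
 * The "first 32 bits" assumption documented above, modelled directly:
 * every log item format structure begins with a 16-bit type and a
 * 16-bit region count, so four bytes of a new region suffice to size
 * the item's region array.  Simplified layout, not the real
 * xfs_inode_log_format.
 */
#include <stdint.h>

struct model_item_header {
	uint16_t	type;	/* an XFS_LI_* value */
	uint16_t	size;	/* number of regions in this item */
};

static uint16_t
model_region_count(const struct model_item_header *hdr)
{
	return hdr->size;	/* valid even if only 4 bytes arrived */
}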
3938 
3939 /*
3940  * Free up any resources allocated by the transaction
3941  *
3942  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
3943  */
3944 STATIC void
3945 xlog_recover_free_trans(
3946 	struct xlog_recover	*trans)
3947 {
3948 	xlog_recover_item_t	*item, *n;
3949 	int			i;
3950 
3951 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
3952 		/* Free the regions in the item. */
3953 		list_del(&item->ri_list);
3954 		for (i = 0; i < item->ri_cnt; i++)
3955 			kmem_free(item->ri_buf[i].i_addr);
3956 		/* Free the item itself */
3957 		kmem_free(item->ri_buf);
3958 		kmem_free(item);
3959 	}
3960 	/* Free the transaction recover structure */
3961 	kmem_free(trans);
3962 }
3963 
3964 /*
3965  * On error or completion, trans is freed.
3966  */
3967 STATIC int
3968 xlog_recovery_process_trans(
3969 	struct xlog		*log,
3970 	struct xlog_recover	*trans,
3971 	char			*dp,
3972 	unsigned int		len,
3973 	unsigned int		flags,
3974 	int			pass)
3975 {
3976 	int			error = 0;
3977 	bool			freeit = false;
3978 
3979 	/* mask off ophdr transaction container flags */
3980 	flags &= ~XLOG_END_TRANS;
3981 	if (flags & XLOG_WAS_CONT_TRANS)
3982 		flags &= ~XLOG_CONTINUE_TRANS;
3983 
3984 	/*
3985 	 * Callees must not free the trans structure. We'll decide if we need to
3986 	 * free it or not based on the operation being done and its result.
3987 	 */
3988 	switch (flags) {
3989 	/* expected flag values */
3990 	case 0:
3991 	case XLOG_CONTINUE_TRANS:
3992 		error = xlog_recover_add_to_trans(log, trans, dp, len);
3993 		break;
3994 	case XLOG_WAS_CONT_TRANS:
3995 		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
3996 		break;
3997 	case XLOG_COMMIT_TRANS:
3998 		error = xlog_recover_commit_trans(log, trans, pass);
3999 		/* success or fail, we are now done with this transaction. */
4000 		freeit = true;
4001 		break;
4002 
4003 	/* unexpected flag values */
4004 	case XLOG_UNMOUNT_TRANS:
4005 		/* just skip trans */
4006 		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
4007 		freeit = true;
4008 		break;
4009 	case XLOG_START_TRANS:
4010 	default:
4011 		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
4012 		ASSERT(0);
4013 		error = -EIO;
4014 		break;
4015 	}
4016 	if (error || freeit)
4017 		xlog_recover_free_trans(trans);
4018 	return error;
4019 }
4020 
4021 /*
4022  * Lookup the transaction recovery structure associated with the ID in the
4023  * current ophdr. If the transaction doesn't exist and the start flag is set in
4024  * the ophdr, then allocate a new transaction for future ID matches to find.
4025  * Either way, return what we found during the lookup - an existing transaction
4026  * or nothing.
4027  */
4028 STATIC struct xlog_recover *
4029 xlog_recover_ophdr_to_trans(
4030 	struct hlist_head	rhash[],
4031 	struct xlog_rec_header	*rhead,
4032 	struct xlog_op_header	*ohead)
4033 {
4034 	struct xlog_recover	*trans;
4035 	xlog_tid_t		tid;
4036 	struct hlist_head	*rhp;
4037 
4038 	tid = be32_to_cpu(ohead->oh_tid);
4039 	rhp = &rhash[XLOG_RHASH(tid)];
4040 	hlist_for_each_entry(trans, rhp, r_list) {
4041 		if (trans->r_log_tid == tid)
4042 			return trans;
4043 	}
4044 
4045 	/*
4046 	 * skip over non-start transaction headers - we could be
4047 	 * processing slack space before the next transaction starts
4048 	 */
4049 	if (!(ohead->oh_flags & XLOG_START_TRANS))
4050 		return NULL;
4051 
4052 	ASSERT(be32_to_cpu(ohead->oh_len) == 0);
4053 
4054 	/*
4055 	 * This is a new transaction so allocate a new recovery container to
4056 	 * hold the recovery ops that will follow.
4057 	 */
4058 	trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
4059 	trans->r_log_tid = tid;
4060 	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
4061 	INIT_LIST_HEAD(&trans->r_itemq);
4062 	INIT_HLIST_NODE(&trans->r_list);
4063 	hlist_add_head(&trans->r_list, rhp);
4064 
4065 	/*
4066 	 * Nothing more to do for this ophdr. Items to be added to this new
4067 	 * transaction will be in subsequent ophdr containers.
4068 	 */
4069 	return NULL;
4070 }
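
/*
 * The lookup above, sketched with a toy chained hash table: find the
 * recovery container for a transaction ID, or allocate one only when
 * the op carries the start flag.  The structure and helpers are
 * hypothetical stand-ins for xlog_recover and the kernel hlist code.
 */
#include <stdint.h>
#include <stdlib.h>

#define MODEL_RHASH_SIZE	16
#define MODEL_RHASH(tid)	((tid) & (MODEL_RHASH_SIZE - 1))

struct model_trans {
	uint32_t		tid;
	struct model_trans	*next;
};

static struct model_trans *
model_tid_to_trans(struct model_trans *rhash[], uint32_t tid, int is_start)
{
	struct model_trans	**head = &rhash[MODEL_RHASH(tid)];
	struct model_trans	*trans;

	for (trans = *head; trans; trans = trans->next)
		if (trans->tid == tid)
			return trans;

	if (!is_start)		/* slack space before the next transaction */
		return NULL;

	trans = calloc(1, sizeof(*trans));
	if (trans) {
		trans->tid = tid;
		trans->next = *head;
		*head = trans;
	}
	/* items for the new transaction arrive in later ophdrs */
	return NULL;
}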
4071 
4072 STATIC int
4073 xlog_recover_process_ophdr(
4074 	struct xlog		*log,
4075 	struct hlist_head	rhash[],
4076 	struct xlog_rec_header	*rhead,
4077 	struct xlog_op_header	*ohead,
4078 	char			*dp,
4079 	char			*end,
4080 	int			pass)
4081 {
4082 	struct xlog_recover	*trans;
4083 	unsigned int		len;
4084 
4085 	/* Do we understand who wrote this op? */
4086 	if (ohead->oh_clientid != XFS_TRANSACTION &&
4087 	    ohead->oh_clientid != XFS_LOG) {
4088 		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4089 			__func__, ohead->oh_clientid);
4090 		ASSERT(0);
4091 		return -EIO;
4092 	}
4093 
4094 	/*
4095 	 * Check the ophdr contains all the data it is supposed to contain.
4096 	 */
4097 	len = be32_to_cpu(ohead->oh_len);
4098 	if (dp + len > end) {
4099 		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4100 		WARN_ON(1);
4101 		return -EIO;
4102 	}
4103 
4104 	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4105 	if (!trans) {
4106 		/* nothing to do, so skip over this ophdr */
4107 		return 0;
4108 	}
4109 
4110 	return xlog_recovery_process_trans(log, trans, dp, len,
4111 					   ohead->oh_flags, pass);
4112 }
4113 
4114 /*
4115  * There are two valid states of the r_state field.  0 indicates that the
4116  * transaction structure is in a normal state.  We have either seen the
4117  * start of the transaction or the last operation we added was not a partial
4118  * operation.  If the last operation we added to the transaction was a
4119  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
4120  *
4121  * NOTE: skip LRs with 0 data length.
4122  */
4123 STATIC int
4124 xlog_recover_process_data(
4125 	struct xlog		*log,
4126 	struct hlist_head	rhash[],
4127 	struct xlog_rec_header	*rhead,
4128 	char			*dp,
4129 	int			pass)
4130 {
4131 	struct xlog_op_header	*ohead;
4132 	char			*end;
4133 	int			num_logops;
4134 	int			error;
4135 
4136 	end = dp + be32_to_cpu(rhead->h_len);
4137 	num_logops = be32_to_cpu(rhead->h_num_logops);
4138 
4139 	/* check the log format matches our own - else we can't recover */
4140 	if (xlog_header_check_recover(log->l_mp, rhead))
4141 		return -EIO;
4142 
4143 	while ((dp < end) && num_logops) {
4144 
4145 		ohead = (struct xlog_op_header *)dp;
4146 		dp += sizeof(*ohead);
4147 		ASSERT(dp <= end);
4148 
4149 		/* errors will abort recovery */
4150 		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4151 						    dp, end, pass);
4152 		if (error)
4153 			return error;
4154 
4155 		dp += be32_to_cpu(ohead->oh_len);
4156 		num_logops--;
4157 	}
4158 	return 0;
4159 }
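
/*
 * The record body walked above is a packed sequence of
 * [op header][payload] pairs.  A minimal standalone walker over the
 * same shape, with endian conversion elided and a callback standing
 * in for xlog_recover_process_ophdr():
 */
#include <stdint.h>
#include <string.h>

struct model_op_header {
	uint32_t	len;	/* payload bytes following this header */
};

static int
model_walk_ops(const char *dp, const char *end,
	       int (*process)(const struct model_op_header *, const char *))
{
	while (dp + sizeof(struct model_op_header) <= end) {
		struct model_op_header	ohead;

		memcpy(&ohead, dp, sizeof(ohead));
		dp += sizeof(ohead);
		if (ohead.len > (size_t)(end - dp))
			return -1;	/* corrupt: op overruns the record */
		if (process(&ohead, dp))
			return -1;
		dp += ohead.len;
	}
	return 0;
}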
4160 
4161 /*
4162  * Process an extent free intent item that was recovered from
4163  * the log.  We need to free the extents that it describes.
4164  */
4165 STATIC int
4166 xlog_recover_process_efi(
4167 	xfs_mount_t		*mp,
4168 	xfs_efi_log_item_t	*efip)
4169 {
4170 	xfs_efd_log_item_t	*efdp;
4171 	xfs_trans_t		*tp;
4172 	int			i;
4173 	int			error = 0;
4174 	xfs_extent_t		*extp;
4175 	xfs_fsblock_t		startblock_fsb;
4176 
4177 	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
4178 
4179 	/*
4180 	 * First check the validity of the extents described by the
4181 	 * EFI.  If any are bad, then assume that all are bad and
4182 	 * just toss the EFI.
4183 	 */
4184 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
4185 		extp = &(efip->efi_format.efi_extents[i]);
4186 		startblock_fsb = XFS_BB_TO_FSB(mp,
4187 				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
4188 		if ((startblock_fsb == 0) ||
4189 		    (extp->ext_len == 0) ||
4190 		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
4191 		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
4192 			/*
4193 			 * This will pull the EFI from the AIL and
4194 			 * free the memory associated with it.
4195 			 */
4196 			set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
4197 			xfs_efi_release(efip);
4198 			return -EIO;
4199 		}
4200 	}
4201 
4202 	tp = xfs_trans_alloc(mp, 0);
4203 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
4204 	if (error)
4205 		goto abort_error;
4206 	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
4207 
4208 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
4209 		extp = &(efip->efi_format.efi_extents[i]);
4210 		error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
4211 					      extp->ext_len);
4212 		if (error)
4213 			goto abort_error;
4214 
4215 	}
4216 
4217 	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
4218 	error = xfs_trans_commit(tp);
4219 	return error;
4220 
4221 abort_error:
4222 	xfs_trans_cancel(tp);
4223 	return error;
4224 }
4225 
4226 /*
4227  * When this is called, all of the EFIs which did not have
4228  * corresponding EFDs should be in the AIL.  What we do now
4229  * is free the extents associated with each one.
4230  *
4231  * Since we process the EFIs in normal transactions, they
4232  * will be removed at some point after the commit.  This prevents
4233  * us from just walking down the list processing each one.
4234  * We'll use a flag in the EFI to skip those that we've already
4235  * processed and use the AIL iteration mechanism's generation
4236  * count to try to speed this up at least a bit.
4237  *
4238  * When we start, we know that the EFIs are the only things in
4239  * the AIL.  As we process them, however, other items are added
4240  * to the AIL.  Since everything added to the AIL must come after
4241  * everything already in the AIL, we stop processing as soon as
4242  * we see something other than an EFI in the AIL.
4243  */
4244 STATIC int
4245 xlog_recover_process_efis(
4246 	struct xlog		*log)
4247 {
4248 	struct xfs_log_item	*lip;
4249 	struct xfs_efi_log_item	*efip;
4250 	int			error = 0;
4251 	struct xfs_ail_cursor	cur;
4252 	struct xfs_ail		*ailp;
4253 
4254 	ailp = log->l_ailp;
4255 	spin_lock(&ailp->xa_lock);
4256 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4257 	while (lip != NULL) {
4258 		/*
4259 		 * We're done when we see something other than an EFI.
4260 		 * There should be no EFIs left in the AIL now.
4261 		 */
4262 		if (lip->li_type != XFS_LI_EFI) {
4263 #ifdef DEBUG
4264 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4265 				ASSERT(lip->li_type != XFS_LI_EFI);
4266 #endif
4267 			break;
4268 		}
4269 
4270 		/*
4271 		 * Skip EFIs that we've already processed.
4272 		 */
4273 		efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4274 		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
4275 			lip = xfs_trans_ail_cursor_next(ailp, &cur);
4276 			continue;
4277 		}
4278 
4279 		spin_unlock(&ailp->xa_lock);
4280 		error = xlog_recover_process_efi(log->l_mp, efip);
4281 		spin_lock(&ailp->xa_lock);
4282 		if (error)
4283 			goto out;
4284 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
4285 	}
4286 out:
4287 	xfs_trans_ail_cursor_done(&cur);
4288 	spin_unlock(&ailp->xa_lock);
4289 	return error;
4290 }
4291 
4292 /*
4293  * A cancel occurs when the mount has failed and we're bailing out. Release all
4294  * pending EFIs so they don't pin the AIL.
4295  */
4296 STATIC int
4297 xlog_recover_cancel_efis(
4298 	struct xlog		*log)
4299 {
4300 	struct xfs_log_item	*lip;
4301 	struct xfs_efi_log_item	*efip;
4302 	int			error = 0;
4303 	struct xfs_ail_cursor	cur;
4304 	struct xfs_ail		*ailp;
4305 
4306 	ailp = log->l_ailp;
4307 	spin_lock(&ailp->xa_lock);
4308 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4309 	while (lip != NULL) {
4310 		/*
4311 		 * We're done when we see something other than an EFI.
4312 		 * There should be no EFIs left in the AIL now.
4313 		 */
4314 		if (lip->li_type != XFS_LI_EFI) {
4315 #ifdef DEBUG
4316 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4317 				ASSERT(lip->li_type != XFS_LI_EFI);
4318 #endif
4319 			break;
4320 		}
4321 
4322 		efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4323 
4324 		spin_unlock(&ailp->xa_lock);
4325 		xfs_efi_release(efip);
4326 		spin_lock(&ailp->xa_lock);
4327 
4328 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
4329 	}
4330 
4331 	xfs_trans_ail_cursor_done(&cur);
4332 	spin_unlock(&ailp->xa_lock);
4333 	return error;
4334 }
4335 
4336 /*
4337  * This routine performs a transaction to null out a bad inode pointer
4338  * in an agi unlinked inode hash bucket.
4339  */
4340 STATIC void
4341 xlog_recover_clear_agi_bucket(
4342 	xfs_mount_t	*mp,
4343 	xfs_agnumber_t	agno,
4344 	int		bucket)
4345 {
4346 	xfs_trans_t	*tp;
4347 	xfs_agi_t	*agi;
4348 	xfs_buf_t	*agibp;
4349 	int		offset;
4350 	int		error;
4351 
4352 	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
4353 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
4354 	if (error)
4355 		goto out_abort;
4356 
4357 	error = xfs_read_agi(mp, tp, agno, &agibp);
4358 	if (error)
4359 		goto out_abort;
4360 
4361 	agi = XFS_BUF_TO_AGI(agibp);
4362 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
4363 	offset = offsetof(xfs_agi_t, agi_unlinked) +
4364 		 (sizeof(xfs_agino_t) * bucket);
4365 	xfs_trans_log_buf(tp, agibp, offset,
4366 			  (offset + sizeof(xfs_agino_t) - 1));
4367 
4368 	error = xfs_trans_commit(tp);
4369 	if (error)
4370 		goto out_error;
4371 	return;
4372 
4373 out_abort:
4374 	xfs_trans_cancel(tp);
4375 out_error:
4376 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
4377 	return;
4378 }
4379 
4380 STATIC xfs_agino_t
4381 xlog_recover_process_one_iunlink(
4382 	struct xfs_mount		*mp,
4383 	xfs_agnumber_t			agno,
4384 	xfs_agino_t			agino,
4385 	int				bucket)
4386 {
4387 	struct xfs_buf			*ibp;
4388 	struct xfs_dinode		*dip;
4389 	struct xfs_inode		*ip;
4390 	xfs_ino_t			ino;
4391 	int				error;
4392 
4393 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
4394 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
4395 	if (error)
4396 		goto fail;
4397 
4398 	/*
4399 	 * Get the on disk inode to find the next inode in the bucket.
4400 	 */
4401 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
4402 	if (error)
4403 		goto fail_iput;
4404 
4405 	ASSERT(ip->i_d.di_nlink == 0);
4406 	ASSERT(ip->i_d.di_mode != 0);
4407 
4408 	/* setup for the next pass */
4409 	agino = be32_to_cpu(dip->di_next_unlinked);
4410 	xfs_buf_relse(ibp);
4411 
4412 	/*
4413 	 * Prevent any DMAPI event from being sent when the reference on
4414 	 * the inode is dropped.
4415 	 */
4416 	ip->i_d.di_dmevmask = 0;
4417 
4418 	IRELE(ip);
4419 	return agino;
4420 
4421  fail_iput:
4422 	IRELE(ip);
4423  fail:
4424 	/*
4425 	 * We can't read in the inode this bucket points to, or this inode
4426 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
4427 	 * some inodes and space, but at least we won't hang.
4428 	 *
4429 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
4430 	 * clear the inode pointer in the bucket.
4431 	 */
4432 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
4433 	return NULLAGINO;
4434 }
4435 
4436 /*
4437  * xlog_iunlink_recover
4438  *
4439  * This is called during recovery to process any inodes which
4440  * we unlinked but not freed when the system crashed.  These
4441  * inodes will be on the lists in the AGI blocks.  What we do
4442  * here is scan all the AGIs and fully truncate and free any
4443  * inodes found on the lists.  Each inode is removed from the
4444  * lists when it has been fully truncated and is freed.  The
4445  * freeing of the inode and its removal from the list must be
4446  * atomic.
4447  */
4448 STATIC void
4449 xlog_recover_process_iunlinks(
4450 	struct xlog	*log)
4451 {
4452 	xfs_mount_t	*mp;
4453 	xfs_agnumber_t	agno;
4454 	xfs_agi_t	*agi;
4455 	xfs_buf_t	*agibp;
4456 	xfs_agino_t	agino;
4457 	int		bucket;
4458 	int		error;
4459 	uint		mp_dmevmask;
4460 
4461 	mp = log->l_mp;
4462 
4463 	/*
4464 	 * Prevent any DMAPI event from being sent while in this function.
4465 	 */
4466 	mp_dmevmask = mp->m_dmevmask;
4467 	mp->m_dmevmask = 0;
4468 
4469 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4470 		/*
4471 		 * Find the agi for this ag.
4472 		 */
4473 		error = xfs_read_agi(mp, NULL, agno, &agibp);
4474 		if (error) {
4475 			/*
4476 			 * AGI is b0rked. Don't process it.
4477 			 *
4478 			 * We should probably mark the filesystem as corrupt
4479 			 * after we've recovered all the ag's we can....
4480 			 * after we've recovered all the AGs we can....
4481 			continue;
4482 		}
4483 		/*
4484 		 * Unlock the buffer so that it can be acquired in the normal
4485 		 * course of the transaction to truncate and free each inode.
4486 		 * Because we are not racing with anyone else here for the AGI
4487 		 * buffer, we don't even need to hold it locked to read the
4488 		 * initial unlinked bucket entries out of the buffer. We keep
4489 		 * initial unlinked bucket entries out of the buffer. We keep the
4490 		 * buffer reference, though, so that it stays pinned in memory
4491 		 */
4492 		agi = XFS_BUF_TO_AGI(agibp);
4493 		xfs_buf_unlock(agibp);
4494 
4495 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
4496 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
4497 			while (agino != NULLAGINO) {
4498 				agino = xlog_recover_process_one_iunlink(mp,
4499 							agno, agino, bucket);
4500 			}
4501 		}
4502 		xfs_buf_rele(agibp);
4503 	}
4504 
4505 	mp->m_dmevmask = mp_dmevmask;
4506 }
4507 
4508 STATIC int
4509 xlog_unpack_data(
4510 	struct xlog_rec_header	*rhead,
4511 	char			*dp,
4512 	struct xlog		*log)
4513 {
4514 	int			i, j, k;
4515 
4516 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
4517 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
4518 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
4519 		dp += BBSIZE;
4520 	}
4521 
4522 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4523 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
4524 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
4525 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4526 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4527 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
4528 			dp += BBSIZE;
4529 		}
4530 	}
4531 
4532 	return 0;
4533 }
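
/*
 * A standalone model of what is being reversed above.  At write time
 * the first four bytes of every 512-byte basic block in a record are
 * saved into the header's cycle data array and overwritten with the
 * cycle number so that a torn write is detectable; unpacking restores
 * the saved words.  Names and the flat saved[] array are illustrative.
 */
#include <stdint.h>
#include <string.h>

#define MODEL_BBSIZE	512

static void
model_pack_cycle_data(char *dp, int nbblks, uint32_t cycle_be,
		      uint32_t *saved)
{
	int	i;

	for (i = 0; i < nbblks; i++) {
		memcpy(&saved[i], dp + i * MODEL_BBSIZE, sizeof(uint32_t));
		memcpy(dp + i * MODEL_BBSIZE, &cycle_be, sizeof(uint32_t));
	}
}

static void
model_unpack_cycle_data(char *dp, int nbblks, const uint32_t *saved)
{
	int	i;

	for (i = 0; i < nbblks; i++)
		memcpy(dp + i * MODEL_BBSIZE, &saved[i], sizeof(uint32_t));
}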
4534 
4535 /*
4536  * CRC check, unpack and process a log record.
4537  */
4538 STATIC int
4539 xlog_recover_process(
4540 	struct xlog		*log,
4541 	struct hlist_head	rhash[],
4542 	struct xlog_rec_header	*rhead,
4543 	char			*dp,
4544 	int			pass)
4545 {
4546 	int			error;
4547 	__le32			crc;
4548 
4549 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
4550 
4551 	/*
4552 	 * Nothing else to do if this is a CRC verification pass. Just return
4553 	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
4554 	 * sets h_crc to 0 so we must consider this valid even on v5 supers.
4555 	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
4556 	 * know precisely what failed.
4557 	 */
4558 	if (pass == XLOG_RECOVER_CRCPASS) {
4559 		if (rhead->h_crc && crc != rhead->h_crc)
4560 			return -EFSBADCRC;
4561 		return 0;
4562 	}
4563 
4564 	/*
4565 	 * We're in the normal recovery path. Issue a warning if and only if the
4566 	 * CRC in the header is non-zero. This is an advisory warning and the
4567 	 * zero CRC check prevents warnings from being emitted when upgrading
4568 	 * the kernel from one that does not add CRCs by default.
4569 	 */
4570 	if (crc != rhead->h_crc) {
4571 		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
4572 			xfs_alert(log->l_mp,
4573 		"log record CRC mismatch: found 0x%x, expected 0x%x.",
4574 					le32_to_cpu(rhead->h_crc),
4575 					le32_to_cpu(crc));
4576 			xfs_hex_dump(dp, 32);
4577 		}
4578 
4579 		/*
4580 		 * If the filesystem is CRC enabled, this mismatch becomes a
4581 		 * fatal log corruption failure.
4582 		 */
4583 		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
4584 			return -EFSCORRUPTED;
4585 	}
4586 
4587 	error = xlog_unpack_data(rhead, dp, log);
4588 	if (error)
4589 		return error;
4590 
4591 	return xlog_recover_process_data(log, rhash, rhead, dp, pass);
4592 }
4593 
4594 STATIC int
4595 xlog_valid_rec_header(
4596 	struct xlog		*log,
4597 	struct xlog_rec_header	*rhead,
4598 	xfs_daddr_t		blkno)
4599 {
4600 	int			hlen;
4601 
4602 	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
4603 		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
4604 				XFS_ERRLEVEL_LOW, log->l_mp);
4605 		return -EFSCORRUPTED;
4606 	}
4607 	if (unlikely(
4608 	    (!rhead->h_version ||
4609 	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
4610 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
4611 			__func__, be32_to_cpu(rhead->h_version));
4612 		return -EIO;
4613 	}
4614 
4615 	/* LR body must have data or it wouldn't have been written */
4616 	hlen = be32_to_cpu(rhead->h_len);
4617 	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
4618 		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
4619 				XFS_ERRLEVEL_LOW, log->l_mp);
4620 		return -EFSCORRUPTED;
4621 	}
4622 	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
4623 		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
4624 				XFS_ERRLEVEL_LOW, log->l_mp);
4625 		return -EFSCORRUPTED;
4626 	}
4627 	return 0;
4628 }
4629 
4630 /*
4631  * Read the log from tail to head and process the log records found.
4632  * Handle the two cases where the tail and head are in the same cycle
4633  * and where the active portion of the log wraps around the end of
4634  * the physical log separately.  The pass parameter is passed through
4635  * to the routines called to process the data and is not looked at
4636  * here.
4637  */
4638 STATIC int
4639 xlog_do_recovery_pass(
4640 	struct xlog		*log,
4641 	xfs_daddr_t		head_blk,
4642 	xfs_daddr_t		tail_blk,
4643 	int			pass,
4644 	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
4645 {
4646 	xlog_rec_header_t	*rhead;
4647 	xfs_daddr_t		blk_no;
4648 	xfs_daddr_t		rhead_blk;
4649 	char			*offset;
4650 	xfs_buf_t		*hbp, *dbp;
4651 	int			error = 0, h_size, h_len;
4652 	int			bblks, split_bblks;
4653 	int			hblks, split_hblks, wrapped_hblks;
4654 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
4655 
4656 	ASSERT(head_blk != tail_blk);
4657 	rhead_blk = 0;
4658 
4659 	/*
4660 	 * Read the header of the tail block and get the iclog buffer size from
4661 	 * h_size.  Use this to tell how many sectors make up the log header.
4662 	 */
4663 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4664 		/*
4665 		 * When using variable length iclogs, read first sector of
4666 		 * iclog header and extract the header size from it.  Get a
4667 		 * new hbp that is the correct size.
4668 		 */
4669 		hbp = xlog_get_bp(log, 1);
4670 		if (!hbp)
4671 			return -ENOMEM;
4672 
4673 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
4674 		if (error)
4675 			goto bread_err1;
4676 
4677 		rhead = (xlog_rec_header_t *)offset;
4678 		error = xlog_valid_rec_header(log, rhead, tail_blk);
4679 		if (error)
4680 			goto bread_err1;
4681 
4682 		/*
4683 		 * xfsprogs has a bug where record length is based on lsunit but
4684 		 * h_size (iclog size) is hardcoded to 32k. Now that we
4685 		 * unconditionally CRC verify the unmount record, this means the
4686 		 * log buffer can be too small for the record and cause an
4687 		 * overrun.
4688 		 *
4689 		 * Detect this condition here. Use lsunit for the buffer size as
4690 		 * long as this looks like the mkfs case. Otherwise, return an
4691 		 * error to avoid a buffer overrun.
4692 		 */
4693 		h_size = be32_to_cpu(rhead->h_size);
4694 		h_len = be32_to_cpu(rhead->h_len);
4695 		if (h_len > h_size) {
4696 			if (h_len <= log->l_mp->m_logbsize &&
4697 			    be32_to_cpu(rhead->h_num_logops) == 1) {
4698 				xfs_warn(log->l_mp,
4699 		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
4700 					 h_size, log->l_mp->m_logbsize);
4701 				h_size = log->l_mp->m_logbsize;
4702 			} else
4703 				return -EFSCORRUPTED;
4704 		}
4705 
4706 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
4707 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
4708 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
4709 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
4710 				hblks++;
4711 			xlog_put_bp(hbp);
4712 			hbp = xlog_get_bp(log, hblks);
4713 		} else {
4714 			hblks = 1;
4715 		}
4716 	} else {
4717 		ASSERT(log->l_sectBBsize == 1);
4718 		hblks = 1;
4719 		hbp = xlog_get_bp(log, 1);
4720 		h_size = XLOG_BIG_RECORD_BSIZE;
4721 	}
4722 
4723 	if (!hbp)
4724 		return -ENOMEM;
4725 	dbp = xlog_get_bp(log, BTOBB(h_size));
4726 	if (!dbp) {
4727 		xlog_put_bp(hbp);
4728 		return -ENOMEM;
4729 	}
4730 
4731 	memset(rhash, 0, sizeof(rhash));
4732 	blk_no = rhead_blk = tail_blk;
4733 	if (tail_blk > head_blk) {
4734 		/*
4735 		 * Perform recovery around the end of the physical log.
4736 		 * When the head is not on the same cycle number as the tail,
4737 		 * we can't do a sequential recovery.
4738 		 */
4739 		while (blk_no < log->l_logBBsize) {
4740 			/*
4741 			 * Check for header wrapping around physical end-of-log
4742 			 */
4743 			offset = hbp->b_addr;
4744 			split_hblks = 0;
4745 			wrapped_hblks = 0;
4746 			if (blk_no + hblks <= log->l_logBBsize) {
4747 				/* Read header in one read */
4748 				error = xlog_bread(log, blk_no, hblks, hbp,
4749 						   &offset);
4750 				if (error)
4751 					goto bread_err2;
4752 			} else {
4753 				/* This LR is split across physical log end */
4754 				if (blk_no != log->l_logBBsize) {
4755 					/* some data before physical log end */
4756 					ASSERT(blk_no <= INT_MAX);
4757 					split_hblks = log->l_logBBsize - (int)blk_no;
4758 					ASSERT(split_hblks > 0);
4759 					error = xlog_bread(log, blk_no,
4760 							   split_hblks, hbp,
4761 							   &offset);
4762 					if (error)
4763 						goto bread_err2;
4764 				}
4765 
4766 				/*
4767 				 * Note: this black magic still works with
4768 				 * large sector sizes (non-512) only because:
4769 				 * - we increased the buffer size originally
4770 				 *   by 1 sector giving us enough extra space
4771 				 *   for the second read;
4772 				 * - the log start is guaranteed to be sector
4773 				 *   aligned;
4774 				 * - we read the log end (LR header start)
4775 				 *   _first_, then the log start (LR header end)
4776 				 *   - order is important.
4777 				 */
4778 				wrapped_hblks = hblks - split_hblks;
4779 				error = xlog_bread_offset(log, 0,
4780 						wrapped_hblks, hbp,
4781 						offset + BBTOB(split_hblks));
4782 				if (error)
4783 					goto bread_err2;
4784 			}
4785 			rhead = (xlog_rec_header_t *)offset;
4786 			error = xlog_valid_rec_header(log, rhead,
4787 						split_hblks ? blk_no : 0);
4788 			if (error)
4789 				goto bread_err2;
4790 
4791 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4792 			blk_no += hblks;
4793 
4794 			/* Read in data for log record */
4795 			if (blk_no + bblks <= log->l_logBBsize) {
4796 				error = xlog_bread(log, blk_no, bblks, dbp,
4797 						   &offset);
4798 				if (error)
4799 					goto bread_err2;
4800 			} else {
4801 				/* This log record is split across the
4802 				 * physical end of log */
4803 				offset = dbp->b_addr;
4804 				split_bblks = 0;
4805 				if (blk_no != log->l_logBBsize) {
4806 					/* some data is before the physical
4807 					 * end of log */
4808 					ASSERT(!wrapped_hblks);
4809 					ASSERT(blk_no <= INT_MAX);
4810 					split_bblks =
4811 						log->l_logBBsize - (int)blk_no;
4812 					ASSERT(split_bblks > 0);
4813 					error = xlog_bread(log, blk_no,
4814 							split_bblks, dbp,
4815 							&offset);
4816 					if (error)
4817 						goto bread_err2;
4818 				}
4819 
4820 				/*
4821 				 * Note: this black magic still works with
4822 				 * large sector sizes (non-512) only because:
4823 				 * - we increased the buffer size originally
4824 				 *   by 1 sector giving us enough extra space
4825 				 *   for the second read;
4826 				 * - the log start is guaranteed to be sector
4827 				 *   aligned;
4828 				 * - we read the log end (LR header start)
4829 				 *   _first_, then the log start (LR header end)
4830 				 *   - order is important.
4831 				 */
4832 				error = xlog_bread_offset(log, 0,
4833 						bblks - split_bblks, dbp,
4834 						offset + BBTOB(split_bblks));
4835 				if (error)
4836 					goto bread_err2;
4837 			}
4838 
4839 			error = xlog_recover_process(log, rhash, rhead, offset,
4840 						     pass);
4841 			if (error)
4842 				goto bread_err2;
4843 
4844 			blk_no += bblks;
4845 			rhead_blk = blk_no;
4846 		}
4847 
4848 		ASSERT(blk_no >= log->l_logBBsize);
4849 		blk_no -= log->l_logBBsize;
4850 		rhead_blk = blk_no;
4851 	}
4852 
4853 	/* read first part of physical log */
4854 	while (blk_no < head_blk) {
4855 		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4856 		if (error)
4857 			goto bread_err2;
4858 
4859 		rhead = (xlog_rec_header_t *)offset;
4860 		error = xlog_valid_rec_header(log, rhead, blk_no);
4861 		if (error)
4862 			goto bread_err2;
4863 
4864 		/* blocks in data section */
4865 		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4866 		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
4867 				   &offset);
4868 		if (error)
4869 			goto bread_err2;
4870 
4871 		error = xlog_recover_process(log, rhash, rhead, offset, pass);
4872 		if (error)
4873 			goto bread_err2;
4874 
4875 		blk_no += bblks + hblks;
4876 		rhead_blk = blk_no;
4877 	}
4878 
4879  bread_err2:
4880 	xlog_put_bp(dbp);
4881  bread_err1:
4882 	xlog_put_bp(hbp);
4883 
4884 	if (error && first_bad)
4885 		*first_bad = rhead_blk;
4886 
4887 	return error;
4888 }
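
/*
 * The split-read handling above, reduced to its essentials: reading
 * nbblks basic blocks starting at blk_no from a circular log of
 * logbbsize blocks takes at most two contiguous reads, the end of the
 * log first and then the wrap from block zero.  The bread callback is
 * a hypothetical stand-in for xlog_bread()/xlog_bread_offset().
 */
#include <stdint.h>

typedef int64_t model_daddr_t;

static int
model_read_wrapped(model_daddr_t blk_no, int nbblks, int logbbsize,
		   char *dest,
		   int (*bread)(model_daddr_t blk, int nbblks, char *dest))
{
	if (blk_no + nbblks > logbbsize) {
		/* blocks between blk_no and the physical end of the log */
		int	split = logbbsize - (int)blk_no;

		if (split) {
			int	error = bread(blk_no, split, dest);

			if (error)
				return error;
			dest += split * 512;	/* BBTOB(split) */
		}
		/* the remainder wraps around to the start of the log */
		blk_no = 0;
		nbblks -= split;
	}
	return bread(blk_no, nbblks, dest);
}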
4889 
4890 /*
4891  * Do the recovery of the log.  We actually do this in two phases.
4892  * The two passes are necessary in order to implement the function
4893  * of cancelling a record written into the log.  The first pass
4894  * determines those things which have been cancelled, and the
4895  * second pass replays log items normally except for those which
4896  * have been cancelled.  The handling of the replay and cancellations
4897  * takes place in the log item type specific routines.
4898  *
4899  * The table of items which have cancel records in the log is allocated
4900  * and freed at this level, since only here do we know when all of
4901  * the log recovery has been completed.
4902  */
4903 STATIC int
4904 xlog_do_log_recovery(
4905 	struct xlog	*log,
4906 	xfs_daddr_t	head_blk,
4907 	xfs_daddr_t	tail_blk)
4908 {
4909 	int		error, i;
4910 
4911 	ASSERT(head_blk != tail_blk);
4912 
4913 	/*
4914 	 * First do a pass to find all of the cancelled buf log items.
4915 	 * Store them in the buf_cancel_table for use in the second pass.
4916 	 */
4917 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
4918 						 sizeof(struct list_head),
4919 						 KM_SLEEP);
4920 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4921 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
4922 
4923 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4924 				      XLOG_RECOVER_PASS1, NULL);
4925 	if (error != 0) {
4926 		kmem_free(log->l_buf_cancel_table);
4927 		log->l_buf_cancel_table = NULL;
4928 		return error;
4929 	}
4930 	/*
4931 	 * Then do a second pass to actually recover the items in the log.
4932 	 * When it is complete free the table of buf cancel items.
4933 	 */
4934 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4935 				      XLOG_RECOVER_PASS2, NULL);
4936 #ifdef DEBUG
4937 	if (!error) {
4938 		int	i;
4939 
4940 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4941 			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
4942 	}
4943 #endif	/* DEBUG */
4944 
4945 	kmem_free(log->l_buf_cancel_table);
4946 	log->l_buf_cancel_table = NULL;
4947 
4948 	return error;
4949 }
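
/*
 * A toy model of the two passes coordinated above: pass 1 only records
 * which buffers were cancelled, pass 2 replays everything except
 * those.  The real table is a hashed list of xfs_buf_cancel entries
 * keyed by block number and length with reference counts; this sketch
 * collapses that to a fixed bitmap and so would alias colliding block
 * numbers.
 */
#include <stdbool.h>
#include <stdint.h>

#define MODEL_TABLE_SIZE	64

struct model_cancel_table {
	bool	cancelled[MODEL_TABLE_SIZE];
};

static void
model_pass1_cancel(struct model_cancel_table *t, uint32_t blkno)
{
	t->cancelled[blkno % MODEL_TABLE_SIZE] = true;
}

static bool
model_pass2_should_replay(const struct model_cancel_table *t, uint32_t blkno)
{
	return !t->cancelled[blkno % MODEL_TABLE_SIZE];
}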
4950 
4951 /*
4952  * Do the actual recovery
4953  */
4954 STATIC int
4955 xlog_do_recover(
4956 	struct xlog	*log,
4957 	xfs_daddr_t	head_blk,
4958 	xfs_daddr_t	tail_blk)
4959 {
4960 	int		error;
4961 	xfs_buf_t	*bp;
4962 	xfs_sb_t	*sbp;
4963 
4964 	/*
4965 	 * First replay the images in the log.
4966 	 */
4967 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
4968 	if (error)
4969 		return error;
4970 
4971 	/*
4972 	 * If IO errors happened during recovery, bail out.
4973 	 */
4974 	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4975 		return -EIO;
4976 	}
4977 
4978 	/*
4979 	 * We now update the tail_lsn since much of the recovery has completed
4980 	 * and there may be space available to use.  If there were no extent
4981 	 * frees or iunlinks, we can free up the entire log and set the tail_lsn
4982 	 * to be the last_sync_lsn.  This was set in xlog_find_tail to be the
4983 	 * lsn of the last known good LR on disk.  If there are extent frees
4984 	 * or iunlinks they will have some entries in the AIL; so we look at
4985 	 * the AIL to determine how to set the tail_lsn.
4986 	 */
4987 	xlog_assign_tail_lsn(log->l_mp);
4988 
4989 	/*
4990 	 * Now that we've finished replaying all buffer and inode
4991 	 * updates, re-read in the superblock and reverify it.
4992 	 */
4993 	bp = xfs_getsb(log->l_mp, 0);
4994 	XFS_BUF_UNDONE(bp);
4995 	ASSERT(!(XFS_BUF_ISWRITE(bp)));
4996 	XFS_BUF_READ(bp);
4997 	XFS_BUF_UNASYNC(bp);
4998 	bp->b_ops = &xfs_sb_buf_ops;
4999 
5000 	error = xfs_buf_submit_wait(bp);
5001 	if (error) {
5002 		if (!XFS_FORCED_SHUTDOWN(log->l_mp)) {
5003 			xfs_buf_ioerror_alert(bp, __func__);
5004 			ASSERT(0);
5005 		}
5006 		xfs_buf_relse(bp);
5007 		return error;
5008 	}
5009 
5010 	/* Convert superblock from on-disk format */
5011 	sbp = &log->l_mp->m_sb;
5012 	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
5013 	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
5014 	ASSERT(xfs_sb_good_version(sbp));
5015 	xfs_reinit_percpu_counters(log->l_mp);
5016 
5017 	xfs_buf_relse(bp);
5018 
5019 
5020 	xlog_recover_check_summary(log);
5021 
5022 	/* Normal transactions can now occur */
5023 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
5024 	return 0;
5025 }
5026 
5027 /*
5028  * Perform recovery and re-initialize some log variables in xlog_find_tail.
5029  *
5030  * Return error or zero.
5031  */
5032 int
5033 xlog_recover(
5034 	struct xlog	*log)
5035 {
5036 	xfs_daddr_t	head_blk, tail_blk;
5037 	int		error;
5038 
5039 	/* find the tail of the log */
5040 	error = xlog_find_tail(log, &head_blk, &tail_blk);
5041 	if (error)
5042 		return error;
5043 
5044 	/*
5045 	 * The superblock was read before the log was available and thus the LSN
5046 	 * could not be verified. Check the superblock LSN against the current
5047 	 * LSN now that it's known.
5048 	 */
5049 	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
5050 	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
5051 		return -EINVAL;
5052 
5053 	if (tail_blk != head_blk) {
5054 		/* There used to be a comment here:
5055 		 *
5056 		 * disallow recovery on read-only mounts.  note -- mount
5057 		 * checks for ENOSPC and turns it into an intelligent
5058 		 * error message.
5059 		 * ...but this is no longer true.  Now, unless you specify
5060 		 * NORECOVERY (in which case this function would never be
5061 		 * called), we just go ahead and recover.  We do this all
5062 		 * under the vfs layer, so we can get away with it unless
5063 		 * the device itself is read-only, in which case we fail.
5064 		 */
5065 		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
5066 			return error;
5067 		}
5068 
5069 		/*
5070 		 * Version 5 superblock log feature mask validation. We know the
5071 		 * log is dirty so check if there are any unknown log features
5072 		 * in what we need to recover. If there are unknown features
5073 		 * (e.g. unsupported transactions), then simply reject the
5074 		 * attempt at recovery before touching anything.
5075 		 */
5076 		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
5077 		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
5078 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
5079 			xfs_warn(log->l_mp,
5080 "Superblock has unknown incompatible log features (0x%x) enabled.",
5081 				(log->l_mp->m_sb.sb_features_log_incompat &
5082 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
5083 			xfs_warn(log->l_mp,
5084 "The log can not be fully and/or safely recovered by this kernel.");
5085 			xfs_warn(log->l_mp,
5086 "Please recover the log on a kernel that supports the unknown features.");
5087 			return -EINVAL;
5088 		}
5089 
5090 		/*
5091 		 * Delay log recovery if the debug hook is set. This is debug
5092 		 * instrumentation to coordinate simulation of I/O failures with
5093 		 * log recovery.
5094 		 */
5095 		if (xfs_globals.log_recovery_delay) {
5096 			xfs_notice(log->l_mp,
5097 				"Delaying log recovery for %d seconds.",
5098 				xfs_globals.log_recovery_delay);
5099 			msleep(xfs_globals.log_recovery_delay * 1000);
5100 		}
5101 
5102 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
5103 				log->l_mp->m_logname ? log->l_mp->m_logname
5104 						     : "internal");
5105 
5106 		error = xlog_do_recover(log, head_blk, tail_blk);
5107 		log->l_flags |= XLOG_RECOVERY_NEEDED;
5108 	}
5109 	return error;
5110 }
5111 
5112 /*
5113  * In the first part of recovery we replay inodes and buffers and build
5114  * up the list of extent free items which need to be processed.  Here
5115  * we process the extent free items and clean up the on disk unlinked
5116  * inode lists.  This is separated from the first part of recovery so
5117  * that the root and real-time bitmap inodes can be read in from disk in
5118  * between the two stages.  This is necessary so that we can free space
5119  * in the real-time portion of the file system.
5120  */
5121 int
5122 xlog_recover_finish(
5123 	struct xlog	*log)
5124 {
5125 	/*
5126 	 * Now we're ready to do the transactions needed for the
5127 	 * rest of recovery.  Start with completing all the extent
5128 	 * free intent records and then process the unlinked inode
5129 	 * lists.  At this point, we essentially run in normal mode
5130 	 * except that we're still performing recovery actions
5131 	 * rather than accepting new requests.
5132 	 */
5133 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
5134 		int	error;
5135 		error = xlog_recover_process_efis(log);
5136 		if (error) {
5137 			xfs_alert(log->l_mp, "Failed to recover EFIs");
5138 			return error;
5139 		}
5140 		/*
5141 		 * Sync the log to get all the EFIs out of the AIL.
5142 		 * This isn't absolutely necessary, but it helps in
5143 		 * case the unlink transactions would have problems
5144 		 * pushing the EFIs out of the way.
5145 		 */
5146 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
5147 
5148 		xlog_recover_process_iunlinks(log);
5149 
5150 		xlog_recover_check_summary(log);
5151 
5152 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
5153 				log->l_mp->m_logname ? log->l_mp->m_logname
5154 						     : "internal");
5155 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
5156 	} else {
5157 		xfs_info(log->l_mp, "Ending clean mount");
5158 	}
5159 	return 0;
5160 }
5161 
5162 int
5163 xlog_recover_cancel(
5164 	struct xlog	*log)
5165 {
5166 	int		error = 0;
5167 
5168 	if (log->l_flags & XLOG_RECOVERY_NEEDED)
5169 		error = xlog_recover_cancel_efis(log);
5170 
5171 	return error;
5172 }
5173 
5174 #if defined(DEBUG)
5175 /*
5176  * Read all of the agf and agi counters and check that they
5177  * are consistent with the superblock counters.
5178  */
5179 void
5180 xlog_recover_check_summary(
5181 	struct xlog	*log)
5182 {
5183 	xfs_mount_t	*mp;
5184 	xfs_agf_t	*agfp;
5185 	xfs_buf_t	*agfbp;
5186 	xfs_buf_t	*agibp;
5187 	xfs_agnumber_t	agno;
5188 	__uint64_t	freeblks;
5189 	__uint64_t	itotal;
5190 	__uint64_t	ifree;
5191 	int		error;
5192 
5193 	mp = log->l_mp;
5194 
5195 	freeblks = 0LL;
5196 	itotal = 0LL;
5197 	ifree = 0LL;
5198 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5199 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
5200 		if (error) {
5201 			xfs_alert(mp, "%s agf read failed agno %d error %d",
5202 						__func__, agno, error);
5203 		} else {
5204 			agfp = XFS_BUF_TO_AGF(agfbp);
5205 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
5206 				    be32_to_cpu(agfp->agf_flcount);
5207 			xfs_buf_relse(agfbp);
5208 		}
5209 
5210 		error = xfs_read_agi(mp, NULL, agno, &agibp);
5211 		if (error) {
5212 			xfs_alert(mp, "%s agi read failed agno %d error %d",
5213 						__func__, agno, error);
5214 		} else {
5215 			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);
5216 
5217 			itotal += be32_to_cpu(agi->agi_count);
5218 			ifree += be32_to_cpu(agi->agi_freecount);
5219 			xfs_buf_relse(agibp);
5220 		}
5221 	}
5222 }
5223 #endif /* DEBUG */
5224