xref: /linux/fs/xfs/xfs_log_recover.c (revision ef69f8d2ff09518657c3ecaf2db8408c16549829)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_bit.h"
25 #include "xfs_sb.h"
26 #include "xfs_mount.h"
27 #include "xfs_da_format.h"
28 #include "xfs_da_btree.h"
29 #include "xfs_inode.h"
30 #include "xfs_trans.h"
31 #include "xfs_log.h"
32 #include "xfs_log_priv.h"
33 #include "xfs_log_recover.h"
34 #include "xfs_inode_item.h"
35 #include "xfs_extfree_item.h"
36 #include "xfs_trans_priv.h"
37 #include "xfs_alloc.h"
38 #include "xfs_ialloc.h"
39 #include "xfs_quota.h"
40 #include "xfs_cksum.h"
41 #include "xfs_trace.h"
42 #include "xfs_icache.h"
43 #include "xfs_bmap_btree.h"
44 #include "xfs_error.h"
45 #include "xfs_dir2.h"
46 #include "xfs_rmap_item.h"
47 #include "xfs_buf_item.h"
48 #include "xfs_refcount_item.h"
49 #include "xfs_bmap_item.h"
50 
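/* Midpoint of two block numbers; used by the binary searches below. */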
51 #define BLK_AVG(blk1, blk2)	(((blk1) + (blk2)) >> 1)
52 
53 STATIC int
54 xlog_find_zeroed(
55 	struct xlog	*,
56 	xfs_daddr_t	*);
57 STATIC int
58 xlog_clear_stale_blocks(
59 	struct xlog	*,
60 	xfs_lsn_t);
61 #if defined(DEBUG)
62 STATIC void
63 xlog_recover_check_summary(
64 	struct xlog *);
65 #else
66 #define	xlog_recover_check_summary(log)
67 #endif
68 STATIC int
69 xlog_do_recovery_pass(
70 	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
71 
72 /*
73  * This structure is used during recovery to record the buf log items which
74  * have been canceled and should not be replayed.
75  */
76 struct xfs_buf_cancel {
77 	xfs_daddr_t		bc_blkno;
78 	uint			bc_len;
79 	int			bc_refcount;
80 	struct list_head	bc_list;
81 };
82 
83 /*
84  * Sector aligned buffer routines for buffer create/read/write/access
85  */
86 
87 /*
88  * Verify the log-relative block number and length in basic blocks are valid for
89  * an operation involving the given XFS log buffer. Returns true if the fields
90  * are valid, false otherwise.
91  */
92 static inline bool
93 xlog_verify_bp(
94 	struct xlog	*log,
95 	xfs_daddr_t	blk_no,
96 	int		bbcount)
97 {
98 	if (blk_no < 0 || blk_no >= log->l_logBBsize)
99 		return false;
100 	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
101 		return false;
102 	return true;
103 }
104 
105 /*
106  * Allocate a buffer to hold log data.  The buffer needs to be able
107  * to map to a range of nbblks basic blocks at any valid (basic
108  * block) offset within the log.
109  */
110 STATIC xfs_buf_t *
111 xlog_get_bp(
112 	struct xlog	*log,
113 	int		nbblks)
114 {
115 	struct xfs_buf	*bp;
116 
117 	/*
118 	 * Pass log block 0 since we don't have an address yet; the buffer
119 	 * will be verified on read.
120 	 */
121 	if (!xlog_verify_bp(log, 0, nbblks)) {
122 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
123 			nbblks);
124 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
125 		return NULL;
126 	}
127 
128 	/*
129 	 * We do log I/O in units of log sectors (a power-of-2
130 	 * multiple of the basic block size), so we round up the
131 	 * requested size to accommodate the basic blocks required
132 	 * for complete log sectors.
133 	 *
134 	 * In addition, the buffer may be used for a non-sector-
135 	 * aligned block offset, in which case an I/O of the
136 	 * requested size could extend beyond the end of the
137 	 * buffer.  If the requested size is only 1 basic block it
138 	 * will never straddle a sector boundary, so this won't be
139 	 * an issue.  Nor will this be a problem if the log I/O is
140 	 * done in basic blocks (sector size 1).  But otherwise we
141 	 * extend the buffer by one extra log sector to ensure
142 	 * there's space to accommodate this possibility.
143 	 */
144 	if (nbblks > 1 && log->l_sectBBsize > 1)
145 		nbblks += log->l_sectBBsize;
146 	nbblks = round_up(nbblks, log->l_sectBBsize);
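	/*
	 * Example: with 4k sectors (l_sectBBsize == 8), a request for 10
	 * basic blocks becomes 10 + 8 = 18, rounded up to 24.
	 */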
147 
148 	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
149 	if (bp)
150 		xfs_buf_unlock(bp);
151 	return bp;
152 }
153 
154 STATIC void
155 xlog_put_bp(
156 	xfs_buf_t	*bp)
157 {
158 	xfs_buf_free(bp);
159 }
160 
161 /*
162  * Return the address of the start of the given block number's data
163  * in a log buffer.  The buffer covers a log sector-aligned region.
164  */
165 STATIC char *
166 xlog_align(
167 	struct xlog	*log,
168 	xfs_daddr_t	blk_no,
169 	int		nbblks,
170 	struct xfs_buf	*bp)
171 {
172 	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
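	/* l_sectBBsize is a power of two, so the mask is blk_no % sector size */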
173 
174 	ASSERT(offset + nbblks <= bp->b_length);
175 	return bp->b_addr + BBTOB(offset);
176 }
177 
178 
179 /*
180  * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
181  */
182 STATIC int
183 xlog_bread_noalign(
184 	struct xlog	*log,
185 	xfs_daddr_t	blk_no,
186 	int		nbblks,
187 	struct xfs_buf	*bp)
188 {
189 	int		error;
190 
191 	if (!xlog_verify_bp(log, blk_no, nbblks)) {
192 		xfs_warn(log->l_mp,
193 			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
194 			 blk_no, nbblks);
195 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
196 		return -EFSCORRUPTED;
197 	}
198 
199 	blk_no = round_down(blk_no, log->l_sectBBsize);
200 	nbblks = round_up(nbblks, log->l_sectBBsize);
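	/* Expand the request to whole log sectors so the device sees aligned I/O. */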
201 
202 	ASSERT(nbblks > 0);
203 	ASSERT(nbblks <= bp->b_length);
204 
205 	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
206 	bp->b_flags |= XBF_READ;
207 	bp->b_io_length = nbblks;
208 	bp->b_error = 0;
209 
210 	error = xfs_buf_submit_wait(bp);
211 	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
212 		xfs_buf_ioerror_alert(bp, __func__);
213 	return error;
214 }
215 
216 STATIC int
217 xlog_bread(
218 	struct xlog	*log,
219 	xfs_daddr_t	blk_no,
220 	int		nbblks,
221 	struct xfs_buf	*bp,
222 	char		**offset)
223 {
224 	int		error;
225 
226 	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
227 	if (error)
228 		return error;
229 
230 	*offset = xlog_align(log, blk_no, nbblks, bp);
231 	return 0;
232 }
233 
234 /*
235  * Read at an offset into the buffer. Returns with the buffer in its original
236  * state regardless of the result of the read.
237  */
238 STATIC int
239 xlog_bread_offset(
240 	struct xlog	*log,
241 	xfs_daddr_t	blk_no,		/* block to read from */
242 	int		nbblks,		/* blocks to read */
243 	struct xfs_buf	*bp,
244 	char		*offset)
245 {
246 	char		*orig_offset = bp->b_addr;
247 	int		orig_len = BBTOB(bp->b_length);
248 	int		error, error2;
249 
250 	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
251 	if (error)
252 		return error;
253 
254 	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
255 
256 	/* must reset buffer pointer even on error */
257 	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
258 	if (error)
259 		return error;
260 	return error2;
261 }
262 
263 /*
264  * Write out the buffer at the given block for the given number of blocks.
265  * The buffer is kept locked across the write and is returned locked.
266  * This can only be used for synchronous log writes.
267  */
268 STATIC int
269 xlog_bwrite(
270 	struct xlog	*log,
271 	xfs_daddr_t	blk_no,
272 	int		nbblks,
273 	struct xfs_buf	*bp)
274 {
275 	int		error;
276 
277 	if (!xlog_verify_bp(log, blk_no, nbblks)) {
278 		xfs_warn(log->l_mp,
279 			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
280 			 blk_no, nbblks);
281 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
282 		return -EFSCORRUPTED;
283 	}
284 
285 	blk_no = round_down(blk_no, log->l_sectBBsize);
286 	nbblks = round_up(nbblks, log->l_sectBBsize);
287 
288 	ASSERT(nbblks > 0);
289 	ASSERT(nbblks <= bp->b_length);
290 
291 	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
292 	xfs_buf_hold(bp);
293 	xfs_buf_lock(bp);
294 	bp->b_io_length = nbblks;
295 	bp->b_error = 0;
296 
297 	error = xfs_bwrite(bp);
298 	if (error)
299 		xfs_buf_ioerror_alert(bp, __func__);
300 	xfs_buf_relse(bp);
301 	return error;
302 }
303 
304 #ifdef DEBUG
305 /*
306  * dump debug superblock and log record information
307  */
308 STATIC void
309 xlog_header_check_dump(
310 	xfs_mount_t		*mp,
311 	xlog_rec_header_t	*head)
312 {
313 	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
314 		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
315 	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
316 		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
317 }
318 #else
319 #define xlog_header_check_dump(mp, head)
320 #endif
321 
322 /*
323  * check log record header for recovery
324  */
325 STATIC int
326 xlog_header_check_recover(
327 	xfs_mount_t		*mp,
328 	xlog_rec_header_t	*head)
329 {
330 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
331 
332 	/*
333 	 * IRIX doesn't write the h_fmt field and leaves it zeroed
334 	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
335 	 * a dirty log created in IRIX.
336 	 */
337 	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
338 		xfs_warn(mp,
339 	"dirty log written in incompatible format - can't recover");
340 		xlog_header_check_dump(mp, head);
341 		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
342 				 XFS_ERRLEVEL_HIGH, mp);
343 		return -EFSCORRUPTED;
344 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
345 		xfs_warn(mp,
346 	"dirty log entry has mismatched uuid - can't recover");
347 		xlog_header_check_dump(mp, head);
348 		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
349 				 XFS_ERRLEVEL_HIGH, mp);
350 		return -EFSCORRUPTED;
351 	}
352 	return 0;
353 }
354 
355 /*
356  * check a log record header against the uuid of the filesystem being mounted
357  */
358 STATIC int
359 xlog_header_check_mount(
360 	xfs_mount_t		*mp,
361 	xlog_rec_header_t	*head)
362 {
363 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
364 
365 	if (uuid_is_null(&head->h_fs_uuid)) {
366 		/*
367 		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
368 		 * h_fs_uuid is null, we assume this log was last mounted
369 		 * by IRIX and continue.
370 		 */
371 		xfs_warn(mp, "null uuid in log - IRIX style log");
372 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
373 		xfs_warn(mp, "log has mismatched uuid - can't recover");
374 		xlog_header_check_dump(mp, head);
375 		XFS_ERROR_REPORT("xlog_header_check_mount",
376 				 XFS_ERRLEVEL_HIGH, mp);
377 		return -EFSCORRUPTED;
378 	}
379 	return 0;
380 }
381 
382 STATIC void
383 xlog_recover_iodone(
384 	struct xfs_buf	*bp)
385 {
386 	if (bp->b_error) {
387 		/*
388 		 * We're not going to bother about retrying
389 		 * this during recovery. One strike!
390 		 */
391 		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
392 			xfs_buf_ioerror_alert(bp, __func__);
393 			xfs_force_shutdown(bp->b_target->bt_mount,
394 						SHUTDOWN_META_IO_ERROR);
395 		}
396 	}
397 
398 	/*
399 	 * On v5 superblocks, a buf log item (bli) could be attached to
400 	 * update the metadata LSN.  Clean it up.
401 	 */
402 	if (bp->b_fspriv)
403 		xfs_buf_item_relse(bp);
404 	ASSERT(bp->b_fspriv == NULL);
405 
406 	bp->b_iodone = NULL;
407 	xfs_buf_ioend(bp);
408 }
409 
410 /*
411  * This routine finds (to an approximation) the first block in the physical
412  * log which contains the given cycle.  It uses a binary search algorithm.
413  * Note that the algorithm cannot be perfect because the disk will not
414  * necessarily be perfect.
415  */
416 STATIC int
417 xlog_find_cycle_start(
418 	struct xlog	*log,
419 	struct xfs_buf	*bp,
420 	xfs_daddr_t	first_blk,
421 	xfs_daddr_t	*last_blk,
422 	uint		cycle)
423 {
424 	char		*offset;
425 	xfs_daddr_t	mid_blk;
426 	xfs_daddr_t	end_blk;
427 	uint		mid_cycle;
428 	int		error;
429 
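	/*
	 * Binary search invariant: the block at first_blk does not carry
	 * 'cycle' while the block at end_blk does (callers arrange this for
	 * the initial values), so the loop converges on the boundary.
	 */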
430 	end_blk = *last_blk;
431 	mid_blk = BLK_AVG(first_blk, end_blk);
432 	while (mid_blk != first_blk && mid_blk != end_blk) {
433 		error = xlog_bread(log, mid_blk, 1, bp, &offset);
434 		if (error)
435 			return error;
436 		mid_cycle = xlog_get_cycle(offset);
437 		if (mid_cycle == cycle)
438 			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
439 		else
440 			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
441 		mid_blk = BLK_AVG(first_blk, end_blk);
442 	}
443 	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
444 	       (mid_blk == end_blk && mid_blk-1 == first_blk));
445 
446 	*last_blk = end_blk;
447 
448 	return 0;
449 }
450 
451 /*
452  * Check that a range of blocks does not contain stop_on_cycle_no.
453  * Fill in *new_blk with the block offset where such a block is
454  * found, or with -1 (an invalid block number) if there is no such
455  * block in the range.  The scan needs to occur from front to back
456  * and the pointer into the region must be updated since a later
457  * routine will need to perform another test.
458  */
459 STATIC int
460 xlog_find_verify_cycle(
461 	struct xlog	*log,
462 	xfs_daddr_t	start_blk,
463 	int		nbblks,
464 	uint		stop_on_cycle_no,
465 	xfs_daddr_t	*new_blk)
466 {
467 	xfs_daddr_t	i, j;
468 	uint		cycle;
469 	xfs_buf_t	*bp;
470 	xfs_daddr_t	bufblks;
471 	char		*buf = NULL;
472 	int		error = 0;
473 
474 	/*
475 	 * Greedily allocate a buffer big enough to handle the full
476 	 * range of basic blocks we'll be examining.  If that fails,
477 	 * try a smaller size.  We need to be able to read at least
478 	 * a log sector, or we're out of luck.
479 	 */
480 	bufblks = 1 << ffs(nbblks);
481 	while (bufblks > log->l_logBBsize)
482 		bufblks >>= 1;
483 	while (!(bp = xlog_get_bp(log, bufblks))) {
484 		bufblks >>= 1;
485 		if (bufblks < log->l_sectBBsize)
486 			return -ENOMEM;
487 	}
488 
489 	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
490 		int	bcount;
491 
492 		bcount = min(bufblks, (start_blk + nbblks - i));
493 
494 		error = xlog_bread(log, i, bcount, bp, &buf);
495 		if (error)
496 			goto out;
497 
498 		for (j = 0; j < bcount; j++) {
499 			cycle = xlog_get_cycle(buf);
500 			if (cycle == stop_on_cycle_no) {
501 				*new_blk = i+j;
502 				goto out;
503 			}
504 
505 			buf += BBSIZE;
506 		}
507 	}
508 
509 	*new_blk = -1;
510 
511 out:
512 	xlog_put_bp(bp);
513 	return error;
514 }
515 
516 /*
517  * Potentially backup over partial log record write.
518  *
519  * In the typical case, last_blk is the number of the block directly after
520  * a good log record.  Therefore, we subtract one to get the block number
521  * of the last block in the given buffer.  extra_bblks contains the number
522  * of blocks we would have read on a previous read.  This happens when the
523  * last log record is split over the end of the physical log.
524  *
525  * extra_bblks is the number of blocks potentially verified on a previous
526  * call to this routine.
527  */
528 STATIC int
529 xlog_find_verify_log_record(
530 	struct xlog		*log,
531 	xfs_daddr_t		start_blk,
532 	xfs_daddr_t		*last_blk,
533 	int			extra_bblks)
534 {
535 	xfs_daddr_t		i;
536 	xfs_buf_t		*bp;
537 	char			*offset = NULL;
538 	xlog_rec_header_t	*head = NULL;
539 	int			error = 0;
540 	int			smallmem = 0;
541 	int			num_blks = *last_blk - start_blk;
542 	int			xhdrs;
543 
544 	ASSERT(start_blk != 0 || *last_blk != start_blk);
545 
546 	if (!(bp = xlog_get_bp(log, num_blks))) {
547 		if (!(bp = xlog_get_bp(log, 1)))
548 			return -ENOMEM;
549 		smallmem = 1;
550 	} else {
551 		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
552 		if (error)
553 			goto out;
554 		offset += ((num_blks - 1) << BBSHIFT);
555 	}
556 
557 	for (i = (*last_blk) - 1; i >= 0; i--) {
558 		if (i < start_blk) {
559 			/* valid log record not found */
560 			xfs_warn(log->l_mp,
561 		"Log inconsistent (didn't find previous header)");
562 			ASSERT(0);
563 			error = -EIO;
564 			goto out;
565 		}
566 
567 		if (smallmem) {
568 			error = xlog_bread(log, i, 1, bp, &offset);
569 			if (error)
570 				goto out;
571 		}
572 
573 		head = (xlog_rec_header_t *)offset;
574 
575 		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
576 			break;
577 
578 		if (!smallmem)
579 			offset -= BBSIZE;
580 	}
581 
582 	/*
583 	 * We hit the beginning of the physical log & still no header.  Return
584 	 * to caller.  If the caller can handle a return of 1, then this routine
585 	 * will be called again for the end of the physical log.
586 	 */
587 	if (i == -1) {
588 		error = 1;
589 		goto out;
590 	}
591 
592 	/*
593 	 * We have the final block of the good log (the first block
594 	 * of the log record _before_ the head).  So we check the uuid.
595 	 */
596 	if ((error = xlog_header_check_mount(log->l_mp, head)))
597 		goto out;
598 
599 	/*
600 	 * We may have found a log record header before we expected one.
601 	 * last_blk will be the 1st block # with a given cycle #.  We may end
602 	 * up reading an entire log record.  In this case, we don't want to
603 	 * reset last_blk.  Only when last_blk points in the middle of a log
604 	 * record do we update last_blk.
605 	 */
606 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
607 		uint	h_size = be32_to_cpu(head->h_size);
608 
609 		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
610 		if (h_size % XLOG_HEADER_CYCLE_SIZE)
611 			xhdrs++;
612 	} else {
613 		xhdrs = 1;
614 	}
615 
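	/*
	 * A complete record at block i occupies xhdrs header blocks plus
	 * BTOBB(h_len) data blocks.  If the distance from i to *last_blk
	 * (plus blocks verified on a previous call) is not exactly that
	 * size, *last_blk points into the middle of the record, so pull
	 * it back to the header block.
	 */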
616 	if (*last_blk - i + extra_bblks !=
617 	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
618 		*last_blk = i;
619 
620 out:
621 	xlog_put_bp(bp);
622 	return error;
623 }
624 
625 /*
626  * Head is defined to be the point of the log where the next log write
627  * could go.  This means that incomplete LR writes at the end are
628  * eliminated when calculating the head.  We aren't guaranteed that previous
629  * LRs have complete transactions.  We only know that a cycle number of
630  * current cycle number -1 won't be present in the log if we start writing
631  * from our current block number.
632  *
633  * last_blk contains the block number of the first block with a given
634  * cycle number.
635  *
636  * Return: zero if normal, non-zero if error.
637  */
638 STATIC int
639 xlog_find_head(
640 	struct xlog	*log,
641 	xfs_daddr_t	*return_head_blk)
642 {
643 	xfs_buf_t	*bp;
644 	char		*offset;
645 	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
646 	int		num_scan_bblks;
647 	uint		first_half_cycle, last_half_cycle;
648 	uint		stop_on_cycle;
649 	int		error, log_bbnum = log->l_logBBsize;
650 
651 	/* Is the end of the log device zeroed? */
652 	error = xlog_find_zeroed(log, &first_blk);
653 	if (error < 0) {
654 		xfs_warn(log->l_mp, "empty log check failed");
655 		return error;
656 	}
657 	if (error == 1) {
658 		*return_head_blk = first_blk;
659 
660 		/* Is the whole lot zeroed? */
661 		if (!first_blk) {
662 			/* Linux XFS shouldn't generate totally zeroed logs -
663 			 * mkfs etc write a dummy unmount record to a fresh
664 			 * log so we can store the uuid in there
665 			 */
666 			xfs_warn(log->l_mp, "totally zeroed log");
667 		}
668 
669 		return 0;
670 	}
671 
672 	first_blk = 0;			/* get cycle # of 1st block */
673 	bp = xlog_get_bp(log, 1);
674 	if (!bp)
675 		return -ENOMEM;
676 
677 	error = xlog_bread(log, 0, 1, bp, &offset);
678 	if (error)
679 		goto bp_err;
680 
681 	first_half_cycle = xlog_get_cycle(offset);
682 
683 	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
684 	error = xlog_bread(log, last_blk, 1, bp, &offset);
685 	if (error)
686 		goto bp_err;
687 
688 	last_half_cycle = xlog_get_cycle(offset);
689 	ASSERT(last_half_cycle != 0);
690 
691 	/*
692 	 * If the 1st half cycle number is equal to the last half cycle number,
693 	 * then the entire log is stamped with the same cycle number.  In this
694 	 * case, head_blk can't be set to zero (which makes sense).  The below
695 	 * math doesn't work out properly with head_blk equal to zero.  Instead,
696 	 * we set it to log_bbnum which is an invalid block number, but this
697 	 * value makes the math correct.  If head_blk doesn't change through
698 	 * all the tests below, *head_blk is set to zero at the very end rather
699 	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
700 	 * in a circular file.
701 	 */
702 	if (first_half_cycle == last_half_cycle) {
703 		/*
704 		 * In this case we believe that the entire log should have
705 		 * cycle number last_half_cycle.  We need to scan backwards
706 		 * from the end verifying that there are no holes still
707 		 * containing last_half_cycle - 1.  If we find such a hole,
708 		 * then the start of that hole will be the new head.  The
709 		 * simple case looks like
710 		 *        x | x ... | x - 1 | x
711 		 * Another case that fits this picture would be
712 		 *        x | x + 1 | x ... | x
713 		 * In this case the head really is somewhere at the end of the
714 		 * log, as one of the latest writes at the beginning was
715 		 * incomplete.
716 		 * One more case is
717 		 *        x | x + 1 | x ... | x - 1 | x
718 		 * This is really the combination of the above two cases, and
719 		 * the head has to end up at the start of the x-1 hole at the
720 		 * end of the log.
721 		 *
722 		 * In the 256k log case, we will read from the beginning to the
723 		 * end of the log and search for cycle numbers equal to x-1.
724 		 * We don't worry about the x+1 blocks that we encounter,
725 		 * because we know that they cannot be the head since the log
726 		 * started with x.
727 		 */
728 		head_blk = log_bbnum;
729 		stop_on_cycle = last_half_cycle - 1;
730 	} else {
731 		/*
732 		 * In this case we want to find the first block with cycle
733 		 * number matching last_half_cycle.  We expect the log to be
734 		 * some variation on
735 		 *        x + 1 ... | x ... | x
736 		 * The first block with cycle number x (last_half_cycle) will
737 		 * be where the new head belongs.  First we do a binary search
738 		 * for the first occurrence of last_half_cycle.  The binary
739 		 * search may not be totally accurate, so then we scan back
740 		 * from there looking for occurrences of last_half_cycle before
741 		 * us.  If that backwards scan wraps around the beginning of
742 		 * the log, then we look for occurrences of last_half_cycle - 1
743 		 * at the end of the log.  The cases we're looking for look
744 		 * like
745 		 *                               v binary search stopped here
746 		 *        x + 1 ... | x | x + 1 | x ... | x
747 		 *                   ^ but we want to locate this spot
748 		 * or
749 		 *        <---------> less than scan distance
750 		 *        x + 1 ... | x ... | x - 1 | x
751 		 *                           ^ we want to locate this spot
752 		 */
753 		stop_on_cycle = last_half_cycle;
754 		if ((error = xlog_find_cycle_start(log, bp, first_blk,
755 						&head_blk, last_half_cycle)))
756 			goto bp_err;
757 	}
758 
759 	/*
760 	 * Now validate the answer.  Scan back some number of maximum possible
761 	 * blocks and make sure each one has the expected cycle number.  The
762 	 * maximum is determined by the total possible amount of buffering
763 	 * in the in-core log.  The following number can be made tighter if
764 	 * we actually look at the block size of the filesystem.
765 	 */
766 	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
767 	if (head_blk >= num_scan_bblks) {
768 		/*
769 		 * We are guaranteed that the entire check can be performed
770 		 * in one buffer.
771 		 */
772 		start_blk = head_blk - num_scan_bblks;
773 		if ((error = xlog_find_verify_cycle(log,
774 						start_blk, num_scan_bblks,
775 						stop_on_cycle, &new_blk)))
776 			goto bp_err;
777 		if (new_blk != -1)
778 			head_blk = new_blk;
779 	} else {		/* need to read 2 parts of log */
780 		/*
781 		 * We are going to scan backwards in the log in two parts.
782 		 * First we scan the physical end of the log.  In this part
783 		 * of the log, we are looking for blocks with cycle number
784 		 * last_half_cycle - 1.
785 		 * If we find one, then we know that the log starts there, as
786 		 * we've found a hole that didn't get written in going around
787 		 * the end of the physical log.  The simple case for this is
788 		 *        x + 1 ... | x ... | x - 1 | x
789 		 *        <---------> less than scan distance
790 		 * If all of the blocks at the end of the log have cycle number
791 		 * last_half_cycle, then we check the blocks at the start of
792 		 * the log looking for occurrences of last_half_cycle.  If we
793 		 * find one, then our current estimate for the location of the
794 		 * first occurrence of last_half_cycle is wrong and we move
795 		 * back to the hole we've found.  This case looks like
796 		 *        x + 1 ... | x | x + 1 | x ...
797 		 *                               ^ binary search stopped here
798 		 * Another case we need to handle that only occurs in 256k
799 		 * logs is
800 		 *        x + 1 ... | x ... | x+1 | x ...
801 		 *                   ^ binary search stops here
802 		 * In a 256k log, the scan at the end of the log will see the
803 		 * x + 1 blocks.  We need to skip past those since that is
804 		 * certainly not the head of the log.  By searching for
805 		 * last_half_cycle-1 we accomplish that.
806 		 */
807 		ASSERT(head_blk <= INT_MAX &&
808 			(xfs_daddr_t) num_scan_bblks >= head_blk);
809 		start_blk = log_bbnum - (num_scan_bblks - head_blk);
810 		if ((error = xlog_find_verify_cycle(log, start_blk,
811 					num_scan_bblks - (int)head_blk,
812 					(stop_on_cycle - 1), &new_blk)))
813 			goto bp_err;
814 		if (new_blk != -1) {
815 			head_blk = new_blk;
816 			goto validate_head;
817 		}
818 
819 		/*
820 		 * Scan beginning of log now.  The last part of the physical
821 		 * log is good.  This scan needs to verify that it doesn't find
822 		 * the last_half_cycle.
823 		 */
824 		start_blk = 0;
825 		ASSERT(head_blk <= INT_MAX);
826 		if ((error = xlog_find_verify_cycle(log,
827 					start_blk, (int)head_blk,
828 					stop_on_cycle, &new_blk)))
829 			goto bp_err;
830 		if (new_blk != -1)
831 			head_blk = new_blk;
832 	}
833 
834 validate_head:
835 	/*
836 	 * Now we need to make sure head_blk is not pointing to a block in
837 	 * the middle of a log record.
838 	 */
839 	num_scan_bblks = XLOG_REC_SHIFT(log);
840 	if (head_blk >= num_scan_bblks) {
841 		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
842 
843 		/* start ptr at last block ptr before head_blk */
844 		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
845 		if (error == 1)
846 			error = -EIO;
847 		if (error)
848 			goto bp_err;
849 	} else {
850 		start_blk = 0;
851 		ASSERT(head_blk <= INT_MAX);
852 		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
853 		if (error < 0)
854 			goto bp_err;
855 		if (error == 1) {
856 			/* We hit the beginning of the log during our search */
857 			start_blk = log_bbnum - (num_scan_bblks - head_blk);
858 			new_blk = log_bbnum;
859 			ASSERT(start_blk <= INT_MAX &&
860 				(xfs_daddr_t) log_bbnum-start_blk >= 0);
861 			ASSERT(head_blk <= INT_MAX);
862 			error = xlog_find_verify_log_record(log, start_blk,
863 							&new_blk, (int)head_blk);
864 			if (error == 1)
865 				error = -EIO;
866 			if (error)
867 				goto bp_err;
868 			if (new_blk != log_bbnum)
869 				head_blk = new_blk;
870 		} else if (error)
871 			goto bp_err;
872 	}
873 
874 	xlog_put_bp(bp);
875 	if (head_blk == log_bbnum)
876 		*return_head_blk = 0;
877 	else
878 		*return_head_blk = head_blk;
879 	/*
880 	 * When returning here, we have a good block number.  A bad block
881 	 * would mean that during a previous crash, we didn't have a clean break
882 	 * from cycle number N to cycle number N-1.  In this case, we need
883 	 * to find the first block with cycle number N-1.
884 	 */
885 	return 0;
886 
887  bp_err:
888 	xlog_put_bp(bp);
889 
890 	if (error)
891 		xfs_warn(log->l_mp, "failed to find log head");
892 	return error;
893 }
894 
895 /*
896  * Seek backwards in the log for log record headers.
897  *
898  * Given a starting log block, walk backwards until we find the provided number
899  * of records or hit the provided tail block. The return value is the number of
900  * records encountered or a negative error code. The log block and buffer
901  * pointer of the last record seen are returned in rblk and rhead respectively.
902  */
903 STATIC int
904 xlog_rseek_logrec_hdr(
905 	struct xlog		*log,
906 	xfs_daddr_t		head_blk,
907 	xfs_daddr_t		tail_blk,
908 	int			count,
909 	struct xfs_buf		*bp,
910 	xfs_daddr_t		*rblk,
911 	struct xlog_rec_header	**rhead,
912 	bool			*wrapped)
913 {
914 	int			i;
915 	int			error;
916 	int			found = 0;
917 	char			*offset = NULL;
918 	xfs_daddr_t		end_blk;
919 
920 	*wrapped = false;
921 
922 	/*
923 	 * Walk backwards from the head block until we hit the tail or the first
924 	 * block in the log.
925 	 */
926 	end_blk = head_blk > tail_blk ? tail_blk : 0;
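	/* head > tail means no wrap: stop at the tail; else run down to block 0 */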
927 	for (i = (int) head_blk - 1; i >= end_blk; i--) {
928 		error = xlog_bread(log, i, 1, bp, &offset);
929 		if (error)
930 			goto out_error;
931 
932 		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
933 			*rblk = i;
934 			*rhead = (struct xlog_rec_header *) offset;
935 			if (++found == count)
936 				break;
937 		}
938 	}
939 
940 	/*
941 	 * If we haven't hit the tail block or found the requested number of
942 	 * record headers, start looking again from the end of the physical log.
943 	 * Note that callers can pass head == tail if the tail is not yet known.
944 	 */
945 	if (tail_blk >= head_blk && found != count) {
946 		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
947 			error = xlog_bread(log, i, 1, bp, &offset);
948 			if (error)
949 				goto out_error;
950 
951 			if (*(__be32 *)offset ==
952 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
953 				*wrapped = true;
954 				*rblk = i;
955 				*rhead = (struct xlog_rec_header *) offset;
956 				if (++found == count)
957 					break;
958 			}
959 		}
960 	}
961 
962 	return found;
963 
964 out_error:
965 	return error;
966 }
967 
968 /*
969  * Seek forward in the log for log record headers.
970  *
971  * Given head and tail blocks, walk forward from the tail block until we find
972  * the provided number of records or hit the head block. The return value is the
973  * number of records encountered or a negative error code. The log block and
974  * buffer pointer of the last record seen are returned in rblk and rhead
975  * respectively.
976  */
977 STATIC int
978 xlog_seek_logrec_hdr(
979 	struct xlog		*log,
980 	xfs_daddr_t		head_blk,
981 	xfs_daddr_t		tail_blk,
982 	int			count,
983 	struct xfs_buf		*bp,
984 	xfs_daddr_t		*rblk,
985 	struct xlog_rec_header	**rhead,
986 	bool			*wrapped)
987 {
988 	int			i;
989 	int			error;
990 	int			found = 0;
991 	char			*offset = NULL;
992 	xfs_daddr_t		end_blk;
993 
994 	*wrapped = false;
995 
996 	/*
997 	 * Walk forward from the tail block until we hit the head or the last
998 	 * block in the log.
999 	 */
1000 	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
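	/* head > tail means no wrap: stop at the head; else run to the last block */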
1001 	for (i = (int) tail_blk; i <= end_blk; i++) {
1002 		error = xlog_bread(log, i, 1, bp, &offset);
1003 		if (error)
1004 			goto out_error;
1005 
1006 		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
1007 			*rblk = i;
1008 			*rhead = (struct xlog_rec_header *) offset;
1009 			if (++found == count)
1010 				break;
1011 		}
1012 	}
1013 
1014 	/*
1015 	 * If we haven't hit the head block or found the requested number of
1016 	 * record headers, start looking again from the start of the physical log.
1017 	 */
1018 	if (tail_blk > head_blk && found != count) {
1019 		for (i = 0; i < (int) head_blk; i++) {
1020 			error = xlog_bread(log, i, 1, bp, &offset);
1021 			if (error)
1022 				goto out_error;
1023 
1024 			if (*(__be32 *)offset ==
1025 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
1026 				*wrapped = true;
1027 				*rblk = i;
1028 				*rhead = (struct xlog_rec_header *) offset;
1029 				if (++found == count)
1030 					break;
1031 			}
1032 		}
1033 	}
1034 
1035 	return found;
1036 
1037 out_error:
1038 	return error;
1039 }
1040 
1041 /*
1042  * Calculate distance from head to tail (i.e., unused space in the log).
1043  */
1044 static inline int
1045 xlog_tail_distance(
1046 	struct xlog	*log,
1047 	xfs_daddr_t	head_blk,
1048 	xfs_daddr_t	tail_blk)
1049 {
1050 	if (head_blk < tail_blk)
1051 		return tail_blk - head_blk;
1052 
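	/* Head has wrapped past the tail: the distance spans the physical end. */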
1053 	return tail_blk + (log->l_logBBsize - head_blk);
1054 }
1055 
1056 /*
1057  * Verify the log tail. This is particularly important when torn or incomplete
1058  * writes have been detected near the front of the log and the head has been
1059  * walked back accordingly.
1060  *
1061  * We also have to handle the case where the tail was pinned and the head
1062  * blocked behind the tail right before a crash. If the tail had been pushed
1063  * immediately prior to the crash and the subsequent checkpoint was only
1064  * partially written, it's possible it overwrote the last referenced tail in the
1065  * log with garbage. This is not a coherency problem because the tail must have
1066  * been pushed before it can be overwritten, but appears as log corruption to
1067  * recovery because we have no way to know the tail was updated if the
1068  * subsequent checkpoint didn't write successfully.
1069  *
1070  * Therefore, CRC check the log from tail to head. If a failure occurs and the
1071  * offending record is within max iclog bufs from the head, walk the tail
1072  * forward and retry until a valid tail is found or corruption is detected out
1073  * of the range of a possible overwrite.
1074  */
1075 STATIC int
1076 xlog_verify_tail(
1077 	struct xlog		*log,
1078 	xfs_daddr_t		head_blk,
1079 	xfs_daddr_t		*tail_blk,
1080 	int			hsize)
1081 {
1082 	struct xlog_rec_header	*thead;
1083 	struct xfs_buf		*bp;
1084 	xfs_daddr_t		first_bad;
1085 	int			error = 0;
1086 	bool			wrapped;
1087 	xfs_daddr_t		tmp_tail;
1088 	xfs_daddr_t		orig_tail = *tail_blk;
1089 
1090 	bp = xlog_get_bp(log, 1);
1091 	if (!bp)
1092 		return -ENOMEM;
1093 
1094 	/*
1095 	 * Make sure the tail points to a record (returns positive count on
1096 	 * success).
1097 	 */
1098 	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, bp,
1099 			&tmp_tail, &thead, &wrapped);
1100 	if (error < 0)
1101 		goto out;
1102 	if (*tail_blk != tmp_tail)
1103 		*tail_blk = tmp_tail;
1104 
1105 	/*
1106 	 * Run a CRC check from the tail to the head. We can't just check
1107 	 * MAX_ICLOGS records past the tail because the tail may point to stale
1108 	 * blocks cleared during the search for the head/tail. These blocks are
1109 	 * overwritten with zero-length records and thus record count is not a
1110 	 * reliable indicator of the iclog state before a crash.
1111 	 */
1112 	first_bad = 0;
1113 	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1114 				      XLOG_RECOVER_CRCPASS, &first_bad);
1115 	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1116 		int	tail_distance;
1117 
1118 		/*
1119 		 * Is corruption within range of the head? If so, retry from
1120 		 * the next record. Otherwise return an error.
1121 		 */
1122 		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
1123 		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
1124 			break;
1125 
1126 		/* skip to the next record; returns positive count on success */
1127 		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2, bp,
1128 				&tmp_tail, &thead, &wrapped);
1129 		if (error < 0)
1130 			goto out;
1131 
1132 		*tail_blk = tmp_tail;
1133 		first_bad = 0;
1134 		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1135 					      XLOG_RECOVER_CRCPASS, &first_bad);
1136 	}
1137 
1138 	if (!error && *tail_blk != orig_tail)
1139 		xfs_warn(log->l_mp,
1140 		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
1141 			 orig_tail, *tail_blk);
1142 out:
1143 	xlog_put_bp(bp);
1144 	return error;
1145 }
1146 
1147 /*
1148  * Detect and trim torn writes from the head of the log.
1149  *
1150  * Storage without sector atomicity guarantees can result in torn writes in the
1151  * log in the event of a crash. Our only means to detect this scenario is via
1152  * CRC verification. While we can't always be certain that CRC verification
1153  * failure is due to a torn write vs. an unrelated corruption, we do know that
1154  * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1155  * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
1156  * the log and treat failures in this range as torn writes as a matter of
1157  * policy. In the event of CRC failure, the head is walked back to the last good
1158  * record in the log and the tail is updated from that record and verified.
1159  */
1160 STATIC int
1161 xlog_verify_head(
1162 	struct xlog		*log,
1163 	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
1164 	xfs_daddr_t		*tail_blk,	/* out: tail block */
1165 	struct xfs_buf		*bp,
1166 	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
1167 	struct xlog_rec_header	**rhead,	/* ptr to last record */
1168 	bool			*wrapped)	/* last rec. wraps phys. log */
1169 {
1170 	struct xlog_rec_header	*tmp_rhead;
1171 	struct xfs_buf		*tmp_bp;
1172 	xfs_daddr_t		first_bad;
1173 	xfs_daddr_t		tmp_rhead_blk;
1174 	int			found;
1175 	int			error;
1176 	bool			tmp_wrapped;
1177 
1178 	/*
1179 	 * Check the head of the log for torn writes. Search backwards from the
1180 	 * head until we hit the tail or the maximum number of log record I/Os
1181 	 * that could have been in flight at one time. Use a temporary buffer so
1182 	 * we don't trash the rhead/bp pointers from the caller.
1183 	 */
1184 	tmp_bp = xlog_get_bp(log, 1);
1185 	if (!tmp_bp)
1186 		return -ENOMEM;
1187 	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
1188 				      XLOG_MAX_ICLOGS, tmp_bp, &tmp_rhead_blk,
1189 				      &tmp_rhead, &tmp_wrapped);
1190 	xlog_put_bp(tmp_bp);
1191 	if (error < 0)
1192 		return error;
1193 
1194 	/*
1195 	 * Now run a CRC verification pass over the records starting at the
1196 	 * block found above to the current head. If a CRC failure occurs, the
1197 	 * log block of the first bad record is saved in first_bad.
1198 	 */
1199 	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
1200 				      XLOG_RECOVER_CRCPASS, &first_bad);
1201 	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1202 		/*
1203 		 * We've hit a potential torn write. Reset the error and warn
1204 		 * about it.
1205 		 */
1206 		error = 0;
1207 		xfs_warn(log->l_mp,
1208 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
1209 			 first_bad, *head_blk);
1210 
1211 		/*
1212 		 * Get the header block and buffer pointer for the last good
1213 		 * record before the bad record.
1214 		 *
1215 		 * Note that xlog_find_tail() clears the blocks at the new head
1216 		 * (i.e., the records with invalid CRC) if the cycle number
1217 		 * matches the current cycle.
1218 		 */
1219 		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, bp,
1220 					      rhead_blk, rhead, wrapped);
1221 		if (found < 0)
1222 			return found;
1223 		if (found == 0)		/* XXX: right thing to do here? */
1224 			return -EIO;
1225 
1226 		/*
1227 		 * Reset the head block to the starting block of the first bad
1228 		 * log record and set the tail block based on the last good
1229 		 * record.
1230 		 *
1231 		 * Bail out if the updated head/tail match as this indicates
1232 		 * possible corruption outside of the acceptable
1233 		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
1234 		 */
1235 		*head_blk = first_bad;
1236 		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1237 		if (*head_blk == *tail_blk) {
1238 			ASSERT(0);
1239 			return 0;
1240 		}
1241 	}
1242 	if (error)
1243 		return error;
1244 
1245 	return xlog_verify_tail(log, *head_blk, tail_blk,
1246 				be32_to_cpu((*rhead)->h_size));
1247 }
1248 
1249 /*
1250  * Check whether the head of the log points to an unmount record. In other
1251  * words, determine whether the log is clean. If so, update the in-core state
1252  * appropriately.
1253  */
1254 static int
1255 xlog_check_unmount_rec(
1256 	struct xlog		*log,
1257 	xfs_daddr_t		*head_blk,
1258 	xfs_daddr_t		*tail_blk,
1259 	struct xlog_rec_header	*rhead,
1260 	xfs_daddr_t		rhead_blk,
1261 	struct xfs_buf		*bp,
1262 	bool			*clean)
1263 {
1264 	struct xlog_op_header	*op_head;
1265 	xfs_daddr_t		umount_data_blk;
1266 	xfs_daddr_t		after_umount_blk;
1267 	int			hblks;
1268 	int			error;
1269 	char			*offset;
1270 
1271 	*clean = false;
1272 
1273 	/*
1274 	 * Look for unmount record. If we find it, then we know there was a
1275 	 * clean unmount. Since rhead_blk could be the last block in the
1276 	 * physical log, we convert to a log block before comparing to the head_blk.
1277 	 *
1278 	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
1279 	 * below. We won't want to clear the unmount record if there is one, so
1280 	 * we pass the lsn of the unmount record rather than the block after it.
1281 	 */
1282 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1283 		int	h_size = be32_to_cpu(rhead->h_size);
1284 		int	h_version = be32_to_cpu(rhead->h_version);
1285 
1286 		if ((h_version & XLOG_VERSION_2) &&
1287 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1288 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1289 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
1290 				hblks++;
1291 		} else {
1292 			hblks = 1;
1293 		}
1294 	} else {
1295 		hblks = 1;
1296 	}
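	/*
	 * The first block after the record is its header size (hblks) plus
	 * the data length in basic blocks, wrapped around the physical log.
	 */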
1297 	after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
1298 	after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
1299 	if (*head_blk == after_umount_blk &&
1300 	    be32_to_cpu(rhead->h_num_logops) == 1) {
1301 		umount_data_blk = rhead_blk + hblks;
1302 		umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
1303 		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
1304 		if (error)
1305 			return error;
1306 
1307 		op_head = (struct xlog_op_header *)offset;
1308 		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1309 			/*
1310 			 * Set tail and last sync so that newly written log
1311 			 * records will point recovery to after the current
1312 			 * unmount record.
1313 			 */
1314 			xlog_assign_atomic_lsn(&log->l_tail_lsn,
1315 					log->l_curr_cycle, after_umount_blk);
1316 			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1317 					log->l_curr_cycle, after_umount_blk);
1318 			*tail_blk = after_umount_blk;
1319 
1320 			*clean = true;
1321 		}
1322 	}
1323 
1324 	return 0;
1325 }
1326 
1327 static void
1328 xlog_set_state(
1329 	struct xlog		*log,
1330 	xfs_daddr_t		head_blk,
1331 	struct xlog_rec_header	*rhead,
1332 	xfs_daddr_t		rhead_blk,
1333 	bool			bump_cycle)
1334 {
1335 	/*
1336 	 * Reset log values according to the state of the log when we
1337 	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
1338 	 * one because the next write starts a new cycle rather than
1339 	 * continuing the cycle of the last good log record.  At this
1340 	 * point we have guaranteed that all partial log records have been
1341 	 * accounted for.  Therefore, we know that the last good log record
1342 	 * written was complete and ended exactly on the end boundary
1343 	 * of the physical log.
1344 	 */
1345 	log->l_prev_block = rhead_blk;
1346 	log->l_curr_block = (int)head_blk;
1347 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1348 	if (bump_cycle)
1349 		log->l_curr_cycle++;
1350 	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1351 	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1352 	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1353 					BBTOB(log->l_curr_block));
1354 	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1355 					BBTOB(log->l_curr_block));
1356 }
1357 
1358 /*
1359  * Find the sync block number or the tail of the log.
1360  *
1361  * This will be the block number of the last record to have its
1362  * associated buffers synced to disk.  Every log record header has
1363  * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
1364  * to get a sync block number.  The only concern is to figure out which
1365  * log record header to believe.
1366  *
1367  * The following algorithm uses the log record header with the largest
1368  * lsn.  The entire log record does not need to be valid.  We only care
1369  * that the header is valid.
1370  *
1371  * We could speed up the search by using the current head_blk buffer, but
1372  * it is not available.
1373  */
1374 STATIC int
1375 xlog_find_tail(
1376 	struct xlog		*log,
1377 	xfs_daddr_t		*head_blk,
1378 	xfs_daddr_t		*tail_blk)
1379 {
1380 	xlog_rec_header_t	*rhead;
1381 	char			*offset = NULL;
1382 	xfs_buf_t		*bp;
1383 	int			error;
1384 	xfs_daddr_t		rhead_blk;
1385 	xfs_lsn_t		tail_lsn;
1386 	bool			wrapped = false;
1387 	bool			clean = false;
1388 
1389 	/*
1390 	 * Find previous log record
1391 	 */
1392 	if ((error = xlog_find_head(log, head_blk)))
1393 		return error;
1394 	ASSERT(*head_blk < INT_MAX);
1395 
1396 	bp = xlog_get_bp(log, 1);
1397 	if (!bp)
1398 		return -ENOMEM;
1399 	if (*head_blk == 0) {				/* special case */
1400 		error = xlog_bread(log, 0, 1, bp, &offset);
1401 		if (error)
1402 			goto done;
1403 
1404 		if (xlog_get_cycle(offset) == 0) {
1405 			*tail_blk = 0;
1406 			/* leave all other log inited values alone */
1407 			goto done;
1408 		}
1409 	}
1410 
1411 	/*
1412 	 * Search backwards through the log looking for the log record header
1413 	 * block. This wraps all the way back around to the head so something is
1414 	 * seriously wrong if we can't find it.
1415 	 */
1416 	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
1417 				      &rhead_blk, &rhead, &wrapped);
1418 	if (error < 0)
1419 		return error;
1420 	if (!error) {
1421 		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1422 		return -EIO;
1423 	}
1424 	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1425 
1426 	/*
1427 	 * Set the log state based on the current head record.
1428 	 */
1429 	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1430 	tail_lsn = atomic64_read(&log->l_tail_lsn);
1431 
1432 	/*
1433 	 * Look for an unmount record at the head of the log. This sets the log
1434 	 * state to determine whether recovery is necessary.
1435 	 */
1436 	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1437 				       rhead_blk, bp, &clean);
1438 	if (error)
1439 		goto done;
1440 
1441 	/*
1442 	 * Verify the log head if the log is not clean (e.g., we have anything
1443 	 * but an unmount record at the head). This uses CRC verification to
1444 	 * detect and trim torn writes. If discovered, CRC failures are
1445 	 * considered torn writes and the log head is trimmed accordingly.
1446 	 *
1447 	 * Note that we can only run CRC verification when the log is dirty
1448 	 * because there's no guarantee that the log data behind an unmount
1449 	 * record is compatible with the current architecture.
1450 	 */
1451 	if (!clean) {
1452 		xfs_daddr_t	orig_head = *head_blk;
1453 
1454 		error = xlog_verify_head(log, head_blk, tail_blk, bp,
1455 					 &rhead_blk, &rhead, &wrapped);
1456 		if (error)
1457 			goto done;
1458 
1459 		/* update in-core state again if the head changed */
1460 		if (*head_blk != orig_head) {
1461 			xlog_set_state(log, *head_blk, rhead, rhead_blk,
1462 				       wrapped);
1463 			tail_lsn = atomic64_read(&log->l_tail_lsn);
1464 			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1465 						       rhead, rhead_blk, bp,
1466 						       &clean);
1467 			if (error)
1468 				goto done;
1469 		}
1470 	}
1471 
1472 	/*
1473 	 * Note that the unmount was clean. If the unmount was not clean, we
1474 	 * need to know this to rebuild the superblock counters from the perag
1475 	 * headers if we have a filesystem using non-persistent counters.
1476 	 */
1477 	if (clean)
1478 		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1479 
1480 	/*
1481 	 * Make sure that there are no blocks in front of the head
1482 	 * with the same cycle number as the head.  This can happen
1483 	 * because we allow multiple outstanding log writes concurrently,
1484 	 * and the later writes might make it out before earlier ones.
1485 	 *
1486 	 * We use the lsn from before modifying it so that we'll never
1487 	 * overwrite the unmount record after a clean unmount.
1488 	 *
1489 	 * Do this only if we are going to recover the filesystem
1490 	 *
1491 	 * NOTE: This used to say "if (!readonly)"
1492 	 * However on Linux, we can & do recover a read-only filesystem.
1493 	 * We only skip recovery if NORECOVERY is specified on mount,
1494 	 * in which case we would not be here.
1495 	 *
1496 	 * But... if the -device- itself is readonly, just skip this.
1497 	 * We can't recover this device anyway, so it won't matter.
1498 	 */
1499 	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1500 		error = xlog_clear_stale_blocks(log, tail_lsn);
1501 
1502 done:
1503 	xlog_put_bp(bp);
1504 
1505 	if (error)
1506 		xfs_warn(log->l_mp, "failed to locate log tail");
1507 	return error;
1508 }
1509 
1510 /*
1511  * Is the log zeroed at all?
1512  *
1513  * The last binary search should be changed to perform an X block read
1514  * once X becomes small enough.  You can then search linearly through
1515  * the X blocks.  This will cut down on the number of reads we need to do.
1516  *
1517  * If the log is partially zeroed, this routine will pass back the blkno
1518  * of the first block with cycle number 0.  It won't have a complete LR
1519  * preceding it.
1520  *
1521  * Return:
1522  *	0  => the log is completely written to
1523  *	1  => use *blk_no as the first block of the log
1524  *	<0 => error has occurred
1525  */
1526 STATIC int
1527 xlog_find_zeroed(
1528 	struct xlog	*log,
1529 	xfs_daddr_t	*blk_no)
1530 {
1531 	xfs_buf_t	*bp;
1532 	char		*offset;
1533 	uint	        first_cycle, last_cycle;
1534 	xfs_daddr_t	new_blk, last_blk, start_blk;
1535 	xfs_daddr_t     num_scan_bblks;
1536 	int	        error, log_bbnum = log->l_logBBsize;
1537 
1538 	*blk_no = 0;
1539 
1540 	/* check totally zeroed log */
1541 	bp = xlog_get_bp(log, 1);
1542 	if (!bp)
1543 		return -ENOMEM;
1544 	error = xlog_bread(log, 0, 1, bp, &offset);
1545 	if (error)
1546 		goto bp_err;
1547 
1548 	first_cycle = xlog_get_cycle(offset);
1549 	if (first_cycle == 0) {		/* completely zeroed log */
1550 		*blk_no = 0;
1551 		xlog_put_bp(bp);
1552 		return 1;
1553 	}
1554 
1555 	/* check partially zeroed log */
1556 	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1557 	if (error)
1558 		goto bp_err;
1559 
1560 	last_cycle = xlog_get_cycle(offset);
1561 	if (last_cycle != 0) {		/* log completely written to */
1562 		xlog_put_bp(bp);
1563 		return 0;
1564 	} else if (first_cycle != 1) {
1565 		/*
1566 		 * If the cycle of the last block is zero, the cycle of
1567 		 * the first block must be 1. If it's not, maybe we're
1568 		 * not looking at a log... Bail out.
1569 		 */
1570 		xfs_warn(log->l_mp,
1571 			"Log inconsistent or not a log (last==0, first!=1)");
1572 		error = -EINVAL;
1573 		goto bp_err;
1574 	}
1575 
1576 	/* we have a partially zeroed log */
1577 	last_blk = log_bbnum-1;
1578 	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1579 		goto bp_err;
1580 
1581 	/*
1582 	 * Validate the answer.  Because there is no way to guarantee that
1583 	 * the entire log is made up of log records which are the same size,
1584 	 * we scan over the defined maximum blocks.  At this point, the maximum
1585 	 * is not chosen to mean anything special.   XXXmiken
1586 	 */
1587 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1588 	ASSERT(num_scan_bblks <= INT_MAX);
1589 
1590 	if (last_blk < num_scan_bblks)
1591 		num_scan_bblks = last_blk;
1592 	start_blk = last_blk - num_scan_bblks;
1593 
1594 	/*
1595 	 * We search for any instances of cycle number 0 that occur before
1596 	 * our current estimate of the head.  What we're trying to detect is
1597 	 *        1 ... | 0 | 1 | 0...
1598 	 *                       ^ binary search ends here
1599 	 */
1600 	if ((error = xlog_find_verify_cycle(log, start_blk,
1601 					 (int)num_scan_bblks, 0, &new_blk)))
1602 		goto bp_err;
1603 	if (new_blk != -1)
1604 		last_blk = new_blk;
1605 
1606 	/*
1607 	 * Potentially backup over partial log record write.  We don't need
1608 	 * to search the end of the log because we know it is zero.
1609 	 */
1610 	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1611 	if (error == 1)
1612 		error = -EIO;
1613 	if (error)
1614 		goto bp_err;
1615 
1616 	*blk_no = last_blk;
1617 bp_err:
1618 	xlog_put_bp(bp);
1619 	if (error)
1620 		return error;
1621 	return 1;
1622 }
1623 
1624 /*
1625  * These are simple subroutines used by xlog_clear_stale_blocks() below
1626  * to initialize a buffer full of empty log record headers and write
1627  * them into the log.
1628  */
1629 STATIC void
1630 xlog_add_record(
1631 	struct xlog		*log,
1632 	char			*buf,
1633 	int			cycle,
1634 	int			block,
1635 	int			tail_cycle,
1636 	int			tail_block)
1637 {
1638 	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
1639 
1640 	memset(buf, 0, BBSIZE);
1641 	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1642 	recp->h_cycle = cpu_to_be32(cycle);
1643 	recp->h_version = cpu_to_be32(
1644 			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1645 	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1646 	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1647 	recp->h_fmt = cpu_to_be32(XLOG_FMT);
1648 	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
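	/* h_len stays zero from the memset above: recovery sees an empty record */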
1649 }
1650 
1651 STATIC int
1652 xlog_write_log_records(
1653 	struct xlog	*log,
1654 	int		cycle,
1655 	int		start_block,
1656 	int		blocks,
1657 	int		tail_cycle,
1658 	int		tail_block)
1659 {
1660 	char		*offset;
1661 	xfs_buf_t	*bp;
1662 	int		balign, ealign;
1663 	int		sectbb = log->l_sectBBsize;
1664 	int		end_block = start_block + blocks;
1665 	int		bufblks;
1666 	int		error = 0;
1667 	int		i, j = 0;
1668 
1669 	/*
1670 	 * Greedily allocate a buffer big enough to handle the full
1671 	 * range of basic blocks to be written.  If that fails, try
1672 	 * a smaller size.  We need to be able to write at least a
1673 	 * log sector, or we're out of luck.
1674 	 */
1675 	bufblks = 1 << ffs(blocks);
1676 	while (bufblks > log->l_logBBsize)
1677 		bufblks >>= 1;
1678 	while (!(bp = xlog_get_bp(log, bufblks))) {
1679 		bufblks >>= 1;
1680 		if (bufblks < sectbb)
1681 			return -ENOMEM;
1682 	}
1683 
1684 	/* We may need to do a read at the start to fill in part of
1685 	 * the buffer in the starting sector not covered by the first
1686 	 * write below.
1687 	 */
1688 	balign = round_down(start_block, sectbb);
1689 	if (balign != start_block) {
1690 		error = xlog_bread_noalign(log, start_block, 1, bp);
1691 		if (error)
1692 			goto out_put_bp;
1693 
1694 		j = start_block - balign;
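		/*
		 * j counts the basic blocks just read back that must be
		 * preserved in front of the first new record written.
		 */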
1695 	}
1696 
1697 	for (i = start_block; i < end_block; i += bufblks) {
1698 		int		bcount, endcount;
1699 
1700 		bcount = min(bufblks, end_block - start_block);
1701 		endcount = bcount - j;
1702 
1703 		/* We may need to do a read at the end to fill in part of
1704 		 * the buffer in the final sector not covered by the write.
1705 		 * If this is the same sector as the above read, skip it.
1706 		 */
1707 		ealign = round_down(end_block, sectbb);
1708 		if (j == 0 && (start_block + endcount > ealign)) {
1709 			offset = bp->b_addr + BBTOB(ealign - start_block);
1710 			error = xlog_bread_offset(log, ealign, sectbb,
1711 							bp, offset);
1712 			if (error)
1713 				break;
1714 
1715 		}
1716 
1717 		offset = xlog_align(log, start_block, endcount, bp);
1718 		for (; j < endcount; j++) {
1719 			xlog_add_record(log, offset, cycle, i+j,
1720 					tail_cycle, tail_block);
1721 			offset += BBSIZE;
1722 		}
1723 		error = xlog_bwrite(log, start_block, endcount, bp);
1724 		if (error)
1725 			break;
1726 		start_block += endcount;
1727 		j = 0;
1728 	}
1729 
1730  out_put_bp:
1731 	xlog_put_bp(bp);
1732 	return error;
1733 }
1734 
1735 /*
1736  * This routine is called to blow away any incomplete log writes out
1737  * in front of the log head.  We do this so that we won't become confused
1738  * if we come up, write only a little bit more, and then crash again.
1739  * If we leave the partial log records out there, this situation could
1740  * cause us to think those partial writes are valid blocks since they
1741  * have the current cycle number.  We get rid of them by overwriting them
1742  * with empty log records with the old cycle number rather than the
1743  * current one.
1744  *
1745  * The tail lsn is passed in rather than taken from
1746  * the log so that we will not write over the unmount record after a
1747  * clean unmount in a 512 block log.  Doing so would leave the log without
1748  * any valid log records in it until a new one was written.  If we crashed
1749  * during that time we would not be able to recover.
1750  */
1751 STATIC int
1752 xlog_clear_stale_blocks(
1753 	struct xlog	*log,
1754 	xfs_lsn_t	tail_lsn)
1755 {
1756 	int		tail_cycle, head_cycle;
1757 	int		tail_block, head_block;
1758 	int		tail_distance, max_distance;
1759 	int		distance;
1760 	int		error;
1761 
1762 	tail_cycle = CYCLE_LSN(tail_lsn);
1763 	tail_block = BLOCK_LSN(tail_lsn);
1764 	head_cycle = log->l_curr_cycle;
1765 	head_block = log->l_curr_block;
1766 
1767 	/*
1768 	 * Figure out the distance between the new head of the log
1769 	 * and the tail.  We want to write over any blocks beyond the
1770 	 * head that we may have written just before the crash, but
1771 	 * we don't want to overwrite the tail of the log.
1772 	 */
1773 	if (head_cycle == tail_cycle) {
1774 		/*
1775 		 * The tail is behind the head in the physical log,
1776 		 * so the distance from the head to the tail is the
1777 		 * distance from the head to the end of the log plus
1778 		 * the distance from the beginning of the log to the
1779 		 * tail.
1780 		 */
1781 		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1782 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1783 					 XFS_ERRLEVEL_LOW, log->l_mp);
1784 			return -EFSCORRUPTED;
1785 		}
1786 		tail_distance = tail_block + (log->l_logBBsize - head_block);
1787 	} else {
1788 		/*
1789 		 * The head is behind the tail in the physical log,
1790 		 * so the distance from the head to the tail is just
1791 		 * the tail block minus the head block.
1792 		 */
1793 		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1794 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1795 					 XFS_ERRLEVEL_LOW, log->l_mp);
1796 			return -EFSCORRUPTED;
1797 		}
1798 		tail_distance = tail_block - head_block;
1799 	}
1800 
1801 	/*
1802 	 * If the head is right up against the tail, we can't clear
1803 	 * anything.
1804 	 */
1805 	if (tail_distance <= 0) {
1806 		ASSERT(tail_distance == 0);
1807 		return 0;
1808 	}
1809 
1810 	max_distance = XLOG_TOTAL_REC_SHIFT(log);
1811 	/*
1812 	 * Take the smaller of the maximum amount of outstanding I/O
1813 	 * we could have and the distance to the tail to clear out.
1814 	 * We take the smaller so that we don't overwrite the tail and
1815 	 * we don't waste all day writing from the head to the tail
1816 	 * for no reason.
1817 	 */
1818 	max_distance = MIN(max_distance, tail_distance);
1819 
1820 	if ((head_block + max_distance) <= log->l_logBBsize) {
1821 		/*
1822 		 * We can stomp all the blocks we need to without
1823 		 * wrapping around the end of the log.  Just do it
1824 		 * in a single write.  Use the cycle number of the
1825 		 * current cycle minus one so that the log will look like:
1826 		 *     n ... | n - 1 ...
1827 		 */
1828 		error = xlog_write_log_records(log, (head_cycle - 1),
1829 				head_block, max_distance, tail_cycle,
1830 				tail_block);
1831 		if (error)
1832 			return error;
1833 	} else {
1834 		/*
1835 		 * We need to wrap around the end of the physical log in
1836 		 * order to clear all the blocks.  Do it in two separate
1837 		 * I/Os.  The first write should be from the head to the
1838 		 * end of the physical log, and it should use the current
1839 		 * cycle number minus one just like above.
1840 		 */
1841 		distance = log->l_logBBsize - head_block;
1842 		error = xlog_write_log_records(log, (head_cycle - 1),
1843 				head_block, distance, tail_cycle,
1844 				tail_block);
1845 
1846 		if (error)
1847 			return error;
1848 
1849 		/*
1850 		 * Now write the blocks at the start of the physical log.
1851 		 * This writes the remainder of the blocks we want to clear.
1852 		 * It uses the current cycle number since we're now on the
1853 		 * same cycle as the head so that we get:
1854 		 *    n ... n ... | n - 1 ...
1855 		 *    ^^^^^ blocks we're writing
1856 		 */
1857 		distance = max_distance - (log->l_logBBsize - head_block);
1858 		error = xlog_write_log_records(log, head_cycle, 0, distance,
1859 				tail_cycle, tail_block);
1860 		if (error)
1861 			return error;
1862 	}
1863 
1864 	return 0;
1865 }
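/*
 * Worked example of the wrap case above (hypothetical numbers): a log
 * of l_logBBsize = 1000 with its head at cycle 8, block 980 and a
 * max_distance of 50 first stamps blocks 980-999 with cycle 7
 * (distance = 1000 - 980 = 20), then wraps and stamps blocks 0-29 with
 * cycle 8 (distance = 50 - 20 = 30), producing the
 * "n ... n ... | n - 1 ..." layout shown above.
 */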
1866 
1867 /******************************************************************************
1868  *
1869  *		Log recover routines
1870  *
1871  ******************************************************************************
1872  */
1873 
1874 /*
1875  * Sort the log items in the transaction.
1876  *
1877  * The ordering constraints are defined by the inode allocation and unlink
1878  * behaviour. The rules are:
1879  *
1880  *	1. Every item is only logged once in a given transaction, so it
1881  *	   represents the last logged state of the item. Hence ordering is
1882  *	   dependent on the order in which operations need to be performed so
1883  *	   that required initial conditions are always met.
1884  *
1885  *	2. Cancelled buffers are recorded in pass 1 in a separate table and
1886  *	   there's nothing to replay from them so we can simply cull them
1887  *	   from the transaction. However, we can't do that until after we've
1888  *	   replayed all the other items because they may be dependent on the
1889  *	   cancelled buffer and replaying the cancelled buffer can remove it
1890  *	   from the cancelled buffer table. Hence they have to be done last.
1891  *
1892  *	3. Inode allocation buffers must be replayed before inode items that
1893  *	   read the buffer and replay changes into it. For filesystems using the
1894  *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1895  *	   treated the same as inode allocation buffers as they create and
1896  *	   initialise the buffers directly.
1897  *
1898  *	4. Inode unlink buffers must be replayed after inode items are replayed.
1899  *	   This ensures that inodes are completely flushed to the inode buffer
1900  *	   in a "free" state before we remove the unlinked inode list pointer.
1901  *
1902  * Hence the ordering needs to be inode allocation buffers first, inode items
1903  * second, inode unlink buffers third and cancelled buffers last.
1904  *
1905  * But there's a problem with that - we can't tell an inode allocation buffer
1906  * apart from a regular buffer, so we can't separate them. We can, however,
1907  * tell an inode unlink buffer from the others, and so we can separate them out
1908  * from all the other buffers and move them to last.
1909  *
1910  * Hence, 4 lists, in order from head to tail:
1911  *	- buffer_list for all buffers except cancelled/inode unlink buffers
1912  *	- item_list for all non-buffer items
1913  *	- inode_buffer_list for inode unlink buffers
1914  *	- cancel_list for the cancelled buffers
1915  *
1916  * Note that we add objects to the tail of the lists so that first-to-last
1917  * ordering is preserved within the lists. Adding objects to the head of the
1918  * list means when we traverse from the head we walk them in last-to-first
1919  * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1920  * but for all other items there may be specific ordering that we need to
1921  * preserve.
1922  */
1923 STATIC int
1924 xlog_recover_reorder_trans(
1925 	struct xlog		*log,
1926 	struct xlog_recover	*trans,
1927 	int			pass)
1928 {
1929 	xlog_recover_item_t	*item, *n;
1930 	int			error = 0;
1931 	LIST_HEAD(sort_list);
1932 	LIST_HEAD(cancel_list);
1933 	LIST_HEAD(buffer_list);
1934 	LIST_HEAD(inode_buffer_list);
1935 	LIST_HEAD(inode_list);
1936 
1937 	list_splice_init(&trans->r_itemq, &sort_list);
1938 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1939 		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1940 
1941 		switch (ITEM_TYPE(item)) {
1942 		case XFS_LI_ICREATE:
1943 			list_move_tail(&item->ri_list, &buffer_list);
1944 			break;
1945 		case XFS_LI_BUF:
1946 			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1947 				trace_xfs_log_recover_item_reorder_head(log,
1948 							trans, item, pass);
1949 				list_move(&item->ri_list, &cancel_list);
1950 				break;
1951 			}
1952 			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1953 				list_move(&item->ri_list, &inode_buffer_list);
1954 				break;
1955 			}
1956 			list_move_tail(&item->ri_list, &buffer_list);
1957 			break;
1958 		case XFS_LI_INODE:
1959 		case XFS_LI_DQUOT:
1960 		case XFS_LI_QUOTAOFF:
1961 		case XFS_LI_EFD:
1962 		case XFS_LI_EFI:
1963 		case XFS_LI_RUI:
1964 		case XFS_LI_RUD:
1965 		case XFS_LI_CUI:
1966 		case XFS_LI_CUD:
1967 		case XFS_LI_BUI:
1968 		case XFS_LI_BUD:
1969 			trace_xfs_log_recover_item_reorder_tail(log,
1970 							trans, item, pass);
1971 			list_move_tail(&item->ri_list, &inode_list);
1972 			break;
1973 		default:
1974 			xfs_warn(log->l_mp,
1975 				"%s: unrecognized type of log operation",
1976 				__func__);
1977 			ASSERT(0);
1978 			/*
1979 			 * Return the remaining items to the transaction
1980 			 * item list so they can be freed by the caller.
1981 			 */
1982 			if (!list_empty(&sort_list))
1983 				list_splice_init(&sort_list, &trans->r_itemq);
1984 			error = -EIO;
1985 			goto out;
1986 		}
1987 	}
1988 out:
1989 	ASSERT(list_empty(&sort_list));
1990 	if (!list_empty(&buffer_list))
1991 		list_splice(&buffer_list, &trans->r_itemq);
1992 	if (!list_empty(&inode_list))
1993 		list_splice_tail(&inode_list, &trans->r_itemq);
1994 	if (!list_empty(&inode_buffer_list))
1995 		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1996 	if (!list_empty(&cancel_list))
1997 		list_splice_tail(&cancel_list, &trans->r_itemq);
1998 	return error;
1999 }
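/*
 * For illustration (hypothetical transaction): items arriving in the
 * order [inode unlink buffer, inode, cancelled buffer, regular buffer]
 * leave this function with r_itemq reordered to [regular buffer, inode,
 * inode unlink buffer, cancelled buffer], which satisfies the replay
 * constraints described in the comment above.
 */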
2000 
2001 /*
2002  * Build up the table of buf cancel records so that we don't replay
2003  * cancelled data in the second pass.  For buffer records that are
2004  * not cancel records, there is nothing to do here so we just return.
2005  *
2006  * If we get a cancel record which is already in the table, this indicates
2007  * that the buffer was cancelled multiple times.  In order to ensure
2008  * that during pass 2 we keep the record in the table until we reach its
2009  * last occurrence in the log, we keep a reference count in the cancel
2010  * record in the table to tell us how many times we expect to see this
2011  * record during the second pass.
2012  */
2013 STATIC int
2014 xlog_recover_buffer_pass1(
2015 	struct xlog			*log,
2016 	struct xlog_recover_item	*item)
2017 {
2018 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2019 	struct list_head	*bucket;
2020 	struct xfs_buf_cancel	*bcp;
2021 
2022 	/*
2023 	 * If this isn't a cancel buffer item, then just return.
2024 	 */
2025 	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
2026 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
2027 		return 0;
2028 	}
2029 
2030 	/*
2031 	 * Insert an xfs_buf_cancel record into the hash table of them.
2032 	 * If there is already an identical record, bump its reference count.
2033 	 */
2034 	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
2035 	list_for_each_entry(bcp, bucket, bc_list) {
2036 		if (bcp->bc_blkno == buf_f->blf_blkno &&
2037 		    bcp->bc_len == buf_f->blf_len) {
2038 			bcp->bc_refcount++;
2039 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
2040 			return 0;
2041 		}
2042 	}
2043 
2044 	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
2045 	bcp->bc_blkno = buf_f->blf_blkno;
2046 	bcp->bc_len = buf_f->blf_len;
2047 	bcp->bc_refcount = 1;
2048 	list_add_tail(&bcp->bc_list, bucket);
2049 
2050 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
2051 	return 0;
2052 }
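/*
 * For example, a buffer cancelled three times in the portion of the log
 * being recovered ends pass 1 as a single xfs_buf_cancel entry with
 * bc_refcount == 3. Pass 2 then decrements that count once per cancel
 * item it meets and frees the entry on the last reference, so replay of
 * the buffer resumes for any reuse logged after the final cancellation
 * (see xlog_check_buffer_cancelled() below).
 */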
2053 
2054 /*
2055  * Check to see whether the buffer being recovered has a corresponding
2056  * entry in the buffer cancel record table. If it does, return the cancel
2057  * buffer structure to the caller.
2058  */
2059 STATIC struct xfs_buf_cancel *
2060 xlog_peek_buffer_cancelled(
2061 	struct xlog		*log,
2062 	xfs_daddr_t		blkno,
2063 	uint			len,
2064 	unsigned short		flags)
2065 {
2066 	struct list_head	*bucket;
2067 	struct xfs_buf_cancel	*bcp;
2068 
2069 	if (!log->l_buf_cancel_table) {
2070 		/* empty table means no cancelled buffers in the log */
2071 		ASSERT(!(flags & XFS_BLF_CANCEL));
2072 		return NULL;
2073 	}
2074 
2075 	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
2076 	list_for_each_entry(bcp, bucket, bc_list) {
2077 		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
2078 			return bcp;
2079 	}
2080 
2081 	/*
2082 	 * We didn't find a corresponding entry in the table, so return NULL
2083 	 * so that the buffer is NOT cancelled.
2084 	 */
2085 	ASSERT(!(flags & XFS_BLF_CANCEL));
2086 	return NULL;
2087 }
2088 
2089 /*
2090  * If the buffer is being cancelled then return 1 so that it will be cancelled,
2091  * otherwise return 0.  If the buffer is actually a buffer cancel item
2092  * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
2093  * table and remove it from the table if this is the last reference.
2094  *
2095  * We remove the cancel record from the table when we encounter its last
2096  * occurrence in the log so that if the same buffer is re-used again after its
2097  * last cancellation we actually replay the changes made at that point.
2098  */
2099 STATIC int
2100 xlog_check_buffer_cancelled(
2101 	struct xlog		*log,
2102 	xfs_daddr_t		blkno,
2103 	uint			len,
2104 	unsigned short		flags)
2105 {
2106 	struct xfs_buf_cancel	*bcp;
2107 
2108 	bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
2109 	if (!bcp)
2110 		return 0;
2111 
2112 	/*
2113 	 * We've got a match, so return 1 so that the recovery of this buffer
2114 	 * is cancelled.  If this buffer is actually a buffer cancel log
2115 	 * item, then decrement the refcount on the one in the table and
2116 	 * remove it if this is the last reference.
2117 	 */
2118 	if (flags & XFS_BLF_CANCEL) {
2119 		if (--bcp->bc_refcount == 0) {
2120 			list_del(&bcp->bc_list);
2121 			kmem_free(bcp);
2122 		}
2123 	}
2124 	return 1;
2125 }
2126 
2127 /*
2128  * Perform recovery for a buffer full of inodes.  In these buffers, the only
2129  * data which should be recovered is that which corresponds to the
2130  * di_next_unlinked pointers in the on disk inode structures.  The rest of the
2131  * data for the inodes is always logged through the inodes themselves rather
2132  * than the inode buffer and is recovered in xlog_recover_inode_pass2().
2133  *
2134  * The only time when buffers full of inodes are fully recovered is when the
2135  * buffer is full of newly allocated inodes.  In this case the buffer will
2136  * not be marked as an inode buffer and so will be sent to
2137  * xlog_recover_do_reg_buffer() below during recovery.
2138  */
2139 STATIC int
2140 xlog_recover_do_inode_buffer(
2141 	struct xfs_mount	*mp,
2142 	xlog_recover_item_t	*item,
2143 	struct xfs_buf		*bp,
2144 	xfs_buf_log_format_t	*buf_f)
2145 {
2146 	int			i;
2147 	int			item_index = 0;
2148 	int			bit = 0;
2149 	int			nbits = 0;
2150 	int			reg_buf_offset = 0;
2151 	int			reg_buf_bytes = 0;
2152 	int			next_unlinked_offset;
2153 	int			inodes_per_buf;
2154 	xfs_agino_t		*logged_nextp;
2155 	xfs_agino_t		*buffer_nextp;
2156 
2157 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
2158 
2159 	/*
2160 	 * Post recovery validation only works properly on CRC enabled
2161 	 * filesystems.
2162 	 */
2163 	if (xfs_sb_version_hascrc(&mp->m_sb))
2164 		bp->b_ops = &xfs_inode_buf_ops;
2165 
2166 	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
2167 	for (i = 0; i < inodes_per_buf; i++) {
2168 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
2169 			offsetof(xfs_dinode_t, di_next_unlinked);
2170 
2171 		while (next_unlinked_offset >=
2172 		       (reg_buf_offset + reg_buf_bytes)) {
2173 			/*
2174 			 * The next di_next_unlinked field is beyond
2175 			 * the current logged region.  Find the next
2176 			 * logged region that contains or is beyond
2177 			 * the current di_next_unlinked field.
2178 			 */
2179 			bit += nbits;
2180 			bit = xfs_next_bit(buf_f->blf_data_map,
2181 					   buf_f->blf_map_size, bit);
2182 
2183 			/*
2184 			 * If there are no more logged regions in the
2185 			 * buffer, then we're done.
2186 			 */
2187 			if (bit == -1)
2188 				return 0;
2189 
2190 			nbits = xfs_contig_bits(buf_f->blf_data_map,
2191 						buf_f->blf_map_size, bit);
2192 			ASSERT(nbits > 0);
2193 			reg_buf_offset = bit << XFS_BLF_SHIFT;
2194 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
2195 			item_index++;
2196 		}
2197 
2198 		/*
2199 		 * If the current logged region starts after the current
2200 		 * di_next_unlinked field, then move on to the next
2201 		 * di_next_unlinked field.
2202 		 */
2203 		if (next_unlinked_offset < reg_buf_offset)
2204 			continue;
2205 
2206 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
2207 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
2208 		ASSERT((reg_buf_offset + reg_buf_bytes) <=
2209 							BBTOB(bp->b_io_length));
2210 
2211 		/*
2212 		 * The current logged region contains a copy of the
2213 		 * current di_next_unlinked field.  Extract its value
2214 		 * and copy it to the buffer copy.
2215 		 */
2216 		logged_nextp = item->ri_buf[item_index].i_addr +
2217 				next_unlinked_offset - reg_buf_offset;
2218 		if (unlikely(*logged_nextp == 0)) {
2219 			xfs_alert(mp,
2220 		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
2221 		"Trying to replay bad (0) inode di_next_unlinked field.",
2222 				item, bp);
2223 			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
2224 					 XFS_ERRLEVEL_LOW, mp);
2225 			return -EFSCORRUPTED;
2226 		}
2227 
2228 		buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
2229 		*buffer_nextp = *logged_nextp;
2230 
2231 		/*
2232 		 * If necessary, recalculate the CRC in the on-disk inode. We
2233 		 * have to leave the inode in a consistent state for whoever
2234 		 * reads it next....
2235 		 */
2236 		xfs_dinode_calc_crc(mp,
2237 				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
2238 
2239 	}
2240 
2241 	return 0;
2242 }
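/*
 * Worked example of the region arithmetic above (hypothetical numbers):
 * with XFS_BLF_SHIFT = 7 (128 byte chunks), a logged region starting at
 * bit 2 with nbits = 3 covers buffer bytes 256-639. For 512 byte inodes,
 * inode i = 1 has next_unlinked_offset = 512 + offsetof(xfs_dinode_t,
 * di_next_unlinked), which falls inside that region, so the logged copy
 * of the field is read from i_addr + (next_unlinked_offset - 256) of the
 * matching log vector.
 */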
2243 
2244 /*
2245  * V5 filesystems know the age of the buffer on disk being recovered. We can
2246  * have newer objects on disk than we are replaying, and so for these cases we
2247  * don't want to replay the current change as that will make the buffer contents
2248  * temporarily invalid on disk.
2249  *
2250  * The magic number might not match the buffer type we are going to recover
2251  * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
2252  * extract the LSN of the existing object in the buffer based on its current
2253  * magic number.  If we don't recognise the magic number in the buffer, then
2254  * return an LSN of -1 so that the caller knows it was an unrecognised block and
2255  * so can recover the buffer.
2256  *
2257  * Note: we cannot rely solely on magic number matches to determine that the
2258  * buffer has a valid LSN - we also need to verify that it belongs to this
2259  * filesystem, so we need to extract the object's LSN and compare it to that
2260  * which we read from the superblock. If the UUIDs don't match, then we've got a
2261  * stale metadata block from an old filesystem instance that we need to recover
2262  * over the top of.
2263  */
2264 static xfs_lsn_t
2265 xlog_recover_get_buf_lsn(
2266 	struct xfs_mount	*mp,
2267 	struct xfs_buf		*bp)
2268 {
2269 	uint32_t		magic32;
2270 	uint16_t		magic16;
2271 	uint16_t		magicda;
2272 	void			*blk = bp->b_addr;
2273 	uuid_t			*uuid;
2274 	xfs_lsn_t		lsn = -1;
2275 
2276 	/* v4 filesystems always recover immediately */
2277 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2278 		goto recover_immediately;
2279 
2280 	magic32 = be32_to_cpu(*(__be32 *)blk);
2281 	switch (magic32) {
2282 	case XFS_ABTB_CRC_MAGIC:
2283 	case XFS_ABTC_CRC_MAGIC:
2284 	case XFS_ABTB_MAGIC:
2285 	case XFS_ABTC_MAGIC:
2286 	case XFS_RMAP_CRC_MAGIC:
2287 	case XFS_REFC_CRC_MAGIC:
2288 	case XFS_IBT_CRC_MAGIC:
2289 	case XFS_IBT_MAGIC: {
2290 		struct xfs_btree_block *btb = blk;
2291 
2292 		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2293 		uuid = &btb->bb_u.s.bb_uuid;
2294 		break;
2295 	}
2296 	case XFS_BMAP_CRC_MAGIC:
2297 	case XFS_BMAP_MAGIC: {
2298 		struct xfs_btree_block *btb = blk;
2299 
2300 		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2301 		uuid = &btb->bb_u.l.bb_uuid;
2302 		break;
2303 	}
2304 	case XFS_AGF_MAGIC:
2305 		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2306 		uuid = &((struct xfs_agf *)blk)->agf_uuid;
2307 		break;
2308 	case XFS_AGFL_MAGIC:
2309 		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2310 		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2311 		break;
2312 	case XFS_AGI_MAGIC:
2313 		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2314 		uuid = &((struct xfs_agi *)blk)->agi_uuid;
2315 		break;
2316 	case XFS_SYMLINK_MAGIC:
2317 		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2318 		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2319 		break;
2320 	case XFS_DIR3_BLOCK_MAGIC:
2321 	case XFS_DIR3_DATA_MAGIC:
2322 	case XFS_DIR3_FREE_MAGIC:
2323 		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2324 		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2325 		break;
2326 	case XFS_ATTR3_RMT_MAGIC:
2327 		/*
2328 		 * Remote attr blocks are written synchronously, rather than
2329 		 * being logged. That means they do not contain a valid LSN
2330 		 * (i.e. transactionally ordered) in them, and hence any time we
2331 		 * see a buffer to replay over the top of a remote attribute
2332 		 * block we should simply do so.
2333 		 */
2334 		goto recover_immediately;
2335 	case XFS_SB_MAGIC:
2336 		/*
2337 		 * superblock uuids are magic. We may or may not have a
2338 		 * sb_meta_uuid on disk, but it will be set in the in-core
2339 		 * superblock. We set the uuid pointer for verification
2340 		 * according to the superblock feature mask to ensure we check
2341 		 * the relevant UUID in the superblock.
2342 		 */
2343 		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2344 		if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2345 			uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2346 		else
2347 			uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2348 		break;
2349 	default:
2350 		break;
2351 	}
2352 
2353 	if (lsn != (xfs_lsn_t)-1) {
2354 		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2355 			goto recover_immediately;
2356 		return lsn;
2357 	}
2358 
2359 	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2360 	switch (magicda) {
2361 	case XFS_DIR3_LEAF1_MAGIC:
2362 	case XFS_DIR3_LEAFN_MAGIC:
2363 	case XFS_DA3_NODE_MAGIC:
2364 		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2365 		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2366 		break;
2367 	default:
2368 		break;
2369 	}
2370 
2371 	if (lsn != (xfs_lsn_t)-1) {
2372 		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2373 			goto recover_immediately;
2374 		return lsn;
2375 	}
2376 
2377 	/*
2378 	 * We do individual object checks on dquot and inode buffers as they
2379 	 * have their own individual LSN records. Also, we could have a stale
2380 	 * buffer here, so we have to at least recognise these buffer types.
2381 	 *
2382 	 * A noted complexity here is inode unlinked list processing - it logs
2383 	 * the inode directly in the buffer, but we don't know which inodes have
2384 	 * been modified, and there is no global buffer LSN. Hence we need to
2385 	 * recover all inode buffer types immediately. This problem will be
2386 	 * fixed by logical logging of the unlinked list modifications.
2387 	 */
2388 	magic16 = be16_to_cpu(*(__be16 *)blk);
2389 	switch (magic16) {
2390 	case XFS_DQUOT_MAGIC:
2391 	case XFS_DINODE_MAGIC:
2392 		goto recover_immediately;
2393 	default:
2394 		break;
2395 	}
2396 
2397 	/* unknown buffer contents, recover immediately */
2398 
2399 recover_immediately:
2400 	return (xfs_lsn_t)-1;
2401 
2402 }
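/*
 * For illustration, a caller compares the result against the LSN of the
 * transaction being replayed, as xlog_recover_buffer_pass2() below does:
 *
 *	lsn = xlog_recover_get_buf_lsn(mp, bp);
 *	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
 *		skip the replay - the on-disk object is already newer;
 *
 * while a return of -1 (v4 filesystem or unrecognised contents) forces
 * immediate recovery of the buffer.
 */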
2403 
2404 /*
2405  * Validate the recovered buffer is of the correct type and attach the
2406  * appropriate buffer operations to them for writeback. Magic numbers are in a
2407  * few places:
2408  *	the first 16 bits of the buffer (inode buffer, dquot buffer),
2409  *	the first 32 bits of the buffer (most blocks),
2410  *	inside a struct xfs_da_blkinfo at the start of the buffer.
2411  */
2412 static void
2413 xlog_recover_validate_buf_type(
2414 	struct xfs_mount	*mp,
2415 	struct xfs_buf		*bp,
2416 	xfs_buf_log_format_t	*buf_f,
2417 	xfs_lsn_t		current_lsn)
2418 {
2419 	struct xfs_da_blkinfo	*info = bp->b_addr;
2420 	uint32_t		magic32;
2421 	uint16_t		magic16;
2422 	uint16_t		magicda;
2423 	char			*warnmsg = NULL;
2424 
2425 	/*
2426 	 * We can only do post recovery validation on items on CRC enabled
2427 	 * filesystems as we need to know when the buffer was written to be able
2428 	 * to determine if we should have replayed the item. If we replay old
2429 	 * metadata over a newer buffer, then it will enter a temporarily
2430 	 * inconsistent state resulting in verification failures. Hence for now
2431 	 * just avoid the verification stage for non-crc filesystems.
2432 	 */
2433 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2434 		return;
2435 
2436 	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2437 	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
2438 	magicda = be16_to_cpu(info->magic);
2439 	switch (xfs_blft_from_flags(buf_f)) {
2440 	case XFS_BLFT_BTREE_BUF:
2441 		switch (magic32) {
2442 		case XFS_ABTB_CRC_MAGIC:
2443 		case XFS_ABTC_CRC_MAGIC:
2444 		case XFS_ABTB_MAGIC:
2445 		case XFS_ABTC_MAGIC:
2446 			bp->b_ops = &xfs_allocbt_buf_ops;
2447 			break;
2448 		case XFS_IBT_CRC_MAGIC:
2449 		case XFS_FIBT_CRC_MAGIC:
2450 		case XFS_IBT_MAGIC:
2451 		case XFS_FIBT_MAGIC:
2452 			bp->b_ops = &xfs_inobt_buf_ops;
2453 			break;
2454 		case XFS_BMAP_CRC_MAGIC:
2455 		case XFS_BMAP_MAGIC:
2456 			bp->b_ops = &xfs_bmbt_buf_ops;
2457 			break;
2458 		case XFS_RMAP_CRC_MAGIC:
2459 			bp->b_ops = &xfs_rmapbt_buf_ops;
2460 			break;
2461 		case XFS_REFC_CRC_MAGIC:
2462 			bp->b_ops = &xfs_refcountbt_buf_ops;
2463 			break;
2464 		default:
2465 			warnmsg = "Bad btree block magic!";
2466 			break;
2467 		}
2468 		break;
2469 	case XFS_BLFT_AGF_BUF:
2470 		if (magic32 != XFS_AGF_MAGIC) {
2471 			warnmsg = "Bad AGF block magic!";
2472 			break;
2473 		}
2474 		bp->b_ops = &xfs_agf_buf_ops;
2475 		break;
2476 	case XFS_BLFT_AGFL_BUF:
2477 		if (magic32 != XFS_AGFL_MAGIC) {
2478 			warnmsg = "Bad AGFL block magic!";
2479 			break;
2480 		}
2481 		bp->b_ops = &xfs_agfl_buf_ops;
2482 		break;
2483 	case XFS_BLFT_AGI_BUF:
2484 		if (magic32 != XFS_AGI_MAGIC) {
2485 			warnmsg = "Bad AGI block magic!";
2486 			break;
2487 		}
2488 		bp->b_ops = &xfs_agi_buf_ops;
2489 		break;
2490 	case XFS_BLFT_UDQUOT_BUF:
2491 	case XFS_BLFT_PDQUOT_BUF:
2492 	case XFS_BLFT_GDQUOT_BUF:
2493 #ifdef CONFIG_XFS_QUOTA
2494 		if (magic16 != XFS_DQUOT_MAGIC) {
2495 			warnmsg = "Bad DQUOT block magic!";
2496 			break;
2497 		}
2498 		bp->b_ops = &xfs_dquot_buf_ops;
2499 #else
2500 		xfs_alert(mp,
2501 	"Trying to recover dquots without QUOTA support built in!");
2502 		ASSERT(0);
2503 #endif
2504 		break;
2505 	case XFS_BLFT_DINO_BUF:
2506 		if (magic16 != XFS_DINODE_MAGIC) {
2507 			warnmsg = "Bad INODE block magic!";
2508 			break;
2509 		}
2510 		bp->b_ops = &xfs_inode_buf_ops;
2511 		break;
2512 	case XFS_BLFT_SYMLINK_BUF:
2513 		if (magic32 != XFS_SYMLINK_MAGIC) {
2514 			warnmsg = "Bad symlink block magic!";
2515 			break;
2516 		}
2517 		bp->b_ops = &xfs_symlink_buf_ops;
2518 		break;
2519 	case XFS_BLFT_DIR_BLOCK_BUF:
2520 		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2521 		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
2522 			warnmsg = "Bad dir block magic!";
2523 			break;
2524 		}
2525 		bp->b_ops = &xfs_dir3_block_buf_ops;
2526 		break;
2527 	case XFS_BLFT_DIR_DATA_BUF:
2528 		if (magic32 != XFS_DIR2_DATA_MAGIC &&
2529 		    magic32 != XFS_DIR3_DATA_MAGIC) {
2530 			warnmsg = "Bad dir data magic!";
2531 			break;
2532 		}
2533 		bp->b_ops = &xfs_dir3_data_buf_ops;
2534 		break;
2535 	case XFS_BLFT_DIR_FREE_BUF:
2536 		if (magic32 != XFS_DIR2_FREE_MAGIC &&
2537 		    magic32 != XFS_DIR3_FREE_MAGIC) {
2538 			warnmsg = "Bad dir3 free magic!";
2539 			break;
2540 		}
2541 		bp->b_ops = &xfs_dir3_free_buf_ops;
2542 		break;
2543 	case XFS_BLFT_DIR_LEAF1_BUF:
2544 		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2545 		    magicda != XFS_DIR3_LEAF1_MAGIC) {
2546 			warnmsg = "Bad dir leaf1 magic!";
2547 			break;
2548 		}
2549 		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2550 		break;
2551 	case XFS_BLFT_DIR_LEAFN_BUF:
2552 		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2553 		    magicda != XFS_DIR3_LEAFN_MAGIC) {
2554 			warnmsg = "Bad dir leafn magic!";
2555 			break;
2556 		}
2557 		bp->b_ops = &xfs_dir3_leafn_buf_ops;
2558 		break;
2559 	case XFS_BLFT_DA_NODE_BUF:
2560 		if (magicda != XFS_DA_NODE_MAGIC &&
2561 		    magicda != XFS_DA3_NODE_MAGIC) {
2562 			warnmsg = "Bad da node magic!";
2563 			break;
2564 		}
2565 		bp->b_ops = &xfs_da3_node_buf_ops;
2566 		break;
2567 	case XFS_BLFT_ATTR_LEAF_BUF:
2568 		if (magicda != XFS_ATTR_LEAF_MAGIC &&
2569 		    magicda != XFS_ATTR3_LEAF_MAGIC) {
2570 			warnmsg = "Bad attr leaf magic!";
2571 			break;
2572 		}
2573 		bp->b_ops = &xfs_attr3_leaf_buf_ops;
2574 		break;
2575 	case XFS_BLFT_ATTR_RMT_BUF:
2576 		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2577 			warnmsg = "Bad attr remote magic!";
2578 			break;
2579 		}
2580 		bp->b_ops = &xfs_attr3_rmt_buf_ops;
2581 		break;
2582 	case XFS_BLFT_SB_BUF:
2583 		if (magic32 != XFS_SB_MAGIC) {
2584 			warnmsg = "Bad SB block magic!";
2585 			break;
2586 		}
2587 		bp->b_ops = &xfs_sb_buf_ops;
2588 		break;
2589 #ifdef CONFIG_XFS_RT
2590 	case XFS_BLFT_RTBITMAP_BUF:
2591 	case XFS_BLFT_RTSUMMARY_BUF:
2592 		/* no magic numbers for verification of RT buffers */
2593 		bp->b_ops = &xfs_rtbuf_ops;
2594 		break;
2595 #endif /* CONFIG_XFS_RT */
2596 	default:
2597 		xfs_warn(mp, "Unknown buffer type %d!",
2598 			 xfs_blft_from_flags(buf_f));
2599 		break;
2600 	}
2601 
2602 	/*
2603 	 * Nothing else to do in the case of a NULL current LSN as this means
2604 	 * the buffer is more recent than the change in the log and will be
2605 	 * skipped.
2606 	 */
2607 	if (current_lsn == NULLCOMMITLSN)
2608 		return;
2609 
2610 	if (warnmsg) {
2611 		xfs_warn(mp, "%s", warnmsg);
2612 		ASSERT(0);
2613 	}
2614 
2615 	/*
2616 	 * We must update the metadata LSN of the buffer as it is written out to
2617 	 * ensure that older transactions never replay over this one and corrupt
2618 	 * the buffer. This can occur if log recovery is interrupted at some
2619 	 * point after the current transaction completes, at which point a
2620 	 * subsequent mount starts recovery from the beginning.
2621 	 *
2622 	 * Write verifiers update the metadata LSN from log items attached to
2623 	 * the buffer. Therefore, initialize a bli purely to carry the LSN to
2624 	 * the verifier. We'll clean it up in our ->iodone() callback.
2625 	 */
2626 	if (bp->b_ops) {
2627 		struct xfs_buf_log_item	*bip;
2628 
2629 		ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
2630 		bp->b_iodone = xlog_recover_iodone;
2631 		xfs_buf_item_init(bp, mp);
2632 		bip = bp->b_fspriv;
2633 		bip->bli_item.li_lsn = current_lsn;
2634 	}
2635 }
2636 
2637 /*
2638  * Perform a 'normal' buffer recovery.  Each logged region of the
2639  * buffer should be copied over the corresponding region in the
2640  * given buffer.  The bitmap in the buf log format structure indicates
2641  * where to place the logged data.
2642  */
2643 STATIC void
2644 xlog_recover_do_reg_buffer(
2645 	struct xfs_mount	*mp,
2646 	xlog_recover_item_t	*item,
2647 	struct xfs_buf		*bp,
2648 	xfs_buf_log_format_t	*buf_f,
2649 	xfs_lsn_t		current_lsn)
2650 {
2651 	int			i;
2652 	int			bit;
2653 	int			nbits;
2654 	int                     error;
2655 
2656 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2657 
2658 	bit = 0;
2659 	i = 1;  /* 0 is the buf format structure */
2660 	while (1) {
2661 		bit = xfs_next_bit(buf_f->blf_data_map,
2662 				   buf_f->blf_map_size, bit);
2663 		if (bit == -1)
2664 			break;
2665 		nbits = xfs_contig_bits(buf_f->blf_data_map,
2666 					buf_f->blf_map_size, bit);
2667 		ASSERT(nbits > 0);
2668 		ASSERT(item->ri_buf[i].i_addr != NULL);
2669 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2670 		ASSERT(BBTOB(bp->b_io_length) >=
2671 		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2672 
2673 		/*
2674 		 * The dirty regions logged in the buffer, even though
2675 		 * contiguous, may span multiple chunks. This is because the
2676 		 * dirty region may span a physical page boundary in a buffer
2677 		 * and hence be split into two separate vectors for writing into
2678 		 * the log. Hence we need to trim nbits back to the length of
2679 		 * the current region being copied out of the log.
2680 		 */
2681 		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2682 			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2683 
2684 		/*
2685 		 * Do a sanity check if this is a dquot buffer. Just checking
2686 		 * the first dquot in the buffer should do. XXX: This is
2687 		 * probably a good thing to do for other buf types also.
2688 		 */
2689 		error = 0;
2690 		if (buf_f->blf_flags &
2691 		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2692 			if (item->ri_buf[i].i_addr == NULL) {
2693 				xfs_alert(mp,
2694 					"XFS: NULL dquot in %s.", __func__);
2695 				goto next;
2696 			}
2697 			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2698 				xfs_alert(mp,
2699 					"XFS: dquot too small (%d) in %s.",
2700 					item->ri_buf[i].i_len, __func__);
2701 				goto next;
2702 			}
2703 			error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
2704 					       -1, 0, XFS_QMOPT_DOWARN,
2705 					       "dquot_buf_recover");
2706 			if (error)
2707 				goto next;
2708 		}
2709 
2710 		memcpy(xfs_buf_offset(bp,
2711 			(uint)bit << XFS_BLF_SHIFT),	/* dest */
2712 			item->ri_buf[i].i_addr,		/* source */
2713 			nbits<<XFS_BLF_SHIFT);		/* length */
2714  next:
2715 		i++;
2716 		bit += nbits;
2717 	}
2718 
2719 	/* Shouldn't be any more regions */
2720 	ASSERT(i == item->ri_total);
2721 
2722 	xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
2723 }
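/*
 * Worked example of the copy loop above (hypothetical bitmap): a
 * blf_data_map with bits 0-1 and 5-6 set describes two dirty regions.
 * ri_buf[1] is copied to buffer offset 0 for 2 << XFS_BLF_SHIFT = 256
 * bytes and ri_buf[2] to offset 5 << XFS_BLF_SHIFT = 640; ri_buf[0]
 * holds the buf log format structure itself and is never copied.
 */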
2724 
2725 /*
2726  * Perform a dquot buffer recovery.
2727  * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2728  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2729  * Else, treat it as a regular buffer and do recovery.
2730  *
2731  * Return false if the buffer was tossed and true if we recovered the buffer to
2732  * indicate to the caller if the buffer needs writing.
2733  */
2734 STATIC bool
2735 xlog_recover_do_dquot_buffer(
2736 	struct xfs_mount		*mp,
2737 	struct xlog			*log,
2738 	struct xlog_recover_item	*item,
2739 	struct xfs_buf			*bp,
2740 	struct xfs_buf_log_format	*buf_f)
2741 {
2742 	uint			type;
2743 
2744 	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2745 
2746 	/*
2747 	 * Filesystems are required to send in quota flags at mount time.
2748 	 */
2749 	if (!mp->m_qflags)
2750 		return false;
2751 
2752 	type = 0;
2753 	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2754 		type |= XFS_DQ_USER;
2755 	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2756 		type |= XFS_DQ_PROJ;
2757 	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2758 		type |= XFS_DQ_GROUP;
2759 	/*
2760 	 * This type of quota was turned off, so ignore this buffer.
2761 	 */
2762 	if (log->l_quotaoffs_flag & type)
2763 		return false;
2764 
2765 	xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
2766 	return true;
2767 }
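/*
 * For example, if pass 1 saw a user quotaoff then l_quotaoffs_flag has
 * XFS_DQ_USER set, so a buffer logged with XFS_BLF_UDQUOT_BUF is tossed
 * here (we return false) while a group dquot buffer is still recovered
 * as a regular buffer.
 */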
2768 
2769 /*
2770  * This routine replays a modification made to a buffer at runtime.
2771  * There are actually two types of buffer, regular and inode, which
2772  * are handled differently.  Inode buffers are special in that
2773  * we only recover a specific set of data from them, namely
2774  * the inode di_next_unlinked fields.  This is because all other inode
2775  * data is actually logged via inode records and any data we replay
2776  * here which overlaps that may be stale.
2777  *
2778  * When meta-data buffers are freed at run time we log a buffer item
2779  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2780  * of the buffer in the log should not be replayed at recovery time.
2781  * This is so that if the blocks covered by the buffer are reused for
2782  * file data before we crash we don't end up replaying old, freed
2783  * meta-data into a user's file.
2784  *
2785  * To handle the cancellation of buffer log items, we make two passes
2786  * over the log during recovery.  During the first we build a table of
2787  * those buffers which have been cancelled, and during the second we
2788  * only replay those buffers which do not have corresponding cancel
2789  * records in the table.  See xlog_recover_buffer_pass[1,2] above
2790  * for more details on the implementation of the table of cancel records.
2791  */
2792 STATIC int
2793 xlog_recover_buffer_pass2(
2794 	struct xlog			*log,
2795 	struct list_head		*buffer_list,
2796 	struct xlog_recover_item	*item,
2797 	xfs_lsn_t			current_lsn)
2798 {
2799 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2800 	xfs_mount_t		*mp = log->l_mp;
2801 	xfs_buf_t		*bp;
2802 	int			error;
2803 	uint			buf_flags;
2804 	xfs_lsn_t		lsn;
2805 
2806 	/*
2807 	 * In this pass we only want to recover all the buffers which have
2808 	 * not been cancelled and are not cancellation buffers themselves.
2809 	 */
2810 	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2811 			buf_f->blf_len, buf_f->blf_flags)) {
2812 		trace_xfs_log_recover_buf_cancel(log, buf_f);
2813 		return 0;
2814 	}
2815 
2816 	trace_xfs_log_recover_buf_recover(log, buf_f);
2817 
2818 	buf_flags = 0;
2819 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2820 		buf_flags |= XBF_UNMAPPED;
2821 
2822 	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2823 			  buf_flags, NULL);
2824 	if (!bp)
2825 		return -ENOMEM;
2826 	error = bp->b_error;
2827 	if (error) {
2828 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2829 		goto out_release;
2830 	}
2831 
2832 	/*
2833 	 * Recover the buffer only if we get an LSN from it and it's less than
2834 	 * the lsn of the transaction we are replaying.
2835 	 *
2836 	 * Note that we have to be extremely careful of readahead here.
2837 	 * Readahead does not attach verifiers to the buffers, so if we don't
2838 	 * actually do any replay after readahead because the LSN we found
2839 	 * in the buffer is more recent than the current transaction, then we
2840 	 * need to attach the verifier directly. Failure to do so means that
2841 	 * future recovery actions (e.g. EFI and unlinked list recovery) can
2842 	 * operate on the buffers without the verifier attached. This
2843 	 * can lead to blocks on disk having the correct content but a stale
2844 	 * CRC.
2845 	 *
2846 	 * It is safe to assume these clean buffers are currently up to date.
2847 	 * If the buffer is dirtied by a later transaction being replayed, then
2848 	 * the verifier will be reset to match whatever recovery turns that
2849 	 * buffer into.
2850 	 */
2851 	lsn = xlog_recover_get_buf_lsn(mp, bp);
2852 	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2853 		trace_xfs_log_recover_buf_skip(log, buf_f);
2854 		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
2855 		goto out_release;
2856 	}
2857 
2858 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2859 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2860 		if (error)
2861 			goto out_release;
2862 	} else if (buf_f->blf_flags &
2863 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2864 		bool	dirty;
2865 
2866 		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2867 		if (!dirty)
2868 			goto out_release;
2869 	} else {
2870 		xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
2871 	}
2872 
2873 	/*
2874 	 * Perform delayed write on the buffer.  Asynchronous writes will be
2875 	 * slower when taking into account all the buffers to be flushed.
2876 	 *
2877 	 * Also make sure that only inode buffers with good sizes stay in
2878 	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
2879 	 * or mp->m_inode_cluster_size bytes, whichever is bigger.  The inode
2880 	 * buffers in the log can be a different size if the log was generated
2881 	 * by an older kernel using unclustered inode buffers or a newer kernel
2882 	 * running with a different inode cluster size.  Regardless, if the
2883 	 * inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
2884 	 * for *our* value of mp->m_inode_cluster_size, then we need to keep
2885 	 * the buffer out of the buffer cache so that the buffer won't
2886 	 * overlap with future reads of those inodes.
2887 	 */
2888 	if (XFS_DINODE_MAGIC ==
2889 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2890 	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2891 			(uint32_t)log->l_mp->m_inode_cluster_size))) {
2892 		xfs_buf_stale(bp);
2893 		error = xfs_bwrite(bp);
2894 	} else {
2895 		ASSERT(bp->b_target->bt_mount == mp);
2896 		bp->b_iodone = xlog_recover_iodone;
2897 		xfs_buf_delwri_queue(bp, buffer_list);
2898 	}
2899 
2900 out_release:
2901 	xfs_buf_relse(bp);
2902 	return error;
2903 }
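/*
 * Worked example of the inode buffer size check above (hypothetical
 * geometry): with a 4k block size and an 8k m_inode_cluster_size, only
 * 8k inode buffers are queued for delayed write. A 4k inode buffer
 * replayed from a log written by an older kernel using unclustered
 * inode buffers is marked stale and written synchronously so it cannot
 * linger in the cache and overlap a later 8k cluster read.
 */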
2904 
2905 /*
2906  * Inode fork owner changes
2907  *
2908  * If we have been told that we have to reparent the inode fork, it's because an
2909  * extent swap operation on a CRC enabled filesystem has been done and we are
2910  * replaying it. We need to walk the BMBT of the appropriate fork and change the
2911  * owners of it.
2912  *
2913  * The complexity here is that we don't have an inode context to work with, so
2914  * after we've replayed the inode we need to instantiate one.  This is where the
2915  * fun begins.
2916  *
2917  * We are in the middle of log recovery, so we can't run transactions. That
2918  * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2919  * that will result in the corresponding iput() running the inode through
2920  * xfs_inactive(). If we've just replayed an inode core that changes the link
2921  * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2922  * transactions (bad!).
2923  *
2924  * So, to avoid this, we instantiate an inode directly from the inode core we've
2925  * just recovered. We have the buffer still locked, and all we really need to
2926  * instantiate is the inode core and the forks being modified. We can do this
2927  * manually, then run the inode btree owner change, and then tear down the
2928  * xfs_inode without having to run any transactions at all.
2929  *
2930  * Also, because we don't have a transaction context available here but need
2931  * to gather all the buffers we modify for writeback, we pass the buffer_list
2932  * for the operation to use instead.
2933  */
2934 
2935 STATIC int
2936 xfs_recover_inode_owner_change(
2937 	struct xfs_mount	*mp,
2938 	struct xfs_dinode	*dip,
2939 	struct xfs_inode_log_format *in_f,
2940 	struct list_head	*buffer_list)
2941 {
2942 	struct xfs_inode	*ip;
2943 	int			error;
2944 
2945 	ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2946 
2947 	ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2948 	if (!ip)
2949 		return -ENOMEM;
2950 
2951 	/* instantiate the inode */
2952 	xfs_inode_from_disk(ip, dip);
2953 	ASSERT(ip->i_d.di_version >= 3);
2954 
2955 	error = xfs_iformat_fork(ip, dip);
2956 	if (error)
2957 		goto out_free_ip;
2958 
2959 
2960 	if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2961 		ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2962 		error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2963 					      ip->i_ino, buffer_list);
2964 		if (error)
2965 			goto out_free_ip;
2966 	}
2967 
2968 	if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2969 		ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2970 		error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2971 					      ip->i_ino, buffer_list);
2972 		if (error)
2973 			goto out_free_ip;
2974 	}
2975 
2976 out_free_ip:
2977 	xfs_inode_free(ip);
2978 	return error;
2979 }
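/*
 * For illustration (hypothetical inode number): an extent swap on the
 * data fork of inode 137 logs XFS_ILOG_DOWNER with XFS_ILOG_DBROOT, so
 * replay lands here and xfs_bmbt_change_owner() rewrites the owner
 * field of every data fork BMBT block to 137, queueing the modified
 * blocks on buffer_list for writeback.
 */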
2980 
2981 STATIC int
2982 xlog_recover_inode_pass2(
2983 	struct xlog			*log,
2984 	struct list_head		*buffer_list,
2985 	struct xlog_recover_item	*item,
2986 	xfs_lsn_t			current_lsn)
2987 {
2988 	struct xfs_inode_log_format	*in_f;
2989 	xfs_mount_t		*mp = log->l_mp;
2990 	xfs_buf_t		*bp;
2991 	xfs_dinode_t		*dip;
2992 	int			len;
2993 	char			*src;
2994 	char			*dest;
2995 	int			error;
2996 	int			attr_index;
2997 	uint			fields;
2998 	struct xfs_log_dinode	*ldip;
2999 	uint			isize;
3000 	int			need_free = 0;
3001 
3002 	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3003 		in_f = item->ri_buf[0].i_addr;
3004 	} else {
3005 		in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), KM_SLEEP);
3006 		need_free = 1;
3007 		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
3008 		if (error)
3009 			goto error;
3010 	}
3011 
3012 	/*
3013 	 * Inode buffers can be freed; look out for that case
3014 	 * and do not replay the inode.
3015 	 */
3016 	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
3017 					in_f->ilf_len, 0)) {
3018 		error = 0;
3019 		trace_xfs_log_recover_inode_cancel(log, in_f);
3020 		goto error;
3021 	}
3022 	trace_xfs_log_recover_inode_recover(log, in_f);
3023 
3024 	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
3025 			  &xfs_inode_buf_ops);
3026 	if (!bp) {
3027 		error = -ENOMEM;
3028 		goto error;
3029 	}
3030 	error = bp->b_error;
3031 	if (error) {
3032 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
3033 		goto out_release;
3034 	}
3035 	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
3036 	dip = xfs_buf_offset(bp, in_f->ilf_boffset);
3037 
3038 	/*
3039 	 * Make sure the place we're flushing out to really looks
3040 	 * like an inode!
3041 	 */
3042 	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
3043 		xfs_alert(mp,
3044 	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
3045 			__func__, dip, bp, in_f->ilf_ino);
3046 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
3047 				 XFS_ERRLEVEL_LOW, mp);
3048 		error = -EFSCORRUPTED;
3049 		goto out_release;
3050 	}
3051 	ldip = item->ri_buf[1].i_addr;
3052 	if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
3053 		xfs_alert(mp,
3054 			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
3055 			__func__, item, in_f->ilf_ino);
3056 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
3057 				 XFS_ERRLEVEL_LOW, mp);
3058 		error = -EFSCORRUPTED;
3059 		goto out_release;
3060 	}
3061 
3062 	/*
3063 	 * If the inode has an LSN in it, recover the inode only if it's less
3064 	 * than the lsn of the transaction we are replaying. Note: we still
3065 	 * need to replay an owner change even though the inode is more recent
3066 	 * than the transaction as there is no guarantee that all the btree
3067 	 * blocks are more recent than this transaction, too.
3068 	 */
3069 	if (dip->di_version >= 3) {
3070 		xfs_lsn_t	lsn = be64_to_cpu(dip->di_lsn);
3071 
3072 		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3073 			trace_xfs_log_recover_inode_skip(log, in_f);
3074 			error = 0;
3075 			goto out_owner_change;
3076 		}
3077 	}
3078 
3079 	/*
3080 	 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
3081 	 * are transactional and if ordering is necessary we can determine that
3082 	 * more accurately by the LSN field in the V3 inode core. Don't trust
3083 	 * the inode versions as we might be changing them here - use the
3084 	 * superblock flag to determine whether we need to look at di_flushiter
3085 	 * to skip replay when the on disk inode is newer than the log one.
3086 	 */
3087 	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
3088 	    ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
3089 		/*
3090 		 * Deal with the wrap case, DI_MAX_FLUSH is less
3091 		 * than smaller numbers
3092 		 */
3093 		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
3094 		    ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
3095 			/* do nothing */
3096 		} else {
3097 			trace_xfs_log_recover_inode_skip(log, in_f);
3098 			error = 0;
3099 			goto out_release;
3100 		}
3101 	}
3102 
3103 	/* Take the opportunity to reset the flush iteration count */
3104 	ldip->di_flushiter = 0;
3105 
3106 	if (unlikely(S_ISREG(ldip->di_mode))) {
3107 		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3108 		    (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
3109 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
3110 					 XFS_ERRLEVEL_LOW, mp, ldip);
3111 			xfs_alert(mp,
3112 		"%s: Bad regular inode log record, rec ptr 0x%p, "
3113 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
3114 				__func__, item, dip, bp, in_f->ilf_ino);
3115 			error = -EFSCORRUPTED;
3116 			goto out_release;
3117 		}
3118 	} else if (unlikely(S_ISDIR(ldip->di_mode))) {
3119 		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3120 		    (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
3121 		    (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
3122 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
3123 					     XFS_ERRLEVEL_LOW, mp, ldip);
3124 			xfs_alert(mp,
3125 		"%s: Bad dir inode log record, rec ptr 0x%p, "
3126 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
3127 				__func__, item, dip, bp, in_f->ilf_ino);
3128 			error = -EFSCORRUPTED;
3129 			goto out_release;
3130 		}
3131 	}
3132 	if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
3133 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
3134 				     XFS_ERRLEVEL_LOW, mp, ldip);
3135 		xfs_alert(mp,
3136 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
3137 	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
3138 			__func__, item, dip, bp, in_f->ilf_ino,
3139 			ldip->di_nextents + ldip->di_anextents,
3140 			ldip->di_nblocks);
3141 		error = -EFSCORRUPTED;
3142 		goto out_release;
3143 	}
3144 	if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
3145 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
3146 				     XFS_ERRLEVEL_LOW, mp, ldip);
3147 		xfs_alert(mp,
3148 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
3149 	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
3150 			item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
3151 		error = -EFSCORRUPTED;
3152 		goto out_release;
3153 	}
3154 	isize = xfs_log_dinode_size(ldip->di_version);
3155 	if (unlikely(item->ri_buf[1].i_len > isize)) {
3156 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3157 				     XFS_ERRLEVEL_LOW, mp, ldip);
3158 		xfs_alert(mp,
3159 			"%s: Bad inode log record length %d, rec ptr 0x%p",
3160 			__func__, item->ri_buf[1].i_len, item);
3161 		error = -EFSCORRUPTED;
3162 		goto out_release;
3163 	}
3164 
3165 	/* recover the log dinode inode into the on disk inode */
3166 	xfs_log_dinode_to_disk(ldip, dip);
3167 
3168 	/* the rest is in on-disk format */
3169 	if (item->ri_buf[1].i_len > isize) {
3170 		memcpy((char *)dip + isize,
3171 			item->ri_buf[1].i_addr + isize,
3172 			item->ri_buf[1].i_len - isize);
3173 	}
3174 
3175 	fields = in_f->ilf_fields;
3176 	if (fields & XFS_ILOG_DEV)
3177 		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3178 
3179 	if (in_f->ilf_size == 2)
3180 		goto out_owner_change;
3181 	len = item->ri_buf[2].i_len;
3182 	src = item->ri_buf[2].i_addr;
3183 	ASSERT(in_f->ilf_size <= 4);
3184 	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3185 	ASSERT(!(fields & XFS_ILOG_DFORK) ||
3186 	       (len == in_f->ilf_dsize));
3187 
3188 	switch (fields & XFS_ILOG_DFORK) {
3189 	case XFS_ILOG_DDATA:
3190 	case XFS_ILOG_DEXT:
3191 		memcpy(XFS_DFORK_DPTR(dip), src, len);
3192 		break;
3193 
3194 	case XFS_ILOG_DBROOT:
3195 		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3196 				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3197 				 XFS_DFORK_DSIZE(dip, mp));
3198 		break;
3199 
3200 	default:
3201 		/*
3202 		 * There are no data fork flags set.
3203 		 */
3204 		ASSERT((fields & XFS_ILOG_DFORK) == 0);
3205 		break;
3206 	}
3207 
3208 	/*
3209 	 * If we logged any attribute data, recover it.  There may or
3210 	 * may not have been any other non-core data logged in this
3211 	 * transaction.
3212 	 */
3213 	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3214 		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3215 			attr_index = 3;
3216 		} else {
3217 			attr_index = 2;
3218 		}
3219 		len = item->ri_buf[attr_index].i_len;
3220 		src = item->ri_buf[attr_index].i_addr;
3221 		ASSERT(len == in_f->ilf_asize);
3222 
3223 		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3224 		case XFS_ILOG_ADATA:
3225 		case XFS_ILOG_AEXT:
3226 			dest = XFS_DFORK_APTR(dip);
3227 			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3228 			memcpy(dest, src, len);
3229 			break;
3230 
3231 		case XFS_ILOG_ABROOT:
3232 			dest = XFS_DFORK_APTR(dip);
3233 			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3234 					 len, (xfs_bmdr_block_t*)dest,
3235 					 XFS_DFORK_ASIZE(dip, mp));
3236 			break;
3237 
3238 		default:
3239 			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3240 			ASSERT(0);
3241 			error = -EIO;
3242 			goto out_release;
3243 		}
3244 	}
3245 
3246 out_owner_change:
3247 	if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
3248 		error = xfs_recover_inode_owner_change(mp, dip, in_f,
3249 						       buffer_list);
3250 	/* re-generate the checksum. */
3251 	xfs_dinode_calc_crc(log->l_mp, dip);
3252 
3253 	ASSERT(bp->b_target->bt_mount == mp);
3254 	bp->b_iodone = xlog_recover_iodone;
3255 	xfs_buf_delwri_queue(bp, buffer_list);
3256 
3257 out_release:
3258 	xfs_buf_relse(bp);
3259 error:
3260 	if (need_free)
3261 		kmem_free(in_f);
3262 	return error;
3263 }
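/*
 * Worked example of the di_flushiter wrap handling in
 * xlog_recover_inode_pass2() above (v1/v2 inodes, hypothetical counts):
 * an on-disk flushiter of DI_MAX_FLUSH (0xffff) against a logged value
 * of 3 means the counter wrapped between the log write and the inode
 * flush, so the record is replayed; an on-disk value of 10 against a
 * logged value of 3 means the disk copy is genuinely newer and the
 * record is skipped.
 */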
3264 
3265 /*
3266  * Recover QUOTAOFF records. We simply make a note of it in the xlog
3267  * structure, so that we know not to do any dquot item or dquot buffer
3268  * recovery of that type.
3269  */
3270 STATIC int
3271 xlog_recover_quotaoff_pass1(
3272 	struct xlog			*log,
3273 	struct xlog_recover_item	*item)
3274 {
3275 	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
3276 	ASSERT(qoff_f);
3277 
3278 	/*
3279 	 * The log item format's flag tells us if this was user quotaoff,
3280 	 * group/project quotaoff or both.
3281 	 */
3282 	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3283 		log->l_quotaoffs_flag |= XFS_DQ_USER;
3284 	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3285 		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3286 	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3287 		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3288 
3289 	return 0;
3290 }
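/*
 * For example, a quotaoff item with both XFS_UQUOTA_ACCT and
 * XFS_GQUOTA_ACCT set in qf_flags leaves XFS_DQ_USER | XFS_DQ_GROUP in
 * l_quotaoffs_flag, so pass 2 drops every user and group dquot item and
 * dquot buffer in the log - including those logged before the quotaoff,
 * since pass 1 scans the whole log first.
 */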
3291 
3292 /*
3293  * Recover a dquot record
3294  */
3295 STATIC int
3296 xlog_recover_dquot_pass2(
3297 	struct xlog			*log,
3298 	struct list_head		*buffer_list,
3299 	struct xlog_recover_item	*item,
3300 	xfs_lsn_t			current_lsn)
3301 {
3302 	xfs_mount_t		*mp = log->l_mp;
3303 	xfs_buf_t		*bp;
3304 	struct xfs_disk_dquot	*ddq, *recddq;
3305 	int			error;
3306 	xfs_dq_logformat_t	*dq_f;
3307 	uint			type;
3308 
3309 
3310 	/*
3311 	 * Filesystems are required to send in quota flags at mount time.
3312 	 */
3313 	if (mp->m_qflags == 0)
3314 		return 0;
3315 
3316 	recddq = item->ri_buf[1].i_addr;
3317 	if (recddq == NULL) {
3318 		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3319 		return -EIO;
3320 	}
3321 	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3322 		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3323 			item->ri_buf[1].i_len, __func__);
3324 		return -EIO;
3325 	}
3326 
3327 	/*
3328 	 * This type of quotas was turned off, so ignore this record.
3329 	 * This type of quota was turned off, so ignore this record.
3330 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3331 	ASSERT(type);
3332 	if (log->l_quotaoffs_flag & type)
3333 		return 0;
3334 
3335 	/*
3336 	 * At this point we know that quota was _not_ turned off.
3337 	 * Since the mount flags are not indicating to us otherwise, this
3338 	 * must mean that quota is on, and the dquot needs to be replayed.
3339 	 * Remember that we may not have fully recovered the superblock yet,
3340 	 * so we can't do the usual trick of looking at the SB quota bits.
3341 	 *
3342 	 * The other possibility, of course, is that the quota subsystem was
3343 	 * removed since the last mount - ENOSYS.
3344 	 */
3345 	dq_f = item->ri_buf[0].i_addr;
3346 	ASSERT(dq_f);
3347 	error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3348 			   "xlog_recover_dquot_pass2 (log copy)");
3349 	if (error)
3350 		return -EIO;
3351 	ASSERT(dq_f->qlf_len == 1);
3352 
3353 	/*
3354 	 * At this point we are assuming that the dquots have been allocated
3355 	 * and hence the buffer has valid dquots stamped in it. It should,
3356 	 * therefore, pass verifier validation. If the dquot is bad, then
3357 	 * we'll return an error here, so we don't need to specifically check
3358 	 * the dquot in the buffer after the verifier has run.
3359 	 */
3360 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3361 				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3362 				   &xfs_dquot_buf_ops);
3363 	if (error)
3364 		return error;
3365 
3366 	ASSERT(bp);
3367 	ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3368 
3369 	/*
3370 	 * If the dquot has an LSN in it, recover the dquot only if that LSN
3371 	 * is less than the LSN of the transaction we are replaying.
3372 	 */
3373 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
3374 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3375 		xfs_lsn_t	lsn = be64_to_cpu(dqb->dd_lsn);
3376 
3377 		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
3378 			goto out_release;
3380 	}
3381 
3382 	memcpy(ddq, recddq, item->ri_buf[1].i_len);
3383 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
3384 		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3385 				 XFS_DQUOT_CRC_OFF);
3386 	}
3387 
3388 	ASSERT(dq_f->qlf_size == 2);
3389 	ASSERT(bp->b_target->bt_mount == mp);
3390 	bp->b_iodone = xlog_recover_iodone;
3391 	xfs_buf_delwri_queue(bp, buffer_list);
3392 
3393 out_release:
3394 	xfs_buf_relse(bp);
3395 	return 0;
3396 }
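
/*
 * A note on the LSN comparison above: an xfs_lsn_t packs the log cycle
 * number into its high 32 bits and the log block number into its low 32
 * bits, so XFS_LSN_CMP() orders LSNs by cycle first, then by block (with
 * illustrative values, cycle 5/block 0x10 is newer than cycle 4/block
 * 0x9000). A dquot whose on-disk LSN is at or beyond the LSN of the
 * transaction being replayed already contains this or a newer change and
 * must not be overwritten with stale log data.
 */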
3397 
3398 /*
3399  * This routine is called to create an in-core extent free intent
3400  * item from the efi format structure which was logged on disk.
3401  * It allocates an in-core efi, copies the extents from the format
3402  * structure into it, and adds the efi to the AIL with the given
3403  * LSN.
3404  */
3405 STATIC int
3406 xlog_recover_efi_pass2(
3407 	struct xlog			*log,
3408 	struct xlog_recover_item	*item,
3409 	xfs_lsn_t			lsn)
3410 {
3411 	int				error;
3412 	struct xfs_mount		*mp = log->l_mp;
3413 	struct xfs_efi_log_item		*efip;
3414 	struct xfs_efi_log_format	*efi_formatp;
3415 
3416 	efi_formatp = item->ri_buf[0].i_addr;
3417 
3418 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3419 	error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3420 	if (error) {
3421 		xfs_efi_item_free(efip);
3422 		return error;
3423 	}
3424 	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3425 
3426 	spin_lock(&log->l_ailp->xa_lock);
3427 	/*
3428 	 * The EFI has two references. One for the EFD and one for EFI to ensure
3429 	 * it makes it into the AIL. Insert the EFI into the AIL directly and
3430 	 * drop the EFI reference. Note that xfs_trans_ail_update() drops the
3431 	 * AIL lock.
3432 	 */
3433 	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3434 	xfs_efi_release(efip);
3435 	return 0;
3436 }
3437 
3438 
3439 /*
3440  * This routine is called when an EFD format structure is found in a committed
3441  * transaction in the log. Its purpose is to cancel the corresponding EFI if it
3442  * was still in the log. To do this it searches the AIL for the EFI with an id
3443  * equal to that in the EFD format structure. If we find it we drop the EFD
3444  * reference, which removes the EFI from the AIL and frees it.
3445  */
3446 STATIC int
3447 xlog_recover_efd_pass2(
3448 	struct xlog			*log,
3449 	struct xlog_recover_item	*item)
3450 {
3451 	xfs_efd_log_format_t	*efd_formatp;
3452 	xfs_efi_log_item_t	*efip = NULL;
3453 	xfs_log_item_t		*lip;
3454 	uint64_t		efi_id;
3455 	struct xfs_ail_cursor	cur;
3456 	struct xfs_ail		*ailp = log->l_ailp;
3457 
3458 	efd_formatp = item->ri_buf[0].i_addr;
3459 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3460 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3461 	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3462 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3463 	efi_id = efd_formatp->efd_efi_id;
3464 
3465 	/*
3466 	 * Search for the EFI with the id in the EFD format structure in the
3467 	 * AIL.
3468 	 */
3469 	spin_lock(&ailp->xa_lock);
3470 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3471 	while (lip != NULL) {
3472 		if (lip->li_type == XFS_LI_EFI) {
3473 			efip = (xfs_efi_log_item_t *)lip;
3474 			if (efip->efi_format.efi_id == efi_id) {
3475 				/*
3476 				 * Drop the EFD reference to the EFI. This
3477 				 * removes the EFI from the AIL and frees it.
3478 				 */
3479 				spin_unlock(&ailp->xa_lock);
3480 				xfs_efi_release(efip);
3481 				spin_lock(&ailp->xa_lock);
3482 				break;
3483 			}
3484 		}
3485 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3486 	}
3487 
3488 	xfs_trans_ail_cursor_done(&cur);
3489 	spin_unlock(&ailp->xa_lock);
3490 
3491 	return 0;
3492 }
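
/*
 * The EFI/EFD pairing above is the template for all the deferred intent
 * item types that follow: RUI/RUD (rmap updates), CUI/CUD (refcount
 * updates) and BUI/BUD (bmap updates) are recovered the same way - the
 * intent is inserted into the AIL in pass 2, and a matching done item
 * cancels it by dropping the reference that pins it there.
 */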
3493 
3494 /*
3495  * This routine is called to create an in-core extent rmap update
3496  * item from the rui format structure which was logged on disk.
3497  * It allocates an in-core rui, copies the extents from the format
3498  * structure into it, and adds the rui to the AIL with the given
3499  * LSN.
3500  */
3501 STATIC int
3502 xlog_recover_rui_pass2(
3503 	struct xlog			*log,
3504 	struct xlog_recover_item	*item,
3505 	xfs_lsn_t			lsn)
3506 {
3507 	int				error;
3508 	struct xfs_mount		*mp = log->l_mp;
3509 	struct xfs_rui_log_item		*ruip;
3510 	struct xfs_rui_log_format	*rui_formatp;
3511 
3512 	rui_formatp = item->ri_buf[0].i_addr;
3513 
3514 	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
3515 	error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
3516 	if (error) {
3517 		xfs_rui_item_free(ruip);
3518 		return error;
3519 	}
3520 	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
3521 
3522 	spin_lock(&log->l_ailp->xa_lock);
3523 	/*
3524 	 * The RUI has two references. One for the RUD and one for RUI to ensure
3525 	 * it makes it into the AIL. Insert the RUI into the AIL directly and
3526 	 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
3527 	 * AIL lock.
3528 	 */
3529 	xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
3530 	xfs_rui_release(ruip);
3531 	return 0;
3532 }
3533 
3534 
3535 /*
3536  * This routine is called when an RUD format structure is found in a committed
3537  * transaction in the log. Its purpose is to cancel the corresponding RUI if it
3538  * was still in the log. To do this it searches the AIL for the RUI with an id
3539  * equal to that in the RUD format structure. If we find it we drop the RUD
3540  * reference, which removes the RUI from the AIL and frees it.
3541  */
3542 STATIC int
3543 xlog_recover_rud_pass2(
3544 	struct xlog			*log,
3545 	struct xlog_recover_item	*item)
3546 {
3547 	struct xfs_rud_log_format	*rud_formatp;
3548 	struct xfs_rui_log_item		*ruip = NULL;
3549 	struct xfs_log_item		*lip;
3550 	uint64_t			rui_id;
3551 	struct xfs_ail_cursor		cur;
3552 	struct xfs_ail			*ailp = log->l_ailp;
3553 
3554 	rud_formatp = item->ri_buf[0].i_addr;
3555 	ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
3556 	rui_id = rud_formatp->rud_rui_id;
3557 
3558 	/*
3559 	 * Search for the RUI with the id in the RUD format structure in the
3560 	 * AIL.
3561 	 */
3562 	spin_lock(&ailp->xa_lock);
3563 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3564 	while (lip != NULL) {
3565 		if (lip->li_type == XFS_LI_RUI) {
3566 			ruip = (struct xfs_rui_log_item *)lip;
3567 			if (ruip->rui_format.rui_id == rui_id) {
3568 				/*
3569 				 * Drop the RUD reference to the RUI. This
3570 				 * removes the RUI from the AIL and frees it.
3571 				 */
3572 				spin_unlock(&ailp->xa_lock);
3573 				xfs_rui_release(ruip);
3574 				spin_lock(&ailp->xa_lock);
3575 				break;
3576 			}
3577 		}
3578 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3579 	}
3580 
3581 	xfs_trans_ail_cursor_done(&cur);
3582 	spin_unlock(&ailp->xa_lock);
3583 
3584 	return 0;
3585 }
3586 
3587 /*
3588  * Copy a CUI format buffer from the given buf, and into the destination
3589  * CUI format structure.  The CUI/CUD items were designed not to need any
3590  * special alignment handling.
3591  */
3592 static int
3593 xfs_cui_copy_format(
3594 	struct xfs_log_iovec		*buf,
3595 	struct xfs_cui_log_format	*dst_cui_fmt)
3596 {
3597 	struct xfs_cui_log_format	*src_cui_fmt;
3598 	uint				len;
3599 
3600 	src_cui_fmt = buf->i_addr;
3601 	len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
3602 
3603 	if (buf->i_len == len) {
3604 		memcpy(dst_cui_fmt, src_cui_fmt, len);
3605 		return 0;
3606 	}
3607 	return -EFSCORRUPTED;
3608 }
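
/*
 * The exact-length check above is deliberate: xfs_cui_log_format_sizeof()
 * returns the expected size of the format header plus cui_nextents extent
 * records, so any other region length indicates a torn or corrupted log
 * item and is rejected as -EFSCORRUPTED rather than copied.
 */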
3609 
3610 /*
3611  * This routine is called to create an in-core extent refcount update
3612  * item from the cui format structure which was logged on disk.
3613  * It allocates an in-core cui, copies the extents from the format
3614  * structure into it, and adds the cui to the AIL with the given
3615  * LSN.
3616  */
3617 STATIC int
3618 xlog_recover_cui_pass2(
3619 	struct xlog			*log,
3620 	struct xlog_recover_item	*item,
3621 	xfs_lsn_t			lsn)
3622 {
3623 	int				error;
3624 	struct xfs_mount		*mp = log->l_mp;
3625 	struct xfs_cui_log_item		*cuip;
3626 	struct xfs_cui_log_format	*cui_formatp;
3627 
3628 	cui_formatp = item->ri_buf[0].i_addr;
3629 
3630 	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
3631 	error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
3632 	if (error) {
3633 		xfs_cui_item_free(cuip);
3634 		return error;
3635 	}
3636 	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
3637 
3638 	spin_lock(&log->l_ailp->xa_lock);
3639 	/*
3640 	 * The CUI has two references. One for the CUD and one for CUI to ensure
3641 	 * it makes it into the AIL. Insert the CUI into the AIL directly and
3642 	 * drop the CUI reference. Note that xfs_trans_ail_update() drops the
3643 	 * AIL lock.
3644 	 */
3645 	xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
3646 	xfs_cui_release(cuip);
3647 	return 0;
3648 }
3649 
3650 
3651 /*
3652  * This routine is called when a CUD format structure is found in a committed
3653  * transaction in the log. Its purpose is to cancel the corresponding CUI if it
3654  * was still in the log. To do this it searches the AIL for the CUI with an id
3655  * equal to that in the CUD format structure. If we find it we drop the CUD
3656  * reference, which removes the CUI from the AIL and frees it.
3657  */
3658 STATIC int
3659 xlog_recover_cud_pass2(
3660 	struct xlog			*log,
3661 	struct xlog_recover_item	*item)
3662 {
3663 	struct xfs_cud_log_format	*cud_formatp;
3664 	struct xfs_cui_log_item		*cuip = NULL;
3665 	struct xfs_log_item		*lip;
3666 	uint64_t			cui_id;
3667 	struct xfs_ail_cursor		cur;
3668 	struct xfs_ail			*ailp = log->l_ailp;
3669 
3670 	cud_formatp = item->ri_buf[0].i_addr;
3671 	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
3672 		return -EFSCORRUPTED;
3673 	cui_id = cud_formatp->cud_cui_id;
3674 
3675 	/*
3676 	 * Search for the CUI with the id in the CUD format structure in the
3677 	 * AIL.
3678 	 */
3679 	spin_lock(&ailp->xa_lock);
3680 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3681 	while (lip != NULL) {
3682 		if (lip->li_type == XFS_LI_CUI) {
3683 			cuip = (struct xfs_cui_log_item *)lip;
3684 			if (cuip->cui_format.cui_id == cui_id) {
3685 				/*
3686 				 * Drop the CUD reference to the CUI. This
3687 				 * removes the CUI from the AIL and frees it.
3688 				 */
3689 				spin_unlock(&ailp->xa_lock);
3690 				xfs_cui_release(cuip);
3691 				spin_lock(&ailp->xa_lock);
3692 				break;
3693 			}
3694 		}
3695 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3696 	}
3697 
3698 	xfs_trans_ail_cursor_done(&cur);
3699 	spin_unlock(&ailp->xa_lock);
3700 
3701 	return 0;
3702 }
3703 
3704 /*
3705  * Copy a BUI format buffer from the given buf, and into the destination
3706  * BUI format structure.  The BUI/BUD items were designed not to need any
3707  * special alignment handling.
3708  */
3709 static int
3710 xfs_bui_copy_format(
3711 	struct xfs_log_iovec		*buf,
3712 	struct xfs_bui_log_format	*dst_bui_fmt)
3713 {
3714 	struct xfs_bui_log_format	*src_bui_fmt;
3715 	uint				len;
3716 
3717 	src_bui_fmt = buf->i_addr;
3718 	len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
3719 
3720 	if (buf->i_len == len) {
3721 		memcpy(dst_bui_fmt, src_bui_fmt, len);
3722 		return 0;
3723 	}
3724 	return -EFSCORRUPTED;
3725 }
3726 
3727 /*
3728  * This routine is called to create an in-core extent bmap update
3729  * item from the bui format structure which was logged on disk.
3730  * It allocates an in-core bui, copies the extents from the format
3731  * structure into it, and adds the bui to the AIL with the given
3732  * LSN.
3733  */
3734 STATIC int
3735 xlog_recover_bui_pass2(
3736 	struct xlog			*log,
3737 	struct xlog_recover_item	*item,
3738 	xfs_lsn_t			lsn)
3739 {
3740 	int				error;
3741 	struct xfs_mount		*mp = log->l_mp;
3742 	struct xfs_bui_log_item		*buip;
3743 	struct xfs_bui_log_format	*bui_formatp;
3744 
3745 	bui_formatp = item->ri_buf[0].i_addr;
3746 
3747 	if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
3748 		return -EFSCORRUPTED;
3749 	buip = xfs_bui_init(mp);
3750 	error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
3751 	if (error) {
3752 		xfs_bui_item_free(buip);
3753 		return error;
3754 	}
3755 	atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
3756 
3757 	spin_lock(&log->l_ailp->xa_lock);
3758 	/*
3759 	 * The BUI has two references. One for the BUD and one for BUI to ensure
3760 	 * it makes it into the AIL. Insert the BUI into the AIL directly and
3761 	 * drop the BUI reference. Note that xfs_trans_ail_update() drops the
3762 	 * AIL lock.
3763 	 */
3764 	xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
3765 	xfs_bui_release(buip);
3766 	return 0;
3767 }
3768 
3769 
3770 /*
3771  * This routine is called when a BUD format structure is found in a committed
3772  * transaction in the log. Its purpose is to cancel the corresponding BUI if it
3773  * was still in the log. To do this it searches the AIL for the BUI with an id
3774  * equal to that in the BUD format structure. If we find it we drop the BUD
3775  * reference, which removes the BUI from the AIL and frees it.
3776  */
3777 STATIC int
3778 xlog_recover_bud_pass2(
3779 	struct xlog			*log,
3780 	struct xlog_recover_item	*item)
3781 {
3782 	struct xfs_bud_log_format	*bud_formatp;
3783 	struct xfs_bui_log_item		*buip = NULL;
3784 	struct xfs_log_item		*lip;
3785 	uint64_t			bui_id;
3786 	struct xfs_ail_cursor		cur;
3787 	struct xfs_ail			*ailp = log->l_ailp;
3788 
3789 	bud_formatp = item->ri_buf[0].i_addr;
3790 	if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
3791 		return -EFSCORRUPTED;
3792 	bui_id = bud_formatp->bud_bui_id;
3793 
3794 	/*
3795 	 * Search for the BUI with the id in the BUD format structure in the
3796 	 * AIL.
3797 	 */
3798 	spin_lock(&ailp->xa_lock);
3799 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3800 	while (lip != NULL) {
3801 		if (lip->li_type == XFS_LI_BUI) {
3802 			buip = (struct xfs_bui_log_item *)lip;
3803 			if (buip->bui_format.bui_id == bui_id) {
3804 				/*
3805 				 * Drop the BUD reference to the BUI. This
3806 				 * removes the BUI from the AIL and frees it.
3807 				 */
3808 				spin_unlock(&ailp->xa_lock);
3809 				xfs_bui_release(buip);
3810 				spin_lock(&ailp->xa_lock);
3811 				break;
3812 			}
3813 		}
3814 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3815 	}
3816 
3817 	xfs_trans_ail_cursor_done(&cur);
3818 	spin_unlock(&ailp->xa_lock);
3819 
3820 	return 0;
3821 }
3822 
3823 /*
3824  * This routine is called when an inode create format structure is found in a
3825  * committed transaction in the log.  It's purpose is to initialise the inodes
3826  * being allocated on disk. This requires us to get inode cluster buffers that
3827  * match the range to be initialised, stamped with inode templates and written
3828  * by delayed write so that subsequent modifications will hit the cached buffer
3829  * and only need writing out at the end of recovery.
3830  */
3831 STATIC int
3832 xlog_recover_do_icreate_pass2(
3833 	struct xlog		*log,
3834 	struct list_head	*buffer_list,
3835 	xlog_recover_item_t	*item)
3836 {
3837 	struct xfs_mount	*mp = log->l_mp;
3838 	struct xfs_icreate_log	*icl;
3839 	xfs_agnumber_t		agno;
3840 	xfs_agblock_t		agbno;
3841 	unsigned int		count;
3842 	unsigned int		isize;
3843 	xfs_agblock_t		length;
3844 	int			blks_per_cluster;
3845 	int			bb_per_cluster;
3846 	int			cancel_count;
3847 	int			nbufs;
3848 	int			i;
3849 
3850 	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3851 	if (icl->icl_type != XFS_LI_ICREATE) {
3852 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3853 		return -EINVAL;
3854 	}
3855 
3856 	if (icl->icl_size != 1) {
3857 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3858 		return -EINVAL;
3859 	}
3860 
3861 	agno = be32_to_cpu(icl->icl_ag);
3862 	if (agno >= mp->m_sb.sb_agcount) {
3863 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3864 		return -EINVAL;
3865 	}
3866 	agbno = be32_to_cpu(icl->icl_agbno);
3867 	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3868 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3869 		return -EINVAL;
3870 	}
3871 	isize = be32_to_cpu(icl->icl_isize);
3872 	if (isize != mp->m_sb.sb_inodesize) {
3873 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3874 		return -EINVAL;
3875 	}
3876 	count = be32_to_cpu(icl->icl_count);
3877 	if (!count) {
3878 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3879 		return -EINVAL;
3880 	}
3881 	length = be32_to_cpu(icl->icl_length);
3882 	if (!length || length >= mp->m_sb.sb_agblocks) {
3883 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3884 		return -EINVAL;
3885 	}
3886 
3887 	/*
3888 	 * The inode chunk is either full or sparse and we only support
3889 	 * m_ialloc_min_blks sized sparse allocations at this time.
3890 	 */
3891 	if (length != mp->m_ialloc_blks &&
3892 	    length != mp->m_ialloc_min_blks) {
3893 		xfs_warn(log->l_mp,
3894 			 "%s: unsupported chunk length", __func__);
3895 		return -EINVAL;
3896 	}
3897 
3898 	/* verify inode count is consistent with extent length */
3899 	if ((count >> mp->m_sb.sb_inopblog) != length) {
3900 		xfs_warn(log->l_mp,
3901 			 "%s: inconsistent inode count and chunk length",
3902 			 __func__);
3903 		return -EINVAL;
3904 	}
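
	/*
	 * Worked example (illustrative geometry only): with 4096-byte blocks
	 * and 512-byte inodes there are 8 inodes per block (sb_inopblog == 3),
	 * so a 64-inode chunk must cover 64 >> 3 == 8 blocks for the inode
	 * count and the chunk length to be consistent.
	 */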
3905 
3906 	/*
3907 	 * The icreate transaction can cover multiple cluster buffers and these
3908 	 * buffers could have been freed and reused. Check the individual
3909 	 * buffers for cancellation so we don't overwrite anything written after
3910 	 * a cancellation.
3911 	 */
3912 	blks_per_cluster = xfs_icluster_size_fsb(mp);
3913 	bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
3914 	nbufs = length / blks_per_cluster;
3915 	for (i = 0, cancel_count = 0; i < nbufs; i++) {
3916 		xfs_daddr_t	daddr;
3917 
3918 		daddr = XFS_AGB_TO_DADDR(mp, agno,
3919 					 agbno + i * blks_per_cluster);
3920 		if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3921 			cancel_count++;
3922 	}
3923 
3924 	/*
3925 	 * We currently only use icreate for a single allocation at a time. This
3926 	 * means we should expect either all or none of the buffers to be
3927 	 * cancelled. Be conservative and skip replay if at least one buffer is
3928 	 * cancelled, but warn the user that something is awry if the buffers
3929 	 * are not consistent.
3930 	 *
3931 	 * XXX: This must be refined to only skip cancelled clusters once we use
3932 	 * icreate for multiple chunk allocations.
3933 	 */
3934 	ASSERT(!cancel_count || cancel_count == nbufs);
3935 	if (cancel_count) {
3936 		if (cancel_count != nbufs)
3937 			xfs_warn(mp,
3938 	"WARNING: partial inode chunk cancellation, skipped icreate.");
3939 		trace_xfs_log_recover_icreate_cancel(log, icl);
3940 		return 0;
3941 	}
3942 
3943 	trace_xfs_log_recover_icreate_recover(log, icl);
3944 	return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3945 				     length, be32_to_cpu(icl->icl_gen));
3946 }
3947 
3948 STATIC void
3949 xlog_recover_buffer_ra_pass2(
3950 	struct xlog                     *log,
3951 	struct xlog_recover_item        *item)
3952 {
3953 	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
3954 	struct xfs_mount		*mp = log->l_mp;
3955 
3956 	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3957 			buf_f->blf_len, buf_f->blf_flags)) {
3958 		return;
3959 	}
3960 
3961 	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3962 				buf_f->blf_len, NULL);
3963 }
3964 
3965 STATIC void
3966 xlog_recover_inode_ra_pass2(
3967 	struct xlog                     *log,
3968 	struct xlog_recover_item        *item)
3969 {
3970 	struct xfs_inode_log_format	ilf_buf;
3971 	struct xfs_inode_log_format	*ilfp;
3972 	struct xfs_mount		*mp = log->l_mp;
3973 	int			error;
3974 
3975 	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3976 		ilfp = item->ri_buf[0].i_addr;
3977 	} else {
3978 		ilfp = &ilf_buf;
3979 		memset(ilfp, 0, sizeof(*ilfp));
3980 		error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3981 		if (error)
3982 			return;
3983 	}
3984 
3985 	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3986 		return;
3987 
3988 	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3989 				ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3990 }
3991 
3992 STATIC void
3993 xlog_recover_dquot_ra_pass2(
3994 	struct xlog			*log,
3995 	struct xlog_recover_item	*item)
3996 {
3997 	struct xfs_mount	*mp = log->l_mp;
3998 	struct xfs_disk_dquot	*recddq;
3999 	struct xfs_dq_logformat	*dq_f;
4000 	uint			type;
4001 	int			len;
4002 
4003 
4004 	if (mp->m_qflags == 0)
4005 		return;
4006 
4007 	recddq = item->ri_buf[1].i_addr;
4008 	if (recddq == NULL)
4009 		return;
4010 	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
4011 		return;
4012 
4013 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
4014 	ASSERT(type);
4015 	if (log->l_quotaoffs_flag & type)
4016 		return;
4017 
4018 	dq_f = item->ri_buf[0].i_addr;
4019 	ASSERT(dq_f);
4020 	ASSERT(dq_f->qlf_len == 1);
4021 
4022 	len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
4023 	if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
4024 		return;
4025 
4026 	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
4027 			  &xfs_dquot_buf_ra_ops);
4028 }
4029 
4030 STATIC void
4031 xlog_recover_ra_pass2(
4032 	struct xlog			*log,
4033 	struct xlog_recover_item	*item)
4034 {
4035 	switch (ITEM_TYPE(item)) {
4036 	case XFS_LI_BUF:
4037 		xlog_recover_buffer_ra_pass2(log, item);
4038 		break;
4039 	case XFS_LI_INODE:
4040 		xlog_recover_inode_ra_pass2(log, item);
4041 		break;
4042 	case XFS_LI_DQUOT:
4043 		xlog_recover_dquot_ra_pass2(log, item);
4044 		break;
4045 	case XFS_LI_EFI:
4046 	case XFS_LI_EFD:
4047 	case XFS_LI_QUOTAOFF:
4048 	case XFS_LI_RUI:
4049 	case XFS_LI_RUD:
4050 	case XFS_LI_CUI:
4051 	case XFS_LI_CUD:
4052 	case XFS_LI_BUI:
4053 	case XFS_LI_BUD:
4054 	default:
4055 		break;
4056 	}
4057 }
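
/*
 * The _ra_pass2 helpers above are purely an optimisation: they issue
 * asynchronous readahead for the buffers that queued items will modify,
 * so that by the time xlog_recover_items_pass2() replays a batch the
 * target buffers are likely already cached and replay does not stall on
 * synchronous reads. Buffers known to be cancelled are skipped, as they
 * will never be read during replay.
 */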
4058 
4059 STATIC int
4060 xlog_recover_commit_pass1(
4061 	struct xlog			*log,
4062 	struct xlog_recover		*trans,
4063 	struct xlog_recover_item	*item)
4064 {
4065 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
4066 
4067 	switch (ITEM_TYPE(item)) {
4068 	case XFS_LI_BUF:
4069 		return xlog_recover_buffer_pass1(log, item);
4070 	case XFS_LI_QUOTAOFF:
4071 		return xlog_recover_quotaoff_pass1(log, item);
4072 	case XFS_LI_INODE:
4073 	case XFS_LI_EFI:
4074 	case XFS_LI_EFD:
4075 	case XFS_LI_DQUOT:
4076 	case XFS_LI_ICREATE:
4077 	case XFS_LI_RUI:
4078 	case XFS_LI_RUD:
4079 	case XFS_LI_CUI:
4080 	case XFS_LI_CUD:
4081 	case XFS_LI_BUI:
4082 	case XFS_LI_BUD:
4083 		/* nothing to do in pass 1 */
4084 		return 0;
4085 	default:
4086 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4087 			__func__, ITEM_TYPE(item));
4088 		ASSERT(0);
4089 		return -EIO;
4090 	}
4091 }
4092 
4093 STATIC int
4094 xlog_recover_commit_pass2(
4095 	struct xlog			*log,
4096 	struct xlog_recover		*trans,
4097 	struct list_head		*buffer_list,
4098 	struct xlog_recover_item	*item)
4099 {
4100 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
4101 
4102 	switch (ITEM_TYPE(item)) {
4103 	case XFS_LI_BUF:
4104 		return xlog_recover_buffer_pass2(log, buffer_list, item,
4105 						 trans->r_lsn);
4106 	case XFS_LI_INODE:
4107 		return xlog_recover_inode_pass2(log, buffer_list, item,
4108 						 trans->r_lsn);
4109 	case XFS_LI_EFI:
4110 		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
4111 	case XFS_LI_EFD:
4112 		return xlog_recover_efd_pass2(log, item);
4113 	case XFS_LI_RUI:
4114 		return xlog_recover_rui_pass2(log, item, trans->r_lsn);
4115 	case XFS_LI_RUD:
4116 		return xlog_recover_rud_pass2(log, item);
4117 	case XFS_LI_CUI:
4118 		return xlog_recover_cui_pass2(log, item, trans->r_lsn);
4119 	case XFS_LI_CUD:
4120 		return xlog_recover_cud_pass2(log, item);
4121 	case XFS_LI_BUI:
4122 		return xlog_recover_bui_pass2(log, item, trans->r_lsn);
4123 	case XFS_LI_BUD:
4124 		return xlog_recover_bud_pass2(log, item);
4125 	case XFS_LI_DQUOT:
4126 		return xlog_recover_dquot_pass2(log, buffer_list, item,
4127 						trans->r_lsn);
4128 	case XFS_LI_ICREATE:
4129 		return xlog_recover_do_icreate_pass2(log, buffer_list, item);
4130 	case XFS_LI_QUOTAOFF:
4131 		/* nothing to do in pass2 */
4132 		return 0;
4133 	default:
4134 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4135 			__func__, ITEM_TYPE(item));
4136 		ASSERT(0);
4137 		return -EIO;
4138 	}
4139 }
4140 
4141 STATIC int
4142 xlog_recover_items_pass2(
4143 	struct xlog                     *log,
4144 	struct xlog_recover             *trans,
4145 	struct list_head                *buffer_list,
4146 	struct list_head                *item_list)
4147 {
4148 	struct xlog_recover_item	*item;
4149 	int				error = 0;
4150 
4151 	list_for_each_entry(item, item_list, ri_list) {
4152 		error = xlog_recover_commit_pass2(log, trans,
4153 					  buffer_list, item);
4154 		if (error)
4155 			return error;
4156 	}
4157 
4158 	return error;
4159 }
4160 
4161 /*
4162  * Perform the transaction.
4163  *
4164  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
4165  * intent items (EFIs, RUIs, CUIs, BUIs) get queued up by adding
4166  * entries into the AIL for them, and their done items cancel them.
4166  */
4167 STATIC int
4168 xlog_recover_commit_trans(
4169 	struct xlog		*log,
4170 	struct xlog_recover	*trans,
4171 	int			pass,
4172 	struct list_head	*buffer_list)
4173 {
4174 	int				error = 0;
4175 	int				items_queued = 0;
4176 	struct xlog_recover_item	*item;
4177 	struct xlog_recover_item	*next;
4178 	LIST_HEAD			(ra_list);
4179 	LIST_HEAD			(done_list);
4180 
4181 	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
4182 
4183 	hlist_del_init(&trans->r_list);
4184 
4185 	error = xlog_recover_reorder_trans(log, trans, pass);
4186 	if (error)
4187 		return error;
4188 
4189 	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
4190 		switch (pass) {
4191 		case XLOG_RECOVER_PASS1:
4192 			error = xlog_recover_commit_pass1(log, trans, item);
4193 			break;
4194 		case XLOG_RECOVER_PASS2:
4195 			xlog_recover_ra_pass2(log, item);
4196 			list_move_tail(&item->ri_list, &ra_list);
4197 			items_queued++;
4198 			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
4199 				error = xlog_recover_items_pass2(log, trans,
4200 						buffer_list, &ra_list);
4201 				list_splice_tail_init(&ra_list, &done_list);
4202 				items_queued = 0;
4203 			}
4204 
4205 			break;
4206 		default:
4207 			ASSERT(0);
4208 		}
4209 
4210 		if (error)
4211 			goto out;
4212 	}
4213 
4214 out:
4215 	if (!list_empty(&ra_list)) {
4216 		if (!error)
4217 			error = xlog_recover_items_pass2(log, trans,
4218 					buffer_list, &ra_list);
4219 		list_splice_tail_init(&ra_list, &done_list);
4220 	}
4221 
4222 	if (!list_empty(&done_list))
4223 		list_splice_init(&done_list, &trans->r_itemq);
4224 
4225 	return error;
4226 }
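
/*
 * Pass 2 items are thus replayed in batches of
 * XLOG_RECOVER_COMMIT_QUEUE_MAX: readahead is issued for each item as it
 * is moved onto ra_list, and once the queue fills (or the transaction
 * ends) xlog_recover_items_pass2() replays the whole batch. This
 * overlaps readahead I/O with replay work while bounding the number of
 * outstanding readahead requests.
 */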
4227 
4228 STATIC void
4229 xlog_recover_add_item(
4230 	struct list_head	*head)
4231 {
4232 	xlog_recover_item_t	*item;
4233 
4234 	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
4235 	INIT_LIST_HEAD(&item->ri_list);
4236 	list_add_tail(&item->ri_list, head);
4237 }
4238 
4239 STATIC int
4240 xlog_recover_add_to_cont_trans(
4241 	struct xlog		*log,
4242 	struct xlog_recover	*trans,
4243 	char			*dp,
4244 	int			len)
4245 {
4246 	xlog_recover_item_t	*item;
4247 	char			*ptr, *old_ptr;
4248 	int			old_len;
4249 
4250 	/*
4251 	 * If the transaction is empty, the header was split across this and the
4252 	 * previous record. Copy the rest of the header.
4253 	 */
4254 	if (list_empty(&trans->r_itemq)) {
4255 		ASSERT(len <= sizeof(struct xfs_trans_header));
4256 		if (len > sizeof(struct xfs_trans_header)) {
4257 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
4258 			return -EIO;
4259 		}
4260 
4261 		xlog_recover_add_item(&trans->r_itemq);
4262 		ptr = (char *)&trans->r_theader +
4263 				sizeof(struct xfs_trans_header) - len;
4264 		memcpy(ptr, dp, len);
4265 		return 0;
4266 	}
4267 
4268 	/* take the tail entry */
4269 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4270 
4271 	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
4272 	old_len = item->ri_buf[item->ri_cnt-1].i_len;
4273 
4274 	ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
4275 	memcpy(&ptr[old_len], dp, len);
4276 	item->ri_buf[item->ri_cnt-1].i_len += len;
4277 	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
4278 	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
4279 	return 0;
4280 }
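
/*
 * Example (sizes illustrative only): if a 48-byte region had its first
 * 32 bytes land at the end of the previous log record, the remaining 16
 * bytes arrive here in an ophdr flagged XLOG_WAS_CONT_TRANS; the tail
 * region buffer is reallocated from 32 to 48 bytes and the new data is
 * copied in at offset 32.
 */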
4281 
4282 /*
4283  * The next region to add is the start of a new region.  It could be
4284  * a whole region or it could be the first part of a new region.  Because
4285  * of this, the assumption here is that the type and size fields of all
4286  * format structures fit into the first 32 bits of the structure.
4287  *
4288  * This works because all regions must be 32 bit aligned.  Therefore, we
4289  * either have both fields or we have neither field.  In the case we have
4290  * neither field, the data part of the region is zero length.  We only have
4291  * a log_op_header and can throw away the header since a new one will appear
4292  * later.  If we have at least 4 bytes, then we can determine how many regions
4293  * will appear in the current log item.
4294  */
4295 STATIC int
4296 xlog_recover_add_to_trans(
4297 	struct xlog		*log,
4298 	struct xlog_recover	*trans,
4299 	char			*dp,
4300 	int			len)
4301 {
4302 	struct xfs_inode_log_format	*in_f;			/* any will do */
4303 	xlog_recover_item_t	*item;
4304 	char			*ptr;
4305 
4306 	if (!len)
4307 		return 0;
4308 	if (list_empty(&trans->r_itemq)) {
4309 		/* we need to catch log corruptions here */
4310 		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
4311 			xfs_warn(log->l_mp, "%s: bad header magic number",
4312 				__func__);
4313 			ASSERT(0);
4314 			return -EIO;
4315 		}
4316 
4317 		if (len > sizeof(struct xfs_trans_header)) {
4318 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
4319 			ASSERT(0);
4320 			return -EIO;
4321 		}
4322 
4323 		/*
4324 		 * The transaction header can be arbitrarily split across op
4325 		 * records. If we don't have the whole thing here, copy what we
4326 		 * do have and handle the rest in the next record.
4327 		 */
4328 		if (len == sizeof(struct xfs_trans_header))
4329 			xlog_recover_add_item(&trans->r_itemq);
4330 		memcpy(&trans->r_theader, dp, len);
4331 		return 0;
4332 	}
4333 
4334 	ptr = kmem_alloc(len, KM_SLEEP);
4335 	memcpy(ptr, dp, len);
4336 	in_f = (struct xfs_inode_log_format *)ptr;
4337 
4338 	/* take the tail entry */
4339 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4340 	if (item->ri_total != 0 &&
4341 	    item->ri_total == item->ri_cnt) {
4342 		/* tail item is in use, get a new one */
4343 		xlog_recover_add_item(&trans->r_itemq);
4344 		item = list_entry(trans->r_itemq.prev,
4345 					xlog_recover_item_t, ri_list);
4346 	}
4347 
4348 	if (item->ri_total == 0) {		/* first region to be added */
4349 		if (in_f->ilf_size == 0 ||
4350 		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
4351 			xfs_warn(log->l_mp,
4352 		"bad number of regions (%d) in inode log format",
4353 				  in_f->ilf_size);
4354 			ASSERT(0);
4355 			kmem_free(ptr);
4356 			return -EIO;
4357 		}
4358 
4359 		item->ri_total = in_f->ilf_size;
4360 		item->ri_buf =
4361 			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
4362 				    KM_SLEEP);
4363 	}
4364 	ASSERT(item->ri_total > item->ri_cnt);
4365 	/* Description region is ri_buf[0] */
4366 	item->ri_buf[item->ri_cnt].i_addr = ptr;
4367 	item->ri_buf[item->ri_cnt].i_len  = len;
4368 	item->ri_cnt++;
4369 	trace_xfs_log_recover_item_add(log, trans, item, 0);
4370 	return 0;
4371 }
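
/*
 * Layout assumed by the in_f cast above: every log item format structure
 * begins with the same two fields, e.g.
 *
 *	__uint16_t	type;	(XFS_LI_* log item type)
 *	__uint16_t	size;	(number of regions in the item)
 *
 * which is why a struct xfs_inode_log_format pointer ("any will do") can
 * be used to read the region count (ilf_size) out of the first region of
 * any item type.
 */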
4372 
4373 /*
4374  * Free up any resources allocated by the transaction
4375  *
4376  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
4377  */
4378 STATIC void
4379 xlog_recover_free_trans(
4380 	struct xlog_recover	*trans)
4381 {
4382 	xlog_recover_item_t	*item, *n;
4383 	int			i;
4384 
4385 	hlist_del_init(&trans->r_list);
4386 
4387 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
4388 		/* Free the regions in the item. */
4389 		list_del(&item->ri_list);
4390 		for (i = 0; i < item->ri_cnt; i++)
4391 			kmem_free(item->ri_buf[i].i_addr);
4392 		/* Free the item itself */
4393 		kmem_free(item->ri_buf);
4394 		kmem_free(item);
4395 	}
4396 	/* Free the transaction recover structure */
4397 	kmem_free(trans);
4398 }
4399 
4400 /*
4401  * On error or completion, trans is freed.
4402  */
4403 STATIC int
4404 xlog_recovery_process_trans(
4405 	struct xlog		*log,
4406 	struct xlog_recover	*trans,
4407 	char			*dp,
4408 	unsigned int		len,
4409 	unsigned int		flags,
4410 	int			pass,
4411 	struct list_head	*buffer_list)
4412 {
4413 	int			error = 0;
4414 	bool			freeit = false;
4415 
4416 	/* mask off ophdr transaction container flags */
4417 	flags &= ~XLOG_END_TRANS;
4418 	if (flags & XLOG_WAS_CONT_TRANS)
4419 		flags &= ~XLOG_CONTINUE_TRANS;
4420 
4421 	/*
4422 	 * Callees must not free the trans structure. We'll decide if we need to
4423 	 * free it or not based on the operation being done and its result.
4424 	 */
4425 	switch (flags) {
4426 	/* expected flag values */
4427 	case 0:
4428 	case XLOG_CONTINUE_TRANS:
4429 		error = xlog_recover_add_to_trans(log, trans, dp, len);
4430 		break;
4431 	case XLOG_WAS_CONT_TRANS:
4432 		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
4433 		break;
4434 	case XLOG_COMMIT_TRANS:
4435 		error = xlog_recover_commit_trans(log, trans, pass,
4436 						  buffer_list);
4437 		/* success or fail, we are now done with this transaction. */
4438 		freeit = true;
4439 		break;
4440 
4441 	/* unexpected flag values */
4442 	case XLOG_UNMOUNT_TRANS:
4443 		/* just skip trans */
4444 		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
4445 		freeit = true;
4446 		break;
4447 	case XLOG_START_TRANS:
4448 	default:
4449 		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
4450 		ASSERT(0);
4451 		error = -EIO;
4452 		break;
4453 	}
4454 	if (error || freeit)
4455 		xlog_recover_free_trans(trans);
4456 	return error;
4457 }
4458 
4459 /*
4460  * Lookup the transaction recovery structure associated with the ID in the
4461  * current ophdr. If the transaction doesn't exist and the start flag is set in
4462  * the ophdr, then allocate a new transaction for future ID matches to find.
4463  * Either way, return what we found during the lookup - an existing transaction
4464  * or nothing.
4465  */
4466 STATIC struct xlog_recover *
4467 xlog_recover_ophdr_to_trans(
4468 	struct hlist_head	rhash[],
4469 	struct xlog_rec_header	*rhead,
4470 	struct xlog_op_header	*ohead)
4471 {
4472 	struct xlog_recover	*trans;
4473 	xlog_tid_t		tid;
4474 	struct hlist_head	*rhp;
4475 
4476 	tid = be32_to_cpu(ohead->oh_tid);
4477 	rhp = &rhash[XLOG_RHASH(tid)];
4478 	hlist_for_each_entry(trans, rhp, r_list) {
4479 		if (trans->r_log_tid == tid)
4480 			return trans;
4481 	}
4482 
4483 	/*
4484 	 * skip over non-start transaction headers - we could be
4485 	 * processing slack space before the next transaction starts
4486 	 */
4487 	if (!(ohead->oh_flags & XLOG_START_TRANS))
4488 		return NULL;
4489 
4490 	ASSERT(be32_to_cpu(ohead->oh_len) == 0);
4491 
4492 	/*
4493 	 * This is a new transaction so allocate a new recovery container to
4494 	 * hold the recovery ops that will follow.
4495 	 */
4496 	trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
4497 	trans->r_log_tid = tid;
4498 	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
4499 	INIT_LIST_HEAD(&trans->r_itemq);
4500 	INIT_HLIST_NODE(&trans->r_list);
4501 	hlist_add_head(&trans->r_list, rhp);
4502 
4503 	/*
4504 	 * Nothing more to do for this ophdr. Items to be added to this new
4505 	 * transaction will be in subsequent ophdr containers.
4506 	 */
4507 	return NULL;
4508 }
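
/*
 * The rhash[] table exists because ophdrs from multiple in-flight
 * transactions can be interleaved within and across log records. Hashing
 * on the ophdr transaction ID via XLOG_RHASH(tid) routes each ophdr to
 * the correct in-progress recovery transaction without a linear search
 * of every open transaction.
 */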
4509 
4510 STATIC int
4511 xlog_recover_process_ophdr(
4512 	struct xlog		*log,
4513 	struct hlist_head	rhash[],
4514 	struct xlog_rec_header	*rhead,
4515 	struct xlog_op_header	*ohead,
4516 	char			*dp,
4517 	char			*end,
4518 	int			pass,
4519 	struct list_head	*buffer_list)
4520 {
4521 	struct xlog_recover	*trans;
4522 	unsigned int		len;
4523 	int			error;
4524 
4525 	/* Do we understand who wrote this op? */
4526 	if (ohead->oh_clientid != XFS_TRANSACTION &&
4527 	    ohead->oh_clientid != XFS_LOG) {
4528 		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4529 			__func__, ohead->oh_clientid);
4530 		ASSERT(0);
4531 		return -EIO;
4532 	}
4533 
4534 	/*
4535 	 * Check the ophdr contains all the data it is supposed to contain.
4536 	 */
4537 	len = be32_to_cpu(ohead->oh_len);
4538 	if (dp + len > end) {
4539 		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4540 		WARN_ON(1);
4541 		return -EIO;
4542 	}
4543 
4544 	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4545 	if (!trans) {
4546 		/* nothing to do, so skip over this ophdr */
4547 		return 0;
4548 	}
4549 
4550 	/*
4551 	 * The recovered buffer queue is drained only once we know that all
4552 	 * recovery items for the current LSN have been processed. This is
4553 	 * required because:
4554 	 *
4555 	 * - Buffer write submission updates the metadata LSN of the buffer.
4556 	 * - Log recovery skips items with a metadata LSN >= the current LSN of
4557 	 *   the recovery item.
4558 	 * - Separate recovery items against the same metadata buffer can share
4559 	 *   a current LSN. I.e., consider that the LSN of a recovery item is
4560 	 *   defined as the starting LSN of the first record in which its
4561 	 *   transaction appears, that a record can hold multiple transactions,
4562 	 *   and/or that a transaction can span multiple records.
4563 	 *
4564 	 * In other words, we are allowed to submit a buffer from log recovery
4565 	 * once per current LSN. Otherwise, we may incorrectly skip recovery
4566 	 * items and cause corruption.
4567 	 *
4568 	 * We don't know up front whether buffers are updated multiple times per
4569 	 * LSN. Therefore, track the current LSN of each commit log record as it
4570 	 * is processed and drain the queue when it changes. Use commit records
4571 	 * because they are ordered correctly by the logging code.
4572 	 */
4573 	if (log->l_recovery_lsn != trans->r_lsn &&
4574 	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
4575 		error = xfs_buf_delwri_submit(buffer_list);
4576 		if (error)
4577 			return error;
4578 		log->l_recovery_lsn = trans->r_lsn;
4579 	}
4580 
4581 	return xlog_recovery_process_trans(log, trans, dp, len,
4582 					   ohead->oh_flags, pass, buffer_list);
4583 }
4584 
4585 /*
4586  * There are two valid states of the r_state field.  0 indicates that the
4587  * transaction structure is in a normal state.  We have either seen the
4588  * start of the transaction or the last operation we added was not a partial
4589  * operation.  If the last operation we added to the transaction was a
4590  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
4591  *
4592  * NOTE: skip LRs with 0 data length.
4593  */
4594 STATIC int
4595 xlog_recover_process_data(
4596 	struct xlog		*log,
4597 	struct hlist_head	rhash[],
4598 	struct xlog_rec_header	*rhead,
4599 	char			*dp,
4600 	int			pass,
4601 	struct list_head	*buffer_list)
4602 {
4603 	struct xlog_op_header	*ohead;
4604 	char			*end;
4605 	int			num_logops;
4606 	int			error;
4607 
4608 	end = dp + be32_to_cpu(rhead->h_len);
4609 	num_logops = be32_to_cpu(rhead->h_num_logops);
4610 
4611 	/* check the log format matches our own - else we can't recover */
4612 	if (xlog_header_check_recover(log->l_mp, rhead))
4613 		return -EIO;
4614 
4615 	trace_xfs_log_recover_record(log, rhead, pass);
4616 	while ((dp < end) && num_logops) {
4617 
4618 		ohead = (struct xlog_op_header *)dp;
4619 		dp += sizeof(*ohead);
4620 		ASSERT(dp <= end);
4621 
4622 		/* errors will abort recovery */
4623 		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4624 						   dp, end, pass, buffer_list);
4625 		if (error)
4626 			return error;
4627 
4628 		dp += be32_to_cpu(ohead->oh_len);
4629 		num_logops--;
4630 	}
4631 	return 0;
4632 }
4633 
4634 /* Recover the EFI if necessary. */
4635 STATIC int
4636 xlog_recover_process_efi(
4637 	struct xfs_mount		*mp,
4638 	struct xfs_ail			*ailp,
4639 	struct xfs_log_item		*lip)
4640 {
4641 	struct xfs_efi_log_item		*efip;
4642 	int				error;
4643 
4644 	/*
4645 	 * Skip EFIs that we've already processed.
4646 	 */
4647 	efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4648 	if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
4649 		return 0;
4650 
4651 	spin_unlock(&ailp->xa_lock);
4652 	error = xfs_efi_recover(mp, efip);
4653 	spin_lock(&ailp->xa_lock);
4654 
4655 	return error;
4656 }
4657 
4658 /* Release the EFI since we're cancelling everything. */
4659 STATIC void
4660 xlog_recover_cancel_efi(
4661 	struct xfs_mount		*mp,
4662 	struct xfs_ail			*ailp,
4663 	struct xfs_log_item		*lip)
4664 {
4665 	struct xfs_efi_log_item		*efip;
4666 
4667 	efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4668 
4669 	spin_unlock(&ailp->xa_lock);
4670 	xfs_efi_release(efip);
4671 	spin_lock(&ailp->xa_lock);
4672 }
4673 
4674 /* Recover the RUI if necessary. */
4675 STATIC int
4676 xlog_recover_process_rui(
4677 	struct xfs_mount		*mp,
4678 	struct xfs_ail			*ailp,
4679 	struct xfs_log_item		*lip)
4680 {
4681 	struct xfs_rui_log_item		*ruip;
4682 	int				error;
4683 
4684 	/*
4685 	 * Skip RUIs that we've already processed.
4686 	 */
4687 	ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4688 	if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
4689 		return 0;
4690 
4691 	spin_unlock(&ailp->xa_lock);
4692 	error = xfs_rui_recover(mp, ruip);
4693 	spin_lock(&ailp->xa_lock);
4694 
4695 	return error;
4696 }
4697 
4698 /* Release the RUI since we're cancelling everything. */
4699 STATIC void
4700 xlog_recover_cancel_rui(
4701 	struct xfs_mount		*mp,
4702 	struct xfs_ail			*ailp,
4703 	struct xfs_log_item		*lip)
4704 {
4705 	struct xfs_rui_log_item		*ruip;
4706 
4707 	ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4708 
4709 	spin_unlock(&ailp->xa_lock);
4710 	xfs_rui_release(ruip);
4711 	spin_lock(&ailp->xa_lock);
4712 }
4713 
4714 /* Recover the CUI if necessary. */
4715 STATIC int
4716 xlog_recover_process_cui(
4717 	struct xfs_mount		*mp,
4718 	struct xfs_ail			*ailp,
4719 	struct xfs_log_item		*lip)
4720 {
4721 	struct xfs_cui_log_item		*cuip;
4722 	int				error;
4723 
4724 	/*
4725 	 * Skip CUIs that we've already processed.
4726 	 */
4727 	cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4728 	if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
4729 		return 0;
4730 
4731 	spin_unlock(&ailp->xa_lock);
4732 	error = xfs_cui_recover(mp, cuip);
4733 	spin_lock(&ailp->xa_lock);
4734 
4735 	return error;
4736 }
4737 
4738 /* Release the CUI since we're cancelling everything. */
4739 STATIC void
4740 xlog_recover_cancel_cui(
4741 	struct xfs_mount		*mp,
4742 	struct xfs_ail			*ailp,
4743 	struct xfs_log_item		*lip)
4744 {
4745 	struct xfs_cui_log_item		*cuip;
4746 
4747 	cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4748 
4749 	spin_unlock(&ailp->xa_lock);
4750 	xfs_cui_release(cuip);
4751 	spin_lock(&ailp->xa_lock);
4752 }
4753 
4754 /* Recover the BUI if necessary. */
4755 STATIC int
4756 xlog_recover_process_bui(
4757 	struct xfs_mount		*mp,
4758 	struct xfs_ail			*ailp,
4759 	struct xfs_log_item		*lip)
4760 {
4761 	struct xfs_bui_log_item		*buip;
4762 	int				error;
4763 
4764 	/*
4765 	 * Skip BUIs that we've already processed.
4766 	 */
4767 	buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4768 	if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
4769 		return 0;
4770 
4771 	spin_unlock(&ailp->xa_lock);
4772 	error = xfs_bui_recover(mp, buip);
4773 	spin_lock(&ailp->xa_lock);
4774 
4775 	return error;
4776 }
4777 
4778 /* Release the BUI since we're cancelling everything. */
4779 STATIC void
4780 xlog_recover_cancel_bui(
4781 	struct xfs_mount		*mp,
4782 	struct xfs_ail			*ailp,
4783 	struct xfs_log_item		*lip)
4784 {
4785 	struct xfs_bui_log_item		*buip;
4786 
4787 	buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4788 
4789 	spin_unlock(&ailp->xa_lock);
4790 	xfs_bui_release(buip);
4791 	spin_lock(&ailp->xa_lock);
4792 }
4793 
4794 /* Is this log item a deferred action intent? */
4795 static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
4796 {
4797 	switch (lip->li_type) {
4798 	case XFS_LI_EFI:
4799 	case XFS_LI_RUI:
4800 	case XFS_LI_CUI:
4801 	case XFS_LI_BUI:
4802 		return true;
4803 	default:
4804 		return false;
4805 	}
4806 }
4807 
4808 /*
4809  * When this is called, all of the log intent items which did not have
4810  * corresponding log done items should be in the AIL.  What we do now
4811  * is update the data structures associated with each one.
4812  *
4813  * Since we process the log intent items in normal transactions, they
4814  * will be removed at some point after the commit.  This prevents us
4815  * from just walking down the list processing each one.  We'll use a
4816  * flag in the intent item to skip those that we've already processed
4817  * and use the AIL iteration mechanism's generation count to try to
4818  * speed this up at least a bit.
4819  *
4820  * When we start, we know that the intents are the only things in the
4821  * AIL.  As we process them, however, other items are added to the
4822  * AIL.
4823  */
4824 STATIC int
4825 xlog_recover_process_intents(
4826 	struct xlog		*log)
4827 {
4828 	struct xfs_log_item	*lip;
4829 	int			error = 0;
4830 	struct xfs_ail_cursor	cur;
4831 	struct xfs_ail		*ailp;
4832 #if defined(DEBUG) || defined(XFS_WARN)
4833 	xfs_lsn_t		last_lsn;
4834 #endif
4835 
4836 	ailp = log->l_ailp;
4837 	spin_lock(&ailp->xa_lock);
4838 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4839 #if defined(DEBUG) || defined(XFS_WARN)
4840 	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
4841 #endif
4842 	while (lip != NULL) {
4843 		/*
4844 		 * We're done when we see something other than an intent.
4845 		 * There should be no intents left in the AIL now.
4846 		 */
4847 		if (!xlog_item_is_intent(lip)) {
4848 #ifdef DEBUG
4849 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4850 				ASSERT(!xlog_item_is_intent(lip));
4851 #endif
4852 			break;
4853 		}
4854 
4855 		/*
4856 		 * We should never see a redo item with a LSN higher than
4857 		 * the last transaction we found in the log at the start
4858 		 * of recovery.
4859 		 */
4860 		ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
4861 
4862 		switch (lip->li_type) {
4863 		case XFS_LI_EFI:
4864 			error = xlog_recover_process_efi(log->l_mp, ailp, lip);
4865 			break;
4866 		case XFS_LI_RUI:
4867 			error = xlog_recover_process_rui(log->l_mp, ailp, lip);
4868 			break;
4869 		case XFS_LI_CUI:
4870 			error = xlog_recover_process_cui(log->l_mp, ailp, lip);
4871 			break;
4872 		case XFS_LI_BUI:
4873 			error = xlog_recover_process_bui(log->l_mp, ailp, lip);
4874 			break;
4875 		}
4876 		if (error)
4877 			goto out;
4878 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
4879 	}
4880 out:
4881 	xfs_trans_ail_cursor_done(&cur);
4882 	spin_unlock(&ailp->xa_lock);
4883 	return error;
4884 }
4885 
4886 /*
4887  * A cancel occurs when the mount has failed and we're bailing out.
4888  * Release all pending log intent items so they don't pin the AIL.
4889  */
4890 STATIC int
4891 xlog_recover_cancel_intents(
4892 	struct xlog		*log)
4893 {
4894 	struct xfs_log_item	*lip;
4895 	int			error = 0;
4896 	struct xfs_ail_cursor	cur;
4897 	struct xfs_ail		*ailp;
4898 
4899 	ailp = log->l_ailp;
4900 	spin_lock(&ailp->xa_lock);
4901 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4902 	while (lip != NULL) {
4903 		/*
4904 		 * We're done when we see something other than an intent.
4905 		 * There should be no intents left in the AIL now.
4906 		 */
4907 		if (!xlog_item_is_intent(lip)) {
4908 #ifdef DEBUG
4909 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4910 				ASSERT(!xlog_item_is_intent(lip));
4911 #endif
4912 			break;
4913 		}
4914 
4915 		switch (lip->li_type) {
4916 		case XFS_LI_EFI:
4917 			xlog_recover_cancel_efi(log->l_mp, ailp, lip);
4918 			break;
4919 		case XFS_LI_RUI:
4920 			xlog_recover_cancel_rui(log->l_mp, ailp, lip);
4921 			break;
4922 		case XFS_LI_CUI:
4923 			xlog_recover_cancel_cui(log->l_mp, ailp, lip);
4924 			break;
4925 		case XFS_LI_BUI:
4926 			xlog_recover_cancel_bui(log->l_mp, ailp, lip);
4927 			break;
4928 		}
4929 
4930 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
4931 	}
4932 
4933 	xfs_trans_ail_cursor_done(&cur);
4934 	spin_unlock(&ailp->xa_lock);
4935 	return error;
4936 }
4937 
4938 /*
4939  * This routine performs a transaction to null out a bad inode pointer
4940  * in an agi unlinked inode hash bucket.
4941  */
4942 STATIC void
4943 xlog_recover_clear_agi_bucket(
4944 	xfs_mount_t	*mp,
4945 	xfs_agnumber_t	agno,
4946 	int		bucket)
4947 {
4948 	xfs_trans_t	*tp;
4949 	xfs_agi_t	*agi;
4950 	xfs_buf_t	*agibp;
4951 	int		offset;
4952 	int		error;
4953 
4954 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
4955 	if (error)
4956 		goto out_error;
4957 
4958 	error = xfs_read_agi(mp, tp, agno, &agibp);
4959 	if (error)
4960 		goto out_abort;
4961 
4962 	agi = XFS_BUF_TO_AGI(agibp);
4963 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
4964 	offset = offsetof(xfs_agi_t, agi_unlinked) +
4965 		 (sizeof(xfs_agino_t) * bucket);
4966 	xfs_trans_log_buf(tp, agibp, offset,
4967 			  (offset + sizeof(xfs_agino_t) - 1));
4968 
4969 	error = xfs_trans_commit(tp);
4970 	if (error)
4971 		goto out_error;
4972 	return;
4973 
4974 out_abort:
4975 	xfs_trans_cancel(tp);
4976 out_error:
4977 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
4978 	return;
4979 }
4980 
4981 STATIC xfs_agino_t
4982 xlog_recover_process_one_iunlink(
4983 	struct xfs_mount		*mp,
4984 	xfs_agnumber_t			agno,
4985 	xfs_agino_t			agino,
4986 	int				bucket)
4987 {
4988 	struct xfs_buf			*ibp;
4989 	struct xfs_dinode		*dip;
4990 	struct xfs_inode		*ip;
4991 	xfs_ino_t			ino;
4992 	int				error;
4993 
4994 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
4995 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
4996 	if (error)
4997 		goto fail;
4998 
4999 	/*
5000 	 * Get the on disk inode to find the next inode in the bucket.
5001 	 */
5002 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
5003 	if (error)
5004 		goto fail_iput;
5005 
5006 	xfs_iflags_clear(ip, XFS_IRECOVERY);
5007 	ASSERT(VFS_I(ip)->i_nlink == 0);
5008 	ASSERT(VFS_I(ip)->i_mode != 0);
5009 
5010 	/* setup for the next pass */
5011 	agino = be32_to_cpu(dip->di_next_unlinked);
5012 	xfs_buf_relse(ibp);
5013 
5014 	/*
5015 	 * Prevent any DMAPI event from being sent when the reference on
5016 	 * the inode is dropped.
5017 	 */
5018 	ip->i_d.di_dmevmask = 0;
5019 
5020 	IRELE(ip);
5021 	return agino;
5022 
5023  fail_iput:
5024 	IRELE(ip);
5025  fail:
5026 	/*
5027 	 * We can't read in the inode this bucket points to, or this inode
5028 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
5029 	 * some inodes and space, but at least we won't hang.
5030 	 *
5031 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
5032 	 * clear the inode pointer in the bucket.
5033 	 */
5034 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
5035 	return NULLAGINO;
5036 }
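
/*
 * Each on-disk inode carries a di_next_unlinked field holding the AG
 * inode number of the next inode on its unlinked list, so each bucket
 * forms a singly linked chain that starts at agi_unlinked[bucket] and is
 * terminated by NULLAGINO. The walk above follows that chain one inode
 * at a time, dropping the final reference on each inode so inactivation
 * can truncate and free it.
 */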
5037 
5038 /*
5039  * xlog_iunlink_recover
5040  *
5041  * This is called during recovery to process any inodes which
5042  * we unlinked but not freed when the system crashed.  These
5043  * inodes will be on the lists in the AGI blocks.  What we do
5044  * here is scan all the AGIs and fully truncate and free any
5045  * inodes found on the lists.  Each inode is removed from the
5046  * lists when it has been fully truncated and is freed.  The
5047  * freeing of the inode and its removal from the list must be
5048  * atomic.
5049  */
5050 STATIC void
5051 xlog_recover_process_iunlinks(
5052 	struct xlog	*log)
5053 {
5054 	xfs_mount_t	*mp;
5055 	xfs_agnumber_t	agno;
5056 	xfs_agi_t	*agi;
5057 	xfs_buf_t	*agibp;
5058 	xfs_agino_t	agino;
5059 	int		bucket;
5060 	int		error;
5061 	uint		mp_dmevmask;
5062 
5063 	mp = log->l_mp;
5064 
5065 	/*
5066 	 * Prevent any DMAPI event from being sent while in this function.
5067 	 */
5068 	mp_dmevmask = mp->m_dmevmask;
5069 	mp->m_dmevmask = 0;
5070 
5071 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5072 		/*
5073 		 * Find the agi for this ag.
5074 		 */
5075 		error = xfs_read_agi(mp, NULL, agno, &agibp);
5076 		if (error) {
5077 			/*
5078 			 * AGI is b0rked. Don't process it.
5079 			 *
5080 			 * We should probably mark the filesystem as corrupt
5081 			 * after we've recovered all the ag's we can....
5082 			 */
5083 			continue;
5084 		}
5085 		/*
5086 		 * Unlock the buffer so that it can be acquired in the normal
5087 		 * course of the transaction to truncate and free each inode.
5088 		 * Because we are not racing with anyone else here for the AGI
5089 		 * buffer, we don't even need to hold it locked to read the
5090 		 * initial unlinked bucket entries out of the buffer. We keep
5091 		 * buffer reference though, so that it stays pinned in memory
5092 		 * while we need the buffer.
5093 		 */
5094 		agi = XFS_BUF_TO_AGI(agibp);
5095 		xfs_buf_unlock(agibp);
5096 
5097 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
5098 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
5099 			while (agino != NULLAGINO) {
5100 				agino = xlog_recover_process_one_iunlink(mp,
5101 							agno, agino, bucket);
5102 			}
5103 		}
5104 		xfs_buf_rele(agibp);
5105 	}
5106 
5107 	mp->m_dmevmask = mp_dmevmask;
5108 }
5109 
5110 STATIC int
5111 xlog_unpack_data(
5112 	struct xlog_rec_header	*rhead,
5113 	char			*dp,
5114 	struct xlog		*log)
5115 {
5116 	int			i, j, k;
5117 
5118 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
5119 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
5120 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
5121 		dp += BBSIZE;
5122 	}
5123 
5124 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5125 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
5126 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
5127 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5128 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5129 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
5130 			dp += BBSIZE;
5131 		}
5132 	}
5133 
5134 	return 0;
5135 }
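
/*
 * Unpacking is needed because record data is "packed" at write time: the
 * first four bytes of every 512-byte basic block in the record body are
 * overwritten with the record's cycle number, and the displaced bytes
 * are saved in h_cycle_data[] (and in extended headers for v2 logs when
 * the record exceeds XLOG_HEADER_CYCLE_SIZE). Head/tail discovery uses
 * the stamped cycle numbers to detect torn writes; the loops above put
 * the original payload bytes back before the record is replayed.
 */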
5136 
5137 /*
5138  * CRC check, unpack and process a log record.
5139  */
5140 STATIC int
5141 xlog_recover_process(
5142 	struct xlog		*log,
5143 	struct hlist_head	rhash[],
5144 	struct xlog_rec_header	*rhead,
5145 	char			*dp,
5146 	int			pass,
5147 	struct list_head	*buffer_list)
5148 {
5149 	int			error;
5150 	__le32			old_crc = rhead->h_crc;
5151 	__le32			crc;
5152 
5153 
5154 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
5155 
5156 	/*
5157 	 * Nothing else to do if this is a CRC verification pass. Just return
5158 	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
5159 	 * sets old_crc to 0 so we must consider this valid even on v5 supers.
5160 	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
5161 	 * know precisely what failed.
5162 	 */
5163 	if (pass == XLOG_RECOVER_CRCPASS) {
5164 		if (old_crc && crc != old_crc)
5165 			return -EFSBADCRC;
5166 		return 0;
5167 	}
5168 
5169 	/*
5170 	 * We're in the normal recovery path. Issue a warning if and only if the
5171 	 * CRC in the header is non-zero. This is an advisory warning and the
5172 	 * zero CRC check prevents warnings from being emitted when upgrading
5173 	 * the kernel from one that does not add CRCs by default.
5174 	 */
5175 	if (crc != old_crc) {
5176 		if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
5177 			xfs_alert(log->l_mp,
5178 		"log record CRC mismatch: found 0x%x, expected 0x%x.",
5179 					le32_to_cpu(old_crc),
5180 					le32_to_cpu(crc));
5181 			xfs_hex_dump(dp, 32);
5182 		}
5183 
5184 		/*
5185 		 * If the filesystem is CRC enabled, this mismatch becomes a
5186 		 * fatal log corruption failure.
5187 		 */
5188 		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
5189 			return -EFSCORRUPTED;
5190 	}
5191 
5192 	error = xlog_unpack_data(rhead, dp, log);
5193 	if (error)
5194 		return error;
5195 
5196 	return xlog_recover_process_data(log, rhash, rhead, dp, pass,
5197 					 buffer_list);
5198 }
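
/*
 * For reference, the CRC policy above works out as follows (summarised
 * from the code, not separately documented):
 *
 *   - CRCs match (including both zero):  accepted in both passes.
 *   - Mismatch with a zero stored CRC:   the CRC pass accepts it (mkfs
 *     quirk); the recovery pass alerts and returns -EFSCORRUPTED on v5
 *     filesystems but proceeds silently on v4.
 *   - Mismatch with a non-zero stored CRC: the CRC pass returns
 *     -EFSBADCRC; the recovery pass alerts, then fails with
 *     -EFSCORRUPTED on v5 and proceeds with a warning only on v4.
 */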
5199 
5200 STATIC int
5201 xlog_valid_rec_header(
5202 	struct xlog		*log,
5203 	struct xlog_rec_header	*rhead,
5204 	xfs_daddr_t		blkno)
5205 {
5206 	int			hlen;
5207 
5208 	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
5209 		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
5210 				XFS_ERRLEVEL_LOW, log->l_mp);
5211 		return -EFSCORRUPTED;
5212 	}
	if (unlikely(!rhead->h_version ||
		     (be32_to_cpu(rhead->h_version) & ~XLOG_VERSION_OKBITS))) {
5216 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
5217 			__func__, be32_to_cpu(rhead->h_version));
5218 		return -EIO;
5219 	}
5220 
5221 	/* LR body must have data or it wouldn't have been written */
5222 	hlen = be32_to_cpu(rhead->h_len);
	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
5224 		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
5225 				XFS_ERRLEVEL_LOW, log->l_mp);
5226 		return -EFSCORRUPTED;
5227 	}
	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
5229 		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
5230 				XFS_ERRLEVEL_LOW, log->l_mp);
5231 		return -EFSCORRUPTED;
5232 	}
5233 	return 0;
5234 }
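
/*
 * As an example of the version check above: XLOG_VERSION_OKBITS is
 * XLOG_VERSION_1 | XLOG_VERSION_2 (0x3 in xfs_log_format.h), so a header
 * with h_version = cpu_to_be32(2) passes, while a zero version or any
 * bits outside the low two cause the record to be rejected.
 */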
5235 
5236 /*
5237  * Read the log from tail to head and process the log records found.
5238  * Handle the two cases where the tail and head are in the same cycle
5239  * and where the active portion of the log wraps around the end of
5240  * the physical log separately.  The pass parameter is passed through
5241  * to the routines called to process the data and is not looked at
5242  * here.
5243  */
5244 STATIC int
5245 xlog_do_recovery_pass(
5246 	struct xlog		*log,
5247 	xfs_daddr_t		head_blk,
5248 	xfs_daddr_t		tail_blk,
5249 	int			pass,
5250 	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
5251 {
5252 	xlog_rec_header_t	*rhead;
5253 	xfs_daddr_t		blk_no, rblk_no;
5254 	xfs_daddr_t		rhead_blk;
5255 	char			*offset;
5256 	xfs_buf_t		*hbp, *dbp;
5257 	int			error = 0, h_size, h_len;
5258 	int			error2 = 0;
5259 	int			bblks, split_bblks;
5260 	int			hblks, split_hblks, wrapped_hblks;
5261 	int			i;
5262 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
	LIST_HEAD	(buffer_list);
5264 
5265 	ASSERT(head_blk != tail_blk);
5266 	blk_no = rhead_blk = tail_blk;
5267 
5268 	for (i = 0; i < XLOG_RHASH_SIZE; i++)
5269 		INIT_HLIST_HEAD(&rhash[i]);
5270 
5271 	/*
5272 	 * Read the header of the tail block and get the iclog buffer size from
5273 	 * h_size.  Use this to tell how many sectors make up the log header.
5274 	 */
5275 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5276 		/*
5277 		 * When using variable length iclogs, read first sector of
5278 		 * iclog header and extract the header size from it.  Get a
5279 		 * new hbp that is the correct size.
5280 		 */
5281 		hbp = xlog_get_bp(log, 1);
5282 		if (!hbp)
5283 			return -ENOMEM;
5284 
5285 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
5286 		if (error)
5287 			goto bread_err1;
5288 
5289 		rhead = (xlog_rec_header_t *)offset;
5290 		error = xlog_valid_rec_header(log, rhead, tail_blk);
5291 		if (error)
5292 			goto bread_err1;
5293 
5294 		/*
5295 		 * xfsprogs has a bug where record length is based on lsunit but
5296 		 * h_size (iclog size) is hardcoded to 32k. Now that we
5297 		 * unconditionally CRC verify the unmount record, this means the
5298 		 * log buffer can be too small for the record and cause an
5299 		 * overrun.
5300 		 *
5301 		 * Detect this condition here. Use lsunit for the buffer size as
5302 		 * long as this looks like the mkfs case. Otherwise, return an
5303 		 * error to avoid a buffer overrun.
5304 		 */
5305 		h_size = be32_to_cpu(rhead->h_size);
5306 		h_len = be32_to_cpu(rhead->h_len);
5307 		if (h_len > h_size) {
5308 			if (h_len <= log->l_mp->m_logbsize &&
5309 			    be32_to_cpu(rhead->h_num_logops) == 1) {
5310 				xfs_warn(log->l_mp,
5311 		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
5312 					 h_size, log->l_mp->m_logbsize);
5313 				h_size = log->l_mp->m_logbsize;
			} else {
				/* Bail via bread_err1 so hbp is released. */
				error = -EFSCORRUPTED;
				goto bread_err1;
			}
5316 		}
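
		/*
		 * Example of the workaround above (hypothetical numbers):
		 * mkfs with lsunit = 256k sizes the unmount record from
		 * lsunit, so h_len can be up to 262144 while h_size still
		 * claims 32768.  Because such a record carries exactly one
		 * logop, we trust m_logbsize (256k here) for the buffer
		 * size; any other oversized record is treated as corruption.
		 */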
5317 
5318 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
5319 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
5320 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
5321 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
5322 				hblks++;
5323 			xlog_put_bp(hbp);
5324 			hbp = xlog_get_bp(log, hblks);
5325 		} else {
5326 			hblks = 1;
5327 		}
5328 	} else {
5329 		ASSERT(log->l_sectBBsize == 1);
5330 		hblks = 1;
5331 		hbp = xlog_get_bp(log, 1);
5332 		h_size = XLOG_BIG_RECORD_BSIZE;
5333 	}
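
	/*
	 * To make the header sizing concrete (illustrative numbers only): a
	 * v2 log with 128k iclogs has h_size = 131072, giving hblks =
	 * 131072 / XLOG_HEADER_CYCLE_SIZE = 4 header blocks, while any
	 * h_size of up to 32k, and every v1 log, uses a single header block.
	 */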
5334 
5335 	if (!hbp)
5336 		return -ENOMEM;
5337 	dbp = xlog_get_bp(log, BTOBB(h_size));
5338 	if (!dbp) {
5339 		xlog_put_bp(hbp);
5340 		return -ENOMEM;
5341 	}
5342 
5344 	if (tail_blk > head_blk) {
5345 		/*
5346 		 * Perform recovery around the end of the physical log.
5347 		 * When the head is not on the same cycle number as the tail,
5348 		 * we can't do a sequential recovery.
5349 		 */
5350 		while (blk_no < log->l_logBBsize) {
5351 			/*
5352 			 * Check for header wrapping around physical end-of-log
5353 			 */
5354 			offset = hbp->b_addr;
5355 			split_hblks = 0;
5356 			wrapped_hblks = 0;
5357 			if (blk_no + hblks <= log->l_logBBsize) {
5358 				/* Read header in one read */
5359 				error = xlog_bread(log, blk_no, hblks, hbp,
5360 						   &offset);
5361 				if (error)
5362 					goto bread_err2;
5363 			} else {
5364 				/* This LR is split across physical log end */
5365 				if (blk_no != log->l_logBBsize) {
5366 					/* some data before physical log end */
5367 					ASSERT(blk_no <= INT_MAX);
5368 					split_hblks = log->l_logBBsize - (int)blk_no;
5369 					ASSERT(split_hblks > 0);
5370 					error = xlog_bread(log, blk_no,
5371 							   split_hblks, hbp,
5372 							   &offset);
5373 					if (error)
5374 						goto bread_err2;
5375 				}
5376 
5377 				/*
5378 				 * Note: this black magic still works with
5379 				 * large sector sizes (non-512) only because:
5380 				 * - we increased the buffer size originally
5381 				 *   by 1 sector giving us enough extra space
5382 				 *   for the second read;
5383 				 * - the log start is guaranteed to be sector
5384 				 *   aligned;
5385 				 * - we read the log end (LR header start)
5386 				 *   _first_, then the log start (LR header end)
5387 				 *   - order is important.
5388 				 */
5389 				wrapped_hblks = hblks - split_hblks;
5390 				error = xlog_bread_offset(log, 0,
5391 						wrapped_hblks, hbp,
5392 						offset + BBTOB(split_hblks));
5393 				if (error)
5394 					goto bread_err2;
5395 			}
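
			/*
			 * Illustrative example of the split read (made-up
			 * geometry): with l_logBBsize = 1000, blk_no = 998
			 * and hblks = 4, split_hblks = 2 blocks are read
			 * from block 998, then wrapped_hblks = 2 more from
			 * block 0 land at offset + BBTOB(2), reassembling
			 * the header contiguously in hbp.
			 */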
5396 			rhead = (xlog_rec_header_t *)offset;
5397 			error = xlog_valid_rec_header(log, rhead,
5398 						split_hblks ? blk_no : 0);
5399 			if (error)
5400 				goto bread_err2;
5401 
5402 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5403 			blk_no += hblks;
5404 
5405 			/*
5406 			 * Read the log record data in multiple reads if it
5407 			 * wraps around the end of the log. Note that if the
5408 			 * header already wrapped, blk_no could point past the
5409 			 * end of the log. The record data is contiguous in
5410 			 * that case.
5411 			 */
5412 			if (blk_no + bblks <= log->l_logBBsize ||
5413 			    blk_no >= log->l_logBBsize) {
				/*
				 * Mod blk_no in case the header wrapped and
				 * pushed it beyond the end of the log.
				 */
5416 				rblk_no = do_mod(blk_no, log->l_logBBsize);
5417 				error = xlog_bread(log, rblk_no, bblks, dbp,
5418 						   &offset);
5419 				if (error)
5420 					goto bread_err2;
5421 			} else {
				/*
				 * This log record is split across the
				 * physical end of the log.
				 */
5424 				offset = dbp->b_addr;
5425 				split_bblks = 0;
5426 				if (blk_no != log->l_logBBsize) {
					/*
					 * Some data is before the physical
					 * end of the log.
					 */
5429 					ASSERT(!wrapped_hblks);
5430 					ASSERT(blk_no <= INT_MAX);
5431 					split_bblks =
5432 						log->l_logBBsize - (int)blk_no;
5433 					ASSERT(split_bblks > 0);
5434 					error = xlog_bread(log, blk_no,
5435 							split_bblks, dbp,
5436 							&offset);
5437 					if (error)
5438 						goto bread_err2;
5439 				}
5440 
				/*
				 * Same black magic as for the split header
				 * read above: the over-sized buffer, the
				 * sector-aligned log start and the end-first
				 * read order make the two reads land
				 * contiguously.
				 */
5453 				error = xlog_bread_offset(log, 0,
5454 						bblks - split_bblks, dbp,
5455 						offset + BBTOB(split_bblks));
5456 				if (error)
5457 					goto bread_err2;
5458 			}
5459 
5460 			error = xlog_recover_process(log, rhash, rhead, offset,
5461 						     pass, &buffer_list);
5462 			if (error)
5463 				goto bread_err2;
5464 
5465 			blk_no += bblks;
5466 			rhead_blk = blk_no;
5467 		}
5468 
5469 		ASSERT(blk_no >= log->l_logBBsize);
5470 		blk_no -= log->l_logBBsize;
5471 		rhead_blk = blk_no;
5472 	}
5473 
5474 	/* read first part of physical log */
5475 	while (blk_no < head_blk) {
5476 		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
5477 		if (error)
5478 			goto bread_err2;
5479 
5480 		rhead = (xlog_rec_header_t *)offset;
5481 		error = xlog_valid_rec_header(log, rhead, blk_no);
5482 		if (error)
5483 			goto bread_err2;
5484 
5485 		/* blocks in data section */
5486 		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
		error = xlog_bread(log, blk_no + hblks, bblks, dbp, &offset);
5489 		if (error)
5490 			goto bread_err2;
5491 
5492 		error = xlog_recover_process(log, rhash, rhead, offset, pass,
5493 					     &buffer_list);
5494 		if (error)
5495 			goto bread_err2;
5496 
5497 		blk_no += bblks + hblks;
5498 		rhead_blk = blk_no;
5499 	}
5500 
5501  bread_err2:
5502 	xlog_put_bp(dbp);
5503  bread_err1:
5504 	xlog_put_bp(hbp);
5505 
5506 	/*
5507 	 * Submit buffers that have been added from the last record processed,
5508 	 * regardless of error status.
5509 	 */
5510 	if (!list_empty(&buffer_list))
5511 		error2 = xfs_buf_delwri_submit(&buffer_list);
5512 
5513 	if (error && first_bad)
5514 		*first_bad = rhead_blk;
5515 
5516 	/*
5517 	 * Transactions are freed at commit time but transactions without commit
5518 	 * records on disk are never committed. Free any that may be left in the
5519 	 * hash table.
5520 	 */
5521 	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
5522 		struct hlist_node	*tmp;
5523 		struct xlog_recover	*trans;
5524 
5525 		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
5526 			xlog_recover_free_trans(trans);
5527 	}
5528 
5529 	return error ? error : error2;
5530 }
5531 
5532 /*
 * Do the recovery of the log.  We actually do this in two passes.
5534  * The two passes are necessary in order to implement the function
5535  * of cancelling a record written into the log.  The first pass
5536  * determines those things which have been cancelled, and the
5537  * second pass replays log items normally except for those which
5538  * have been cancelled.  The handling of the replay and cancellations
5539  * takes place in the log item type specific routines.
5540  *
5541  * The table of items which have cancel records in the log is allocated
5542  * and freed at this level, since only here do we know when all of
5543  * the log recovery has been completed.
5544  */
5545 STATIC int
5546 xlog_do_log_recovery(
5547 	struct xlog	*log,
5548 	xfs_daddr_t	head_blk,
5549 	xfs_daddr_t	tail_blk)
5550 {
5551 	int		error, i;
5552 
5553 	ASSERT(head_blk != tail_blk);
5554 
5555 	/*
5556 	 * First do a pass to find all of the cancelled buf log items.
5557 	 * Store them in the buf_cancel_table for use in the second pass.
5558 	 */
5559 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
5560 						 sizeof(struct list_head),
5561 						 KM_SLEEP);
5562 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5563 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
5564 
5565 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5566 				      XLOG_RECOVER_PASS1, NULL);
5567 	if (error != 0) {
5568 		kmem_free(log->l_buf_cancel_table);
5569 		log->l_buf_cancel_table = NULL;
5570 		return error;
5571 	}
5572 	/*
5573 	 * Then do a second pass to actually recover the items in the log.
5574 	 * When it is complete free the table of buf cancel items.
5575 	 */
5576 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5577 				      XLOG_RECOVER_PASS2, NULL);
5578 #ifdef DEBUG
5579 	if (!error) {
5582 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5583 			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
5584 	}
5585 #endif	/* DEBUG */
5586 
5587 	kmem_free(log->l_buf_cancel_table);
5588 	log->l_buf_cancel_table = NULL;
5589 
5590 	return error;
5591 }
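
/*
 * For example (behaviour summarised from the buffer recovery helpers
 * earlier in this file): if a transaction logs a buffer and a later
 * transaction invalidates it with XFS_BLF_CANCEL, pass 1 records the
 * cancellation in l_buf_cancel_table, hashed by block number, and pass 2
 * then skips replay of the stale buffer image instead of writing it back.
 */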
5592 
5593 /*
5594  * Do the actual recovery
5595  */
5596 STATIC int
5597 xlog_do_recover(
5598 	struct xlog	*log,
5599 	xfs_daddr_t	head_blk,
5600 	xfs_daddr_t	tail_blk)
5601 {
5602 	struct xfs_mount *mp = log->l_mp;
5603 	int		error;
5604 	xfs_buf_t	*bp;
5605 	xfs_sb_t	*sbp;
5606 
5607 	trace_xfs_log_recover(log, head_blk, tail_blk);
5608 
5609 	/*
5610 	 * First replay the images in the log.
5611 	 */
5612 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
5613 	if (error)
5614 		return error;
5615 
5616 	/*
5617 	 * If IO errors happened during recovery, bail out.
5618 	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;
5622 
5623 	/*
5624 	 * We now update the tail_lsn since much of the recovery has completed
5625 	 * and there may be space available to use.  If there were no extent
	 * frees or iunlinks, we can free up the entire log and set the tail_lsn to
5627 	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
5628 	 * lsn of the last known good LR on disk.  If there are extent frees
	 * or iunlinks, they will have some entries in the AIL, so we look at
5630 	 * the AIL to determine how to set the tail_lsn.
5631 	 */
5632 	xlog_assign_tail_lsn(mp);
5633 
5634 	/*
5635 	 * Now that we've finished replaying all buffer and inode
5636 	 * updates, re-read in the superblock and reverify it.
5637 	 */
5638 	bp = xfs_getsb(mp, 0);
5639 	bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
5640 	ASSERT(!(bp->b_flags & XBF_WRITE));
5641 	bp->b_flags |= XBF_READ;
5642 	bp->b_ops = &xfs_sb_buf_ops;
5643 
5644 	error = xfs_buf_submit_wait(bp);
5645 	if (error) {
5646 		if (!XFS_FORCED_SHUTDOWN(mp)) {
5647 			xfs_buf_ioerror_alert(bp, __func__);
5648 			ASSERT(0);
5649 		}
5650 		xfs_buf_relse(bp);
5651 		return error;
5652 	}
5653 
5654 	/* Convert superblock from on-disk format */
5655 	sbp = &mp->m_sb;
5656 	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
5657 	xfs_buf_relse(bp);
5658 
5659 	/* re-initialise in-core superblock and geometry structures */
5660 	xfs_reinit_percpu_counters(mp);
5661 	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
5662 	if (error) {
5663 		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
5664 		return error;
5665 	}
5666 	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
5667 
5668 	xlog_recover_check_summary(log);
5669 
5670 	/* Normal transactions can now occur */
5671 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
5672 	return 0;
5673 }
5674 
5675 /*
5676  * Perform recovery and re-initialize some log variables in xlog_find_tail.
5677  *
5678  * Return error or zero.
5679  */
5680 int
5681 xlog_recover(
5682 	struct xlog	*log)
5683 {
5684 	xfs_daddr_t	head_blk, tail_blk;
5685 	int		error;
5686 
5687 	/* find the tail of the log */
5688 	error = xlog_find_tail(log, &head_blk, &tail_blk);
5689 	if (error)
5690 		return error;
5691 
5692 	/*
5693 	 * The superblock was read before the log was available and thus the LSN
5694 	 * could not be verified. Check the superblock LSN against the current
5695 	 * LSN now that it's known.
5696 	 */
5697 	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
5698 	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
5699 		return -EINVAL;
5700 
5701 	if (tail_blk != head_blk) {
		/*
		 * There used to be a comment here:
		 *
		 *	disallow recovery on read-only mounts.  note -- mount
		 *	checks for ENOSPC and turns it into an intelligent
		 *	error message.
		 *
		 * ...but this is no longer true.  Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover.  We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		error = xfs_dev_is_read_only(log->l_mp, "recovery");
		if (error)
			return error;
5716 
5717 		/*
5718 		 * Version 5 superblock log feature mask validation. We know the
5719 		 * log is dirty so check if there are any unknown log features
5720 		 * in what we need to recover. If there are unknown features
		 * (e.g. unsupported transactions), then simply reject the
5722 		 * attempt at recovery before touching anything.
5723 		 */
5724 		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
5725 		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
5726 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
5727 			xfs_warn(log->l_mp,
5728 "Superblock has unknown incompatible log features (0x%x) enabled.",
5729 				(log->l_mp->m_sb.sb_features_log_incompat &
5730 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
5731 			xfs_warn(log->l_mp,
"The log cannot be fully and/or safely recovered by this kernel.");
5733 			xfs_warn(log->l_mp,
5734 "Please recover the log on a kernel that supports the unknown features.");
5735 			return -EINVAL;
5736 		}
5737 
5738 		/*
5739 		 * Delay log recovery if the debug hook is set. This is debug
		 * instrumentation to coordinate simulation of I/O failures with
5741 		 * log recovery.
5742 		 */
5743 		if (xfs_globals.log_recovery_delay) {
5744 			xfs_notice(log->l_mp,
5745 				"Delaying log recovery for %d seconds.",
5746 				xfs_globals.log_recovery_delay);
5747 			msleep(xfs_globals.log_recovery_delay * 1000);
5748 		}
5749 
5750 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
5751 				log->l_mp->m_logname ? log->l_mp->m_logname
5752 						     : "internal");
5753 
5754 		error = xlog_do_recover(log, head_blk, tail_blk);
5755 		log->l_flags |= XLOG_RECOVERY_NEEDED;
5756 	}
5757 	return error;
5758 }
5759 
5760 /*
5761  * In the first part of recovery we replay inodes and buffers and build
 * up the list of intent items which need to be processed.  Here
 * we process those intent items and clean up the on-disk unlinked
5764  * inode lists.  This is separated from the first part of recovery so
5765  * that the root and real-time bitmap inodes can be read in from disk in
5766  * between the two stages.  This is necessary so that we can free space
5767  * in the real-time portion of the file system.
5768  */
5769 int
5770 xlog_recover_finish(
5771 	struct xlog	*log)
5772 {
5773 	/*
5774 	 * Now we're ready to do the transactions needed for the
	 * rest of recovery.  Start with completing all the unfinished
	 * intent records and then process the unlinked inode
5777 	 * lists.  At this point, we essentially run in normal mode
5778 	 * except that we're still performing recovery actions
5779 	 * rather than accepting new requests.
5780 	 */
5781 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;

		error = xlog_recover_process_intents(log);
5784 		if (error) {
5785 			xfs_alert(log->l_mp, "Failed to recover intents");
5786 			return error;
5787 		}
5788 
5789 		/*
5790 		 * Sync the log to get all the intents out of the AIL.
5791 		 * This isn't absolutely necessary, but it helps in
5792 		 * case the unlink transactions would have problems
5793 		 * pushing the intents out of the way.
5794 		 */
5795 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
5796 
5797 		xlog_recover_process_iunlinks(log);
5798 
5799 		xlog_recover_check_summary(log);
5800 
5801 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
5802 				log->l_mp->m_logname ? log->l_mp->m_logname
5803 						     : "internal");
5804 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
5805 	} else {
5806 		xfs_info(log->l_mp, "Ending clean mount");
5807 	}
5808 	return 0;
5809 }
5810 
5811 int
5812 xlog_recover_cancel(
5813 	struct xlog	*log)
5814 {
5815 	int		error = 0;
5816 
5817 	if (log->l_flags & XLOG_RECOVERY_NEEDED)
5818 		error = xlog_recover_cancel_intents(log);
5819 
5820 	return error;
5821 }
5822 
5823 #if defined(DEBUG)
5824 /*
 * Read all of the agf and agi counters and accumulate them so they can
 * be checked against the superblock counters (e.g. from a debugger);
 * note that the function itself does not perform the comparison.
5827  */
5828 STATIC void
5829 xlog_recover_check_summary(
5830 	struct xlog	*log)
5831 {
5832 	xfs_mount_t	*mp;
5833 	xfs_agf_t	*agfp;
5834 	xfs_buf_t	*agfbp;
5835 	xfs_buf_t	*agibp;
5836 	xfs_agnumber_t	agno;
5837 	uint64_t	freeblks;
5838 	uint64_t	itotal;
5839 	uint64_t	ifree;
5840 	int		error;
5841 
5842 	mp = log->l_mp;
5843 
5844 	freeblks = 0LL;
5845 	itotal = 0LL;
5846 	ifree = 0LL;
5847 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5848 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
5849 		if (error) {
5850 			xfs_alert(mp, "%s agf read failed agno %d error %d",
5851 						__func__, agno, error);
5852 		} else {
5853 			agfp = XFS_BUF_TO_AGF(agfbp);
5854 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
5855 				    be32_to_cpu(agfp->agf_flcount);
5856 			xfs_buf_relse(agfbp);
5857 		}
5858 
5859 		error = xfs_read_agi(mp, NULL, agno, &agibp);
5860 		if (error) {
5861 			xfs_alert(mp, "%s agi read failed agno %d error %d",
5862 						__func__, agno, error);
5863 		} else {
5864 			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);
5865 
5866 			itotal += be32_to_cpu(agi->agi_count);
5867 			ifree += be32_to_cpu(agi->agi_freecount);
5868 			xfs_buf_relse(agibp);
5869 		}
5870 	}
5871 }
5872 #endif /* DEBUG */
5873