1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs_platform.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_inode.h"
17 #include "xfs_trans.h"
18 #include "xfs_log.h"
19 #include "xfs_log_priv.h"
20 #include "xfs_log_recover.h"
21 #include "xfs_trans_priv.h"
22 #include "xfs_alloc.h"
23 #include "xfs_ialloc.h"
24 #include "xfs_trace.h"
25 #include "xfs_icache.h"
26 #include "xfs_error.h"
27 #include "xfs_buf_item.h"
28 #include "xfs_ag.h"
29 #include "xfs_quota.h"
30 #include "xfs_reflink.h"
31
32 #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
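/*
 * Integer midpoint of two block numbers (e.g. BLK_AVG(100, 201) == 150),
 * used as the probe point by the binary searches below.
 */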
33
34 STATIC int
35 xlog_find_zeroed(
36 struct xlog *,
37 xfs_daddr_t *);
38 STATIC int
39 xlog_clear_stale_blocks(
40 struct xlog *,
41 xfs_lsn_t);
42 STATIC int
43 xlog_do_recovery_pass(
44 struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
45
46 /*
47 * Sector aligned buffer routines for buffer create/read/write/access
48 */
49
50 /*
51 * Verify the log-relative block number and length in basic blocks are valid for
52 * an operation involving the given XFS log buffer. Returns true if the fields
53 * are valid, false otherwise.
54 */
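/*
 * For example, on a log of 1000 basic blocks (l_logBBsize == 1000), a
 * request for blk_no 990 with bbcount 20 is rejected because it would run
 * past the end of the log (990 + 20 > 1000).
 */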
55 static inline bool
56 xlog_verify_bno(
57 struct xlog *log,
58 xfs_daddr_t blk_no,
59 int bbcount)
60 {
61 if (blk_no < 0 || blk_no >= log->l_logBBsize)
62 return false;
63 if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
64 return false;
65 return true;
66 }
67
68 /*
69 * Allocate a buffer to hold log data. The buffer needs to be able to map to
70 * a range of nbblks basic blocks at any valid offset within the log.
71 */
72 static char *
73 xlog_alloc_buffer(
74 struct xlog *log,
75 int nbblks)
76 {
77 /*
78 * Pass log block 0 since we don't have an address yet; the buffer
79 * will be verified on read.
80 */
81 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
82 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
83 nbblks);
84 return NULL;
85 }
86
87 /*
88 * We do log I/O in units of log sectors (a power-of-2 multiple of the
89 * basic block size), so we round up the requested size to accommodate
90 * the basic blocks required for complete log sectors.
91 *
92 * In addition, the buffer may be used for a non-sector-aligned block
93 * offset, in which case an I/O of the requested size could extend
94 * beyond the end of the buffer. If the requested size is only 1 basic
95 * block it will never straddle a sector boundary, so this won't be an
96 * issue. Nor will this be a problem if the log I/O is done in basic
97 * blocks (sector size 1). But otherwise we extend the buffer by one
98 * extra log sector to ensure there's space to accommodate this
99 * possibility.
100 */
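/*
 * For example, with 4k sectors (l_sectBBsize == 8, i.e. eight 512-byte
 * basic blocks), a request for 20 basic blocks becomes 20 + 8 = 28 and is
 * then rounded up to 32 basic blocks (16k) below.
 */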
101 if (nbblks > 1 && log->l_sectBBsize > 1)
102 nbblks += log->l_sectBBsize;
103 nbblks = round_up(nbblks, log->l_sectBBsize);
104 return kvzalloc(BBTOB(nbblks), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
105 }
106
107 /*
108 * Return the byte offset into a log buffer of the start of the given
109 * block number's data. The buffer covers a log sector-aligned region.
110 */
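/*
 * For example, with l_sectBBsize == 8, block 11 sits 3 basic blocks into
 * its sector-aligned buffer, so xlog_align() returns 3 * 512 = 1536 bytes.
 */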
111 static inline unsigned int
112 xlog_align(
113 struct xlog *log,
114 xfs_daddr_t blk_no)
115 {
116 return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
117 }
118
119 static int
120 xlog_do_io(
121 struct xlog *log,
122 xfs_daddr_t blk_no,
123 unsigned int nbblks,
124 char *data,
125 enum req_op op)
126 {
127 int error;
128
129 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
130 xfs_warn(log->l_mp,
131 "Invalid log block/length (0x%llx, 0x%x) for buffer",
132 blk_no, nbblks);
133 return -EFSCORRUPTED;
134 }
135
136 blk_no = round_down(blk_no, log->l_sectBBsize);
137 nbblks = round_up(nbblks, log->l_sectBBsize);
138 ASSERT(nbblks > 0);
139
140 error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
141 BBTOB(nbblks), data, op);
142 if (error && !xlog_is_shutdown(log)) {
143 xfs_alert(log->l_mp,
144 "log recovery %s I/O error at daddr 0x%llx len %d error %d",
145 op == REQ_OP_WRITE ? "write" : "read",
146 blk_no, nbblks, error);
147 }
148 return error;
149 }
150
151 STATIC int
152 xlog_bread_noalign(
153 struct xlog *log,
154 xfs_daddr_t blk_no,
155 int nbblks,
156 char *data)
157 {
158 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
159 }
160
161 STATIC int
162 xlog_bread(
163 struct xlog *log,
164 xfs_daddr_t blk_no,
165 int nbblks,
166 char *data,
167 char **offset)
168 {
169 int error;
170
171 error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
172 if (!error)
173 *offset = data + xlog_align(log, blk_no);
174 return error;
175 }
176
177 STATIC int
178 xlog_bwrite(
179 struct xlog *log,
180 xfs_daddr_t blk_no,
181 int nbblks,
182 char *data)
183 {
184 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
185 }
186
187 #ifdef DEBUG
188 /*
189 * dump debug superblock and log record information
190 */
191 STATIC void
192 xlog_header_check_dump(
193 struct xfs_mount *mp,
194 struct xlog_rec_header *head)
195 {
196 xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
197 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
198 xfs_debug(mp, " log : uuid = %pU, fmt = %d",
199 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
200 }
201 #else
202 #define xlog_header_check_dump(mp, head)
203 #endif
204
205 /*
206 * check log record header for recovery
207 */
208 STATIC int
209 xlog_header_check_recover(
210 struct xfs_mount *mp,
211 struct xlog_rec_header *head)
212 {
213 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
214
215 /*
216 * IRIX doesn't write the h_fmt field and leaves it zeroed
217 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
218 * a dirty log created in IRIX.
219 */
220 if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
221 xfs_warn(mp,
222 "dirty log written in incompatible format - can't recover");
223 xlog_header_check_dump(mp, head);
224 return -EFSCORRUPTED;
225 }
226 if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
227 &head->h_fs_uuid))) {
228 xfs_warn(mp,
229 "dirty log entry has mismatched uuid - can't recover");
230 xlog_header_check_dump(mp, head);
231 return -EFSCORRUPTED;
232 }
233 return 0;
234 }
235
236 /*
237 * read the head block of the log and check the header
238 */
239 STATIC int
240 xlog_header_check_mount(
241 struct xfs_mount *mp,
242 struct xlog_rec_header *head)
243 {
244 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
245
246 if (uuid_is_null(&head->h_fs_uuid)) {
247 /*
248 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
249 * h_fs_uuid is null, we assume this log was last mounted
250 * by IRIX and continue.
251 */
252 xfs_warn(mp, "null uuid in log - IRIX style log");
253 } else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
254 &head->h_fs_uuid))) {
255 xfs_warn(mp, "log has mismatched uuid - can't recover");
256 xlog_header_check_dump(mp, head);
257 return -EFSCORRUPTED;
258 }
259 return 0;
260 }
261
262 /*
263 * This routine finds (to an approximation) the first block in the physical
264 * log which contains the given cycle. It uses a binary search algorithm.
265 * Note that the algorithm cannot be exact because the on-disk contents
266 * are not guaranteed to be consistent; the callers verify the result.
267 */
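/*
 * For example, if the search range holds cycles { 9, 9, 9, 8, 8, 8, 8, 8 }
 * and we are looking for cycle 8, the loop below narrows [first_blk,
 * end_blk] until *last_blk points at block 3, the first block stamped
 * with cycle 8.
 */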
268 STATIC int
269 xlog_find_cycle_start(
270 struct xlog *log,
271 char *buffer,
272 xfs_daddr_t first_blk,
273 xfs_daddr_t *last_blk,
274 uint cycle)
275 {
276 char *offset;
277 xfs_daddr_t mid_blk;
278 xfs_daddr_t end_blk;
279 uint mid_cycle;
280 int error;
281
282 end_blk = *last_blk;
283 mid_blk = BLK_AVG(first_blk, end_blk);
284 while (mid_blk != first_blk && mid_blk != end_blk) {
285 error = xlog_bread(log, mid_blk, 1, buffer, &offset);
286 if (error)
287 return error;
288 mid_cycle = xlog_get_cycle(offset);
289 if (mid_cycle == cycle)
290 end_blk = mid_blk; /* last_half_cycle == mid_cycle */
291 else
292 first_blk = mid_blk; /* first_half_cycle == mid_cycle */
293 mid_blk = BLK_AVG(first_blk, end_blk);
294 }
295 ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
296 (mid_blk == end_blk && mid_blk-1 == first_blk));
297
298 *last_blk = end_blk;
299
300 return 0;
301 }
302
303 /*
304 * Check that a range of blocks does not contain stop_on_cycle_no.
305 * Fill in *new_blk with the block offset where such a block is
306 * found, or with -1 (an invalid block number) if there is no such
307 * block in the range. The scan needs to occur from front to back
308 * and the pointer into the region must be updated since a later
309 * routine will need to perform another test.
310 */
311 STATIC int
312 xlog_find_verify_cycle(
313 struct xlog *log,
314 xfs_daddr_t start_blk,
315 int nbblks,
316 uint stop_on_cycle_no,
317 xfs_daddr_t *new_blk)
318 {
319 xfs_daddr_t i, j;
320 uint cycle;
321 char *buffer;
322 xfs_daddr_t bufblks;
323 char *buf = NULL;
324 int error = 0;
325
326 /*
327 * Greedily allocate a buffer big enough to handle the full
328 * range of basic blocks we'll be examining. If that fails,
329 * try a smaller size. We need to be able to read at least
330 * a log sector, or we're out of luck.
331 */
332 bufblks = roundup_pow_of_two(nbblks);
333 while (bufblks > log->l_logBBsize)
334 bufblks >>= 1;
335 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
336 bufblks >>= 1;
337 if (bufblks < log->l_sectBBsize)
338 return -ENOMEM;
339 }
340
341 for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
342 int bcount;
343
344 bcount = min(bufblks, (start_blk + nbblks - i));
345
346 error = xlog_bread(log, i, bcount, buffer, &buf);
347 if (error)
348 goto out;
349
350 for (j = 0; j < bcount; j++) {
351 cycle = xlog_get_cycle(buf);
352 if (cycle == stop_on_cycle_no) {
353 *new_blk = i+j;
354 goto out;
355 }
356
357 buf += BBSIZE;
358 }
359 }
360
361 *new_blk = -1;
362
363 out:
364 kvfree(buffer);
365 return error;
366 }
367
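/*
 * Number of basic blocks occupied by a log record header. For example, a
 * v2 log with a 64k iclog size has h_size == 65536, which needs
 * DIV_ROUND_UP(65536, XLOG_HEADER_CYCLE_SIZE) == 2 header blocks; v1 logs
 * always use a single header block.
 */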
368 static inline int
369 xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
370 {
371 if (xfs_has_logv2(log->l_mp)) {
372 int h_size = be32_to_cpu(rh->h_size);
373
374 if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
375 h_size > XLOG_HEADER_CYCLE_SIZE)
376 return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
377 }
378 return 1;
379 }
380
381 /*
382 * Potentially backup over partial log record write.
383 *
384 * In the typical case, last_blk is the number of the block directly after
385 * a good log record. Therefore, we subtract one to get the block number
386 * of the last block in the given buffer. extra_bblks contains the number
387 * of blocks we would have read on a previous read. This happens when the
388 * last log record is split over the end of the physical log.
389 *
390 * extra_bblks is the number of blocks potentially verified on a previous
391 * call to this routine.
392 */
393 STATIC int
394 xlog_find_verify_log_record(
395 struct xlog *log,
396 xfs_daddr_t start_blk,
397 xfs_daddr_t *last_blk,
398 int extra_bblks)
399 {
400 xfs_daddr_t i;
401 char *buffer;
402 char *offset = NULL;
403 struct xlog_rec_header *head = NULL;
404 int error = 0;
405 int smallmem = 0;
406 int num_blks = *last_blk - start_blk;
407 int xhdrs;
408
409 ASSERT(start_blk != 0 || *last_blk != start_blk);
410
411 buffer = xlog_alloc_buffer(log, num_blks);
412 if (!buffer) {
413 buffer = xlog_alloc_buffer(log, 1);
414 if (!buffer)
415 return -ENOMEM;
416 smallmem = 1;
417 } else {
418 error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
419 if (error)
420 goto out;
421 offset += ((num_blks - 1) << BBSHIFT);
422 }
423
424 for (i = (*last_blk) - 1; i >= 0; i--) {
425 if (i < start_blk) {
426 /* valid log record not found */
427 xfs_warn(log->l_mp,
428 "Log inconsistent (didn't find previous header)");
429 ASSERT(0);
430 error = -EFSCORRUPTED;
431 goto out;
432 }
433
434 if (smallmem) {
435 error = xlog_bread(log, i, 1, buffer, &offset);
436 if (error)
437 goto out;
438 }
439
440 head = (struct xlog_rec_header *)offset;
441
442 if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
443 break;
444
445 if (!smallmem)
446 offset -= BBSIZE;
447 }
448
449 /*
450 * We hit the beginning of the physical log & still no header. Return
451 * to caller. If the caller can handle a return of 1, then this routine
452 * will be called again for the end of the physical log.
453 */
454 if (i == -1) {
455 error = 1;
456 goto out;
457 }
458
459 /*
460 * We have the final block of the good log (the first block
461 * of the log record _before_ the head), so we check the uuid.
462 */
463 if ((error = xlog_header_check_mount(log->l_mp, head)))
464 goto out;
465
466 /*
467 * We may have found a log record header before we expected one.
468 * last_blk will be the 1st block # with a given cycle #. We may end
469 * up reading an entire log record. In this case, we don't want to
470 * reset last_blk. Only when last_blk points in the middle of a log
471 * record do we update last_blk.
472 */
473 xhdrs = xlog_logrec_hblks(log, head);
474
475 if (*last_blk - i + extra_bblks !=
476 BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
477 *last_blk = i;
478
479 out:
480 kvfree(buffer);
481 return error;
482 }
483
484 /*
485 * Head is defined to be the point of the log where the next log write
486 * could go. This means that incomplete LR writes at the end are
487 * eliminated when calculating the head. We aren't guaranteed that previous
488 * LR have complete transactions. We only know that a cycle number of
489 * current cycle number -1 won't be present in the log if we start writing
490 * from our current block number.
491 *
492 * last_blk contains the block number of the first block with a given
493 * cycle number.
494 *
495 * Return: zero if normal, non-zero if error.
496 */
497 STATIC int
498 xlog_find_head(
499 struct xlog *log,
500 xfs_daddr_t *return_head_blk)
501 {
502 char *buffer;
503 char *offset;
504 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
505 int num_scan_bblks;
506 uint first_half_cycle, last_half_cycle;
507 uint stop_on_cycle;
508 int error, log_bbnum = log->l_logBBsize;
509
510 /* Is the end of the log device zeroed? */
511 error = xlog_find_zeroed(log, &first_blk);
512 if (error < 0) {
513 xfs_warn(log->l_mp, "empty log check failed");
514 return error;
515 }
516 if (error == 1) {
517 *return_head_blk = first_blk;
518
519 /* Is the whole lot zeroed? */
520 if (!first_blk) {
521 /* Linux XFS shouldn't generate totally zeroed logs -
522 * mkfs etc write a dummy unmount record to a fresh
523 * log so we can store the uuid in there
524 */
525 xfs_warn(log->l_mp, "totally zeroed log");
526 }
527
528 return 0;
529 }
530
531 first_blk = 0; /* get cycle # of 1st block */
532 buffer = xlog_alloc_buffer(log, 1);
533 if (!buffer)
534 return -ENOMEM;
535
536 error = xlog_bread(log, 0, 1, buffer, &offset);
537 if (error)
538 goto out_free_buffer;
539
540 first_half_cycle = xlog_get_cycle(offset);
541
542 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
543 error = xlog_bread(log, last_blk, 1, buffer, &offset);
544 if (error)
545 goto out_free_buffer;
546
547 last_half_cycle = xlog_get_cycle(offset);
548 ASSERT(last_half_cycle != 0);
549
550 /*
551 * If the 1st half cycle number is equal to the last half cycle number,
552 * then the entire log is stamped with the same cycle number. In this
553 * case, head_blk can't be set to zero (which makes sense). The below
554 * math doesn't work out properly with head_blk equal to zero. Instead,
555 * we set it to log_bbnum which is an invalid block number, but this
556 * value makes the math correct. If head_blk doesn't change through
557 * all the tests below, *head_blk is set to zero at the very end rather
558 * than log_bbnum. In a sense, log_bbnum and zero are the same block
559 * in a circular file.
560 */
561 if (first_half_cycle == last_half_cycle) {
562 /*
563 * In this case we believe that the entire log should have
564 * cycle number last_half_cycle. We need to scan backwards
565 * from the end verifying that there are no holes still
566 * containing last_half_cycle - 1. If we find such a hole,
567 * then the start of that hole will be the new head. The
568 * simple case looks like
569 * x | x ... | x - 1 | x
570 * Another case that fits this picture would be
571 * x | x + 1 | x ... | x
572 * In this case the head really is somewhere at the end of the
573 * log, as one of the latest writes at the beginning was
574 * incomplete.
575 * One more case is
576 * x | x + 1 | x ... | x - 1 | x
577 * This is really the combination of the above two cases, and
578 * the head has to end up at the start of the x-1 hole at the
579 * end of the log.
580 *
581 * In the 256k log case, we will read from the beginning to the
582 * end of the log and search for cycle numbers equal to x-1.
583 * We don't worry about the x+1 blocks that we encounter,
584 * because we know that they cannot be the head since the log
585 * started with x.
586 */
587 head_blk = log_bbnum;
588 stop_on_cycle = last_half_cycle - 1;
589 } else {
590 /*
591 * In this case we want to find the first block with cycle
592 * number matching last_half_cycle. We expect the log to be
593 * some variation on
594 * x + 1 ... | x ... | x
595 * The first block with cycle number x (last_half_cycle) will
596 * be where the new head belongs. First we do a binary search
597 * for the first occurrence of last_half_cycle. The binary
598 * search may not be totally accurate, so then we scan back
599 * from there looking for occurrences of last_half_cycle before
600 * us. If that backwards scan wraps around the beginning of
601 * the log, then we look for occurrences of last_half_cycle - 1
602 * at the end of the log. The cases we're looking for look
603 * like
604 * v binary search stopped here
605 * x + 1 ... | x | x + 1 | x ... | x
606 * ^ but we want to locate this spot
607 * or
608 * <---------> less than scan distance
609 * x + 1 ... | x ... | x - 1 | x
610 * ^ we want to locate this spot
611 */
612 stop_on_cycle = last_half_cycle;
613 error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
614 last_half_cycle);
615 if (error)
616 goto out_free_buffer;
617 }
618
619 /*
620 * Now validate the answer. Scan back some number of maximum possible
621 * blocks and make sure each one has the expected cycle number. The
622 * maximum is determined by the total possible amount of buffering
623 * in the in-core log. The following number can be made tighter if
624 * we actually look at the block size of the filesystem.
625 */
626 num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
627 if (head_blk >= num_scan_bblks) {
628 /*
629 * We are guaranteed that the entire check can be performed
630 * in one buffer.
631 */
632 start_blk = head_blk - num_scan_bblks;
633 if ((error = xlog_find_verify_cycle(log,
634 start_blk, num_scan_bblks,
635 stop_on_cycle, &new_blk)))
636 goto out_free_buffer;
637 if (new_blk != -1)
638 head_blk = new_blk;
639 } else { /* need to read 2 parts of log */
640 /*
641 * We are going to scan backwards in the log in two parts.
642 * First we scan the physical end of the log. In this part
643 * of the log, we are looking for blocks with cycle number
644 * last_half_cycle - 1.
645 * If we find one, then we know that the log starts there, as
646 * we've found a hole that didn't get written in going around
647 * the end of the physical log. The simple case for this is
648 * x + 1 ... | x ... | x - 1 | x
649 * <---------> less than scan distance
650 * If all of the blocks at the end of the log have cycle number
651 * last_half_cycle, then we check the blocks at the start of
652 * the log looking for occurrences of last_half_cycle. If we
653 * find one, then our current estimate for the location of the
654 * first occurrence of last_half_cycle is wrong and we move
655 * back to the hole we've found. This case looks like
656 * x + 1 ... | x | x + 1 | x ...
657 * ^ binary search stopped here
658 * Another case we need to handle that only occurs in 256k
659 * logs is
660 * x + 1 ... | x ... | x+1 | x ...
661 * ^ binary search stops here
662 * In a 256k log, the scan at the end of the log will see the
663 * x + 1 blocks. We need to skip past those since that is
664 * certainly not the head of the log. By searching for
665 * last_half_cycle-1 we accomplish that.
666 */
667 ASSERT(head_blk <= INT_MAX &&
668 (xfs_daddr_t) num_scan_bblks >= head_blk);
669 start_blk = log_bbnum - (num_scan_bblks - head_blk);
670 if ((error = xlog_find_verify_cycle(log, start_blk,
671 num_scan_bblks - (int)head_blk,
672 (stop_on_cycle - 1), &new_blk)))
673 goto out_free_buffer;
674 if (new_blk != -1) {
675 head_blk = new_blk;
676 goto validate_head;
677 }
678
679 /*
680 * Scan beginning of log now. The last part of the physical
681 * log is good. This scan needs to verify that it doesn't find
682 * the last_half_cycle.
683 */
684 start_blk = 0;
685 ASSERT(head_blk <= INT_MAX);
686 if ((error = xlog_find_verify_cycle(log,
687 start_blk, (int)head_blk,
688 stop_on_cycle, &new_blk)))
689 goto out_free_buffer;
690 if (new_blk != -1)
691 head_blk = new_blk;
692 }
693
694 validate_head:
695 /*
696 * Now we need to make sure head_blk is not pointing to a block in
697 * the middle of a log record.
698 */
699 num_scan_bblks = XLOG_REC_SHIFT(log);
700 if (head_blk >= num_scan_bblks) {
701 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
702
703 /* start ptr at last block ptr before head_blk */
704 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
705 if (error == 1)
706 error = -EIO;
707 if (error)
708 goto out_free_buffer;
709 } else {
710 start_blk = 0;
711 ASSERT(head_blk <= INT_MAX);
712 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
713 if (error < 0)
714 goto out_free_buffer;
715 if (error == 1) {
716 /* We hit the beginning of the log during our search */
717 start_blk = log_bbnum - (num_scan_bblks - head_blk);
718 new_blk = log_bbnum;
719 ASSERT(start_blk <= INT_MAX &&
720 (xfs_daddr_t) log_bbnum-start_blk >= 0);
721 ASSERT(head_blk <= INT_MAX);
722 error = xlog_find_verify_log_record(log, start_blk,
723 &new_blk, (int)head_blk);
724 if (error == 1)
725 error = -EIO;
726 if (error)
727 goto out_free_buffer;
728 if (new_blk != log_bbnum)
729 head_blk = new_blk;
730 } else if (error)
731 goto out_free_buffer;
732 }
733
734 kvfree(buffer);
735 if (head_blk == log_bbnum)
736 *return_head_blk = 0;
737 else
738 *return_head_blk = head_blk;
739 /*
740 * When returning here, we have a good block number. Bad block
741 * means that during a previous crash, we didn't have a clean break
742 * from cycle number N to cycle number N-1. In this case, we need
743 * to find the first block with cycle number N-1.
744 */
745 return 0;
746
747 out_free_buffer:
748 kvfree(buffer);
749 if (error)
750 xfs_warn(log->l_mp, "failed to find log head");
751 return error;
752 }
753
754 /*
755 * Seek backwards in the log for log record headers.
756 *
757 * Given a starting log block, walk backwards until we find the provided number
758 * of records or hit the provided tail block. The return value is the number of
759 * records encountered or a negative error code. The log block and buffer
760 * pointer of the last record seen are returned in rblk and rhead respectively.
761 */
762 STATIC int
763 xlog_rseek_logrec_hdr(
764 struct xlog *log,
765 xfs_daddr_t head_blk,
766 xfs_daddr_t tail_blk,
767 int count,
768 char *buffer,
769 xfs_daddr_t *rblk,
770 struct xlog_rec_header **rhead,
771 bool *wrapped)
772 {
773 int i;
774 int error;
775 int found = 0;
776 char *offset = NULL;
777 xfs_daddr_t end_blk;
778
779 *wrapped = false;
780
781 /*
782 * Walk backwards from the head block until we hit the tail or the first
783 * block in the log.
784 */
785 end_blk = head_blk > tail_blk ? tail_blk : 0;
786 for (i = (int) head_blk - 1; i >= end_blk; i--) {
787 error = xlog_bread(log, i, 1, buffer, &offset);
788 if (error)
789 goto out_error;
790
791 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
792 *rblk = i;
793 *rhead = (struct xlog_rec_header *) offset;
794 if (++found == count)
795 break;
796 }
797 }
798
799 /*
800 * If we haven't hit the tail block or the log record header count,
801 * start looking again from the end of the physical log. Note that
802 * callers can pass head == tail if the tail is not yet known.
803 */
804 if (tail_blk >= head_blk && found != count) {
805 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
806 error = xlog_bread(log, i, 1, buffer, &offset);
807 if (error)
808 goto out_error;
809
810 if (*(__be32 *)offset ==
811 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
812 *wrapped = true;
813 *rblk = i;
814 *rhead = (struct xlog_rec_header *) offset;
815 if (++found == count)
816 break;
817 }
818 }
819 }
820
821 return found;
822
823 out_error:
824 return error;
825 }
826
827 /*
828 * Seek forward in the log for log record headers.
829 *
830 * Given head and tail blocks, walk forward from the tail block until we find
831 * the provided number of records or hit the head block. The return value is the
832 * number of records encountered or a negative error code. The log block and
833 * buffer pointer of the last record seen are returned in rblk and rhead
834 * respectively.
835 */
836 STATIC int
837 xlog_seek_logrec_hdr(
838 struct xlog *log,
839 xfs_daddr_t head_blk,
840 xfs_daddr_t tail_blk,
841 int count,
842 char *buffer,
843 xfs_daddr_t *rblk,
844 struct xlog_rec_header **rhead,
845 bool *wrapped)
846 {
847 int i;
848 int error;
849 int found = 0;
850 char *offset = NULL;
851 xfs_daddr_t end_blk;
852
853 *wrapped = false;
854
855 /*
856 * Walk forward from the tail block until we hit the head or the last
857 * block in the log.
858 */
859 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
860 for (i = (int) tail_blk; i <= end_blk; i++) {
861 error = xlog_bread(log, i, 1, buffer, &offset);
862 if (error)
863 goto out_error;
864
865 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
866 *rblk = i;
867 *rhead = (struct xlog_rec_header *) offset;
868 if (++found == count)
869 break;
870 }
871 }
872
873 /*
874 * If we haven't hit the head block or the log record header count,
875 * start looking again from the start of the physical log.
876 */
877 if (tail_blk > head_blk && found != count) {
878 for (i = 0; i < (int) head_blk; i++) {
879 error = xlog_bread(log, i, 1, buffer, &offset);
880 if (error)
881 goto out_error;
882
883 if (*(__be32 *)offset ==
884 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
885 *wrapped = true;
886 *rblk = i;
887 *rhead = (struct xlog_rec_header *) offset;
888 if (++found == count)
889 break;
890 }
891 }
892 }
893
894 return found;
895
896 out_error:
897 return error;
898 }
899
900 /*
901 * Calculate distance from head to tail (i.e., unused space in the log).
902 */
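/*
 * For example, in a 1000 block log with head_blk 900 and tail_blk 100,
 * the unused space wraps the physical end: 100 + (1000 - 900) = 200 blocks.
 */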
903 static inline int
904 xlog_tail_distance(
905 struct xlog *log,
906 xfs_daddr_t head_blk,
907 xfs_daddr_t tail_blk)
908 {
909 if (head_blk < tail_blk)
910 return tail_blk - head_blk;
911
912 return tail_blk + (log->l_logBBsize - head_blk);
913 }
914
915 /*
916 * Verify the log tail. This is particularly important when torn or incomplete
917 * writes have been detected near the front of the log and the head has been
918 * walked back accordingly.
919 *
920 * We also have to handle the case where the tail was pinned and the head
921 * blocked behind the tail right before a crash. If the tail had been pushed
922 * immediately prior to the crash and the subsequent checkpoint was only
923 * partially written, it's possible it overwrote the last referenced tail in the
924 * log with garbage. This is not a coherency problem because the tail must have
925 * been pushed before it can be overwritten, but appears as log corruption to
926 * recovery because we have no way to know the tail was updated if the
927 * subsequent checkpoint didn't write successfully.
928 *
929 * Therefore, CRC check the log from tail to head. If a failure occurs and the
930 * offending record is within max iclog bufs from the head, walk the tail
931 * forward and retry until a valid tail is found or corruption is detected out
932 * of the range of a possible overwrite.
933 */
934 STATIC int
935 xlog_verify_tail(
936 struct xlog *log,
937 xfs_daddr_t head_blk,
938 xfs_daddr_t *tail_blk,
939 int hsize)
940 {
941 struct xlog_rec_header *thead;
942 char *buffer;
943 xfs_daddr_t first_bad;
944 int error = 0;
945 bool wrapped;
946 xfs_daddr_t tmp_tail;
947 xfs_daddr_t orig_tail = *tail_blk;
948
949 buffer = xlog_alloc_buffer(log, 1);
950 if (!buffer)
951 return -ENOMEM;
952
953 /*
954 * Make sure the tail points to a record (returns positive count on
955 * success).
956 */
957 error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
958 &tmp_tail, &thead, &wrapped);
959 if (error < 0)
960 goto out;
961 if (*tail_blk != tmp_tail)
962 *tail_blk = tmp_tail;
963
964 /*
965 * Run a CRC check from the tail to the head. We can't just check
966 * MAX_ICLOGS records past the tail because the tail may point to stale
967 * blocks cleared during the search for the head/tail. These blocks are
968 * overwritten with zero-length records and thus record count is not a
969 * reliable indicator of the iclog state before a crash.
970 */
971 first_bad = 0;
972 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
973 XLOG_RECOVER_CRCPASS, &first_bad);
974 while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
975 int tail_distance;
976
977 /*
978 * Is corruption within range of the head? If so, retry from
979 * the next record. Otherwise return an error.
980 */
981 tail_distance = xlog_tail_distance(log, head_blk, first_bad);
982 if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
983 break;
984
985 /* skip to the next record; returns positive count on success */
986 error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
987 buffer, &tmp_tail, &thead, &wrapped);
988 if (error < 0)
989 goto out;
990
991 *tail_blk = tmp_tail;
992 first_bad = 0;
993 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
994 XLOG_RECOVER_CRCPASS, &first_bad);
995 }
996
997 if (!error && *tail_blk != orig_tail)
998 xfs_warn(log->l_mp,
999 "Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
1000 orig_tail, *tail_blk);
1001 out:
1002 kvfree(buffer);
1003 return error;
1004 }
1005
1006 /*
1007 * Detect and trim torn writes from the head of the log.
1008 *
1009 * Storage without sector atomicity guarantees can result in torn writes in the
1010 * log in the event of a crash. Our only means to detect this scenario is via
1011 * CRC verification. While we can't always be certain that CRC verification
1012 * failure is due to a torn write vs. an unrelated corruption, we do know that
1013 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1014 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
1015 * the log and treat failures in this range as torn writes as a matter of
1016 * policy. In the event of CRC failure, the head is walked back to the last good
1017 * record in the log and the tail is updated from that record and verified.
1018 */
1019 STATIC int
1020 xlog_verify_head(
1021 struct xlog *log,
1022 xfs_daddr_t *head_blk, /* in/out: unverified head */
1023 xfs_daddr_t *tail_blk, /* out: tail block */
1024 char *buffer,
1025 xfs_daddr_t *rhead_blk, /* start blk of last record */
1026 struct xlog_rec_header **rhead, /* ptr to last record */
1027 bool *wrapped) /* last rec. wraps phys. log */
1028 {
1029 struct xlog_rec_header *tmp_rhead;
1030 char *tmp_buffer;
1031 xfs_daddr_t first_bad;
1032 xfs_daddr_t tmp_rhead_blk;
1033 int found;
1034 int error;
1035 bool tmp_wrapped;
1036
1037 /*
1038 * Check the head of the log for torn writes. Search backwards from the
1039 * head until we hit the tail or the maximum number of log record I/Os
1040 * that could have been in flight at one time. Use a temporary buffer so
1041 * we don't trash the rhead/buffer pointers from the caller.
1042 */
1043 tmp_buffer = xlog_alloc_buffer(log, 1);
1044 if (!tmp_buffer)
1045 return -ENOMEM;
1046 error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
1047 XLOG_MAX_ICLOGS, tmp_buffer,
1048 &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
1049 kvfree(tmp_buffer);
1050 if (error < 0)
1051 return error;
1052
1053 /*
1054 * Now run a CRC verification pass over the records starting at the
1055 * block found above to the current head. If a CRC failure occurs, the
1056 * log block of the first bad record is saved in first_bad.
1057 */
1058 error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
1059 XLOG_RECOVER_CRCPASS, &first_bad);
1060 if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1061 /*
1062 * We've hit a potential torn write. Reset the error and warn
1063 * about it.
1064 */
1065 error = 0;
1066 xfs_warn(log->l_mp,
1067 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
1068 first_bad, *head_blk);
1069
1070 /*
1071 * Get the header block and buffer pointer for the last good
1072 * record before the bad record.
1073 *
1074 * Note that xlog_find_tail() clears the blocks at the new head
1075 * (i.e., the records with invalid CRC) if the cycle number
1076 * matches the current cycle.
1077 */
1078 found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
1079 buffer, rhead_blk, rhead, wrapped);
1080 if (found < 0)
1081 return found;
1082 if (found == 0) /* XXX: right thing to do here? */
1083 return -EIO;
1084
1085 /*
1086 * Reset the head block to the starting block of the first bad
1087 * log record and set the tail block based on the last good
1088 * record.
1089 *
1090 * Bail out if the updated head/tail match as this indicates
1091 * possible corruption outside of the acceptable
1092 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
1093 */
1094 *head_blk = first_bad;
1095 *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1096 if (*head_blk == *tail_blk) {
1097 ASSERT(0);
1098 return 0;
1099 }
1100 }
1101 if (error)
1102 return error;
1103
1104 return xlog_verify_tail(log, *head_blk, tail_blk,
1105 be32_to_cpu((*rhead)->h_size));
1106 }
1107
1108 /*
1109 * We need to make sure we handle log wrapping properly, so we can't use the
1110 * calculated logbno directly. Make sure it wraps to the correct bno inside the
1111 * log.
1112 *
1113 * The log is limited to 32 bit sizes, so we use the appropriate modulus
1114 * operation here and cast it back to a 64 bit daddr on return.
1115 */
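/*
 * For example, with l_logBBsize == 1000, a computed block number of 1003
 * wraps back to block 3.
 */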
1116 static inline xfs_daddr_t
1117 xlog_wrap_logbno(
1118 struct xlog *log,
1119 xfs_daddr_t bno)
1120 {
1121 int mod;
1122
1123 div_s64_rem(bno, log->l_logBBsize, &mod);
1124 return mod;
1125 }
1126
1127 /*
1128 * Check whether the head of the log points to an unmount record. In other
1129 * words, determine whether the log is clean. If so, update the in-core state
1130 * appropriately.
1131 */
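/*
 * On disk, the record being checked looks like:
 *
 *   rhead_blk:                                log record header (hblks blocks)
 *   umount_data_blk  = rhead_blk + hblks:     unmount op header
 *   after_umount_blk = umount_data_blk + BTOBB(h_len)
 *
 * (both computed block numbers wrapped back into the physical log). The
 * log is clean only if the head points at after_umount_blk and the record
 * contains a single log operation flagged XLOG_UNMOUNT_TRANS.
 */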
1132 static int
1133 xlog_check_unmount_rec(
1134 struct xlog *log,
1135 xfs_daddr_t *head_blk,
1136 xfs_daddr_t *tail_blk,
1137 struct xlog_rec_header *rhead,
1138 xfs_daddr_t rhead_blk,
1139 char *buffer,
1140 bool *clean)
1141 {
1142 struct xlog_op_header *op_head;
1143 xfs_daddr_t umount_data_blk;
1144 xfs_daddr_t after_umount_blk;
1145 int hblks;
1146 int error;
1147 char *offset;
1148
1149 *clean = false;
1150
1151 /*
1152 * Look for unmount record. If we find it, then we know there was a
1153 * clean unmount. Since the record could extend past the end of the
1154 * physical log, we wrap the computed block numbers before comparing to head_blk.
1155 *
1156 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
1157 * below. We won't want to clear the unmount record if there is one, so
1158 * we pass the lsn of the unmount record rather than the block after it.
1159 */
1160 hblks = xlog_logrec_hblks(log, rhead);
1161 after_umount_blk = xlog_wrap_logbno(log,
1162 rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1163
1164 if (*head_blk == after_umount_blk &&
1165 be32_to_cpu(rhead->h_num_logops) == 1) {
1166 umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
1167 error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
1168 if (error)
1169 return error;
1170
1171 op_head = (struct xlog_op_header *)offset;
1172 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1173 /*
1174 * Set tail and last sync so that newly written log
1175 * records will point recovery to after the current
1176 * unmount record.
1177 */
1178 xlog_assign_atomic_lsn(&log->l_tail_lsn,
1179 log->l_curr_cycle, after_umount_blk);
1180 log->l_ailp->ail_head_lsn =
1181 atomic64_read(&log->l_tail_lsn);
1182 *tail_blk = after_umount_blk;
1183
1184 *clean = true;
1185 }
1186 }
1187
1188 return 0;
1189 }
1190
1191 static void
1192 xlog_set_state(
1193 struct xlog *log,
1194 xfs_daddr_t head_blk,
1195 struct xlog_rec_header *rhead,
1196 xfs_daddr_t rhead_blk,
1197 bool bump_cycle)
1198 {
1199 /*
1200 * Reset log values according to the state of the log when we
1201 * crashed. In the case where head_blk == 0, we bump curr_cycle
1202 * one because the next write starts a new cycle rather than
1203 * continuing the cycle of the last good log record. At this
1204 * point we have guaranteed that all partial log records have been
1205 * accounted for. Therefore, we know that the last good log record
1206 * written was complete and ended exactly on the end boundary
1207 * of the physical log.
1208 */
1209 log->l_prev_block = rhead_blk;
1210 log->l_curr_block = (int)head_blk;
1211 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1212 if (bump_cycle)
1213 log->l_curr_cycle++;
1214 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1215 log->l_ailp->ail_head_lsn = be64_to_cpu(rhead->h_lsn);
1216 }
1217
1218 /*
1219 * Find the sync block number or the tail of the log.
1220 *
1221 * This will be the block number of the last record to have its
1222 * associated buffers synced to disk. Every log record header has
1223 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
1224 * to get a sync block number. The only concern is to figure out which
1225 * log record header to believe.
1226 *
1227 * The following algorithm uses the log record header with the largest
1228 * lsn. The entire log record does not need to be valid. We only care
1229 * that the header is valid.
1230 *
1231 * We could speed up search by using current head_blk buffer, but it is not
1232 * available.
1233 */
1234 STATIC int
1235 xlog_find_tail(
1236 struct xlog *log,
1237 xfs_daddr_t *head_blk,
1238 xfs_daddr_t *tail_blk)
1239 {
1240 struct xlog_rec_header *rhead;
1241 char *offset = NULL;
1242 char *buffer;
1243 int error;
1244 xfs_daddr_t rhead_blk;
1245 xfs_lsn_t tail_lsn;
1246 bool wrapped = false;
1247 bool clean = false;
1248
1249 /*
1250 * Find previous log record
1251 */
1252 if ((error = xlog_find_head(log, head_blk)))
1253 return error;
1254 ASSERT(*head_blk < INT_MAX);
1255
1256 buffer = xlog_alloc_buffer(log, 1);
1257 if (!buffer)
1258 return -ENOMEM;
1259 if (*head_blk == 0) { /* special case */
1260 error = xlog_bread(log, 0, 1, buffer, &offset);
1261 if (error)
1262 goto done;
1263
1264 if (xlog_get_cycle(offset) == 0) {
1265 *tail_blk = 0;
1266 /* leave all other log inited values alone */
1267 goto done;
1268 }
1269 }
1270
1271 /*
1272 * Search backwards through the log looking for the log record header
1273 * block. This wraps all the way back around to the head so something is
1274 * seriously wrong if we can't find it.
1275 */
1276 error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
1277 &rhead_blk, &rhead, &wrapped);
1278 if (error < 0)
1279 goto done;
1280 if (!error) {
1281 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1282 error = -EFSCORRUPTED;
1283 goto done;
1284 }
1285 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1286
1287 /*
1288 * Set the log state based on the current head record.
1289 */
1290 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1291 tail_lsn = atomic64_read(&log->l_tail_lsn);
1292
1293 /*
1294 * Look for an unmount record at the head of the log. This sets the log
1295 * state to determine whether recovery is necessary.
1296 */
1297 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1298 rhead_blk, buffer, &clean);
1299 if (error)
1300 goto done;
1301
1302 /*
1303 * Verify the log head if the log is not clean (e.g., we have anything
1304 * but an unmount record at the head). This uses CRC verification to
1305 * detect and trim torn writes. If discovered, CRC failures are
1306 * considered torn writes and the log head is trimmed accordingly.
1307 *
1308 * Note that we can only run CRC verification when the log is dirty
1309 * because there's no guarantee that the log data behind an unmount
1310 * record is compatible with the current architecture.
1311 */
1312 if (!clean) {
1313 xfs_daddr_t orig_head = *head_blk;
1314
1315 error = xlog_verify_head(log, head_blk, tail_blk, buffer,
1316 &rhead_blk, &rhead, &wrapped);
1317 if (error)
1318 goto done;
1319
1320 /* update in-core state again if the head changed */
1321 if (*head_blk != orig_head) {
1322 xlog_set_state(log, *head_blk, rhead, rhead_blk,
1323 wrapped);
1324 tail_lsn = atomic64_read(&log->l_tail_lsn);
1325 error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1326 rhead, rhead_blk, buffer,
1327 &clean);
1328 if (error)
1329 goto done;
1330 }
1331 }
1332
1333 /*
1334 * Note that the unmount was clean. If the unmount was not clean, we
1335 * need to know this to rebuild the superblock counters from the perag
1336 * headers if we have a filesystem using non-persistent counters.
1337 */
1338 if (clean)
1339 xfs_set_clean(log->l_mp);
1340
1341 /*
1342 * Make sure that there are no blocks in front of the head
1343 * with the same cycle number as the head. This can happen
1344 * because we allow multiple outstanding log writes concurrently,
1345 * and the later writes might make it out before earlier ones.
1346 *
1347 * We use the lsn from before modifying it so that we'll never
1348 * overwrite the unmount record after a clean unmount.
1349 *
1350 * Do this only if we are going to recover the filesystem
1351 *
1352 * NOTE: This used to say "if (!readonly)"
1353 * However on Linux, we can & do recover a read-only filesystem.
1354 * We only skip recovery if NORECOVERY is specified on mount,
1355 * in which case we would not be here.
1356 *
1357 * But... if the -device- itself is readonly, just skip this.
1358 * We can't recover this device anyway, so it won't matter.
1359 */
1360 if (!xfs_readonly_buftarg(log->l_targ))
1361 error = xlog_clear_stale_blocks(log, tail_lsn);
1362
1363 done:
1364 kvfree(buffer);
1365
1366 if (error)
1367 xfs_warn(log->l_mp, "failed to locate log tail");
1368 return error;
1369 }
1370
1371 /*
1372 * Is the log zeroed at all?
1373 *
1374 * The last binary search should be changed to perform an X block read
1375 * once X becomes small enough. You can then search linearly through
1376 * the X blocks. This will cut down on the number of reads we need to do.
1377 *
1378 * If the log is partially zeroed, this routine will pass back the blkno
1379 * of the first block with cycle number 0. It won't have a complete LR
1380 * preceding it.
1381 *
1382 * Return:
1383 * 0 => the log is completely written to
1384 * 1 => use *blk_no as the first block of the log
1385 * <0 => error has occurred
1386 */
1387 STATIC int
1388 xlog_find_zeroed(
1389 struct xlog *log,
1390 xfs_daddr_t *blk_no)
1391 {
1392 char *buffer;
1393 char *offset;
1394 uint first_cycle, last_cycle;
1395 xfs_daddr_t new_blk, last_blk, start_blk;
1396 xfs_daddr_t num_scan_bblks;
1397 int error, log_bbnum = log->l_logBBsize;
1398 int ret = 1;
1399
1400 *blk_no = 0;
1401
1402 /* check totally zeroed log */
1403 buffer = xlog_alloc_buffer(log, 1);
1404 if (!buffer)
1405 return -ENOMEM;
1406 error = xlog_bread(log, 0, 1, buffer, &offset);
1407 if (error)
1408 goto out_free_buffer;
1409
1410 first_cycle = xlog_get_cycle(offset);
1411 if (first_cycle == 0) { /* completely zeroed log */
1412 *blk_no = 0;
1413 goto out_free_buffer;
1414 }
1415
1416 /* check partially zeroed log */
1417 error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
1418 if (error)
1419 goto out_free_buffer;
1420
1421 last_cycle = xlog_get_cycle(offset);
1422 if (last_cycle != 0) { /* log completely written to */
1423 ret = 0;
1424 goto out_free_buffer;
1425 }
1426
1427 /* we have a partially zeroed log */
1428 last_blk = log_bbnum-1;
1429 error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1430 if (error)
1431 goto out_free_buffer;
1432
1433 /*
1434 * Validate the answer. Because there is no way to guarantee that
1435 * the entire log is made up of log records which are the same size,
1436 * we scan over the defined maximum blocks. At this point, the maximum
1437 * is not chosen to mean anything special. XXXmiken
1438 */
1439 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1440 ASSERT(num_scan_bblks <= INT_MAX);
1441
1442 if (last_blk < num_scan_bblks)
1443 num_scan_bblks = last_blk;
1444 start_blk = last_blk - num_scan_bblks;
1445
1446 /*
1447 * We search for any instances of cycle number 0 that occur before
1448 * our current estimate of the head. What we're trying to detect is
1449 * 1 ... | 0 | 1 | 0...
1450 * ^ binary search ends here
1451 */
1452 if ((error = xlog_find_verify_cycle(log, start_blk,
1453 (int)num_scan_bblks, 0, &new_blk)))
1454 goto out_free_buffer;
1455 if (new_blk != -1)
1456 last_blk = new_blk;
1457
1458 /*
1459 * Potentially backup over partial log record write. We don't need
1460 * to search the end of the log because we know it is zero.
1461 */
1462 error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1463 if (error == 1)
1464 error = -EIO;
1465 if (error)
1466 goto out_free_buffer;
1467
1468 *blk_no = last_blk;
1469 out_free_buffer:
1470 kvfree(buffer);
1471 if (error)
1472 return error;
1473 return ret;
1474 }
1475
1476 /*
1477 * These are simple subroutines used by xlog_clear_stale_blocks() below
1478 * to initialize a buffer full of empty log record headers and write
1479 * them into the log.
1480 */
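/*
 * Each dummy record is a single basic block containing only a header:
 * h_len and h_num_logops are left zero by the memset() below, so recovery
 * treats these as empty records that merely re-stamp the cycle number.
 */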
1481 STATIC void
1482 xlog_add_record(
1483 struct xlog *log,
1484 char *buf,
1485 int cycle,
1486 int block,
1487 int tail_cycle,
1488 int tail_block)
1489 {
1490 struct xlog_rec_header *recp = (struct xlog_rec_header *)buf;
1491
1492 memset(buf, 0, BBSIZE);
1493 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1494 recp->h_cycle = cpu_to_be32(cycle);
1495 recp->h_version = cpu_to_be32(
1496 xfs_has_logv2(log->l_mp) ? 2 : 1);
1497 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1498 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1499 recp->h_fmt = cpu_to_be32(XLOG_FMT);
1500 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1501 }
1502
1503 STATIC int
1504 xlog_write_log_records(
1505 struct xlog *log,
1506 int cycle,
1507 int start_block,
1508 int blocks,
1509 int tail_cycle,
1510 int tail_block)
1511 {
1512 char *offset;
1513 char *buffer;
1514 int balign, ealign;
1515 int sectbb = log->l_sectBBsize;
1516 int end_block = start_block + blocks;
1517 int bufblks;
1518 int error = 0;
1519 int i, j = 0;
1520
1521 /*
1522 * Greedily allocate a buffer big enough to handle the full
1523 * range of basic blocks to be written. If that fails, try
1524 * a smaller size. We need to be able to write at least a
1525 * log sector, or we're out of luck.
1526 */
1527 bufblks = roundup_pow_of_two(blocks);
1528 while (bufblks > log->l_logBBsize)
1529 bufblks >>= 1;
1530 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1531 bufblks >>= 1;
1532 if (bufblks < sectbb)
1533 return -ENOMEM;
1534 }
1535
1536 /* We may need to do a read at the start to fill in part of
1537 * the buffer in the starting sector not covered by the first
1538 * write below.
1539 */
1540 balign = round_down(start_block, sectbb);
1541 if (balign != start_block) {
1542 error = xlog_bread_noalign(log, start_block, 1, buffer);
1543 if (error)
1544 goto out_free_buffer;
1545
1546 j = start_block - balign;
1547 }
1548
1549 for (i = start_block; i < end_block; i += bufblks) {
1550 int bcount, endcount;
1551
1552 bcount = min(bufblks, end_block - start_block);
1553 endcount = bcount - j;
1554
1555 /* We may need to do a read at the end to fill in part of
1556 * the buffer in the final sector not covered by the write.
1557 * If this is the same sector as the above read, skip it.
1558 */
1559 ealign = round_down(end_block, sectbb);
1560 if (j == 0 && (start_block + endcount > ealign)) {
1561 error = xlog_bread_noalign(log, ealign, sectbb,
1562 buffer + BBTOB(ealign - start_block));
1563 if (error)
1564 break;
1565
1566 }
1567
1568 offset = buffer + xlog_align(log, start_block);
1569 for (; j < endcount; j++) {
1570 xlog_add_record(log, offset, cycle, i+j,
1571 tail_cycle, tail_block);
1572 offset += BBSIZE;
1573 }
1574 error = xlog_bwrite(log, start_block, endcount, buffer);
1575 if (error)
1576 break;
1577 start_block += endcount;
1578 j = 0;
1579 }
1580
1581 out_free_buffer:
1582 kvfree(buffer);
1583 return error;
1584 }
1585
1586 /*
1587 * This routine is called to blow away any incomplete log writes out
1588 * in front of the log head. We do this so that we won't become confused
1589 * if we come up, write only a little bit more, and then crash again.
1590 * If we leave the partial log records out there, this situation could
1591 * cause us to think those partial writes are valid blocks since they
1592 * have the current cycle number. We get rid of them by overwriting them
1593 * with empty log records with the old cycle number rather than the
1594 * current one.
1595 *
1596 * The tail lsn is passed in rather than taken from
1597 * the log so that we will not write over the unmount record after a
1598 * clean unmount in a 512 block log. Doing so would leave the log without
1599 * any valid log records in it until a new one was written. If we crashed
1600 * during that time we would not be able to recover.
1601 */
1602 STATIC int
1603 xlog_clear_stale_blocks(
1604 struct xlog *log,
1605 xfs_lsn_t tail_lsn)
1606 {
1607 int tail_cycle, head_cycle;
1608 int tail_block, head_block;
1609 int tail_distance, max_distance;
1610 int distance;
1611 int error;
1612
1613 tail_cycle = CYCLE_LSN(tail_lsn);
1614 tail_block = BLOCK_LSN(tail_lsn);
1615 head_cycle = log->l_curr_cycle;
1616 head_block = log->l_curr_block;
1617
1618 /*
1619 * Figure out the distance between the new head of the log
1620 * and the tail. We want to write over any blocks beyond the
1621 * head that we may have written just before the crash, but
1622 * we don't want to overwrite the tail of the log.
1623 */
1624 if (head_cycle == tail_cycle) {
1625 /*
1626 * The tail is behind the head in the physical log,
1627 * so the distance from the head to the tail is the
1628 * distance from the head to the end of the log plus
1629 * the distance from the beginning of the log to the
1630 * tail.
1631 */
1632 if (XFS_IS_CORRUPT(log->l_mp,
1633 head_block < tail_block ||
1634 head_block >= log->l_logBBsize))
1635 return -EFSCORRUPTED;
1636 tail_distance = tail_block + (log->l_logBBsize - head_block);
1637 } else {
1638 /*
1639 * The head is behind the tail in the physical log,
1640 * so the distance from the head to the tail is just
1641 * the tail block minus the head block.
1642 */
1643 if (XFS_IS_CORRUPT(log->l_mp,
1644 head_block >= tail_block ||
1645 head_cycle != tail_cycle + 1))
1646 return -EFSCORRUPTED;
1647 tail_distance = tail_block - head_block;
1648 }
1649
1650 /*
1651 * If the head is right up against the tail, we can't clear
1652 * anything.
1653 */
1654 if (tail_distance <= 0) {
1655 ASSERT(tail_distance == 0);
1656 return 0;
1657 }
1658
1659 max_distance = XLOG_TOTAL_REC_SHIFT(log);
1660 /*
1661 * Take the smaller of the maximum amount of outstanding I/O
1662 * we could have and the distance to the tail to clear out.
1663 * We take the smaller so that we don't overwrite the tail and
1664 * we don't waste all day writing from the head to the tail
1665 * for no reason.
1666 */
1667 max_distance = min(max_distance, tail_distance);
1668
1669 if ((head_block + max_distance) <= log->l_logBBsize) {
1670 /*
1671 * We can stomp all the blocks we need to without
1672 * wrapping around the end of the log. Just do it
1673 * in a single write. Use the cycle number of the
1674 * current cycle minus one so that the log will look like:
1675 * n ... | n - 1 ...
1676 */
1677 error = xlog_write_log_records(log, (head_cycle - 1),
1678 head_block, max_distance, tail_cycle,
1679 tail_block);
1680 if (error)
1681 return error;
1682 } else {
1683 /*
1684 * We need to wrap around the end of the physical log in
1685 * order to clear all the blocks. Do it in two separate
1686 * I/Os. The first write should be from the head to the
1687 * end of the physical log, and it should use the current
1688 * cycle number minus one just like above.
1689 */
1690 distance = log->l_logBBsize - head_block;
1691 error = xlog_write_log_records(log, (head_cycle - 1),
1692 head_block, distance, tail_cycle,
1693 tail_block);
1694
1695 if (error)
1696 return error;
1697
1698 /*
1699 * Now write the blocks at the start of the physical log.
1700 * This writes the remainder of the blocks we want to clear.
1701 * It uses the current cycle number since we're now on the
1702 * same cycle as the head so that we get:
1703 * n ... n ... | n - 1 ...
1704 * ^^^^^ blocks we're writing
1705 */
1706 distance = max_distance - (log->l_logBBsize - head_block);
1707 error = xlog_write_log_records(log, head_cycle, 0, distance,
1708 tail_cycle, tail_block);
1709 if (error)
1710 return error;
1711 }
1712
1713 return 0;
1714 }
1715
1716 /*
1717 * Release the recovered intent item in the AIL that matches the given intent
1718 * type and intent id.
1719 */
1720 void
1721 xlog_recover_release_intent(
1722 struct xlog *log,
1723 unsigned short intent_type,
1724 uint64_t intent_id)
1725 {
1726 struct xfs_defer_pending *dfp, *n;
1727
1728 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
1729 struct xfs_log_item *lip = dfp->dfp_intent;
1730
1731 if (lip->li_type != intent_type)
1732 continue;
1733 if (!lip->li_ops->iop_match(lip, intent_id))
1734 continue;
1735
1736 ASSERT(xlog_item_is_intent(lip));
1737
1738 xfs_defer_cancel_recovery(log->l_mp, dfp);
1739 }
1740 }
1741
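/*
 * Grab an inode for log recovery: look it up, attach its dquots, and
 * flag it as under recovery if it is on the unlinked list.
 */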
1742 int
1743 xlog_recover_iget(
1744 struct xfs_mount *mp,
1745 xfs_ino_t ino,
1746 struct xfs_inode **ipp)
1747 {
1748 int error;
1749
1750 error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
1751 if (error)
1752 return error;
1753
1754 error = xfs_qm_dqattach(*ipp);
1755 if (error) {
1756 xfs_irele(*ipp);
1757 return error;
1758 }
1759
1760 if (VFS_I(*ipp)->i_nlink == 0)
1761 xfs_iflags_set(*ipp, XFS_IRECOVERY);
1762
1763 return 0;
1764 }
1765
1766 /*
1767 * Get an inode so that we can recover a log operation.
1768 *
1769 * Log intent items that target inodes effectively contain a file handle.
1770 * Check that the generation number matches the intent item like we do for
1771 * other file handles. Log intent items defined after this validation weakness
1772 * was identified must use this function.
1773 */
1774 int
1775 xlog_recover_iget_handle(
1776 struct xfs_mount *mp,
1777 xfs_ino_t ino,
1778 uint32_t gen,
1779 struct xfs_inode **ipp)
1780 {
1781 struct xfs_inode *ip;
1782 int error;
1783
1784 error = xlog_recover_iget(mp, ino, &ip);
1785 if (error)
1786 return error;
1787
1788 if (VFS_I(ip)->i_generation != gen) {
1789 xfs_irele(ip);
1790 return -EFSCORRUPTED;
1791 }
1792
1793 *ipp = ip;
1794 return 0;
1795 }
1796
1797 /******************************************************************************
1798 *
1799 * Log recover routines
1800 *
1801 ******************************************************************************
1802 */
1803 static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
1804 &xlog_buf_item_ops,
1805 &xlog_inode_item_ops,
1806 &xlog_dquot_item_ops,
1807 &xlog_quotaoff_item_ops,
1808 &xlog_icreate_item_ops,
1809 &xlog_efi_item_ops,
1810 &xlog_efd_item_ops,
1811 &xlog_rui_item_ops,
1812 &xlog_rud_item_ops,
1813 &xlog_cui_item_ops,
1814 &xlog_cud_item_ops,
1815 &xlog_bui_item_ops,
1816 &xlog_bud_item_ops,
1817 &xlog_attri_item_ops,
1818 &xlog_attrd_item_ops,
1819 &xlog_xmi_item_ops,
1820 &xlog_xmd_item_ops,
1821 &xlog_rtefi_item_ops,
1822 &xlog_rtefd_item_ops,
1823 &xlog_rtrui_item_ops,
1824 &xlog_rtrud_item_ops,
1825 &xlog_rtcui_item_ops,
1826 &xlog_rtcud_item_ops,
1827 };
1828
1829 static const struct xlog_recover_item_ops *
1830 xlog_find_item_ops(
1831 struct xlog_recover_item *item)
1832 {
1833 unsigned int i;
1834
1835 for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
1836 if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
1837 return xlog_recover_item_ops[i];
1838
1839 return NULL;
1840 }
1841
1842 /*
1843 * Sort the log items in the transaction.
1844 *
1845 * The ordering constraints are defined by the inode allocation and unlink
1846 * behaviour. The rules are:
1847 *
1848 * 1. Every item is only logged once in a given transaction. Hence it
1849 * represents the last logged state of the item. Hence ordering is
1850 * dependent on the order in which operations need to be performed so
1851 * required initial conditions are always met.
1852 *
1853 * 2. Cancelled buffers are recorded in pass 1 in a separate table and
1854 * there's nothing to replay from them so we can simply cull them
1855 * from the transaction. However, we can't do that until after we've
1856 * replayed all the other items because they may be dependent on the
1857 * cancelled buffer and replaying the cancelled buffer can remove it
1858 * from the cancelled buffer table. Hence they have to be done last.
1859 *
1860 * 3. Inode allocation buffers must be replayed before inode items that
1861 * read the buffer and replay changes into it. For filesystems using the
1862 * ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1863 * treated the same as inode allocation buffers as they create and
1864 * initialise the buffers directly.
1865 *
1866 * 4. Inode unlink buffers must be replayed after inode items are replayed.
1867 * This ensures that inodes are completely flushed to the inode buffer
1868 * in a "free" state before we remove the unlinked inode list pointer.
1869 *
1870 * Hence the ordering needs to be inode allocation buffers first, inode items
1871 * second, inode unlink buffers third and cancelled buffers last.
1872 *
1873 * But there's a problem with that - we can't tell an inode allocation buffer
1874 * apart from a regular buffer, so we can't separate them. We can, however,
1875 * tell an inode unlink buffer from the others, and so we can separate them out
1876 * from all the other buffers and move them to last.
1877 *
1878 * Hence, 4 lists, in order from head to tail:
1879 * - buffer_list for all buffers except cancelled/inode unlink buffers
1880 * - item_list for all non-buffer items
1881 * - inode_buffer_list for inode unlink buffers
1882 * - cancel_list for the cancelled buffers
1883 *
1884 * Note that we add objects to the tail of the lists so that first-to-last
1885 * ordering is preserved within the lists. Adding objects to the head of the
1886 * list means when we traverse from the head we walk them in last-to-first
1887 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1888 * but for all other items there may be specific ordering that we need to
1889 * preserve.
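 *
 * As a worked example, a transaction holding a regular buffer, an inode
 * item, an inode unlink buffer and a cancelled buffer is requeued as:
 * regular buffer, inode item, inode unlink buffer, cancelled buffer.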
1890 */
1891 STATIC int
1892 xlog_recover_reorder_trans(
1893 struct xlog *log,
1894 struct xlog_recover *trans,
1895 int pass)
1896 {
1897 struct xlog_recover_item *item, *n;
1898 int error = 0;
1899 LIST_HEAD(sort_list);
1900 LIST_HEAD(cancel_list);
1901 LIST_HEAD(buffer_list);
1902 LIST_HEAD(inode_buffer_list);
1903 LIST_HEAD(item_list);
1904
1905 list_splice_init(&trans->r_itemq, &sort_list);
1906 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1907 enum xlog_recover_reorder fate = XLOG_REORDER_ITEM_LIST;
1908
1909 item->ri_ops = xlog_find_item_ops(item);
1910 if (!item->ri_ops) {
1911 xfs_warn(log->l_mp,
1912 "%s: unrecognized type of log operation (%d)",
1913 __func__, ITEM_TYPE(item));
1914 ASSERT(0);
1915 /*
1916 * return the remaining items back to the transaction
1917 * item list so they can be freed by the caller.
1918 */
1919 if (!list_empty(&sort_list))
1920 list_splice_init(&sort_list, &trans->r_itemq);
1921 error = -EFSCORRUPTED;
1922 break;
1923 }
1924
1925 if (item->ri_ops->reorder)
1926 fate = item->ri_ops->reorder(item);
1927
1928 switch (fate) {
1929 case XLOG_REORDER_BUFFER_LIST:
1930 list_move_tail(&item->ri_list, &buffer_list);
1931 break;
1932 case XLOG_REORDER_CANCEL_LIST:
1933 trace_xfs_log_recover_item_reorder_head(log,
1934 trans, item, pass);
1935 list_move(&item->ri_list, &cancel_list);
1936 break;
1937 case XLOG_REORDER_INODE_BUFFER_LIST:
1938 list_move(&item->ri_list, &inode_buffer_list);
1939 break;
1940 case XLOG_REORDER_ITEM_LIST:
1941 trace_xfs_log_recover_item_reorder_tail(log,
1942 trans, item, pass);
1943 list_move_tail(&item->ri_list, &item_list);
1944 break;
1945 }
1946 }
1947
1948 ASSERT(list_empty(&sort_list));
1949 if (!list_empty(&buffer_list))
1950 list_splice(&buffer_list, &trans->r_itemq);
1951 if (!list_empty(&item_list))
1952 list_splice_tail(&item_list, &trans->r_itemq);
1953 if (!list_empty(&inode_buffer_list))
1954 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1955 if (!list_empty(&cancel_list))
1956 list_splice_tail(&cancel_list, &trans->r_itemq);
1957 return error;
1958 }
1959
1960 void
1961 xlog_buf_readahead(
1962 struct xlog *log,
1963 xfs_daddr_t blkno,
1964 uint len,
1965 const struct xfs_buf_ops *ops)
1966 {
1967 if (!xlog_is_buffer_cancelled(log, blkno, len))
1968 xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
1969 }
1970
1971 /*
1972 * Create a deferred work structure for resuming and tracking the progress of a
1973 * log intent item that was found during recovery.
1974 */
1975 void
1976 xlog_recover_intent_item(
1977 struct xlog *log,
1978 struct xfs_log_item *lip,
1979 xfs_lsn_t lsn,
1980 const struct xfs_defer_op_type *ops)
1981 {
1982 ASSERT(xlog_item_is_intent(lip));
1983
1984 xfs_defer_start_recovery(lip, &log->r_dfops, ops);
1985
1986 /*
1987 * Insert the intent into the AIL directly and drop one reference so
1988 * that finishing or canceling the work will drop the other.
1989 */
1990 xfs_trans_ail_insert(log->l_ailp, lip, lsn);
1991 lip->li_ops->iop_unpin(lip, 0);
1992 }
1993
1994 STATIC int
1995 xlog_recover_items_pass2(
1996 struct xlog *log,
1997 struct xlog_recover *trans,
1998 struct list_head *buffer_list,
1999 struct list_head *item_list)
2000 {
2001 struct xlog_recover_item *item;
2002 int error = 0;
2003
2004 list_for_each_entry(item, item_list, ri_list) {
2005 trace_xfs_log_recover_item_recover(log, trans, item,
2006 XLOG_RECOVER_PASS2);
2007
2008 if (item->ri_ops->commit_pass2)
2009 error = item->ri_ops->commit_pass2(log, buffer_list,
2010 item, trans->r_lsn);
2011 if (error)
2012 return error;
2013 }
2014
2015 return error;
2016 }
2017
2018 /*
2019 * Perform the transaction.
2020 *
2021 * If the transaction modifies a buffer or inode, do it now. Otherwise,
2022 * EFIs and EFDs get queued up by adding entries into the AIL for them.
2023 */
2024 STATIC int
2025 xlog_recover_commit_trans(
2026 struct xlog *log,
2027 struct xlog_recover *trans,
2028 int pass,
2029 struct list_head *buffer_list)
2030 {
2031 int error = 0;
2032 int items_queued = 0;
2033 struct xlog_recover_item *item;
2034 struct xlog_recover_item *next;
2035 LIST_HEAD (ra_list);
2036 LIST_HEAD (done_list);
2037
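/*
 * Maximum number of items to accumulate (and issue readahead for)
 * before they are recovered in one batch by xlog_recover_items_pass2().
 */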
2038 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
2039
2040 hlist_del_init(&trans->r_list);
2041
2042 error = xlog_recover_reorder_trans(log, trans, pass);
2043 if (error)
2044 return error;
2045
2046 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
2047 trace_xfs_log_recover_item_recover(log, trans, item, pass);
2048
2049 switch (pass) {
2050 case XLOG_RECOVER_PASS1:
2051 if (item->ri_ops->commit_pass1)
2052 error = item->ri_ops->commit_pass1(log, item);
2053 break;
2054 case XLOG_RECOVER_PASS2:
2055 if (item->ri_ops->ra_pass2)
2056 item->ri_ops->ra_pass2(log, item);
2057 list_move_tail(&item->ri_list, &ra_list);
2058 items_queued++;
2059 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
2060 error = xlog_recover_items_pass2(log, trans,
2061 buffer_list, &ra_list);
2062 list_splice_tail_init(&ra_list, &done_list);
2063 items_queued = 0;
2064 }
2065
2066 break;
2067 default:
2068 ASSERT(0);
2069 }
2070
2071 if (error)
2072 goto out;
2073 }
2074
2075 out:
2076 if (!list_empty(&ra_list)) {
2077 if (!error)
2078 error = xlog_recover_items_pass2(log, trans,
2079 buffer_list, &ra_list);
2080 list_splice_tail_init(&ra_list, &done_list);
2081 }
2082
2083 if (!list_empty(&done_list))
2084 list_splice_init(&done_list, &trans->r_itemq);
2085
2086 return error;
2087 }
2088
2089 STATIC void
2090 xlog_recover_add_item(
2091 struct list_head *head)
2092 {
2093 struct xlog_recover_item *item;
2094
2095 item = kzalloc_obj(struct xlog_recover_item, GFP_KERNEL | __GFP_NOFAIL);
2096 INIT_LIST_HEAD(&item->ri_list);
2097 list_add_tail(&item->ri_list, head);
2098 }
2099
2100 STATIC int
2101 xlog_recover_add_to_cont_trans(
2102 struct xlog *log,
2103 struct xlog_recover *trans,
2104 char *dp,
2105 int len)
2106 {
2107 struct xlog_recover_item *item;
2108 char *ptr, *old_ptr;
2109 int old_len;
2110
2111 /*
2112 * If the transaction is empty, the header was split across this and the
2113 * previous record. Copy the rest of the header.
2114 */
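/*
 * Hypothetical split: if the previous record carried all but the last
 * 8 bytes of the transaction header, this op holds those 8 bytes and
 * ptr below ends up pointing 8 bytes before the end of r_theader.
 */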
2115 if (list_empty(&trans->r_itemq)) {
2116 ASSERT(len <= sizeof(struct xfs_trans_header));
2117 if (len > sizeof(struct xfs_trans_header)) {
2118 xfs_warn(log->l_mp, "%s: bad header length", __func__);
2119 return -EFSCORRUPTED;
2120 }
2121
2122 xlog_recover_add_item(&trans->r_itemq);
2123 ptr = (char *)&trans->r_theader +
2124 sizeof(struct xfs_trans_header) - len;
2125 memcpy(ptr, dp, len);
2126 return 0;
2127 }
2128
2129 /* take the tail entry */
2130 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2131 ri_list);
2132
2133 old_ptr = item->ri_buf[item->ri_cnt-1].iov_base;
2134 old_len = item->ri_buf[item->ri_cnt-1].iov_len;
2135
2136 ptr = kvrealloc(old_ptr, len + old_len, GFP_KERNEL);
2137 if (!ptr)
2138 return -ENOMEM;
2139 memcpy(&ptr[old_len], dp, len);
2140 item->ri_buf[item->ri_cnt-1].iov_len += len;
2141 item->ri_buf[item->ri_cnt-1].iov_base = ptr;
2142 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
2143 return 0;
2144 }
2145
2146 /*
2147 * The next region to add is the start of a new region. It could be
2148 * a whole region or it could be the first part of a new region. Because
2149 * of this, the assumption here is that the type and size fields of all
2150 * format structures fit into the first 32 bits of the structure.
2151 *
2152 * This works because all regions must be 32 bit aligned. Therefore, we
2153 * either have both fields or we have neither field. In the case we have
2154 * neither field, the data part of the region is zero length. We only have
2155 * a log_op_header and can throw away the header since a new one will appear
2156 * later. If we have at least 4 bytes, then we can determine how many regions
2157 * will appear in the current log item.
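 *
 * For instance, struct xfs_inode_log_format begins with a 16 bit
 * ilf_type followed by a 16 bit ilf_size, so reading the first 32 bits
 * of a region is enough to learn how many regions the item will carry.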
2158 */
2159 STATIC int
2160 xlog_recover_add_to_trans(
2161 struct xlog *log,
2162 struct xlog_recover *trans,
2163 char *dp,
2164 int len)
2165 {
2166 struct xfs_inode_log_format *in_f; /* any will do */
2167 struct xlog_recover_item *item;
2168 char *ptr;
2169
2170 if (!len)
2171 return 0;
2172 if (list_empty(&trans->r_itemq)) {
2173 /* we need to catch log corruptions here */
2174 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
2175 xfs_warn(log->l_mp, "%s: bad header magic number",
2176 __func__);
2177 ASSERT(0);
2178 return -EFSCORRUPTED;
2179 }
2180
2181 if (len > sizeof(struct xfs_trans_header)) {
2182 xfs_warn(log->l_mp, "%s: bad header length", __func__);
2183 ASSERT(0);
2184 return -EFSCORRUPTED;
2185 }
2186
2187 /*
2188 * The transaction header can be arbitrarily split across op
2189 * records. If we don't have the whole thing here, copy what we
2190 * do have and handle the rest in the next record.
2191 */
2192 if (len == sizeof(struct xfs_trans_header))
2193 xlog_recover_add_item(&trans->r_itemq);
2194 memcpy(&trans->r_theader, dp, len);
2195 return 0;
2196 }
2197
2198 ptr = xlog_kvmalloc(len);
2199 memcpy(ptr, dp, len);
2200 in_f = (struct xfs_inode_log_format *)ptr;
2201
2202 /* take the tail entry */
2203 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2204 ri_list);
2205 if (item->ri_total != 0 &&
2206 item->ri_total == item->ri_cnt) {
2207 /* tail item is in use, get a new one */
2208 xlog_recover_add_item(&trans->r_itemq);
2209 item = list_entry(trans->r_itemq.prev,
2210 struct xlog_recover_item, ri_list);
2211 }
2212
2213 if (item->ri_total == 0) { /* first region to be added */
2214 if (in_f->ilf_size == 0 ||
2215 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
2216 xfs_warn(log->l_mp,
2217 "bad number of regions (%d) in inode log format",
2218 in_f->ilf_size);
2219 ASSERT(0);
2220 kvfree(ptr);
2221 return -EFSCORRUPTED;
2222 }
2223
2224 item->ri_total = in_f->ilf_size;
2225 item->ri_buf = kzalloc_objs(*item->ri_buf, item->ri_total,
2226 GFP_KERNEL | __GFP_NOFAIL);
2227 }
2228
2229 if (item->ri_total <= item->ri_cnt) {
2230 xfs_warn(log->l_mp,
2231 "log item region count (%d) overflowed size (%d)",
2232 item->ri_cnt, item->ri_total);
2233 ASSERT(0);
2234 kvfree(ptr);
2235 return -EFSCORRUPTED;
2236 }
2237
2238 /* Description region is ri_buf[0] */
2239 item->ri_buf[item->ri_cnt].iov_base = ptr;
2240 item->ri_buf[item->ri_cnt].iov_len = len;
2241 item->ri_cnt++;
2242 trace_xfs_log_recover_item_add(log, trans, item, 0);
2243 return 0;
2244 }
2245
2246 /*
2247 * Free up any resources allocated by the transaction
2248 *
2249 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2250 */
2251 STATIC void
2252 xlog_recover_free_trans(
2253 struct xlog_recover *trans)
2254 {
2255 struct xlog_recover_item *item, *n;
2256 int i;
2257
2258 hlist_del_init(&trans->r_list);
2259
2260 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2261 /* Free the regions in the item. */
2262 list_del(&item->ri_list);
2263 for (i = 0; i < item->ri_cnt; i++)
2264 kvfree(item->ri_buf[i].iov_base);
2265 /* Free the item itself */
2266 kfree(item->ri_buf);
2267 kfree(item);
2268 }
2269 /* Free the transaction recover structure */
2270 kfree(trans);
2271 }
2272
2273 /*
2274 * On error or completion, trans is freed.
2275 */
2276 STATIC int
2277 xlog_recovery_process_trans(
2278 struct xlog *log,
2279 struct xlog_recover *trans,
2280 char *dp,
2281 unsigned int len,
2282 unsigned int flags,
2283 int pass,
2284 struct list_head *buffer_list)
2285 {
2286 int error = 0;
2287 bool freeit = false;
2288
2289 /* mask off ophdr transaction container flags */
2290 flags &= ~XLOG_END_TRANS;
2291 if (flags & XLOG_WAS_CONT_TRANS)
2292 flags &= ~XLOG_CONTINUE_TRANS;
2293
2294 /*
2295 * Callees must not free the trans structure. We'll decide if we need to
2296 * free it or not based on the operation being done and its result.
2297 */
2298 switch (flags) {
2299 /* expected flag values */
2300 case 0:
2301 case XLOG_CONTINUE_TRANS:
2302 error = xlog_recover_add_to_trans(log, trans, dp, len);
2303 break;
2304 case XLOG_WAS_CONT_TRANS:
2305 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
2306 break;
2307 case XLOG_COMMIT_TRANS:
2308 error = xlog_recover_commit_trans(log, trans, pass,
2309 buffer_list);
2310 /* success or fail, we are now done with this transaction. */
2311 freeit = true;
2312 break;
2313
2314 /* unexpected flag values */
2315 case XLOG_UNMOUNT_TRANS:
2316 /* just skip trans */
2317 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2318 freeit = true;
2319 break;
2320 case XLOG_START_TRANS:
2321 default:
2322 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
2323 ASSERT(0);
2324 error = -EFSCORRUPTED;
2325 break;
2326 }
2327 if (error || freeit)
2328 xlog_recover_free_trans(trans);
2329 return error;
2330 }
2331
2332 /*
2333 * Lookup the transaction recovery structure associated with the ID in the
2334 * current ophdr. If the transaction doesn't exist and the start flag is set in
2335 * the ophdr, then allocate a new transaction for future ID matches to find.
2336 * Either way, return what we found during the lookup - an existing transaction
2337 * or nothing.
2338 */
2339 STATIC struct xlog_recover *
2340 xlog_recover_ophdr_to_trans(
2341 struct hlist_head rhash[],
2342 struct xlog_rec_header *rhead,
2343 struct xlog_op_header *ohead)
2344 {
2345 struct xlog_recover *trans;
2346 xlog_tid_t tid;
2347 struct hlist_head *rhp;
2348
2349 tid = be32_to_cpu(ohead->oh_tid);
2350 rhp = &rhash[XLOG_RHASH(tid)];
2351 hlist_for_each_entry(trans, rhp, r_list) {
2352 if (trans->r_log_tid == tid)
2353 return trans;
2354 }
2355
2356 /*
2357 * skip over non-start transaction headers - we could be
2358 * processing slack space before the next transaction starts
2359 */
2360 if (!(ohead->oh_flags & XLOG_START_TRANS))
2361 return NULL;
2362
2363 ASSERT(be32_to_cpu(ohead->oh_len) == 0);
2364
2365 /*
2366 * This is a new transaction so allocate a new recovery container to
2367 * hold the recovery ops that will follow.
2368 */
2369 trans = kzalloc_obj(struct xlog_recover, GFP_KERNEL | __GFP_NOFAIL);
2370 trans->r_log_tid = tid;
2371 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2372 INIT_LIST_HEAD(&trans->r_itemq);
2373 INIT_HLIST_NODE(&trans->r_list);
2374 hlist_add_head(&trans->r_list, rhp);
2375
2376 /*
2377 * Nothing more to do for this ophdr. Items to be added to this new
2378 * transaction will be in subsequent ophdr containers.
2379 */
2380 return NULL;
2381 }
2382
2383 STATIC int
2384 xlog_recover_process_ophdr(
2385 struct xlog *log,
2386 struct hlist_head rhash[],
2387 struct xlog_rec_header *rhead,
2388 struct xlog_op_header *ohead,
2389 char *dp,
2390 char *end,
2391 int pass,
2392 struct list_head *buffer_list)
2393 {
2394 struct xlog_recover *trans;
2395 unsigned int len;
2396 int error;
2397
2398 /* Do we understand who wrote this op? */
2399 if (ohead->oh_clientid != XFS_TRANSACTION &&
2400 ohead->oh_clientid != XFS_LOG) {
2401 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2402 __func__, ohead->oh_clientid);
2403 ASSERT(0);
2404 return -EFSCORRUPTED;
2405 }
2406
2407 /*
2408 * Check the ophdr contains all the data it is supposed to contain.
2409 */
2410 len = be32_to_cpu(ohead->oh_len);
2411 if (dp + len > end) {
2412 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
2413 WARN_ON(1);
2414 return -EFSCORRUPTED;
2415 }
2416
2417 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
2418 if (!trans) {
2419 /* nothing to do, so skip over this ophdr */
2420 return 0;
2421 }
2422
2423 /*
2424 * The recovered buffer queue is drained only once we know that all
2425 * recovery items for the current LSN have been processed. This is
2426 * required because:
2427 *
2428 * - Buffer write submission updates the metadata LSN of the buffer.
2429 * - Log recovery skips items with a metadata LSN >= the current LSN of
2430 * the recovery item.
2431 * - Separate recovery items against the same metadata buffer can share
2432 * a current LSN. I.e., consider that the LSN of a recovery item is
2433 * defined as the starting LSN of the first record in which its
2434 * transaction appears, that a record can hold multiple transactions,
2435 * and/or that a transaction can span multiple records.
2436 *
2437 * In other words, we are allowed to submit a buffer from log recovery
2438 * once per current LSN. Otherwise, we may incorrectly skip recovery
2439 * items and cause corruption.
2440 *
2441 * We don't know up front whether buffers are updated multiple times per
2442 * LSN. Therefore, track the current LSN of each commit log record as it
2443 * is processed and drain the queue when it changes. Use commit records
2444 * because they are ordered correctly by the logging code.
2445 */
2446 if (log->l_recovery_lsn != trans->r_lsn &&
2447 ohead->oh_flags & XLOG_COMMIT_TRANS) {
2448 error = xfs_buf_delwri_submit(buffer_list);
2449 if (error)
2450 return error;
2451 log->l_recovery_lsn = trans->r_lsn;
2452 }
2453
2454 return xlog_recovery_process_trans(log, trans, dp, len,
2455 ohead->oh_flags, pass, buffer_list);
2456 }
2457
2458 /*
2459 * There are two valid states of the r_state field. 0 indicates that the
2460 * transaction structure is in a normal state. We have either seen the
2461 * start of the transaction or the last operation we added was not a partial
2462 * operation. If the last operation we added to the transaction was a
2463 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2464 *
2465 * NOTE: skip LRs with 0 data length.
2466 */
2467 STATIC int
2468 xlog_recover_process_data(
2469 struct xlog *log,
2470 struct hlist_head rhash[],
2471 struct xlog_rec_header *rhead,
2472 char *dp,
2473 int pass,
2474 struct list_head *buffer_list)
2475 {
2476 struct xlog_op_header *ohead;
2477 char *end;
2478 int num_logops;
2479 int error;
2480
2481 end = dp + be32_to_cpu(rhead->h_len);
2482 num_logops = be32_to_cpu(rhead->h_num_logops);
2483
2484 /* check the log format matches our own - else we can't recover */
2485 if (xlog_header_check_recover(log->l_mp, rhead))
2486 return -EIO;
2487
2488 trace_xfs_log_recover_record(log, rhead, pass);
2489 while ((dp < end) && num_logops) {
2490
2491 ohead = (struct xlog_op_header *)dp;
2492 dp += sizeof(*ohead);
2493 if (dp > end) {
2494 xfs_warn(log->l_mp, "%s: op header overrun", __func__);
2495 return -EFSCORRUPTED;
2496 }
2497
2498 /* errors will abort recovery */
2499 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
2500 dp, end, pass, buffer_list);
2501 if (error)
2502 return error;
2503
2504 dp += be32_to_cpu(ohead->oh_len);
2505 num_logops--;
2506 }
2507 return 0;
2508 }
2509
2510 /* Take all the collected deferred ops and finish them in order. */
2511 static int
2512 xlog_finish_defer_ops(
2513 struct xfs_mount *mp,
2514 struct list_head *capture_list)
2515 {
2516 struct xfs_defer_capture *dfc, *next;
2517 struct xfs_trans *tp;
2518 int error = 0;
2519
2520 list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2521 struct xfs_trans_res resv;
2522 struct xfs_defer_resources dres;
2523
2524 /*
2525 * Create a new transaction reservation from the captured
2526 * information. Set logcount to 1 to force the new transaction
2527 * to regrant every roll so that we can make forward progress
2528 * in recovery no matter how full the log might be.
2529 */
2530 resv.tr_logres = dfc->dfc_logres;
2531 resv.tr_logcount = 1;
2532 resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;
2533
2534 error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
2535 dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
2536 if (error) {
2537 xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR);
2538 return error;
2539 }
2540
2541 /*
2542 * Transfer to this new transaction all the dfops we captured
2543 * from recovering a single intent item.
2544 */
2545 list_del_init(&dfc->dfc_list);
2546 xfs_defer_ops_continue(dfc, tp, &dres);
2547 error = xfs_trans_commit(tp);
2548 xfs_defer_resources_rele(&dres);
2549 if (error)
2550 return error;
2551 }
2552
2553 ASSERT(list_empty(capture_list));
2554 return 0;
2555 }
2556
2557 /* Release all the captured defer ops and capture structures in this list. */
2558 static void
2559 xlog_abort_defer_ops(
2560 struct xfs_mount *mp,
2561 struct list_head *capture_list)
2562 {
2563 struct xfs_defer_capture *dfc;
2564 struct xfs_defer_capture *next;
2565
2566 list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2567 list_del_init(&dfc->dfc_list);
2568 xfs_defer_ops_capture_abort(mp, dfc);
2569 }
2570 }
2571
2572 /*
2573 * When this is called, all of the log intent items which did not have
2574 * corresponding log done items should be in the AIL. What we do now is update
2575 * the data structures associated with each one.
2576 *
2577 * Since we process the log intent items in normal transactions, they will be
2578 * removed at some point after the commit. This prevents us from just walking
2579 * down the list processing each one. We'll use a flag in the intent item to
2580 * skip those that we've already processed and use the AIL iteration mechanism's
2581 * generation count to try to speed this up at least a bit.
2582 *
2583 * When we start, we know that the intents are the only things in the AIL. As we
2584 * process them, however, other items are added to the AIL. Hence we know we
2585 * have started recovery on all the pending intents when we find a non-intent
2586 * item in the AIL.
2587 */
2588 STATIC int
2589 xlog_recover_process_intents(
2590 struct xlog *log)
2591 {
2592 LIST_HEAD(capture_list);
2593 struct xfs_defer_pending *dfp, *n;
2594 int error = 0;
2595 #if defined(DEBUG) || defined(XFS_WARN)
2596 xfs_lsn_t last_lsn;
2597
2598 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
2599 #endif
2600
2601 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
2602 ASSERT(xlog_item_is_intent(dfp->dfp_intent));
2603
2604 /*
2605 * We should never see a redo item with a LSN higher than
2606 * the last transaction we found in the log at the start
2607 * of recovery.
2608 */
2609 ASSERT(XFS_LSN_CMP(last_lsn, dfp->dfp_intent->li_lsn) >= 0);
2610
2611 /*
2612 * NOTE: If your intent processing routine can create more
2613 * deferred ops, you /must/ attach them to the capture list in
2614 * the recover routine or else those subsequent intents will be
2615 * replayed in the wrong order!
2616 *
2617 * The recovery function can free the log item, so we must not
2618 * access dfp->dfp_intent after it returns. It must dispose of
2619 * @dfp if it returns 0.
2620 */
2621 error = xfs_defer_finish_recovery(log->l_mp, dfp,
2622 &capture_list);
2623 if (error)
2624 break;
2625 }
2626 if (error)
2627 goto err;
2628
2629 error = xlog_finish_defer_ops(log->l_mp, &capture_list);
2630 if (error)
2631 goto err;
2632
2633 return 0;
2634 err:
2635 xlog_abort_defer_ops(log->l_mp, &capture_list);
2636 return error;
2637 }
2638
2639 /*
2640 * A cancel occurs when the mount has failed and we're bailing out. Release all
2641 * pending log intent items that we haven't started recovery on so they don't
2642 * pin the AIL.
2643 */
2644 STATIC void
2645 xlog_recover_cancel_intents(
2646 struct xlog *log)
2647 {
2648 struct xfs_defer_pending *dfp, *n;
2649
2650 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
2651 ASSERT(xlog_item_is_intent(dfp->dfp_intent));
2652
2653 xfs_defer_cancel_recovery(log->l_mp, dfp);
2654 }
2655 }
2656
2657 /*
2658 * Transfer ownership of the recovered pending work to the recovery transaction
2659 * and try to finish the work. If there is more work to be done, the dfp will
2660 * remain attached to the transaction. If not, the dfp is freed.
2661 */
2662 int
2663 xlog_recover_finish_intent(
2664 struct xfs_trans *tp,
2665 struct xfs_defer_pending *dfp)
2666 {
2667 int error;
2668
2669 list_move(&dfp->dfp_list, &tp->t_dfops);
2670 error = xfs_defer_finish_one(tp, dfp);
2671 if (error == -EAGAIN)
2672 return 0;
2673 return error;
2674 }
2675
2676 /*
2677 * This routine performs a transaction to null out a bad inode pointer
2678 * in an agi unlinked inode hash bucket.
2679 */
2680 STATIC void
2681 xlog_recover_clear_agi_bucket(
2682 struct xfs_perag *pag,
2683 int bucket)
2684 {
2685 struct xfs_mount *mp = pag_mount(pag);
2686 struct xfs_trans *tp;
2687 struct xfs_agi *agi;
2688 struct xfs_buf *agibp;
2689 int offset;
2690 int error;
2691
2692 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
2693 if (error)
2694 goto out_error;
2695
2696 error = xfs_read_agi(pag, tp, 0, &agibp);
2697 if (error)
2698 goto out_abort;
2699
2700 agi = agibp->b_addr;
2701 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
2702 offset = offsetof(xfs_agi_t, agi_unlinked) +
2703 (sizeof(xfs_agino_t) * bucket);
2704 xfs_trans_log_buf(tp, agibp, offset,
2705 (offset + sizeof(xfs_agino_t) - 1));
2706
2707 error = xfs_trans_commit(tp);
2708 if (error)
2709 goto out_error;
2710 return;
2711
2712 out_abort:
2713 xfs_trans_cancel(tp);
2714 out_error:
2715 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__,
2716 pag_agno(pag));
2717 return;
2718 }
2719
2720 static int
2721 xlog_recover_iunlink_bucket(
2722 struct xfs_perag *pag,
2723 struct xfs_agi *agi,
2724 int bucket)
2725 {
2726 struct xfs_mount *mp = pag_mount(pag);
2727 struct xfs_inode *prev_ip = NULL;
2728 struct xfs_inode *ip;
2729 xfs_agino_t prev_agino, agino;
2730 int error = 0;
2731
2732 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
2733 while (agino != NULLAGINO) {
2734 error = xfs_iget(mp, NULL, xfs_agino_to_ino(pag, agino), 0, 0,
2735 &ip);
2736 if (error)
2737 break;
2738
2739 ASSERT(VFS_I(ip)->i_nlink == 0);
2740 ASSERT(VFS_I(ip)->i_mode != 0);
2741 xfs_iflags_clear(ip, XFS_IRECOVERY);
2742 agino = ip->i_next_unlinked;
2743
2744 if (prev_ip) {
2745 ip->i_prev_unlinked = prev_agino;
2746 xfs_irele(prev_ip);
2747
2748 /*
2749 * Ensure the inode is removed from the unlinked list
2750 * before we continue so that it won't race with
2751 * building the in-memory list here. This could be
2752 * serialised with the agibp lock, but that just
2753 * serialises via lockstepping and it's much simpler
2754 * just to flush the inodegc queue and wait for it to
2755 * complete.
2756 */
2757 error = xfs_inodegc_flush(mp);
2758 if (error)
2759 break;
2760 }
2761
2762 prev_agino = agino;
2763 prev_ip = ip;
2764 }
2765
2766 if (prev_ip) {
2767 int error2;
2768
2769 ip->i_prev_unlinked = prev_agino;
2770 xfs_irele(prev_ip);
2771
2772 error2 = xfs_inodegc_flush(mp);
2773 if (error2 && !error)
2774 return error2;
2775 }
2776 return error;
2777 }
2778
2779 /*
2780 * Recover AGI unlinked lists
2781 *
2782 * This is called during recovery to process any inodes which we unlinked but
2783 * not freed when the system crashed. These inodes will be on the lists in the
2784 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
2785 * any inodes found on the lists. Each inode is removed from the lists when it
2786 * has been fully truncated and is freed. The freeing of the inode and its
2787 * removal from the list must be atomic.
2788 *
2789 * If everything we touch in the agi processing loop is already in memory, this
2790 * loop can hold the cpu for a long time. It runs without lock contention,
2791 * memory allocation contention, the need to wait for IO, etc., and so will run
2792 * until we either run out of inodes to process, run low on memory, or run out
2793 * of log space.
2794 *
2795 * This behaviour is bad for latency on single CPU and non-preemptible kernels,
2796 * and can prevent other filesystem work (such as CIL pushes) from running. This
2797 * can lead to deadlocks if the recovery process runs out of log reservation
2798 * space. Hence we need to yield the CPU when there is other kernel work
2799 * scheduled on this CPU to ensure other scheduled work can run without undue
2800 * latency.
2801 */
2802 static void
2803 xlog_recover_iunlink_ag(
2804 struct xfs_perag *pag)
2805 {
2806 struct xfs_agi *agi;
2807 struct xfs_buf *agibp;
2808 int bucket;
2809 int error;
2810
2811 error = xfs_read_agi(pag, NULL, 0, &agibp);
2812 if (error) {
2813 /*
2814 * AGI is b0rked. Don't process it.
2815 *
2816 * We should probably mark the filesystem as corrupt after we've
2817 * recovered all the ag's we can....
2818 */
2819 return;
2820 }
2821
2822 /*
2823 * Unlock the buffer so that it can be acquired in the normal course of
2824 * the transaction to truncate and free each inode. Because we are not
2825 * racing with anyone else here for the AGI buffer, we don't even need
2826 * to hold it locked to read the initial unlinked bucket entries out of
2827 * the buffer. We keep buffer reference though, so that it stays pinned
2828 * in memory while we need the buffer.
2829 */
2830 agi = agibp->b_addr;
2831 xfs_buf_unlock(agibp);
2832
2833 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
2834 error = xlog_recover_iunlink_bucket(pag, agi, bucket);
2835 if (error) {
2836 /*
2837 * Bucket is unrecoverable, so only a repair scan can
2838 * free the remaining unlinked inodes. Just empty the
2839 * bucket and leave the remaining inodes on it
2840 * unreferenced and unfreeable.
2841 */
2842 xlog_recover_clear_agi_bucket(pag, bucket);
2843 }
2844 }
2845
2846 xfs_buf_rele(agibp);
2847 }
2848
2849 static void
2850 xlog_recover_process_iunlinks(
2851 struct xlog *log)
2852 {
2853 struct xfs_perag *pag = NULL;
2854
2855 while ((pag = xfs_perag_next(log->l_mp, pag)))
2856 xlog_recover_iunlink_ag(pag);
2857 }
2858
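/*
 * Restore the first four bytes of each basic block of record data,
 * which were replaced with the record's cycle number when the log was
 * written so that torn writes could be detected.
 */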
2859 STATIC void
2860 xlog_unpack_data(
2861 struct xlog_rec_header *rhead,
2862 char *dp,
2863 struct xlog *log)
2864 {
2865 int i;
2866
2867 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
2868 *(__be32 *)dp = *xlog_cycle_data(rhead, i);
2869 dp += BBSIZE;
2870 }
2871 }
2872
2873 /*
2874 * CRC check, unpack and process a log record.
2875 */
2876 STATIC int
2877 xlog_recover_process(
2878 struct xlog *log,
2879 struct hlist_head rhash[],
2880 struct xlog_rec_header *rhead,
2881 char *dp,
2882 int pass,
2883 struct list_head *buffer_list)
2884 {
2885 __le32 expected_crc = rhead->h_crc, crc, other_crc;
2886
2887 crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE,
2888 be32_to_cpu(rhead->h_len));
2889
2890 /*
2891 * Look at the end of the struct xlog_rec_header definition in
2892 * xfs_log_format.h for the gory details.
2893 */
2894 if (expected_crc && crc != expected_crc) {
2895 other_crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE_OTHER,
2896 be32_to_cpu(rhead->h_len));
2897 if (other_crc == expected_crc) {
2898 xfs_notice_once(log->l_mp,
2899 "Fixing up incorrect CRC due to padding.");
2900 crc = other_crc;
2901 }
2902 }
2903
2904 /*
2905 * Nothing else to do if this is a CRC verification pass. Just return
2906 * if this is a record with a non-zero crc. Unfortunately, mkfs always
2907 * sets expected_crc to 0 so we must consider this valid even on v5
2908 * supers. Otherwise, return EFSBADCRC on failure so the callers up the
2909 * stack know precisely what failed.
2910 */
2911 if (pass == XLOG_RECOVER_CRCPASS) {
2912 if (expected_crc && crc != expected_crc)
2913 return -EFSBADCRC;
2914 return 0;
2915 }
2916
2917 /*
2918 * We're in the normal recovery path. Issue a warning if and only if the
2919 * CRC in the header is non-zero. This is an advisory warning and the
2920 * zero CRC check prevents warnings from being emitted when upgrading
2921 * the kernel from one that does not add CRCs by default.
2922 */
2923 if (crc != expected_crc) {
2924 if (expected_crc || xfs_has_crc(log->l_mp)) {
2925 xfs_alert(log->l_mp,
2926 "log record CRC mismatch: found 0x%x, expected 0x%x.",
2927 le32_to_cpu(expected_crc),
2928 le32_to_cpu(crc));
2929 xfs_hex_dump(dp, 32);
2930 }
2931
2932 /*
2933 * If the filesystem is CRC enabled, this mismatch becomes a
2934 * fatal log corruption failure.
2935 */
2936 if (xfs_has_crc(log->l_mp)) {
2937 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
2938 return -EFSCORRUPTED;
2939 }
2940 }
2941
2942 xlog_unpack_data(rhead, dp, log);
2943
2944 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
2945 buffer_list);
2946 }
2947
2948 STATIC int
2949 xlog_valid_rec_header(
2950 struct xlog *log,
2951 struct xlog_rec_header *rhead,
2952 xfs_daddr_t blkno,
2953 int bufsize)
2954 {
2955 struct xfs_mount *mp = log->l_mp;
2956 u32 h_version = be32_to_cpu(rhead->h_version);
2957 int hlen;
2958
2959 if (XFS_IS_CORRUPT(mp,
2960 rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
2961 return -EFSCORRUPTED;
2962
2963 /*
2964 * The log version must match the superblock
2965 */
2966 if (xfs_has_logv2(mp)) {
2967 if (XFS_IS_CORRUPT(mp, h_version != XLOG_VERSION_2))
2968 return -EFSCORRUPTED;
2969 } else {
2970 if (XFS_IS_CORRUPT(mp, h_version != XLOG_VERSION_1))
2971 return -EFSCORRUPTED;
2972 }
2973
2974 /*
2975 * LR body must have data (or it wouldn't have been written)
2976 * and h_len must not be greater than LR buffer size.
2977 */
2978 hlen = be32_to_cpu(rhead->h_len);
2979 if (XFS_IS_CORRUPT(mp, hlen <= 0 || hlen > bufsize))
2980 return -EFSCORRUPTED;
2981
2982 if (XFS_IS_CORRUPT(mp, blkno > log->l_logBBsize || blkno > INT_MAX))
2983 return -EFSCORRUPTED;
2984
2985 return 0;
2986 }
2987
2988 /*
2989 * Read the log from tail to head and process the log records found.
2990 * Handle the two cases where the tail and head are in the same cycle
2991 * and where the active portion of the log wraps around the end of
2992 * the physical log separately. The pass parameter is passed through
2993 * to the routines called to process the data and is not looked at
2994 * here.
2995 */
2996 STATIC int
2997 xlog_do_recovery_pass(
2998 struct xlog *log,
2999 xfs_daddr_t head_blk,
3000 xfs_daddr_t tail_blk,
3001 int pass,
3002 xfs_daddr_t *first_bad) /* out: first bad log rec */
3003 {
3004 struct xlog_rec_header *rhead;
3005 xfs_daddr_t blk_no, rblk_no;
3006 xfs_daddr_t rhead_blk;
3007 char *offset;
3008 char *hbp, *dbp;
3009 int error = 0, h_size, h_len;
3010 int error2 = 0;
3011 int bblks, split_bblks;
3012 int hblks = 1, split_hblks, wrapped_hblks;
3013 int i;
3014 struct hlist_head rhash[XLOG_RHASH_SIZE];
3015 LIST_HEAD (buffer_list);
3016
3017 ASSERT(head_blk != tail_blk);
3018 blk_no = rhead_blk = tail_blk;
3019
3020 for (i = 0; i < XLOG_RHASH_SIZE; i++)
3021 INIT_HLIST_HEAD(&rhash[i]);
3022
3023 hbp = xlog_alloc_buffer(log, hblks);
3024 if (!hbp)
3025 return -ENOMEM;
3026
3027 /*
3028 * Read the header of the tail block and get the iclog buffer size from
3029 * h_size. Use this to tell how many sectors make up the log header.
3030 */
3031 if (xfs_has_logv2(log->l_mp)) {
3032 /*
3033 * When using variable length iclogs, read first sector of
3034 * iclog header and extract the header size from it. Get a
3035 * new hbp that is the correct size.
3036 */
3037 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3038 if (error)
3039 goto bread_err1;
3040
3041 rhead = (struct xlog_rec_header *)offset;
3042
3043 /*
3044 * xfsprogs has a bug where record length is based on lsunit but
3045 * h_size (iclog size) is hardcoded to 32k. Now that we
3046 * unconditionally CRC verify the unmount record, this means the
3047 * log buffer can be too small for the record and cause an
3048 * overrun.
3049 *
3050 * Detect this condition here. Use lsunit for the buffer size as
3051 * long as this looks like the mkfs case. Otherwise, return an
3052 * error to avoid a buffer overrun.
3053 */
3054 h_size = be32_to_cpu(rhead->h_size);
3055 h_len = be32_to_cpu(rhead->h_len);
3056 if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
3057 rhead->h_num_logops == cpu_to_be32(1)) {
3058 xfs_warn(log->l_mp,
3059 "invalid iclog size (%d bytes), using lsunit (%d bytes)",
3060 h_size, log->l_mp->m_logbsize);
3061 h_size = log->l_mp->m_logbsize;
3062 }
3063
3064 error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
3065 if (error)
3066 goto bread_err1;
3067
3068 /*
3069 * This open codes xlog_logrec_hblks so that we can reuse the
3070 * fixed up h_size value calculated above. Without that we'd
3071 * still allocate the buffer based on the incorrect on-disk
3072 * size.
3073 */
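/*
 * For example, a v2 log with a hypothetical 64k iclog needs
 * DIV_ROUND_UP(65536, 32768) = 2 header blocks, so the single block
 * header buffer allocated above must be replaced.
 */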
3074 if (h_size > XLOG_HEADER_CYCLE_SIZE &&
3075 (rhead->h_version & cpu_to_be32(XLOG_VERSION_2))) {
3076 hblks = DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
3077 if (hblks > 1) {
3078 kvfree(hbp);
3079 hbp = xlog_alloc_buffer(log, hblks);
3080 if (!hbp)
3081 return -ENOMEM;
3082 }
3083 }
3084 } else {
3085 ASSERT(log->l_sectBBsize == 1);
3086 h_size = XLOG_BIG_RECORD_BSIZE;
3087 }
3088
3089 dbp = xlog_alloc_buffer(log, BTOBB(h_size));
3090 if (!dbp) {
3091 kvfree(hbp);
3092 return -ENOMEM;
3093 }
3094
3095 memset(rhash, 0, sizeof(rhash));
3096 if (tail_blk > head_blk) {
3097 /*
3098 * Perform recovery around the end of the physical log.
3099 * When the head is not on the same cycle number as the tail,
3100 * we can't do a sequential recovery.
3101 */
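/*
 * For example (hypothetical layout): with tail_blk = 900 and
 * head_blk = 100 in a 1000 block log, this loop processes records
 * from block 900 to 999, splitting reads around the physical end as
 * needed, before the loop further below resumes at block 0.
 */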
3102 while (blk_no < log->l_logBBsize) {
3103 /*
3104 * Check for header wrapping around physical end-of-log
3105 */
3106 offset = hbp;
3107 split_hblks = 0;
3108 wrapped_hblks = 0;
3109 if (blk_no + hblks <= log->l_logBBsize) {
3110 /* Read header in one read */
3111 error = xlog_bread(log, blk_no, hblks, hbp,
3112 &offset);
3113 if (error)
3114 goto bread_err2;
3115 } else {
3116 /* This LR is split across physical log end */
3117 if (blk_no != log->l_logBBsize) {
3118 /* some data before physical log end */
3119 ASSERT(blk_no <= INT_MAX);
3120 split_hblks = log->l_logBBsize - (int)blk_no;
3121 ASSERT(split_hblks > 0);
3122 error = xlog_bread(log, blk_no,
3123 split_hblks, hbp,
3124 &offset);
3125 if (error)
3126 goto bread_err2;
3127 }
3128
3129 /*
3130 * Note: this black magic still works with
3131 * large sector sizes (non-512) only because:
3132 * - we increased the buffer size originally
3133 * by 1 sector giving us enough extra space
3134 * for the second read;
3135 * - the log start is guaranteed to be sector
3136 * aligned;
3137 * - we read the log end (LR header start)
3138 * _first_, then the log start (LR header end)
3139 * - order is important.
3140 */
3141 wrapped_hblks = hblks - split_hblks;
3142 error = xlog_bread_noalign(log, 0,
3143 wrapped_hblks,
3144 offset + BBTOB(split_hblks));
3145 if (error)
3146 goto bread_err2;
3147 }
3148 rhead = (struct xlog_rec_header *)offset;
3149 error = xlog_valid_rec_header(log, rhead,
3150 split_hblks ? blk_no : 0, h_size);
3151 if (error)
3152 goto bread_err2;
3153
3154 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3155 blk_no += hblks;
3156
3157 /*
3158 * Read the log record data in multiple reads if it
3159 * wraps around the end of the log. Note that if the
3160 * header already wrapped, blk_no could point past the
3161 * end of the log. The record data is contiguous in
3162 * that case.
3163 */
3164 if (blk_no + bblks <= log->l_logBBsize ||
3165 blk_no >= log->l_logBBsize) {
3166 rblk_no = xlog_wrap_logbno(log, blk_no);
3167 error = xlog_bread(log, rblk_no, bblks, dbp,
3168 &offset);
3169 if (error)
3170 goto bread_err2;
3171 } else {
3172 /* This log record is split across the
3173 * physical end of log */
3174 offset = dbp;
3175 split_bblks = 0;
3176 if (blk_no != log->l_logBBsize) {
3177 /* some data is before the physical
3178 * end of log */
3179 ASSERT(!wrapped_hblks);
3180 ASSERT(blk_no <= INT_MAX);
3181 split_bblks =
3182 log->l_logBBsize - (int)blk_no;
3183 ASSERT(split_bblks > 0);
3184 error = xlog_bread(log, blk_no,
3185 split_bblks, dbp,
3186 &offset);
3187 if (error)
3188 goto bread_err2;
3189 }
3190
3191 /*
3192 * Note: this black magic still works with
3193 * large sector sizes (non-512) only because:
3194 * - we increased the buffer size originally
3195 * by 1 sector giving us enough extra space
3196 * for the second read;
3197 * - the log start is guaranteed to be sector
3198 * aligned;
3199 * - we read the log end (LR header start)
3200 * _first_, then the log start (LR header end)
3201 * - order is important.
3202 */
3203 error = xlog_bread_noalign(log, 0,
3204 bblks - split_bblks,
3205 offset + BBTOB(split_bblks));
3206 if (error)
3207 goto bread_err2;
3208 }
3209
3210 error = xlog_recover_process(log, rhash, rhead, offset,
3211 pass, &buffer_list);
3212 if (error)
3213 goto bread_err2;
3214
3215 blk_no += bblks;
3216 rhead_blk = blk_no;
3217 }
3218
3219 ASSERT(blk_no >= log->l_logBBsize);
3220 blk_no -= log->l_logBBsize;
3221 rhead_blk = blk_no;
3222 }
3223
3224 /* read first part of physical log */
3225 while (blk_no < head_blk) {
3226 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3227 if (error)
3228 goto bread_err2;
3229
3230 rhead = (struct xlog_rec_header *)offset;
3231 error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
3232 if (error)
3233 goto bread_err2;
3234
3235 /* blocks in data section */
3236 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3237 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3238 &offset);
3239 if (error)
3240 goto bread_err2;
3241
3242 error = xlog_recover_process(log, rhash, rhead, offset, pass,
3243 &buffer_list);
3244 if (error)
3245 goto bread_err2;
3246
3247 blk_no += bblks + hblks;
3248 rhead_blk = blk_no;
3249 }
3250
3251 bread_err2:
3252 kvfree(dbp);
3253 bread_err1:
3254 kvfree(hbp);
3255
3256 /*
3257 * Submit buffers that have been dirtied by the last record recovered.
3258 */
3259 if (!list_empty(&buffer_list)) {
3260 if (error) {
3261 /*
3262 * If there has been an item recovery error then we
3263 * cannot allow partial checkpoint writeback to
3264 * occur. We might have multiple checkpoints with the
3265 * same start LSN in this buffer list, and partial
3266 * writeback of a checkpoint in this situation can
3267 * prevent future recovery of all the changes in the
3268 * checkpoints at this start LSN.
3269 *
3270 * Note: Shutting down the filesystem will result in the
3271 * delwri submission marking all the buffers stale,
3272 * completing them and cleaning up _XBF_LOGRECOVERY
3273 * state without doing any IO.
3274 */
3275 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3276 }
3277 error2 = xfs_buf_delwri_submit(&buffer_list);
3278 }
3279
3280 if (error && first_bad)
3281 *first_bad = rhead_blk;
3282
3283 /*
3284 * Transactions are freed at commit time but transactions without commit
3285 * records on disk are never committed. Free any that may be left in the
3286 * hash table.
3287 */
3288 for (i = 0; i < XLOG_RHASH_SIZE; i++) {
3289 struct hlist_node *tmp;
3290 struct xlog_recover *trans;
3291
3292 hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
3293 xlog_recover_free_trans(trans);
3294 }
3295
3296 return error ? error : error2;
3297 }
3298
3299 /*
3300 * Do the recovery of the log. We actually do this in two phases.
3301 * The two passes are necessary in order to implement the function
3302 * of cancelling a record written into the log. The first pass
3303 * determines those things which have been cancelled, and the
3304 * second pass replays log items normally except for those which
3305 * have been cancelled. The handling of the replay and cancellations
3306 * takes place in the log item type specific routines.
3307 *
3308 * The table of items which have cancel records in the log is allocated
3309 * and freed at this level, since only here do we know when all of
3310 * the log recovery has been completed.
3311 */
3312 STATIC int
3313 xlog_do_log_recovery(
3314 struct xlog *log,
3315 xfs_daddr_t head_blk,
3316 xfs_daddr_t tail_blk)
3317 {
3318 int error;
3319
3320 ASSERT(head_blk != tail_blk);
3321
3322 /*
3323 * First do a pass to find all of the cancelled buf log items.
3324 * Store them in the buf_cancel_table for use in the second pass.
3325 */
3326 error = xlog_alloc_buf_cancel_table(log);
3327 if (error)
3328 return error;
3329
3330 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3331 XLOG_RECOVER_PASS1, NULL);
3332 if (error != 0)
3333 goto out_cancel;
3334
3335 /*
3336 * Then do a second pass to actually recover the items in the log.
3337 * When it is complete free the table of buf cancel items.
3338 */
3339 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3340 XLOG_RECOVER_PASS2, NULL);
3341 if (!error)
3342 xlog_check_buf_cancel_table(log);
3343 out_cancel:
3344 xlog_free_buf_cancel_table(log);
3345 return error;
3346 }
3347
3348 /*
3349 * Do the actual recovery
3350 */
3351 STATIC int
3352 xlog_do_recover(
3353 struct xlog *log,
3354 xfs_daddr_t head_blk,
3355 xfs_daddr_t tail_blk)
3356 {
3357 struct xfs_mount *mp = log->l_mp;
3358 struct xfs_buf *bp = mp->m_sb_bp;
3359 struct xfs_sb *sbp = &mp->m_sb;
3360 int error;
3361
3362 trace_xfs_log_recover(log, head_blk, tail_blk);
3363
3364 /*
3365 * First replay the images in the log.
3366 */
3367 error = xlog_do_log_recovery(log, head_blk, tail_blk);
3368 if (error)
3369 return error;
3370
3371 if (xlog_is_shutdown(log))
3372 return -EIO;
3373
3374 /*
3375 * We now update the tail_lsn since much of the recovery has completed
3376 * and there may be space available to use. If there were no extent frees
3377 * or iunlinks, we can free up the entire log. This was set in
3378 * xlog_find_tail to be the lsn of the last known good LR on disk. If
3379 * there are extent frees or iunlinks they will have some entries in the
3380 * AIL; so we look at the AIL to determine how to set the tail_lsn.
3381 */
3382 xfs_ail_assign_tail_lsn(log->l_ailp);
3383
3384 /*
3385 * Now that we've finished replaying all buffer and inode updates,
3386 * re-read the superblock and reverify it.
3387 */
3388 xfs_buf_lock(bp);
3389 xfs_buf_hold(bp);
3390 error = _xfs_buf_read(bp);
3391 if (error) {
3392 if (!xlog_is_shutdown(log)) {
3393 xfs_buf_ioerror_alert(bp, __this_address);
3394 ASSERT(0);
3395 }
3396 xfs_buf_relse(bp);
3397 return error;
3398 }
3399
3400 /* Convert superblock from on-disk format */
3401 xfs_sb_from_disk(sbp, bp->b_addr);
3402 xfs_buf_relse(bp);
3403
3404 /* re-initialise in-core superblock and geometry structures */
3405 mp->m_features |= xfs_sb_version_to_features(sbp);
3406 xfs_reinit_percpu_counters(mp);
3407
3408 /* Normal transactions can now occur */
3409 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
3410 return 0;
3411 }
3412
3413 /*
3414 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3415 *
3416 * Return error or zero.
3417 */
3418 int
3419 xlog_recover(
3420 struct xlog *log)
3421 {
3422 xfs_daddr_t head_blk, tail_blk;
3423 int error;
3424
3425 /* find the tail of the log */
3426 error = xlog_find_tail(log, &head_blk, &tail_blk);
3427 if (error)
3428 return error;
3429
3430 /*
3431 * The superblock was read before the log was available and thus the LSN
3432 * could not be verified. Check the superblock LSN against the current
3433 * LSN now that it's known.
3434 */
3435 if (xfs_has_crc(log->l_mp) &&
3436 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
3437 return -EINVAL;
3438
3439 if (tail_blk != head_blk) {
3440 /* There used to be a comment here:
3441 *
3442 * disallow recovery on read-only mounts. note -- mount
3443 * checks for ENOSPC and turns it into an intelligent
3444 * error message.
3445 * ...but this is no longer true. Now, unless you specify
3446 * NORECOVERY (in which case this function would never be
3447 * called), we just go ahead and recover. We do this all
3448 * under the vfs layer, so we can get away with it unless
3449 * the device itself is read-only, in which case we fail.
3450 */
3451 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3452 return error;
3453 }
3454
3455 /*
3456 * Version 5 superblock log feature mask validation. We know the
3457 * log is dirty so check if there are any unknown log features
3458 * in what we need to recover. If there are unknown features
3459 * (e.g. unsupported transactions), then simply reject the
3460 * attempt at recovery before touching anything.
3461 */
3462 if (xfs_sb_is_v5(&log->l_mp->m_sb) &&
3463 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
3464 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
3465 xfs_warn(log->l_mp,
3466 "Superblock has unknown incompatible log features (0x%x) enabled.",
3467 (log->l_mp->m_sb.sb_features_log_incompat &
3468 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
3469 xfs_warn(log->l_mp,
3470 "The log can not be fully and/or safely recovered by this kernel.");
3471 xfs_warn(log->l_mp,
3472 "Please recover the log on a kernel that supports the unknown features.");
3473 return -EINVAL;
3474 }
3475
3476 /*
3477 * Delay log recovery if the debug hook is set. This is debug
3478 * instrumentation to coordinate simulation of I/O failures with
3479 * log recovery.
3480 */
3481 if (xfs_globals.log_recovery_delay) {
3482 xfs_notice(log->l_mp,
3483 "Delaying log recovery for %d seconds.",
3484 xfs_globals.log_recovery_delay);
3485 msleep(xfs_globals.log_recovery_delay * 1000);
3486 }
3487
3488 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3489 log->l_mp->m_logname ? log->l_mp->m_logname
3490 : "internal");
3491
3492 error = xlog_do_recover(log, head_blk, tail_blk);
3493 set_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
3494 }
3495 return error;
3496 }
3497
3498 /*
3499 * In the first part of recovery we replay inodes and buffers and build up the
3500 * list of intents which need to be processed. Here we process the intents and
3501 * clean up the on disk unlinked inode lists. This is separated from the first
3502 * part of recovery so that the root and real-time bitmap inodes can be read in
3503 * from disk in between the two stages. This is necessary so that we can free
3504 * space in the real-time portion of the file system.
3505 *
3506 * We run this whole process under GFP_NOFS allocation context. We do a
3507 * combination of non-transactional and transactional work, yet we really don't
3508 * want to recurse into the filesystem from direct reclaim during any of this
3509 * processing. This allows all the recovery code run here not to care about the
3510 * memory allocation context it is running in.
3511 */
3512 int
3513 xlog_recover_finish(
3514 struct xlog *log)
3515 {
3516 unsigned int nofs_flags = memalloc_nofs_save();
3517 int error;
3518
3519 error = xlog_recover_process_intents(log);
3520 if (error) {
3521 /*
3522 * Cancel all the unprocessed intent items now so that we don't
3523 * leave them pinned in the AIL. This can cause the AIL to
3524 * livelock on the pinned item if anyone tries to push the AIL
3525 * (inode reclaim does this) before we get around to
3526 * xfs_log_mount_cancel.
3527 */
3528 xlog_recover_cancel_intents(log);
3529 xfs_alert(log->l_mp, "Failed to recover intents");
3530 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3531 goto out_error;
3532 }
3533
3534 /*
3535 * Sync the log to get all the intents out of the AIL. This isn't
3536 * absolutely necessary, but it helps in case the unlink transactions
3537 * would have problems pushing the intents out of the way.
3538 */
3539 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3540
3541 xlog_recover_process_iunlinks(log);
3542
3543 /*
3544 * Recover any CoW staging blocks that are still referenced by the
3545 * ondisk refcount metadata. During mount there cannot be any live
3546 * staging extents as we have not permitted any user modifications.
3547 * Therefore, it is safe to free them all right now, even on a
3548 * read-only mount.
3549 */
3550 error = xfs_reflink_recover_cow(log->l_mp);
3551 if (error) {
3552 xfs_alert(log->l_mp,
3553 "Failed to recover leftover CoW staging extents, err %d.",
3554 error);
3555 /*
3556 * If we get an error here, make sure the log is shut down
3557 * but return zero so that any log items committed since the
3558 * end of intents processing can be pushed through the CIL
3559 * and AIL.
3560 */
3561 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3562 error = 0;
3563 goto out_error;
3564 }
3565
3566 out_error:
3567 memalloc_nofs_restore(nofs_flags);
3568 return error;
3569 }
3570
3571 void
3572 xlog_recover_cancel(
3573 struct xlog *log)
3574 {
3575 if (xlog_recovery_needed(log))
3576 xlog_recover_cancel_intents(log);
3577 }
3578
3579