1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs_platform.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_errortag.h"
14 #include "xfs_error.h"
15 #include "xfs_trans.h"
16 #include "xfs_trans_priv.h"
17 #include "xfs_log.h"
18 #include "xfs_log_priv.h"
19 #include "xfs_trace.h"
20 #include "xfs_sysfs.h"
21 #include "xfs_sb.h"
22 #include "xfs_health.h"
23 #include "xfs_zone_alloc.h"
24
25 struct xlog_write_data {
26 struct xlog_ticket *ticket;
27 struct xlog_in_core *iclog;
28 uint32_t bytes_left;
29 uint32_t record_cnt;
30 uint32_t data_cnt;
31 int log_offset;
32 };
33
34 struct kmem_cache *xfs_log_ticket_cache;
35
36 /* Local miscellaneous function prototypes */
37 STATIC struct xlog *
38 xlog_alloc_log(
39 struct xfs_mount *mp,
40 struct xfs_buftarg *log_target,
41 xfs_daddr_t blk_offset,
42 int num_bblks);
43 STATIC void
44 xlog_dealloc_log(
45 struct xlog *log);
46
47 /* local state machine functions */
48 STATIC void xlog_state_done_syncing(
49 struct xlog_in_core *iclog);
50 STATIC void xlog_state_do_callback(
51 struct xlog *log);
52 STATIC int
53 xlog_state_get_iclog_space(
54 struct xlog *log,
55 struct xlog_write_data *data);
56 STATIC void
57 xlog_sync(
58 struct xlog *log,
59 struct xlog_in_core *iclog,
60 struct xlog_ticket *ticket);
61 #if defined(DEBUG)
62 STATIC void
63 xlog_verify_iclog(
64 struct xlog *log,
65 struct xlog_in_core *iclog,
66 int count);
67 STATIC void
68 xlog_verify_tail_lsn(
69 struct xlog *log,
70 struct xlog_in_core *iclog);
71 #else
72 #define xlog_verify_iclog(a,b,c)
73 #define xlog_verify_tail_lsn(a,b)
74 #endif
75
76 STATIC int
77 xlog_iclogs_empty(
78 struct xlog *log);
79
80 static int
81 xfs_log_cover(struct xfs_mount *);
82
83 static inline void
xlog_grant_sub_space(struct xlog_grant_head * head,int64_t bytes)84 xlog_grant_sub_space(
85 struct xlog_grant_head *head,
86 int64_t bytes)
87 {
88 atomic64_sub(bytes, &head->grant);
89 }
90
91 static inline void
xlog_grant_add_space(struct xlog_grant_head * head,int64_t bytes)92 xlog_grant_add_space(
93 struct xlog_grant_head *head,
94 int64_t bytes)
95 {
96 atomic64_add(bytes, &head->grant);
97 }
98
99 static void
xlog_grant_head_init(struct xlog_grant_head * head)100 xlog_grant_head_init(
101 struct xlog_grant_head *head)
102 {
103 atomic64_set(&head->grant, 0);
104 INIT_LIST_HEAD(&head->waiters);
105 spin_lock_init(&head->lock);
106 }
107
108 void
xlog_grant_return_space(struct xlog * log,xfs_lsn_t old_head,xfs_lsn_t new_head)109 xlog_grant_return_space(
110 struct xlog *log,
111 xfs_lsn_t old_head,
112 xfs_lsn_t new_head)
113 {
114 int64_t diff = xlog_lsn_sub(log, new_head, old_head);
115
116 xlog_grant_sub_space(&log->l_reserve_head, diff);
117 xlog_grant_sub_space(&log->l_write_head, diff);
118 }
119
120 /*
121 * Return the space in the log between the tail and the head. In the case where
122 * we have overrun available reservation space, return 0. The memory barrier
123 * pairs with the smp_wmb() in xlog_cil_ail_insert() to ensure that grant head
124 * vs tail space updates are seen in the correct order and hence avoid
125 * transients as space is transferred from the grant heads to the AIL on commit
126 * completion.
127 */
128 static uint64_t
xlog_grant_space_left(struct xlog * log,struct xlog_grant_head * head)129 xlog_grant_space_left(
130 struct xlog *log,
131 struct xlog_grant_head *head)
132 {
133 int64_t free_bytes;
134
135 smp_rmb(); /* paired with smp_wmb in xlog_cil_ail_insert() */
136 free_bytes = log->l_logsize - READ_ONCE(log->l_tail_space) -
137 atomic64_read(&head->grant);
138 if (free_bytes > 0)
139 return free_bytes;
140 return 0;
141 }
142
143 STATIC void
xlog_grant_head_wake_all(struct xlog_grant_head * head)144 xlog_grant_head_wake_all(
145 struct xlog_grant_head *head)
146 {
147 struct xlog_ticket *tic;
148
149 spin_lock(&head->lock);
150 list_for_each_entry(tic, &head->waiters, t_queue)
151 wake_up_process(tic->t_task);
152 spin_unlock(&head->lock);
153 }
154
155 static inline int
xlog_ticket_reservation(struct xlog * log,struct xlog_grant_head * head,struct xlog_ticket * tic)156 xlog_ticket_reservation(
157 struct xlog *log,
158 struct xlog_grant_head *head,
159 struct xlog_ticket *tic)
160 {
161 if (head == &log->l_write_head) {
162 ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
163 return tic->t_unit_res;
164 }
165
166 if (tic->t_flags & XLOG_TIC_PERM_RESERV)
167 return tic->t_unit_res * tic->t_cnt;
168
169 return tic->t_unit_res;
170 }
171
172 STATIC bool
xlog_grant_head_wake(struct xlog * log,struct xlog_grant_head * head,int * free_bytes)173 xlog_grant_head_wake(
174 struct xlog *log,
175 struct xlog_grant_head *head,
176 int *free_bytes)
177 {
178 struct xlog_ticket *tic;
179 int need_bytes;
180
181 list_for_each_entry(tic, &head->waiters, t_queue) {
182 need_bytes = xlog_ticket_reservation(log, head, tic);
183 if (*free_bytes < need_bytes)
184 return false;
185
186 *free_bytes -= need_bytes;
187 trace_xfs_log_grant_wake_up(log, tic);
188 wake_up_process(tic->t_task);
189 }
190
191 return true;
192 }
193
194 STATIC int
xlog_grant_head_wait(struct xlog * log,struct xlog_grant_head * head,struct xlog_ticket * tic,int need_bytes)195 xlog_grant_head_wait(
196 struct xlog *log,
197 struct xlog_grant_head *head,
198 struct xlog_ticket *tic,
199 int need_bytes) __releases(&head->lock)
200 __acquires(&head->lock)
201 {
202 list_add_tail(&tic->t_queue, &head->waiters);
203
204 do {
205 if (xlog_is_shutdown(log))
206 goto shutdown;
207
208 __set_current_state(TASK_UNINTERRUPTIBLE);
209 spin_unlock(&head->lock);
210
211 XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
212
213 /* Push on the AIL to free up all the log space. */
214 xfs_ail_push_all(log->l_ailp);
215
216 trace_xfs_log_grant_sleep(log, tic);
217 schedule();
218 trace_xfs_log_grant_wake(log, tic);
219
220 spin_lock(&head->lock);
221 if (xlog_is_shutdown(log))
222 goto shutdown;
223 } while (xlog_grant_space_left(log, head) < need_bytes);
224
225 list_del_init(&tic->t_queue);
226 return 0;
227 shutdown:
228 list_del_init(&tic->t_queue);
229 return -EIO;
230 }
231
232 /*
233 * Atomically get the log space required for a log ticket.
234 *
235 * Once a ticket gets put onto head->waiters, it will only return after the
236 * needed reservation is satisfied.
237 *
238 * This function is structured so that it has a lock free fast path. This is
239 * necessary because every new transaction reservation will come through this
240 * path. Hence any lock will be globally hot if we take it unconditionally on
241 * every pass.
242 *
243 * As tickets are only ever moved on and off head->waiters under head->lock, we
244 * only need to take that lock if we are going to add the ticket to the queue
245 * and sleep. We can avoid taking the lock if the ticket was never added to
246 * head->waiters because the t_queue list head will be empty and we hold the
247 * only reference to it so it can safely be checked unlocked.
248 */
249 STATIC int
xlog_grant_head_check(struct xlog * log,struct xlog_grant_head * head,struct xlog_ticket * tic,int * need_bytes)250 xlog_grant_head_check(
251 struct xlog *log,
252 struct xlog_grant_head *head,
253 struct xlog_ticket *tic,
254 int *need_bytes)
255 {
256 int free_bytes;
257 int error = 0;
258
259 ASSERT(!xlog_in_recovery(log));
260
261 /*
262 * If there are other waiters on the queue then give them a chance at
263 * logspace before us. Wake up the first waiters, if we do not wake
264 * up all the waiters then go to sleep waiting for more free space,
265 * otherwise try to get some space for this transaction.
266 */
267 *need_bytes = xlog_ticket_reservation(log, head, tic);
268 free_bytes = xlog_grant_space_left(log, head);
269 if (!list_empty_careful(&head->waiters)) {
270 spin_lock(&head->lock);
271 if (!xlog_grant_head_wake(log, head, &free_bytes) ||
272 free_bytes < *need_bytes) {
273 error = xlog_grant_head_wait(log, head, tic,
274 *need_bytes);
275 }
276 spin_unlock(&head->lock);
277 } else if (free_bytes < *need_bytes) {
278 spin_lock(&head->lock);
279 error = xlog_grant_head_wait(log, head, tic, *need_bytes);
280 spin_unlock(&head->lock);
281 }
282
283 return error;
284 }
285
286 bool
xfs_log_writable(struct xfs_mount * mp)287 xfs_log_writable(
288 struct xfs_mount *mp)
289 {
290 /*
291 * Do not write to the log on norecovery mounts, if the data or log
292 * devices are read-only, or if the filesystem is shutdown. Read-only
293 * mounts allow internal writes for log recovery and unmount purposes,
294 * so don't restrict that case.
295 */
296 if (xfs_has_norecovery(mp))
297 return false;
298 if (xfs_readonly_buftarg(mp->m_ddev_targp))
299 return false;
300 if (xfs_readonly_buftarg(mp->m_log->l_targ))
301 return false;
302 if (xlog_is_shutdown(mp->m_log))
303 return false;
304 return true;
305 }
306
307 /*
308 * Replenish the byte reservation required by moving the grant write head.
309 */
310 int
xfs_log_regrant(struct xfs_mount * mp,struct xlog_ticket * tic)311 xfs_log_regrant(
312 struct xfs_mount *mp,
313 struct xlog_ticket *tic)
314 {
315 struct xlog *log = mp->m_log;
316 int need_bytes;
317 int error = 0;
318
319 if (xlog_is_shutdown(log))
320 return -EIO;
321
322 XFS_STATS_INC(mp, xs_try_logspace);
323
324 /*
325 * This is a new transaction on the ticket, so we need to change the
326 * transaction ID so that the next transaction has a different TID in
327 * the log. Just add one to the existing tid so that we can see chains
328 * of rolling transactions in the log easily.
329 */
330 tic->t_tid++;
331 tic->t_curr_res = tic->t_unit_res;
332 if (tic->t_cnt > 0)
333 return 0;
334
335 trace_xfs_log_regrant(log, tic);
336
337 error = xlog_grant_head_check(log, &log->l_write_head, tic,
338 &need_bytes);
339 if (error)
340 goto out_error;
341
342 xlog_grant_add_space(&log->l_write_head, need_bytes);
343 trace_xfs_log_regrant_exit(log, tic);
344 return 0;
345
346 out_error:
347 /*
348 * If we are failing, make sure the ticket doesn't have any current
349 * reservations. We don't want to add this back when the ticket/
350 * transaction gets cancelled.
351 */
352 tic->t_curr_res = 0;
353 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
354 return error;
355 }
356
357 /*
358 * Reserve log space and return a ticket corresponding to the reservation.
359 *
360 * Each reservation is going to reserve extra space for a log record header.
361 * When writes happen to the on-disk log, we don't subtract the length of the
362 * log record header from any reservation. By wasting space in each
363 * reservation, we prevent over allocation problems.
364 */
365 int
xfs_log_reserve(struct xfs_mount * mp,int unit_bytes,int cnt,struct xlog_ticket ** ticp,bool permanent)366 xfs_log_reserve(
367 struct xfs_mount *mp,
368 int unit_bytes,
369 int cnt,
370 struct xlog_ticket **ticp,
371 bool permanent)
372 {
373 struct xlog *log = mp->m_log;
374 struct xlog_ticket *tic;
375 int need_bytes;
376 int error = 0;
377
378 if (xlog_is_shutdown(log))
379 return -EIO;
380
381 XFS_STATS_INC(mp, xs_try_logspace);
382
383 ASSERT(*ticp == NULL);
384 tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
385 *ticp = tic;
386 trace_xfs_log_reserve(log, tic);
387 error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
388 &need_bytes);
389 if (error)
390 goto out_error;
391
392 xlog_grant_add_space(&log->l_reserve_head, need_bytes);
393 xlog_grant_add_space(&log->l_write_head, need_bytes);
394 trace_xfs_log_reserve_exit(log, tic);
395 return 0;
396
397 out_error:
398 /*
399 * If we are failing, make sure the ticket doesn't have any current
400 * reservations. We don't want to add this back when the ticket/
401 * transaction gets cancelled.
402 */
403 tic->t_curr_res = 0;
404 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
405 return error;
406 }
407
408 /*
409 * Run all the pending iclog callbacks and wake log force waiters and iclog
410 * space waiters so they can process the newly set shutdown state. We really
411 * don't care what order we process callbacks here because the log is shut down
412 * and so state cannot change on disk anymore. However, we cannot wake waiters
413 * until the callbacks have been processed because we may be in unmount and
414 * we must ensure that all AIL operations the callbacks perform have completed
415 * before we tear down the AIL.
416 *
417 * We avoid processing actively referenced iclogs so that we don't run callbacks
418 * while the iclog owner might still be preparing the iclog for IO submssion.
419 * These will be caught by xlog_state_iclog_release() and call this function
420 * again to process any callbacks that may have been added to that iclog.
421 */
422 static void
xlog_state_shutdown_callbacks(struct xlog * log)423 xlog_state_shutdown_callbacks(
424 struct xlog *log)
425 {
426 struct xlog_in_core *iclog;
427 LIST_HEAD(cb_list);
428
429 iclog = log->l_iclog;
430 do {
431 if (atomic_read(&iclog->ic_refcnt)) {
432 /* Reference holder will re-run iclog callbacks. */
433 continue;
434 }
435 list_splice_init(&iclog->ic_callbacks, &cb_list);
436 spin_unlock(&log->l_icloglock);
437
438 xlog_cil_process_committed(&cb_list);
439
440 spin_lock(&log->l_icloglock);
441 wake_up_all(&iclog->ic_write_wait);
442 wake_up_all(&iclog->ic_force_wait);
443 } while ((iclog = iclog->ic_next) != log->l_iclog);
444
445 wake_up_all(&log->l_flush_wait);
446 }
447
448 /*
449 * Flush iclog to disk if this is the last reference to the given iclog and the
450 * it is in the WANT_SYNC state.
451 *
452 * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
453 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
454 * written to stable storage, and implies that a commit record is contained
455 * within the iclog. We need to ensure that the log tail does not move beyond
456 * the tail that the first commit record in the iclog ordered against, otherwise
457 * correct recovery of that checkpoint becomes dependent on future operations
458 * performed on this iclog.
459 *
460 * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
461 * current tail into iclog. Once the iclog tail is set, future operations must
462 * not modify it, otherwise they potentially violate ordering constraints for
463 * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
464 * the iclog will get zeroed on activation of the iclog after sync, so we
465 * always capture the tail lsn on the iclog on the first NEED_FUA release
466 * regardless of the number of active reference counts on this iclog.
467 */
468 int
xlog_state_release_iclog(struct xlog * log,struct xlog_in_core * iclog,struct xlog_ticket * ticket)469 xlog_state_release_iclog(
470 struct xlog *log,
471 struct xlog_in_core *iclog,
472 struct xlog_ticket *ticket)
473 {
474 bool last_ref;
475
476 lockdep_assert_held(&log->l_icloglock);
477
478 trace_xlog_iclog_release(iclog, _RET_IP_);
479 /*
480 * Grabbing the current log tail needs to be atomic w.r.t. the writing
481 * of the tail LSN into the iclog so we guarantee that the log tail does
482 * not move between the first time we know that the iclog needs to be
483 * made stable and when we eventually submit it.
484 */
485 if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
486 (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
487 !iclog->ic_header->h_tail_lsn) {
488 iclog->ic_header->h_tail_lsn =
489 cpu_to_be64(atomic64_read(&log->l_tail_lsn));
490 }
491
492 last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
493
494 if (xlog_is_shutdown(log)) {
495 /*
496 * If there are no more references to this iclog, process the
497 * pending iclog callbacks that were waiting on the release of
498 * this iclog.
499 */
500 if (last_ref)
501 xlog_state_shutdown_callbacks(log);
502 return -EIO;
503 }
504
505 if (!last_ref)
506 return 0;
507
508 if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
509 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
510 return 0;
511 }
512
513 iclog->ic_state = XLOG_STATE_SYNCING;
514 xlog_verify_tail_lsn(log, iclog);
515 trace_xlog_iclog_syncing(iclog, _RET_IP_);
516
517 spin_unlock(&log->l_icloglock);
518 xlog_sync(log, iclog, ticket);
519 spin_lock(&log->l_icloglock);
520 return 0;
521 }
522
523 /*
524 * Mount a log filesystem
525 *
526 * mp - ubiquitous xfs mount point structure
527 * log_target - buftarg of on-disk log device
528 * blk_offset - Start block # where block size is 512 bytes (BBSIZE)
529 * num_bblocks - Number of BBSIZE blocks in on-disk log
530 *
531 * Return error or zero.
532 */
533 int
xfs_log_mount(xfs_mount_t * mp,struct xfs_buftarg * log_target,xfs_daddr_t blk_offset,int num_bblks)534 xfs_log_mount(
535 xfs_mount_t *mp,
536 struct xfs_buftarg *log_target,
537 xfs_daddr_t blk_offset,
538 int num_bblks)
539 {
540 struct xlog *log;
541 int error = 0;
542 int min_logfsbs;
543
544 if (!xfs_has_norecovery(mp)) {
545 xfs_notice(mp, "Mounting V%d Filesystem %pU",
546 XFS_SB_VERSION_NUM(&mp->m_sb),
547 &mp->m_sb.sb_uuid);
548 } else {
549 xfs_notice(mp,
550 "Mounting V%d filesystem %pU in no-recovery mode. Filesystem will be inconsistent.",
551 XFS_SB_VERSION_NUM(&mp->m_sb),
552 &mp->m_sb.sb_uuid);
553 ASSERT(xfs_is_readonly(mp));
554 }
555
556 log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
557 if (IS_ERR(log)) {
558 error = PTR_ERR(log);
559 goto out;
560 }
561 mp->m_log = log;
562
563 /*
564 * Now that we have set up the log and it's internal geometry
565 * parameters, we can validate the given log space and drop a critical
566 * message via syslog if the log size is too small. A log that is too
567 * small can lead to unexpected situations in transaction log space
568 * reservation stage. The superblock verifier has already validated all
569 * the other log geometry constraints, so we don't have to check those
570 * here.
571 *
572 * Note: For v4 filesystems, we can't just reject the mount if the
573 * validation fails. This would mean that people would have to
574 * downgrade their kernel just to remedy the situation as there is no
575 * way to grow the log (short of black magic surgery with xfs_db).
576 *
577 * We can, however, reject mounts for V5 format filesystems, as the
578 * mkfs binary being used to make the filesystem should never create a
579 * filesystem with a log that is too small.
580 */
581 min_logfsbs = xfs_log_calc_minimum_size(mp);
582 if (mp->m_sb.sb_logblocks < min_logfsbs) {
583 xfs_warn(mp,
584 "Log size %d blocks too small, minimum size is %d blocks",
585 mp->m_sb.sb_logblocks, min_logfsbs);
586
587 /*
588 * Log check errors are always fatal on v5; or whenever bad
589 * metadata leads to a crash.
590 */
591 if (xfs_has_crc(mp)) {
592 xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
593 ASSERT(0);
594 error = -EINVAL;
595 goto out_free_log;
596 }
597 xfs_crit(mp, "Log size out of supported range.");
598 xfs_crit(mp,
599 "Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
600 }
601
602 /*
603 * Initialize the AIL now we have a log.
604 */
605 error = xfs_trans_ail_init(mp);
606 if (error) {
607 xfs_warn(mp, "AIL initialisation failed: error %d", error);
608 goto out_free_log;
609 }
610 log->l_ailp = mp->m_ail;
611
612 /*
613 * skip log recovery on a norecovery mount. pretend it all
614 * just worked.
615 */
616 if (!xfs_has_norecovery(mp)) {
617 error = xlog_recover(log);
618 if (error) {
619 xfs_warn(mp, "log mount/recovery failed: error %d",
620 error);
621 xlog_recover_cancel(log);
622 goto out_destroy_ail;
623 }
624 }
625
626 error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
627 "log");
628 if (error)
629 goto out_destroy_ail;
630
631 /* Normal transactions can now occur */
632 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
633
634 /*
635 * Now the log has been fully initialised and we know were our
636 * space grant counters are, we can initialise the permanent ticket
637 * needed for delayed logging to work.
638 */
639 xlog_cil_init_post_recovery(log);
640
641 return 0;
642
643 out_destroy_ail:
644 xfs_trans_ail_destroy(mp);
645 out_free_log:
646 xlog_dealloc_log(log);
647 out:
648 return error;
649 }
650
651 /*
652 * Finish the recovery of the file system. This is separate from the
653 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
654 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
655 * here.
656 *
657 * If we finish recovery successfully, start the background log work. If we are
658 * not doing recovery, then we have a RO filesystem and we don't need to start
659 * it.
660 */
661 int
xfs_log_mount_finish(struct xfs_mount * mp)662 xfs_log_mount_finish(
663 struct xfs_mount *mp)
664 {
665 struct xlog *log = mp->m_log;
666 int error = 0;
667
668 if (xfs_has_norecovery(mp)) {
669 ASSERT(xfs_is_readonly(mp));
670 return 0;
671 }
672
673 /*
674 * During the second phase of log recovery, we need iget and
675 * iput to behave like they do for an active filesystem.
676 * xfs_fs_drop_inode needs to be able to prevent the deletion
677 * of inodes before we're done replaying log items on those
678 * inodes. Turn it off immediately after recovery finishes
679 * so that we don't leak the quota inodes if subsequent mount
680 * activities fail.
681 *
682 * We let all inodes involved in redo item processing end up on
683 * the LRU instead of being evicted immediately so that if we do
684 * something to an unlinked inode, the irele won't cause
685 * premature truncation and freeing of the inode, which results
686 * in log recovery failure. We have to evict the unreferenced
687 * lru inodes after clearing SB_ACTIVE because we don't
688 * otherwise clean up the lru if there's a subsequent failure in
689 * xfs_mountfs, which leads to us leaking the inodes if nothing
690 * else (e.g. quotacheck) references the inodes before the
691 * mount failure occurs.
692 */
693 mp->m_super->s_flags |= SB_ACTIVE;
694 xfs_log_work_queue(mp);
695 if (xlog_recovery_needed(log))
696 error = xlog_recover_finish(log);
697 mp->m_super->s_flags &= ~SB_ACTIVE;
698 evict_inodes(mp->m_super);
699
700 /*
701 * Drain the buffer LRU after log recovery. This is required for v4
702 * filesystems to avoid leaving around buffers with NULL verifier ops,
703 * but we do it unconditionally to make sure we're always in a clean
704 * cache state after mount.
705 *
706 * Don't push in the error case because the AIL may have pending intents
707 * that aren't removed until recovery is cancelled.
708 */
709 if (xlog_recovery_needed(log)) {
710 if (!error) {
711 xfs_log_force(mp, XFS_LOG_SYNC);
712 xfs_ail_push_all_sync(mp->m_ail);
713 }
714 xfs_notice(mp, "Ending recovery (logdev: %s)",
715 mp->m_logname ? mp->m_logname : "internal");
716 } else {
717 xfs_info(mp, "Ending clean mount");
718 }
719 xfs_buftarg_drain(mp->m_ddev_targp);
720
721 clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
722
723 /* Make sure the log is dead if we're returning failure. */
724 ASSERT(!error || xlog_is_shutdown(log));
725
726 return error;
727 }
728
729 /*
730 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
731 * the log.
732 */
733 void
xfs_log_mount_cancel(struct xfs_mount * mp)734 xfs_log_mount_cancel(
735 struct xfs_mount *mp)
736 {
737 xlog_recover_cancel(mp->m_log);
738 xfs_log_unmount(mp);
739 }
740
741 /*
742 * Flush out the iclog to disk ensuring that device caches are flushed and
743 * the iclog hits stable storage before any completion waiters are woken.
744 */
745 static inline int
xlog_force_iclog(struct xlog_in_core * iclog)746 xlog_force_iclog(
747 struct xlog_in_core *iclog)
748 {
749 atomic_inc(&iclog->ic_refcnt);
750 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
751 if (iclog->ic_state == XLOG_STATE_ACTIVE)
752 xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
753 return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
754 }
755
756 /*
757 * Cycle all the iclogbuf locks to make sure all log IO completion
758 * is done before we tear down these buffers.
759 */
760 static void
xlog_wait_iclog_completion(struct xlog * log)761 xlog_wait_iclog_completion(struct xlog *log)
762 {
763 int i;
764 struct xlog_in_core *iclog = log->l_iclog;
765
766 for (i = 0; i < log->l_iclog_bufs; i++) {
767 down(&iclog->ic_sema);
768 up(&iclog->ic_sema);
769 iclog = iclog->ic_next;
770 }
771 }
772
773 /*
774 * Wait for the iclog and all prior iclogs to be written disk as required by the
775 * log force state machine. Waiting on ic_force_wait ensures iclog completions
776 * have been ordered and callbacks run before we are woken here, hence
777 * guaranteeing that all the iclogs up to this one are on stable storage.
778 */
779 int
xlog_wait_on_iclog(struct xlog_in_core * iclog)780 xlog_wait_on_iclog(
781 struct xlog_in_core *iclog)
782 __releases(iclog->ic_log->l_icloglock)
783 {
784 struct xlog *log = iclog->ic_log;
785
786 trace_xlog_iclog_wait_on(iclog, _RET_IP_);
787 if (!xlog_is_shutdown(log) &&
788 iclog->ic_state != XLOG_STATE_ACTIVE &&
789 iclog->ic_state != XLOG_STATE_DIRTY) {
790 XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
791 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
792 } else {
793 spin_unlock(&log->l_icloglock);
794 }
795
796 if (xlog_is_shutdown(log))
797 return -EIO;
798 return 0;
799 }
800
801 int
xlog_write_one_vec(struct xlog * log,struct xfs_cil_ctx * ctx,struct xfs_log_iovec * reg,struct xlog_ticket * ticket)802 xlog_write_one_vec(
803 struct xlog *log,
804 struct xfs_cil_ctx *ctx,
805 struct xfs_log_iovec *reg,
806 struct xlog_ticket *ticket)
807 {
808 struct xfs_log_vec lv = {
809 .lv_niovecs = 1,
810 .lv_iovecp = reg,
811 .lv_bytes = reg->i_len,
812 };
813 LIST_HEAD (lv_chain);
814
815 /* account for space used by record data */
816 ticket->t_curr_res -= lv.lv_bytes;
817
818 list_add(&lv.lv_list, &lv_chain);
819 return xlog_write(log, ctx, &lv_chain, ticket, lv.lv_bytes);
820 }
821
822 /*
823 * Write out an unmount record using the ticket provided. We have to account for
824 * the data space used in the unmount ticket as this write is not done from a
825 * transaction context that has already done the accounting for us.
826 */
827 static int
xlog_write_unmount_record(struct xlog * log,struct xlog_ticket * ticket)828 xlog_write_unmount_record(
829 struct xlog *log,
830 struct xlog_ticket *ticket)
831 {
832 struct {
833 struct xlog_op_header ophdr;
834 struct xfs_unmount_log_format ulf;
835 } unmount_rec = {
836 .ophdr = {
837 .oh_clientid = XFS_LOG,
838 .oh_tid = cpu_to_be32(ticket->t_tid),
839 .oh_flags = XLOG_UNMOUNT_TRANS,
840 },
841 .ulf = {
842 .magic = XLOG_UNMOUNT_TYPE,
843 },
844 };
845 struct xfs_log_iovec reg = {
846 .i_addr = &unmount_rec,
847 .i_len = sizeof(unmount_rec),
848 .i_type = XLOG_REG_TYPE_UNMOUNT,
849 };
850
851 return xlog_write_one_vec(log, NULL, ®, ticket);
852 }
853
854 /*
855 * Mark the filesystem clean by writing an unmount record to the head of the
856 * log.
857 */
858 static void
xlog_unmount_write(struct xlog * log)859 xlog_unmount_write(
860 struct xlog *log)
861 {
862 struct xfs_mount *mp = log->l_mp;
863 struct xlog_in_core *iclog;
864 struct xlog_ticket *tic = NULL;
865 int error;
866
867 error = xfs_log_reserve(mp, 600, 1, &tic, 0);
868 if (error)
869 goto out_err;
870
871 error = xlog_write_unmount_record(log, tic);
872 /*
873 * At this point, we're umounting anyway, so there's no point in
874 * transitioning log state to shutdown. Just continue...
875 */
876 out_err:
877 if (error)
878 xfs_alert(mp, "%s: unmount record failed", __func__);
879
880 spin_lock(&log->l_icloglock);
881 iclog = log->l_iclog;
882 error = xlog_force_iclog(iclog);
883 xlog_wait_on_iclog(iclog);
884
885 if (tic) {
886 trace_xfs_log_umount_write(log, tic);
887 xfs_log_ticket_ungrant(log, tic);
888 }
889 }
890
891 static void
xfs_log_unmount_verify_iclog(struct xlog * log)892 xfs_log_unmount_verify_iclog(
893 struct xlog *log)
894 {
895 struct xlog_in_core *iclog = log->l_iclog;
896
897 do {
898 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
899 ASSERT(iclog->ic_offset == 0);
900 } while ((iclog = iclog->ic_next) != log->l_iclog);
901 }
902
903 /*
904 * Unmount record used to have a string "Unmount filesystem--" in the
905 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
906 * We just write the magic number now since that particular field isn't
907 * currently architecture converted and "Unmount" is a bit foo.
908 * As far as I know, there weren't any dependencies on the old behaviour.
909 */
910 static void
xfs_log_unmount_write(struct xfs_mount * mp)911 xfs_log_unmount_write(
912 struct xfs_mount *mp)
913 {
914 struct xlog *log = mp->m_log;
915
916 if (!xfs_log_writable(mp))
917 return;
918
919 xfs_log_force(mp, XFS_LOG_SYNC);
920
921 if (xlog_is_shutdown(log))
922 return;
923
924 /*
925 * If we think the summary counters are bad, avoid writing the unmount
926 * record to force log recovery at next mount, after which the summary
927 * counters will be recalculated. Refer to xlog_check_unmount_rec for
928 * more details.
929 */
930 if (xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS) ||
931 XFS_TEST_ERROR(mp, XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
932 xfs_alert(mp, "%s: will fix summary counters at next mount",
933 __func__);
934 return;
935 }
936
937 xfs_log_unmount_verify_iclog(log);
938 xlog_unmount_write(log);
939 }
940
941 /*
942 * Empty the log for unmount/freeze.
943 *
944 * To do this, we first need to shut down the background log work so it is not
945 * trying to cover the log as we clean up. We then need to unpin all objects in
946 * the log so we can then flush them out. Once they have completed their IO and
947 * run the callbacks removing themselves from the AIL, we can cover the log.
948 */
949 int
xfs_log_quiesce(struct xfs_mount * mp)950 xfs_log_quiesce(
951 struct xfs_mount *mp)
952 {
953 /*
954 * Clear log incompat features since we're quiescing the log. Report
955 * failures, though it's not fatal to have a higher log feature
956 * protection level than the log contents actually require.
957 */
958 if (xfs_clear_incompat_log_features(mp)) {
959 int error;
960
961 error = xfs_sync_sb(mp, false);
962 if (error)
963 xfs_warn(mp,
964 "Failed to clear log incompat features on quiesce");
965 }
966
967 cancel_delayed_work_sync(&mp->m_log->l_work);
968 xfs_log_force(mp, XFS_LOG_SYNC);
969
970 /*
971 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
972 * will push it, xfs_buftarg_wait() will not wait for it. Further,
973 * xfs_buf_iowait() cannot be used because it was pushed with the
974 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
975 * the IO to complete.
976 */
977 xfs_ail_push_all_sync(mp->m_ail);
978 xfs_buftarg_wait(mp->m_ddev_targp);
979 xfs_buf_lock(mp->m_sb_bp);
980 xfs_buf_unlock(mp->m_sb_bp);
981
982 return xfs_log_cover(mp);
983 }
984
/* Quiesce the log and then mark it clean with an unmount record. */
void
xfs_log_clean(
	struct xfs_mount	*mp)
{
	xfs_log_quiesce(mp);
	xfs_log_unmount_write(mp);
}
992
993 /*
994 * Shut down and release the AIL and Log.
995 *
996 * During unmount, we need to ensure we flush all the dirty metadata objects
997 * from the AIL so that the log is empty before we write the unmount record to
998 * the log. Once this is done, we can tear down the AIL and the log.
999 */
void
xfs_log_unmount(
	struct xfs_mount	*mp)
{
	/* Empty the log and write the unmount record first. */
	xfs_log_clean(mp);

	/*
	 * If shutdown has come from iclog IO context, the log
	 * cleaning will have been skipped and so we need to wait
	 * for the iclog to complete shutdown processing before we
	 * tear anything down.
	 */
	xlog_wait_iclog_completion(mp->m_log);

	/* Drain remaining buffer I/O before destroying the AIL. */
	xfs_buftarg_drain(mp->m_ddev_targp);

	xfs_trans_ail_destroy(mp);

	/* Remove the sysfs presence before freeing the log structure. */
	xfs_sysfs_del(&mp->m_log->l_kobj);

	xlog_dealloc_log(mp->m_log);
}
1022
1023 void
xfs_log_item_init(struct xfs_mount * mp,struct xfs_log_item * item,int type,const struct xfs_item_ops * ops)1024 xfs_log_item_init(
1025 struct xfs_mount *mp,
1026 struct xfs_log_item *item,
1027 int type,
1028 const struct xfs_item_ops *ops)
1029 {
1030 item->li_log = mp->m_log;
1031 item->li_ailp = mp->m_ail;
1032 item->li_type = type;
1033 item->li_ops = ops;
1034 item->li_lv = NULL;
1035
1036 INIT_LIST_HEAD(&item->li_ail);
1037 INIT_LIST_HEAD(&item->li_cil);
1038 INIT_LIST_HEAD(&item->li_bio_list);
1039 INIT_LIST_HEAD(&item->li_trans);
1040 }
1041
/*
 * Wake up processes waiting for log space after we have moved the log tail.
 */
void
xfs_log_space_wake(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	int			free_bytes;

	/* A shutdown log cannot grant space, so there is nothing to wake. */
	if (xlog_is_shutdown(log))
		return;

	/*
	 * list_empty_careful() is a cheap unlocked peek so we avoid taking
	 * the grant head lock when nobody is waiting.
	 */
	if (!list_empty_careful(&log->l_write_head.waiters)) {
		ASSERT(!xlog_in_recovery(log));

		spin_lock(&log->l_write_head.lock);
		free_bytes = xlog_grant_space_left(log, &log->l_write_head);
		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
		spin_unlock(&log->l_write_head.lock);
	}

	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
		ASSERT(!xlog_in_recovery(log));

		spin_lock(&log->l_reserve_head.lock);
		free_bytes = xlog_grant_space_left(log, &log->l_reserve_head);
		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
		spin_unlock(&log->l_reserve_head.lock);
	}
}
1073
1074 /*
1075 * Determine if we have a transaction that has gone to disk that needs to be
1076 * covered. To begin the transition to the idle state firstly the log needs to
 * be idle. That means the CIL, the AIL and the iclogs need to be empty before
1078 * we start attempting to cover the log.
1079 *
1080 * Only if we are then in a state where covering is needed, the caller is
1081 * informed that dummy transactions are required to move the log into the idle
1082 * state.
1083 *
 * If there are any items in the AIL or CIL, then we do not want to attempt to
1085 * cover the log as we may be in a situation where there isn't log space
1086 * available to run a dummy transaction and this can lead to deadlocks when the
1087 * tail of the log is pinned by an item that is modified in the CIL. Hence
1088 * there's no point in running a dummy transaction at this point because we
1089 * can't start trying to idle the log until both the CIL and AIL are empty.
1090 */
/*
 * Check whether the log needs a dummy transaction to cover it.  Note that
 * this is not idempotent: when covering is needed it advances the covering
 * state machine (COVER_NEED -> COVER_DONE, COVER_NEED2 -> COVER_DONE2) as
 * a side effect, so callers must use a single result per covering attempt.
 */
static bool
xfs_log_need_covered(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	bool			needed = false;

	/* The CIL still holds pending changes; cannot start covering. */
	if (!xlog_cil_empty(log))
		return false;

	spin_lock(&log->l_icloglock);
	switch (log->l_covered_state) {
	case XLOG_STATE_COVER_DONE:
	case XLOG_STATE_COVER_DONE2:
	case XLOG_STATE_COVER_IDLE:
		break;
	case XLOG_STATE_COVER_NEED:
	case XLOG_STATE_COVER_NEED2:
		/* The AIL and the iclogs must also be empty to progress. */
		if (xfs_ail_min_lsn(log->l_ailp))
			break;
		if (!xlog_iclogs_empty(log))
			break;

		needed = true;
		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
			log->l_covered_state = XLOG_STATE_COVER_DONE;
		else
			log->l_covered_state = XLOG_STATE_COVER_DONE2;
		break;
	default:
		needed = true;
		break;
	}
	spin_unlock(&log->l_icloglock);
	return needed;
}
1127
/*
 * Explicitly cover the log. This is similar to background log covering but
 * intended for usage in quiesce codepaths. The caller is responsible to ensure
 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
 * must all be empty.
 *
 * Returns 0 on success or a negative errno from the superblock sync.
 */
static int
xfs_log_cover(
	struct xfs_mount	*mp)
{
	int			error = 0;
	bool			need_covered;

	/*
	 * Only assert the idle preconditions on a live log; after a shutdown
	 * they may legitimately not hold.
	 */
	if (!xlog_is_shutdown(mp->m_log)) {
		ASSERT(xlog_cil_empty(mp->m_log));
		ASSERT(xlog_iclogs_empty(mp->m_log));
		ASSERT(!xfs_ail_min_lsn(mp->m_log->l_ailp));
	}

	if (!xfs_log_writable(mp))
		return 0;

	/*
	 * xfs_log_need_covered() is not idempotent because it progresses the
	 * state machine if the log requires covering. Therefore, we must call
	 * this function once and use the result until we've issued an sb sync.
	 * Do so first to make that abundantly clear.
	 *
	 * Fall into the covering sequence if the log needs covering or the
	 * mount has lazy superblock accounting to sync to disk. The sb sync
	 * used for covering accumulates the in-core counters, so covering
	 * handles this for us.
	 */
	need_covered = xfs_log_need_covered(mp);
	if (!need_covered && !xfs_has_lazysbcount(mp))
		return 0;

	/*
	 * To cover the log, commit the superblock twice (at most) in
	 * independent checkpoints. The first serves as a reference for the
	 * tail pointer. The sync transaction and AIL push empties the AIL and
	 * updates the in-core tail to the LSN of the first checkpoint. The
	 * second commit updates the on-disk tail with the in-core LSN,
	 * covering the log. Push the AIL one more time to leave it empty, as
	 * we found it.
	 */
	do {
		error = xfs_sync_sb(mp, true);
		if (error)
			break;
		xfs_ail_push_all_sync(mp->m_ail);
	} while (xfs_log_need_covered(mp));

	return error;
}
1183
/*
 * Log write I/O completion processing, run from the per-log I/O end
 * workqueue (queued by xlog_bio_end_io()).  Checks the bio status, shuts
 * the filesystem down on error, and runs the iclog state machine.
 */
static void
xlog_ioend_work(
	struct work_struct	*work)
{
	struct xlog_in_core	*iclog =
		container_of(work, struct xlog_in_core, ic_end_io_work);
	struct xlog		*log = iclog->ic_log;
	int			error;

	error = blk_status_to_errno(iclog->ic_bio.bi_status);
#ifdef DEBUG
	/* treat writes with injected CRC errors as failed */
	if (iclog->ic_fail_crc)
		error = -EIO;
#endif

	/*
	 * Race to shutdown the filesystem if we see an error.
	 */
	if (error || XFS_TEST_ERROR(log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
		xfs_alert(log->l_mp, "log I/O error %d", error);
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	}

	xlog_state_done_syncing(iclog);
	bio_uninit(&iclog->ic_bio);

	/*
	 * Drop the lock to signal that we are done. Nothing references the
	 * iclog after this, so an unmount waiting on this lock can now tear it
	 * down safely. As such, it is unsafe to reference the iclog after the
	 * unlock as we could race with it being freed.
	 */
	up(&iclog->ic_sema);
}
1219
1220 /*
1221 * Return size of each in-core log record buffer.
1222 *
1223 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
1224 *
1225 * If the filesystem blocksize is too large, we may need to choose a
1226 * larger size since the directory code currently logs entire blocks.
1227 */
1228 STATIC void
xlog_get_iclog_buffer_size(struct xfs_mount * mp,struct xlog * log)1229 xlog_get_iclog_buffer_size(
1230 struct xfs_mount *mp,
1231 struct xlog *log)
1232 {
1233 if (mp->m_logbufs <= 0)
1234 mp->m_logbufs = XLOG_MAX_ICLOGS;
1235 if (mp->m_logbsize <= 0)
1236 mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;
1237
1238 log->l_iclog_bufs = mp->m_logbufs;
1239 log->l_iclog_size = mp->m_logbsize;
1240
1241 /*
1242 * Combined size of the log record headers. The first 32k cycles
1243 * are stored directly in the xlog_rec_header, the rest in the
1244 * variable number of xlog_rec_ext_headers at its end.
1245 */
1246 log->l_iclog_hsize = struct_size(log->l_iclog->ic_header, h_ext,
1247 DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE) - 1);
1248 }
1249
/*
 * (Re)arm the periodic log worker.  xfs_syncd_centisecs is in centiseconds,
 * hence the multiply by 10 to get milliseconds for msecs_to_jiffies().
 */
void
xfs_log_work_queue(
	struct xfs_mount	*mp)
{
	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}
1257
1258 /*
1259 * Clear the log incompat flags if we have the opportunity.
1260 *
1261 * This only happens if we're about to log the second dummy transaction as part
1262 * of covering the log.
1263 */
1264 static inline void
xlog_clear_incompat(struct xlog * log)1265 xlog_clear_incompat(
1266 struct xlog *log)
1267 {
1268 struct xfs_mount *mp = log->l_mp;
1269
1270 if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
1271 XFS_SB_FEAT_INCOMPAT_LOG_ALL))
1272 return;
1273
1274 if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
1275 return;
1276
1277 xfs_clear_incompat_log_features(mp);
1278 }
1279
/*
 * Every sync period we need to unpin all items in the AIL and push them to
 * disk. If there is nothing dirty, then we might need to cover the log to
 * indicate that the filesystem is idle.
 */
static void
xfs_log_worker(
	struct work_struct	*work)
{
	struct xlog		*log = container_of(to_delayed_work(work),
						struct xlog, l_work);
	struct xfs_mount	*mp = log->l_mp;

	/* dgc: errors ignored - not fatal and nowhere to report them */
	if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
		/*
		 * Dump a transaction into the log that contains no real change.
		 * This is needed to stamp the current tail LSN into the log
		 * during the covering operation.
		 *
		 * We cannot use an inode here for this - that will push dirty
		 * state back up into the VFS and then periodic inode flushing
		 * will prevent log covering from making progress. Hence we
		 * synchronously log the superblock instead to ensure the
		 * superblock is immediately unpinned and can be written back.
		 */
		xlog_clear_incompat(log);
		xfs_sync_sb(mp, true);
	} else
		xfs_log_force(mp, 0);

	/* start pushing all the metadata that is currently dirty */
	xfs_ail_push_all(mp->m_ail);

	/* queue us up again */
	xfs_log_work_queue(mp);
}
1317
/*
 * This routine initializes some of the log structure for a given mount point.
 * Its primary purpose is to fill in enough, so recovery can occur. However,
 * some other stuff may be filled in too.
 *
 * Returns the new log on success, or an ERR_PTR() encoded negative errno
 * on failure.  The caller owns the returned log and must tear it down with
 * xlog_dealloc_log().
 */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks)
{
	struct xlog		*log;
	struct xlog_in_core	**iclogp;
	struct xlog_in_core	*iclog, *prev_iclog = NULL;
	int			i;
	int			error = -ENOMEM;
	uint			log2_size = 0;

	log = kzalloc_obj(struct xlog, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!log) {
		xfs_warn(mp, "Log allocation failed: No memory!");
		goto out;
	}

	log->l_mp = mp;
	log->l_targ = log_target;
	log->l_logsize = BBTOB(num_bblks);
	log->l_logBBstart = blk_offset;
	log->l_logBBsize = num_bblks;
	log->l_covered_state = XLOG_STATE_COVER_IDLE;
	/* Recovery must complete before the log is considered usable. */
	set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
	INIT_LIST_HEAD(&log->r_dfops);

	log->l_prev_block = -1;
	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
	log->l_curr_cycle = 1;	    /* 0 is bad since this is initial value */

	/* Round iclog writes to the log stripe unit where one is configured. */
	if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
		log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
	else
		log->l_iclog_roundoff = BBSIZE;

	xlog_grant_head_init(&log->l_reserve_head);
	xlog_grant_head_init(&log->l_write_head);

	/* Validate the on-disk log sector size against the mount geometry. */
	error = -EFSCORRUPTED;
	if (xfs_has_sector(mp)) {
		log2_size = mp->m_sb.sb_logsectlog;
		if (log2_size < BBSHIFT) {
			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
				log2_size, BBSHIFT);
			goto out_free_log;
		}

		log2_size -= BBSHIFT;
		if (log2_size > mp->m_sectbb_log) {
			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
				log2_size, mp->m_sectbb_log);
			goto out_free_log;
		}

		/* for larger sector sizes, must have v2 or external log */
		if (log2_size && log->l_logBBstart > 0 &&
			    !xfs_has_logv2(mp)) {
			xfs_warn(mp,
		"log sector size (0x%x) invalid for configuration.",
				log2_size);
			goto out_free_log;
		}
	}
	log->l_sectBBsize = 1 << log2_size;

	xlog_get_iclog_buffer_size(mp, log);

	spin_lock_init(&log->l_icloglock);
	init_waitqueue_head(&log->l_flush_wait);

	/* Allocate the in-core logs and link them into a circular ring. */
	iclogp = &log->l_iclog;
	ASSERT(log->l_iclog_size >= 4096);
	for (i = 0; i < log->l_iclog_bufs; i++) {
		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
				sizeof(struct bio_vec);

		iclog = kzalloc(sizeof(*iclog) + bvec_size,
				GFP_KERNEL | __GFP_RETRY_MAYFAIL);
		if (!iclog)
			goto out_free_iclog;

		*iclogp = iclog;
		iclog->ic_prev = prev_iclog;
		prev_iclog = iclog;

		iclog->ic_header = kvzalloc(log->l_iclog_size,
				GFP_KERNEL | __GFP_RETRY_MAYFAIL);
		if (!iclog->ic_header)
			goto out_free_iclog;
		iclog->ic_header->h_magicno =
				cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
		iclog->ic_header->h_version = cpu_to_be32(
				xfs_has_logv2(log->l_mp) ? 2 : 1);
		iclog->ic_header->h_size = cpu_to_be32(log->l_iclog_size);
		iclog->ic_header->h_fmt = cpu_to_be32(XLOG_FMT);
		memcpy(&iclog->ic_header->h_fs_uuid, &mp->m_sb.sb_uuid,
				sizeof(iclog->ic_header->h_fs_uuid));

		/* Data area starts right after the (possibly extended) header. */
		iclog->ic_datap = (void *)iclog->ic_header + log->l_iclog_hsize;
		iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
		iclog->ic_state = XLOG_STATE_ACTIVE;
		iclog->ic_log = log;
		atomic_set(&iclog->ic_refcnt, 0);
		INIT_LIST_HEAD(&iclog->ic_callbacks);

		init_waitqueue_head(&iclog->ic_force_wait);
		init_waitqueue_head(&iclog->ic_write_wait);
		INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
		sema_init(&iclog->ic_sema, 1);

		iclogp = &iclog->ic_next;
	}
	*iclogp = log->l_iclog;			/* complete ring */
	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */

	log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU),
			0, mp->m_super->s_id);
	if (!log->l_ioend_workqueue)
		goto out_free_iclog;

	error = xlog_cil_init(log);
	if (error)
		goto out_destroy_workqueue;
	return log;

out_destroy_workqueue:
	destroy_workqueue(log->l_ioend_workqueue);
out_free_iclog:
	/* Unwind however much of the iclog ring was built. */
	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
		prev_iclog = iclog->ic_next;
		kvfree(iclog->ic_header);
		kfree(iclog);
		if (prev_iclog == log->l_iclog)
			break;
	}
out_free_log:
	kfree(log);
out:
	return ERR_PTR(error);
}	/* xlog_alloc_log */
1469
1470 /*
1471 * Stamp cycle number in every block
1472 */
1473 STATIC void
xlog_pack_data(struct xlog * log,struct xlog_in_core * iclog,int roundoff)1474 xlog_pack_data(
1475 struct xlog *log,
1476 struct xlog_in_core *iclog,
1477 int roundoff)
1478 {
1479 struct xlog_rec_header *rhead = iclog->ic_header;
1480 __be32 cycle_lsn = CYCLE_LSN_DISK(rhead->h_lsn);
1481 char *dp = iclog->ic_datap;
1482 int i;
1483
1484 for (i = 0; i < BTOBB(iclog->ic_offset + roundoff); i++) {
1485 *xlog_cycle_data(rhead, i) = *(__be32 *)dp;
1486 *(__be32 *)dp = cycle_lsn;
1487 dp += BBSIZE;
1488 }
1489
1490 for (i = 0; i < (log->l_iclog_hsize >> BBSHIFT) - 1; i++)
1491 rhead->h_ext[i].xh_cycle = cycle_lsn;
1492 }
1493
1494 /*
1495 * Calculate the checksum for a log buffer.
1496 *
1497 * This is a little more complicated than it should be because the various
1498 * headers and the actual data are non-contiguous.
1499 */
1500 __le32
xlog_cksum(struct xlog * log,struct xlog_rec_header * rhead,char * dp,unsigned int hdrsize,unsigned int size)1501 xlog_cksum(
1502 struct xlog *log,
1503 struct xlog_rec_header *rhead,
1504 char *dp,
1505 unsigned int hdrsize,
1506 unsigned int size)
1507 {
1508 uint32_t crc;
1509
1510 /* first generate the crc for the record header ... */
1511 crc = xfs_start_cksum_update((char *)rhead, hdrsize,
1512 offsetof(struct xlog_rec_header, h_crc));
1513
1514 /* ... then for additional cycle data for v2 logs ... */
1515 if (xfs_has_logv2(log->l_mp)) {
1516 int xheads, i;
1517
1518 xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE) - 1;
1519 for (i = 0; i < xheads; i++)
1520 crc = crc32c(crc, &rhead->h_ext[i], XLOG_REC_EXT_SIZE);
1521 }
1522
1523 /* ... and finally for the payload */
1524 crc = crc32c(crc, dp, size);
1525
1526 return xfs_end_cksum(crc);
1527 }
1528
/*
 * Bio completion handler for iclog writes.  Punt the real completion
 * processing to the per-log workqueue; xlog_ioend_work() does the work.
 */
static void
xlog_bio_end_io(
	struct bio		*bio)
{
	struct xlog_in_core	*iclog = bio->bi_private;

	queue_work(iclog->ic_log->l_ioend_workqueue,
		   &iclog->ic_end_io_work);
}
1538
/*
 * Submit the bio(s) for an iclog write at log block @bno for @count bytes.
 * On shutdown or submission failure we run the state machine and release
 * the iclog manually, mirroring the I/O completion path.
 */
STATIC void
xlog_write_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	uint64_t		bno,
	unsigned int		count)
{
	ASSERT(bno < log->l_logBBsize);
	trace_xlog_iclog_write(iclog, _RET_IP_);

	/*
	 * We lock the iclogbufs here so that we can serialise against I/O
	 * completion during unmount. We might be processing a shutdown
	 * triggered during unmount, and that can occur asynchronously to the
	 * unmount thread, and hence we need to ensure that completes before
	 * tearing down the iclogbufs. Hence we need to hold the buffer lock
	 * across the log IO to achieve that.
	 */
	down(&iclog->ic_sema);
	if (xlog_is_shutdown(log)) {
		/*
		 * It would seem logical to return EIO here, but we rely on
		 * the log state machine to propagate I/O errors instead of
		 * doing it here. We kick off the state machine and unlock
		 * the buffer manually, the code needs to be kept in sync
		 * with the I/O completion path.
		 */
		goto sync;
	}

	/*
	 * We use REQ_SYNC | REQ_IDLE here to tell the block layer there are
	 * more IOs coming immediately after this one. This prevents the block
	 * layer writeback throttle from throttling log writes behind background
	 * metadata writeback and causing priority inversions.
	 */
	bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
		 howmany(count, PAGE_SIZE),
		 REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
	iclog->ic_bio.bi_private = iclog;

	if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
		/*
		 * For external log devices, we also need to flush the data
		 * device cache first to ensure all metadata writeback covered
		 * by the LSN in this iclog is on stable storage. This is slow,
		 * but it *must* complete before we issue the external log IO.
		 *
		 * If the flush fails, we cannot conclude that past metadata
		 * writeback from the log succeeded. Repeating the flush is
		 * not possible, hence we must shut down with log IO error to
		 * avoid shutdown re-entering this path and erroring out again.
		 */
		if (log->l_targ != log->l_mp->m_ddev_targp &&
		    blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev))
			goto shutdown;
	}
	if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
		iclog->ic_bio.bi_opf |= REQ_FUA;

	/* Flags are consumed by this submission; clear them for reuse. */
	iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);

	if (is_vmalloc_addr(iclog->ic_header)) {
		if (!bio_add_vmalloc(&iclog->ic_bio, iclog->ic_header, count))
			goto shutdown;
	} else {
		bio_add_virt_nofail(&iclog->ic_bio, iclog->ic_header, count);
	}

	/*
	 * If this log buffer would straddle the end of the log we will have
	 * to split it up into two bios, so that we can continue at the start.
	 */
	if (bno + BTOBB(count) > log->l_logBBsize) {
		struct bio *split;

		split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
				  GFP_NOIO, &fs_bio_set);
		bio_chain(split, &iclog->ic_bio);
		submit_bio(split);

		/* restart at logical offset zero for the remainder */
		iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
	}

	submit_bio(&iclog->ic_bio);
	return;
shutdown:
	xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
sync:
	xlog_state_done_syncing(iclog);
	up(&iclog->ic_sema);
}
1635
1636 /*
1637 * We need to bump cycle number for the part of the iclog that is
1638 * written to the start of the log. Watch out for the header magic
1639 * number case, though.
1640 */
1641 static void
xlog_split_iclog(struct xlog * log,void * data,uint64_t bno,unsigned int count)1642 xlog_split_iclog(
1643 struct xlog *log,
1644 void *data,
1645 uint64_t bno,
1646 unsigned int count)
1647 {
1648 unsigned int split_offset = BBTOB(log->l_logBBsize - bno);
1649 unsigned int i;
1650
1651 for (i = split_offset; i < count; i += BBSIZE) {
1652 uint32_t cycle = get_unaligned_be32(data + i);
1653
1654 if (++cycle == XLOG_HEADER_MAGIC_NUM)
1655 cycle++;
1656 put_unaligned_be32(cycle, data + i);
1657 }
1658 }
1659
1660 static int
xlog_calc_iclog_size(struct xlog * log,struct xlog_in_core * iclog,uint32_t * roundoff)1661 xlog_calc_iclog_size(
1662 struct xlog *log,
1663 struct xlog_in_core *iclog,
1664 uint32_t *roundoff)
1665 {
1666 uint32_t count_init, count;
1667
1668 /* Add for LR header */
1669 count_init = log->l_iclog_hsize + iclog->ic_offset;
1670 count = roundup(count_init, log->l_iclog_roundoff);
1671
1672 *roundoff = count - count_init;
1673
1674 ASSERT(count >= count_init);
1675 ASSERT(*roundoff < log->l_iclog_roundoff);
1676 return count;
1677 }
1678
/*
 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
 * fashion. Previously, we should have moved the current iclog
 * ptr in the log to point to the next available iclog. This allows further
 * write to continue while this code syncs out an iclog ready to go.
 * Before an in-core log can be written out, the data section must be scanned
 * to save away the 1st word of each BBSIZE block into the header. We replace
 * it with the current cycle count. Each BBSIZE block is tagged with the
 * cycle count because there is an implicit assumption that drives will
 * guarantee that entire 512 byte blocks get written at once. In other words,
 * we can't have part of a 512 byte block written and part not written. By
 * tagging each block, we will know which blocks are valid when recovering
 * after an unclean shutdown.
 *
 * This routine is single threaded on the iclog. No other thread can be in
 * this routine with the same iclog. Changing contents of iclog can there-
 * fore be done without grabbing the state machine lock. Updating the global
 * log will require grabbing the lock though.
 *
 * The entire log manager uses a logical block numbering scheme. Only
 * xlog_write_iclog knows about the fact that the log may not start with
 * block zero on a given device.
 */
STATIC void
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	struct xlog_ticket	*ticket)
{
	unsigned int		count;		/* byte count of bwrite */
	unsigned int		roundoff;       /* roundoff to BB or stripe */
	uint64_t		bno;
	unsigned int		size;

	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
	trace_xlog_iclog_sync(iclog, _RET_IP_);

	count = xlog_calc_iclog_size(log, iclog, &roundoff);

	/*
	 * If we have a ticket, account for the roundoff via the ticket
	 * reservation to avoid touching the hot grant heads needlessly.
	 * Otherwise, we have to move grant heads directly.
	 */
	if (ticket) {
		ticket->t_curr_res -= roundoff;
	} else {
		xlog_grant_add_space(&log->l_reserve_head, roundoff);
		xlog_grant_add_space(&log->l_write_head, roundoff);
	}

	/* put cycle number in every block */
	xlog_pack_data(log, iclog, roundoff);

	/* real byte length */
	size = iclog->ic_offset;
	if (xfs_has_logv2(log->l_mp))
		size += roundoff;
	iclog->ic_header->h_len = cpu_to_be32(size);

	XFS_STATS_INC(log->l_mp, xs_log_writes);
	XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));

	/* The record LSN encodes the block number we write to. */
	bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header->h_lsn));

	/* Do we need to split this write into 2 parts? */
	if (bno + BTOBB(count) > log->l_logBBsize)
		xlog_split_iclog(log, iclog->ic_header, bno, count);

	/* calculate the checksum */
	iclog->ic_header->h_crc = xlog_cksum(log, iclog->ic_header,
					     iclog->ic_datap, XLOG_REC_SIZE, size);
	/*
	 * Intentionally corrupt the log record CRC based on the error injection
	 * frequency, if defined. This facilitates testing log recovery in the
	 * event of torn writes. Hence, set the IOABORT state to abort the log
	 * write on I/O completion and shutdown the fs. The subsequent mount
	 * detects the bad CRC and attempts to recover.
	 */
#ifdef DEBUG
	if (XFS_TEST_ERROR(log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
		iclog->ic_header->h_crc &= cpu_to_le32(0xAAAAAAAA);
		iclog->ic_fail_crc = true;
		xfs_warn(log->l_mp,
	"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
			 be64_to_cpu(iclog->ic_header->h_lsn));
	}
#endif
	xlog_verify_iclog(log, iclog, count);
	xlog_write_iclog(log, iclog, bno, count);
}
1770
/*
 * Deallocate a log structure: tear down the CIL, free every iclog in the
 * ring, destroy the I/O end workqueue and free the log itself.  The caller
 * must already have waited for all iclog I/O to complete.
 */
STATIC void
xlog_dealloc_log(
	struct xlog		*log)
{
	struct xlog_in_core	*iclog, *next_iclog;
	int			i;

	/*
	 * Destroy the CIL after waiting for iclog IO completion because an
	 * iclog EIO error will try to shut down the log, which accesses the
	 * CIL to wake up the waiters.
	 */
	xlog_cil_destroy(log);

	/* Walk the circular iclog ring exactly l_iclog_bufs times. */
	iclog = log->l_iclog;
	for (i = 0; i < log->l_iclog_bufs; i++) {
		next_iclog = iclog->ic_next;
		kvfree(iclog->ic_header);
		kfree(iclog);
		iclog = next_iclog;
	}

	log->l_mp->m_log = NULL;
	destroy_workqueue(log->l_ioend_workqueue);
	kfree(log);
}
1800
/*
 * Update counters atomically now that memcpy is done: bump the on-disk
 * header's logop count by @record_cnt and advance the iclog write offset
 * by @copy_bytes.  Caller must hold l_icloglock.
 */
static inline void
xlog_state_finish_copy(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			record_cnt,
	int			copy_bytes)
{
	lockdep_assert_held(&log->l_icloglock);

	be32_add_cpu(&iclog->ic_header->h_num_logops, record_cnt);
	iclog->ic_offset += copy_bytes;
}
1816
/*
 * print out info relating to regions written which consume
 * the reservation
 */
void
xlog_print_tic_res(
	struct xfs_mount	*mp,
	struct xlog_ticket	*ticket)
{
	xfs_warn(mp, "ticket reservation summary:");
	xfs_warn(mp, "  unit res    = %d bytes", ticket->t_unit_res);
	xfs_warn(mp, "  current res = %d bytes", ticket->t_curr_res);
	xfs_warn(mp, "  original count  = %d", ticket->t_ocnt);
	xfs_warn(mp, "  remaining count = %d", ticket->t_cnt);
}
1832
/*
 * Print a summary of the transaction: its ticket reservation, and every
 * log item with its log vector and the first bytes of each iovec.
 * Diagnostic output only; no state is modified.
 */
void
xlog_print_trans(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_log_item	*lip;

	/* dump core transaction and ticket info */
	xfs_warn(mp, "transaction summary:");
	xfs_warn(mp, "  log res   = %d", tp->t_log_res);
	xfs_warn(mp, "  log count = %d", tp->t_log_count);
	xfs_warn(mp, "  flags     = 0x%x", tp->t_flags);

	xlog_print_tic_res(mp, tp->t_ticket);

	/* dump each log item */
	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec	*lv = lip->li_lv;
		struct xfs_log_iovec	*vec;
		int			i;

		xfs_warn(mp, "log item: ");
		xfs_warn(mp, "  type	= 0x%x", lip->li_type);
		xfs_warn(mp, "  flags	= 0x%lx", lip->li_flags);
		/* Items with no log vector have nothing more to show. */
		if (!lv)
			continue;
		xfs_warn(mp, "  niovecs	= %d", lv->lv_niovecs);
		xfs_warn(mp, "  alloc_size = %d", lv->lv_alloc_size);
		xfs_warn(mp, "  bytes	= %d", lv->lv_bytes);
		xfs_warn(mp, "  buf used= %d", lv->lv_buf_used);

		/* dump each iovec for the log item */
		vec = lv->lv_iovecp;
		for (i = 0; i < lv->lv_niovecs; i++) {
			/* Cap the hex dump at 32 bytes per iovec. */
			int dumplen = min(vec->i_len, 32);

			xfs_warn(mp, "  iovec[%d]", i);
			xfs_warn(mp, "    type	= 0x%x", vec->i_type);
			xfs_warn(mp, "    len	= %d", vec->i_len);
			xfs_warn(mp, "    first %d bytes of iovec[%d]:", dumplen, i);
			xfs_hex_dump(vec->i_addr, dumplen);

			vec++;
		}
	}
}
1882
xlog_write_space_left(struct xlog_write_data * data)1883 static inline uint32_t xlog_write_space_left(struct xlog_write_data *data)
1884 {
1885 return data->iclog->ic_size - data->log_offset;
1886 }
1887
1888 static void *
xlog_write_space_advance(struct xlog_write_data * data,unsigned int len)1889 xlog_write_space_advance(
1890 struct xlog_write_data *data,
1891 unsigned int len)
1892 {
1893 void *p = data->iclog->ic_datap + data->log_offset;
1894
1895 ASSERT(xlog_write_space_left(data) >= len);
1896 ASSERT(data->log_offset % sizeof(int32_t) == 0);
1897 ASSERT(len % sizeof(int32_t) == 0);
1898
1899 data->data_cnt += len;
1900 data->log_offset += len;
1901 data->bytes_left -= len;
1902 return p;
1903 }
1904
/*
 * Copy one iovec's worth of data into the current iclog and count it as
 * one more log record in this copy session.
 */
static inline void
xlog_write_iovec(
	struct xlog_write_data	*data,
	void			*buf,
	uint32_t		buf_len)
{
	memcpy(xlog_write_space_advance(data, buf_len), buf, buf_len);
	data->record_cnt++;
}
1914
/*
 * Write log vectors into a single iclog which is guaranteed by the caller
 * to have enough space to write the entire log vector into.
 */
static void
xlog_write_full(
	struct xfs_log_vec	*lv,
	struct xlog_write_data	*data)
{
	int			index;

	ASSERT(data->bytes_left <= xlog_write_space_left(data) ||
		data->iclog->ic_state == XLOG_STATE_WANT_SYNC);

	/*
	 * Ordered log vectors have no regions to write so this
	 * loop will naturally skip them.
	 */
	for (index = 0; index < lv->lv_niovecs; index++) {
		struct xfs_log_iovec	*reg = &lv->lv_iovecp[index];
		/* Each region starts with an op header we must stamp. */
		struct xlog_op_header	*ophdr = reg->i_addr;

		ophdr->oh_tid = cpu_to_be32(data->ticket->t_tid);
		xlog_write_iovec(data, reg->i_addr, reg->i_len);
	}
}
1941
/*
 * Finish off the current iclog (update its counters and release it for
 * syncing) and then get space in a fresh iclog, resetting the per-iclog
 * record and data counters.  Returns 0 or a negative errno.
 */
static int
xlog_write_get_more_iclog_space(
	struct xlog_write_data	*data)
{
	struct xlog		*log = data->iclog->ic_log;
	int			error;

	spin_lock(&log->l_icloglock);
	ASSERT(data->iclog->ic_state == XLOG_STATE_WANT_SYNC);
	xlog_state_finish_copy(log, data->iclog, data->record_cnt,
			data->data_cnt);
	error = xlog_state_release_iclog(log, data->iclog, data->ticket);
	spin_unlock(&log->l_icloglock);
	if (error)
		return error;

	error = xlog_state_get_iclog_space(log, data);
	if (error)
		return error;
	/* Fresh iclog: nothing copied into it yet. */
	data->record_cnt = 0;
	data->data_cnt = 0;
	return 0;
}
1965
/*
 * Write log vectors into a single iclog which is smaller than the current chain
 * length. We write until we cannot fit a full record into the remaining space
 * and then stop. We return the log vector that is to be written that cannot
 * wholly fit in the iclog.
 */
static int
xlog_write_partial(
	struct xfs_log_vec	*lv,
	struct xlog_write_data	*data)
{
	struct xlog_op_header	*ophdr;
	int			index = 0;
	uint32_t		rlen;
	int			error;

	/* walk the logvec, copying until we run out of space in the iclog */
	for (index = 0; index < lv->lv_niovecs; index++) {
		struct xfs_log_iovec	*reg = &lv->lv_iovecp[index];
		uint32_t		reg_offset = 0;

		/*
		 * The first region of a continuation must have a non-zero
		 * length otherwise log recovery will just skip over it and
		 * start recovering from the next opheader it finds. Because we
		 * mark the next opheader as a continuation, recovery will then
		 * incorrectly add the continuation to the previous region and
		 * that breaks stuff.
		 *
		 * Hence if there isn't space for region data after the
		 * opheader, then we need to start afresh with a new iclog.
		 */
		if (xlog_write_space_left(data) <=
					sizeof(struct xlog_op_header)) {
			error = xlog_write_get_more_iclog_space(data);
			if (error)
				return error;
		}

		ophdr = reg->i_addr;
		/* Copy as much of this region as fits in the current iclog. */
		rlen = min_t(uint32_t, reg->i_len, xlog_write_space_left(data));

		ophdr->oh_tid = cpu_to_be32(data->ticket->t_tid);
		/* oh_len covers the payload only, not the opheader itself. */
		ophdr->oh_len = cpu_to_be32(rlen - sizeof(struct xlog_op_header));
		if (rlen != reg->i_len)
			ophdr->oh_flags |= XLOG_CONTINUE_TRANS;

		xlog_write_iovec(data, reg->i_addr, rlen);

		/* If we wrote the whole region, move to the next. */
		if (rlen == reg->i_len)
			continue;

		/*
		 * We now have a partially written iovec, but it can span
		 * multiple iclogs so we loop here. First we release the iclog
		 * we currently have, then we get a new iclog and add a new
		 * opheader. Then we continue copying from where we were until
		 * we either complete the iovec or fill the iclog. If we
		 * complete the iovec, then we increment the index and go right
		 * back to the top of the outer loop. if we fill the iclog, we
		 * run the inner loop again.
		 *
		 * This is complicated by the tail of a region using all the
		 * space in an iclog and hence requiring us to release the iclog
		 * and get a new one before returning to the outer loop. We must
		 * always guarantee that we exit this inner loop with at least
		 * space for log transaction opheaders left in the current
		 * iclog, hence we cannot just terminate the loop at the end
		 * of the continuation. So we loop while there is no
		 * space left in the current iclog, and check for the end of the
		 * continuation after getting a new iclog.
		 */
		do {
			/*
			 * Ensure we include the continuation opheader in the
			 * space we need in the new iclog by adding that size
			 * to the length we require. This continuation opheader
			 * needs to be accounted to the ticket as the space it
			 * consumes hasn't been accounted to the lv we are
			 * writing.
			 */
			data->bytes_left += sizeof(struct xlog_op_header);
			error = xlog_write_get_more_iclog_space(data);
			if (error)
				return error;

			/* Start the continuation with a fresh opheader. */
			ophdr = xlog_write_space_advance(data,
						sizeof(struct xlog_op_header));
			ophdr->oh_tid = cpu_to_be32(data->ticket->t_tid);
			ophdr->oh_clientid = XFS_TRANSACTION;
			ophdr->oh_res2 = 0;
			ophdr->oh_flags = XLOG_WAS_CONT_TRANS;

			/* Charge the extra opheader to the ticket directly. */
			data->ticket->t_curr_res -=
					sizeof(struct xlog_op_header);

			/*
			 * If rlen fits in the iclog, then end the region
			 * continuation. Otherwise we're going around again.
			 */
			reg_offset += rlen;
			rlen = reg->i_len - reg_offset;
			if (rlen <= xlog_write_space_left(data))
				ophdr->oh_flags |= XLOG_END_TRANS;
			else
				ophdr->oh_flags |= XLOG_CONTINUE_TRANS;

			rlen = min_t(uint32_t, rlen,
					xlog_write_space_left(data));
			ophdr->oh_len = cpu_to_be32(rlen);

			xlog_write_iovec(data, reg->i_addr + reg_offset, rlen);
		} while (ophdr->oh_flags & XLOG_CONTINUE_TRANS);
	}

	return 0;
}
2084
2085 /*
2086 * Write some region out to in-core log
2087 *
2088 * This will be called when writing externally provided regions or when
2089 * writing out a commit record for a given transaction.
2090 *
2091 * General algorithm:
2092 * 1. Find total length of this write. This may include adding to the
2093 * lengths passed in.
2094 * 2. Check whether we violate the tickets reservation.
2095 * 3. While writing to this iclog
 *	A. Reserve as much space in this iclog as we can get
2097 * B. If this is first write, save away start lsn
2098 * C. While writing this region:
2099 * 1. If first write of transaction, write start record
2100 * 2. Write log operation header (header per region)
2101 * 3. Find out if we can fit entire region into this iclog
2102 * 4. Potentially, verify destination memcpy ptr
2103 * 5. Memcpy (partial) region
2104 * 6. If partial copy, release iclog; otherwise, continue
2105 * copying more regions into current iclog
2106 * 4. Mark want sync bit (in simulation mode)
2107 * 5. Release iclog for potential flush to on-disk log.
2108 *
2109 * ERRORS:
2110 * 1. Panic if reservation is overrun. This should never happen since
2111 * reservation amounts are generated internal to the filesystem.
2112 * NOTES:
2113 * 1. Tickets are single threaded data structures.
2114 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2115 * syncing routine. When a single log_write region needs to span
2116 * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2117 * on all log operation writes which don't contain the end of the
2118 * region. The XLOG_END_TRANS bit is used for the in-core log
2119 * operation which contains the end of the continued log_write region.
2120 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2121 * we don't really know exactly how much space will be used. As a result,
2122 * we don't update ic_offset until the end when we know exactly how many
2123 * bytes have been written out.
2124 */
int
xlog_write(
	struct xlog		*log,
	struct xfs_cil_ctx	*ctx,
	struct list_head	*lv_chain,
	struct xlog_ticket	*ticket,
	uint32_t		len)

{
	struct xfs_log_vec	*lv;
	struct xlog_write_data	data = {
		.ticket		= ticket,
		.bytes_left	= len,
	};
	int			error;

	/*
	 * A negative current reservation means internal log space accounting
	 * is broken; report it loudly and shut the log down rather than risk
	 * writing a corrupt log.
	 */
	if (ticket->t_curr_res < 0) {
		xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
		     "ctx ticket reservation ran out. Need to up reservation");
		xlog_print_tic_res(log->l_mp, ticket);
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	}

	error = xlog_state_get_iclog_space(log, &data);
	if (error)
		return error;

	ASSERT(xlog_write_space_left(&data) > 0);

	/*
	 * If we have a context pointer, pass it the first iclog we are
	 * writing to so it can record state needed for iclog write
	 * ordering.
	 */
	if (ctx)
		xlog_cil_set_ctx_write_state(ctx, data.iclog);

	list_for_each_entry(lv, lv_chain, lv_list) {
		/*
		 * If the entire log vec does not fit in the iclog, punt it to
		 * the partial copy loop which can handle this case.
		 */
		if (lv->lv_niovecs &&
		    lv->lv_bytes > xlog_write_space_left(&data)) {
			error = xlog_write_partial(lv, &data);
			if (error) {
				/*
				 * We have no iclog to release, so just return
				 * the error immediately.
				 */
				return error;
			}
		} else {
			xlog_write_full(lv, &data);
		}
	}
	ASSERT(data.bytes_left == 0);

	/*
	 * We've already been guaranteed that the last writes will fit inside
	 * the current iclog, and hence it will already have the space used by
	 * those writes accounted to it. Hence we do not need to update the
	 * iclog with the number of bytes written here.
	 */
	spin_lock(&log->l_icloglock);
	xlog_state_finish_copy(log, data.iclog, data.record_cnt, 0);
	error = xlog_state_release_iclog(log, data.iclog, ticket);
	spin_unlock(&log->l_icloglock);

	return error;
}
2196
/*
 * Reset a DIRTY iclog back to ACTIVE so it can be reused, recording in
 * *iclogs_changed whether the iclog contained only the covering dummy
 * record (1) or real transaction data (2) for log covering decisions.
 */
static void
xlog_state_activate_iclog(
	struct xlog_in_core	*iclog,
	int			*iclogs_changed)
{
	ASSERT(list_empty_careful(&iclog->ic_callbacks));
	trace_xlog_iclog_activate(iclog, _RET_IP_);

	/*
	 * If the number of ops in this iclog indicate it just contains the
	 * dummy transaction, we can change state into IDLE (the second time
	 * around). Otherwise we should change the state into NEED a dummy.
	 * We don't need to cover the dummy.
	 */
	if (*iclogs_changed == 0 &&
	    iclog->ic_header->h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
		*iclogs_changed = 1;
	} else {
		/*
		 * We have two dirty iclogs so start over. This could also be
		 * num of ops indicating this is not the dummy going out.
		 */
		*iclogs_changed = 2;
	}

	/* Wipe the header so the iclog starts a new record from scratch. */
	iclog->ic_state	= XLOG_STATE_ACTIVE;
	iclog->ic_offset = 0;
	iclog->ic_header->h_num_logops = 0;
	memset(iclog->ic_header->h_cycle_data, 0,
		sizeof(iclog->ic_header->h_cycle_data));
	iclog->ic_header->h_lsn = 0;
	iclog->ic_header->h_tail_lsn = 0;
}
2230
/*
 * Loop through all iclogs and mark all iclogs currently marked DIRTY as
 * ACTIVE after iclog I/O has completed.
 */
static void
xlog_state_activate_iclogs(
	struct xlog		*log,
	int			*iclogs_changed)
{
	struct xlog_in_core	*iclog = log->l_iclog;

	do {
		if (iclog->ic_state == XLOG_STATE_DIRTY)
			xlog_state_activate_iclog(iclog, iclogs_changed);
		/*
		 * The ordering of marking iclogs ACTIVE must be maintained, so
		 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
		 */
		else if (iclog->ic_state != XLOG_STATE_ACTIVE)
			break;
	} while ((iclog = iclog->ic_next) != log->l_iclog);
}
2253
2254 static int
xlog_covered_state(int prev_state,int iclogs_changed)2255 xlog_covered_state(
2256 int prev_state,
2257 int iclogs_changed)
2258 {
2259 /*
2260 * We go to NEED for any non-covering writes. We go to NEED2 if we just
2261 * wrote the first covering record (DONE). We go to IDLE if we just
2262 * wrote the second covering record (DONE2) and remain in IDLE until a
2263 * non-covering write occurs.
2264 */
2265 switch (prev_state) {
2266 case XLOG_STATE_COVER_IDLE:
2267 if (iclogs_changed == 1)
2268 return XLOG_STATE_COVER_IDLE;
2269 fallthrough;
2270 case XLOG_STATE_COVER_NEED:
2271 case XLOG_STATE_COVER_NEED2:
2272 break;
2273 case XLOG_STATE_COVER_DONE:
2274 if (iclogs_changed == 1)
2275 return XLOG_STATE_COVER_NEED2;
2276 break;
2277 case XLOG_STATE_COVER_DONE2:
2278 if (iclogs_changed == 1)
2279 return XLOG_STATE_COVER_IDLE;
2280 break;
2281 default:
2282 ASSERT(0);
2283 }
2284
2285 return XLOG_STATE_COVER_NEED;
2286 }
2287
/*
 * Mark an iclog whose callbacks have run as DIRTY, reactivate any iclogs
 * that are now reusable, and update the log covering state accordingly.
 * Callers hold l_icloglock.
 */
STATIC void
xlog_state_clean_iclog(
	struct xlog		*log,
	struct xlog_in_core	*dirty_iclog)
{
	int			iclogs_changed = 0;

	trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);

	dirty_iclog->ic_state = XLOG_STATE_DIRTY;

	xlog_state_activate_iclogs(log, &iclogs_changed);
	/* Anyone forcing this iclog to disk can now make progress. */
	wake_up_all(&dirty_iclog->ic_force_wait);

	if (iclogs_changed) {
		log->l_covered_state = xlog_covered_state(log->l_covered_state,
				iclogs_changed);
	}
}
2307
/*
 * Return the lowest LSN of all iclogs that are currently in flight (i.e.
 * neither ACTIVE nor DIRTY), or 0 if there are none.  Used to ensure iclog
 * completion callbacks run in LSN order.
 */
STATIC xfs_lsn_t
xlog_get_lowest_lsn(
	struct xlog		*log)
{
	struct xlog_in_core	*iclog = log->l_iclog;
	xfs_lsn_t		lowest_lsn = 0, lsn;

	do {
		/* ACTIVE/DIRTY iclogs have no in-flight LSN to consider. */
		if (iclog->ic_state == XLOG_STATE_ACTIVE ||
		    iclog->ic_state == XLOG_STATE_DIRTY)
			continue;

		lsn = be64_to_cpu(iclog->ic_header->h_lsn);
		if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
			lowest_lsn = lsn;
	} while ((iclog = iclog->ic_next) != log->l_iclog);

	return lowest_lsn;
}
2327
/*
 * Return true if we need to stop processing, false to continue to the next
 * iclog. The caller will need to run callbacks if the iclog is returned in the
 * XLOG_STATE_CALLBACK state.
 */
static bool
xlog_state_iodone_process_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	xfs_lsn_t		lowest_lsn;
	xfs_lsn_t		header_lsn;

	switch (iclog->ic_state) {
	case XLOG_STATE_ACTIVE:
	case XLOG_STATE_DIRTY:
		/*
		 * Skip all iclogs in the ACTIVE & DIRTY states:
		 */
		return false;
	case XLOG_STATE_DONE_SYNC:
		/*
		 * Now that we have an iclog that is in the DONE_SYNC state, do
		 * one more check here to see if we have chased our tail around.
		 * If this is not the lowest lsn iclog, then we will leave it
		 * for another completion to process.
		 */
		header_lsn = be64_to_cpu(iclog->ic_header->h_lsn);
		lowest_lsn = xlog_get_lowest_lsn(log);
		if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
			return false;
		/*
		 * If there are no callbacks on this iclog, we can mark it clean
		 * immediately and return. Otherwise we need to run the
		 * callbacks.
		 */
		if (list_empty(&iclog->ic_callbacks)) {
			xlog_state_clean_iclog(log, iclog);
			return false;
		}
		trace_xlog_iclog_callback(iclog, _RET_IP_);
		iclog->ic_state = XLOG_STATE_CALLBACK;
		return false;
	default:
		/*
		 * Can only perform callbacks in order. Since this iclog is not
		 * in the DONE_SYNC state, we skip the rest and just try to
		 * clean up.
		 */
		return true;
	}
}
2380
/*
 * Loop over all the iclogs, running attached callbacks on them. Return true if
 * we ran any callbacks, indicating that we dropped the icloglock. We don't need
 * to handle transient shutdown state here at all because
 * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown
 * cleanup of the callbacks.
 */
static bool
xlog_state_do_iclog_callbacks(
	struct xlog		*log)
		__releases(&log->l_icloglock)
		__acquires(&log->l_icloglock)
{
	struct xlog_in_core	*first_iclog = log->l_iclog;
	struct xlog_in_core	*iclog = first_iclog;
	bool			ran_callback = false;

	do {
		LIST_HEAD(cb_list);

		if (xlog_state_iodone_process_iclog(log, iclog))
			break;
		if (iclog->ic_state != XLOG_STATE_CALLBACK) {
			iclog = iclog->ic_next;
			continue;
		}
		/*
		 * Move the callbacks to a private list so they can be run
		 * without holding the icloglock, which the callbacks may
		 * need to take themselves.
		 */
		list_splice_init(&iclog->ic_callbacks, &cb_list);
		spin_unlock(&log->l_icloglock);

		trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
		xlog_cil_process_committed(&cb_list);
		trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
		ran_callback = true;

		spin_lock(&log->l_icloglock);
		xlog_state_clean_iclog(log, iclog);
		iclog = iclog->ic_next;
	} while (iclog != first_iclog);

	return ran_callback;
}
2422
2423
/*
 * Loop running iclog completion callbacks until there are no more iclogs in a
 * state that can run callbacks.
 */
STATIC void
xlog_state_do_callback(
	struct xlog		*log)
{
	int			flushcnt = 0;
	int			repeats = 0;

	spin_lock(&log->l_icloglock);
	while (xlog_state_do_iclog_callbacks(log)) {
		if (xlog_is_shutdown(log))
			break;

		/* Warn periodically if we keep finding work to do. */
		if (++repeats > 5000) {
			flushcnt += repeats;
			repeats = 0;
			xfs_warn(log->l_mp,
				"%s: possible infinite loop (%d iterations)",
				__func__, flushcnt);
		}
	}

	/* The head iclog going ACTIVE means waiters can get iclog space. */
	if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
		wake_up_all(&log->l_flush_wait);

	spin_unlock(&log->l_icloglock);
}
2454
2455
/*
 * Finish transitioning this iclog to the dirty state.
 *
 * Callbacks could take time, so they are done outside the scope of the
 * global state machine log lock.
 */
STATIC void
xlog_state_done_syncing(
	struct xlog_in_core	*iclog)
{
	struct xlog		*log = iclog->ic_log;

	spin_lock(&log->l_icloglock);
	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
	trace_xlog_iclog_sync_done(iclog, _RET_IP_);

	/*
	 * If we got an error, either on the first buffer, or in the case of
	 * split log writes, on the second, we shut down the file system and
	 * no iclogs should ever be attempted to be written to disk again.
	 */
	if (!xlog_is_shutdown(log)) {
		ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
		iclog->ic_state = XLOG_STATE_DONE_SYNC;
	}

	/*
	 * Someone could be sleeping prior to writing out the next
	 * iclog buffer, we wake them all, one will get to do the
	 * I/O, the others get to wait for the result.
	 */
	wake_up_all(&iclog->ic_write_wait);
	spin_unlock(&log->l_icloglock);
	xlog_state_do_callback(log);
}
2491
/*
 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
 * sleep.  We wait on the flush queue on the head iclog as that should be
 * the first iclog to complete flushing. Hence if all iclogs are syncing,
 * we will wait here and all new writes will sleep until a sync completes.
 *
 * The in-core logs are used in a circular fashion. They are not used
 * out-of-order even when an iclog past the head is free.
 *
 * return:
 *	* log_offset where xlog_write() can start writing into the in-core
 *		log's data space.
 *	* in-core log pointer to which xlog_write() should write.
 *	* boolean indicating this is a continued write to an in-core log.
 *		If this is the last write, then the in-core log's offset field
 *		needs to be incremented, depending on the amount of data which
 *		is copied.
 */
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	struct xlog_write_data	*data)
{
	int			log_offset;
	struct xlog_rec_header	*head;
	struct xlog_in_core	*iclog;

restart:
	spin_lock(&log->l_icloglock);
	if (xlog_is_shutdown(log)) {
		spin_unlock(&log->l_icloglock);
		return -EIO;
	}

	iclog = log->l_iclog;
	if (iclog->ic_state != XLOG_STATE_ACTIVE) {
		XFS_STATS_INC(log->l_mp, xs_log_noiclogs);

		/* Wait for log writes to have flushed */
		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
		goto restart;
	}

	head = iclog->ic_header;

	atomic_inc(&iclog->ic_refcnt);	/* prevents sync */
	log_offset = iclog->ic_offset;

	trace_xlog_iclog_get_space(iclog, _RET_IP_);

	/* On the 1st write to an iclog, figure out lsn.  This works
	 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
	 * committing to.  If the offset is set, that's how many blocks
	 * must be written.
	 */
	if (log_offset == 0) {
		/* The record header consumes part of the reservation. */
		data->ticket->t_curr_res -= log->l_iclog_hsize;
		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
		head->h_lsn = cpu_to_be64(
			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
		ASSERT(log->l_curr_block >= 0);
	}

	/* If there is enough room to write everything, then do it.  Otherwise,
	 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
	 * bit is on, so this will get flushed out.  Don't update ic_offset
	 * until you know exactly how many bytes get copied.  Therefore, wait
	 * until later to update ic_offset.
	 *
	 * xlog_write() algorithm assumes that at least 2 xlog_op_header's
	 * can fit into remaining data section.
	 */
	if (iclog->ic_size - iclog->ic_offset <
	    2 * sizeof(struct xlog_op_header)) {
		int		error = 0;

		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);

		/*
		 * If we are the only one writing to this iclog, sync it to
		 * disk.  We need to do an atomic compare and decrement here to
		 * avoid racing with concurrent atomic_dec_and_lock() calls in
		 * xlog_state_release_iclog() when there is more than one
		 * reference to the iclog.
		 */
		if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
			error = xlog_state_release_iclog(log, iclog,
					data->ticket);
		spin_unlock(&log->l_icloglock);
		if (error)
			return error;
		goto restart;
	}

	/* Do we have enough room to write the full amount in the remainder
	 * of this iclog?  Or must we continue a write on the next iclog and
	 * mark this iclog as completely taken?  In the case where we switch
	 * iclogs (to mark it taken), this particular iclog will release/sync
	 * to disk in xlog_write().
	 */
	if (data->bytes_left <= iclog->ic_size - iclog->ic_offset)
		iclog->ic_offset += data->bytes_left;
	else
		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
	data->iclog = iclog;

	ASSERT(iclog->ic_offset <= iclog->ic_size);
	spin_unlock(&log->l_icloglock);

	data->log_offset = log_offset;
	return 0;
}
2604
/*
 * The first cnt-1 times a ticket goes through here we don't need to move the
 * grant write head because the permanent reservation has reserved cnt times the
 * unit amount.  Release part of current permanent unit reservation and reset
 * current reservation to be one units worth.  Also move grant reservation head
 * forward.
 */
void
xfs_log_ticket_regrant(
	struct xlog		*log,
	struct xlog_ticket	*ticket)
{
	trace_xfs_log_ticket_regrant(log, ticket);

	if (ticket->t_cnt > 0)
		ticket->t_cnt--;

	/* Give back the space the ticket actually consumed this round. */
	xlog_grant_sub_space(&log->l_reserve_head, ticket->t_curr_res);
	xlog_grant_sub_space(&log->l_write_head, ticket->t_curr_res);
	ticket->t_curr_res = ticket->t_unit_res;

	trace_xfs_log_ticket_regrant_sub(log, ticket);

	/*
	 * If the ticket has no pre-reserved units left, the fresh unit we
	 * just reset t_curr_res to must be re-reserved against the reserve
	 * grant head; while t_cnt > 0 the permanent reservation already
	 * covers it.
	 */
	if (!ticket->t_cnt) {
		xlog_grant_add_space(&log->l_reserve_head, ticket->t_unit_res);
		trace_xfs_log_ticket_regrant_exit(log, ticket);
	}

	xfs_log_ticket_put(ticket);
}
2636
/*
 * Give back the space left from a reservation.
 *
 * All the information we need to make a correct determination of space left
 * is present.  For non-permanent reservations, things are quite easy.  The
 * count should have been decremented to zero.  We only need to deal with the
 * space remaining in the current reservation part of the ticket.  If the
 * ticket contains a permanent reservation, there may be left over space which
 * needs to be released.  A count of N means that N-1 refills of the current
 * reservation can be done before we need to ask for more space.  The first
 * one goes to fill up the first current reservation.  Once we run out of
 * space, the count will stay at zero and the only space remaining will be
 * in the current reservation field.
 */
void
xfs_log_ticket_ungrant(
	struct xlog		*log,
	struct xlog_ticket	*ticket)
{
	int			bytes;

	trace_xfs_log_ticket_ungrant(log, ticket);

	if (ticket->t_cnt > 0)
		ticket->t_cnt--;

	trace_xfs_log_ticket_ungrant_sub(log, ticket);

	/*
	 * If this is a permanent reservation ticket, we may be able to free
	 * up more space based on the remaining count.
	 */
	bytes = ticket->t_curr_res;
	if (ticket->t_cnt > 0) {
		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
		bytes += ticket->t_unit_res*ticket->t_cnt;
	}

	xlog_grant_sub_space(&log->l_reserve_head, bytes);
	xlog_grant_sub_space(&log->l_write_head, bytes);

	trace_xfs_log_ticket_ungrant_exit(log, ticket);

	/* Freed grant space may unblock reservation waiters. */
	xfs_log_space_wake(log->l_mp);
	xfs_log_ticket_put(ticket);
}
2683
/*
 * This routine will mark the current iclog in the ring as WANT_SYNC and move
 * the current iclog pointer to the next iclog in the ring.
 */
void
xlog_state_switch_iclogs(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			eventual_size)
{
	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
	assert_spin_locked(&log->l_icloglock);
	trace_xlog_iclog_switch(iclog, _RET_IP_);

	if (!eventual_size)
		eventual_size = iclog->ic_offset;
	iclog->ic_state = XLOG_STATE_WANT_SYNC;
	iclog->ic_header->h_prev_block = cpu_to_be32(log->l_prev_block);
	log->l_prev_block = log->l_curr_block;
	log->l_prev_cycle = log->l_curr_cycle;

	/* roll log?: ic_offset changed later */
	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);

	/* Round up to next log-sunit */
	if (log->l_iclog_roundoff > BBSIZE) {
		uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
	}

	if (log->l_curr_block >= log->l_logBBsize) {
		/*
		 * Rewind the current block before the cycle is bumped to make
		 * sure that the combined LSN never transiently moves forward
		 * when the log wraps to the next cycle. This is to support the
		 * unlocked sample of these fields from xlog_valid_lsn(). Most
		 * other cases should acquire l_icloglock.
		 */
		log->l_curr_block -= log->l_logBBsize;
		ASSERT(log->l_curr_block >= 0);
		smp_wmb();
		log->l_curr_cycle++;
		/* Cycle numbers must never collide with the header magic. */
		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
			log->l_curr_cycle++;
	}
	ASSERT(iclog == log->l_iclog);
	log->l_iclog = iclog->ic_next;
}
2732
/*
 * Force the iclog to disk and check if the iclog has been completed before
 * xlog_force_iclog() returns. This can happen on synchronous (e.g.
 * pmem) or fast async storage because we drop the icloglock to issue the IO.
 * If completion has already occurred, tell the caller so that it can avoid an
 * unnecessary wait on the iclog.
 */
static int
xlog_force_and_check_iclog(
	struct xlog_in_core	*iclog,
	bool			*completed)
{
	/* Snapshot the LSN before forcing so reuse can be detected below. */
	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header->h_lsn);
	int			error;

	*completed = false;
	error = xlog_force_iclog(iclog);
	if (error)
		return error;

	/*
	 * If the iclog has already been completed and reused the header LSN
	 * will have been rewritten by completion
	 */
	if (be64_to_cpu(iclog->ic_header->h_lsn) != lsn)
		*completed = true;
	return 0;
}
2761
/*
 * Write out all data in the in-core log as of this exact moment in time.
 *
 * Data may be written to the in-core log during this call.  However,
 * we don't guarantee this data will be written out.  A change from past
 * implementation means this routine will *not* write out zero length LRs.
 *
 * Basically, we try and perform an intelligent scan of the in-core logs.
 * If we determine there is no flushable data, we just return.  There is no
 * flushable data if:
 *
 *	1. the current iclog is active and has no data; the previous iclog
 *		is in the active or dirty state.
 *	2. the current iclog is dirty, and the previous iclog is in the
 *		active or dirty state.
 *
 * We may sleep if:
 *
 *	1. the current iclog is not in the active nor dirty state.
 *	2. the current iclog is dirty, and the previous iclog is not in the
 *		active nor dirty state.
 *	3. the current iclog is active, and there is another thread writing
 *		to this particular iclog.
 *	4. a) the current iclog is active and has no other writers
 *	   b) when we return from flushing out this iclog, it is still
 *		not in the active nor dirty state.
 */
int
xfs_log_force(
	struct xfs_mount	*mp,
	uint			flags)
{
	struct xlog		*log = mp->m_log;
	struct xlog_in_core	*iclog;

	XFS_STATS_INC(mp, xs_log_force);
	trace_xfs_log_force(mp, 0, _RET_IP_);

	/* Push any pending CIL checkpoints into the iclogs first. */
	xlog_cil_force(log);

	spin_lock(&log->l_icloglock);
	if (xlog_is_shutdown(log))
		goto out_error;

	iclog = log->l_iclog;
	trace_xlog_iclog_force(iclog, _RET_IP_);

	if (iclog->ic_state == XLOG_STATE_DIRTY ||
	    (iclog->ic_state == XLOG_STATE_ACTIVE &&
	     atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
		/*
		 * If the head is dirty or (active and empty), then we need to
		 * look at the previous iclog.
		 *
		 * If the previous iclog is active or dirty we are done.  There
		 * is nothing to sync out. Otherwise, we attach ourselves to the
		 * previous iclog and go to sleep.
		 */
		iclog = iclog->ic_prev;
	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
		if (atomic_read(&iclog->ic_refcnt) == 0) {
			/* We have exclusive access to this iclog. */
			bool	completed;

			if (xlog_force_and_check_iclog(iclog, &completed))
				goto out_error;

			if (completed)
				goto out_unlock;
		} else {
			/*
			 * Someone else is still writing to this iclog, so we
			 * need to ensure that when they release the iclog it
			 * gets synced immediately as we may be waiting on it.
			 */
			xlog_state_switch_iclogs(log, iclog, 0);
		}
	}

	/*
	 * The iclog we are about to wait on may contain the checkpoint pushed
	 * by the above xlog_cil_force() call, but it may not have been pushed
	 * to disk yet. Like the ACTIVE case above, we need to make sure caches
	 * are flushed when this iclog is written.
	 */
	if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;

	/* xlog_wait_on_iclog() drops l_icloglock for us. */
	if (flags & XFS_LOG_SYNC)
		return xlog_wait_on_iclog(iclog);
out_unlock:
	spin_unlock(&log->l_icloglock);
	return 0;
out_error:
	spin_unlock(&log->l_icloglock);
	return -EIO;
}
2859
2860 /*
2861 * Force the log to a specific LSN.
2862 *
2863 * If an iclog with that lsn can be found:
2864 * If it is in the DIRTY state, just return.
2865 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
2866 * state and go to sleep or return.
2867 * If it is in any other state, go to sleep or return.
2868 *
2869 * Synchronous forces are implemented with a wait queue. All callers trying
2870 * to force a given lsn to disk must wait on the queue attached to the
2871 * specific in-core log. When given in-core log finally completes its write
2872 * to disk, that thread will wake up all threads waiting on the queue.
2873 */
static int
xlog_force_lsn(
	struct xlog		*log,
	xfs_lsn_t		lsn,
	uint			flags,
	int			*log_flushed,
	bool			already_slept)
{
	struct xlog_in_core	*iclog;
	bool			completed;

	spin_lock(&log->l_icloglock);
	if (xlog_is_shutdown(log))
		goto out_error;

	/*
	 * Walk the iclog ring looking for the record whose header is stamped
	 * with @lsn. If we complete a full circuit without finding it, no
	 * iclog holds that lsn and there is nothing left to force here.
	 */
	iclog = log->l_iclog;
	while (be64_to_cpu(iclog->ic_header->h_lsn) != lsn) {
		trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
		iclog = iclog->ic_next;
		if (iclog == log->l_iclog)
			goto out_unlock;
	}

	switch (iclog->ic_state) {
	case XLOG_STATE_ACTIVE:
		/*
		 * We sleep here if we haven't already slept (e.g. this is the
		 * first time we've looked at the correct iclog buf) and the
		 * buffer before us is going to be sync'ed. The reason for this
		 * is that if we are doing sync transactions here, by waiting
		 * for the previous I/O to complete, we can allow a few more
		 * transactions into this iclog before we close it down.
		 *
		 * Otherwise, we mark the buffer WANT_SYNC, and bump up the
		 * refcnt so we can release the log (which drops the ref count).
		 * The state switch keeps new transaction commits from using
		 * this buffer. When the current commits finish writing into
		 * the buffer, the refcount will drop to zero and the buffer
		 * will go out then.
		 */
		if (!already_slept &&
		    (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
		     iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
			/*
			 * -EAGAIN tells the caller to retry the force with
			 * @already_slept set so we only ever wait once here
			 * (see the retry loop in xfs_log_force_seq()).
			 */
			xlog_wait(&iclog->ic_prev->ic_write_wait,
					&log->l_icloglock);
			return -EAGAIN;
		}
		if (xlog_force_and_check_iclog(iclog, &completed))
			goto out_error;
		if (log_flushed)
			*log_flushed = 1;
		if (completed)
			goto out_unlock;
		break;
	case XLOG_STATE_WANT_SYNC:
		/*
		 * This iclog may contain the checkpoint pushed by the
		 * xlog_cil_force_seq() call, but there are other writers still
		 * accessing it so it hasn't been pushed to disk yet. Like the
		 * ACTIVE case above, we need to make sure caches are flushed
		 * when this iclog is written.
		 */
		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
		break;
	default:
		/*
		 * The entire checkpoint was written by the CIL force and is on
		 * its way to disk already. It will be stable when it
		 * completes, so we don't need to manipulate caches here at all.
		 * We just need to wait for completion if necessary.
		 */
		break;
	}

	if (flags & XFS_LOG_SYNC)
		return xlog_wait_on_iclog(iclog);
out_unlock:
	spin_unlock(&log->l_icloglock);
	return 0;
out_error:
	spin_unlock(&log->l_icloglock);
	return -EIO;
}
2957
2958 /*
2959 * Force the log to a specific checkpoint sequence.
2960 *
2961 * First force the CIL so that all the required changes have been flushed to the
2962 * iclogs. If the CIL force completed it will return a commit LSN that indicates
2963 * the iclog that needs to be flushed to stable storage. If the caller needs
2964 * a synchronous log force, we will wait on the iclog with the LSN returned by
2965 * xlog_cil_force_seq() to be completed.
2966 */
2967 int
xfs_log_force_seq(struct xfs_mount * mp,xfs_csn_t seq,uint flags,int * log_flushed)2968 xfs_log_force_seq(
2969 struct xfs_mount *mp,
2970 xfs_csn_t seq,
2971 uint flags,
2972 int *log_flushed)
2973 {
2974 struct xlog *log = mp->m_log;
2975 xfs_lsn_t lsn;
2976 int ret;
2977 ASSERT(seq != 0);
2978
2979 XFS_STATS_INC(mp, xs_log_force);
2980 trace_xfs_log_force(mp, seq, _RET_IP_);
2981
2982 lsn = xlog_cil_force_seq(log, seq);
2983 if (lsn == NULLCOMMITLSN)
2984 return 0;
2985
2986 ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
2987 if (ret == -EAGAIN) {
2988 XFS_STATS_INC(mp, xs_log_force_sleep);
2989 ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
2990 }
2991 return ret;
2992 }
2993
2994 /*
2995 * Free a used ticket when its refcount falls to zero.
2996 */
2997 void
xfs_log_ticket_put(struct xlog_ticket * ticket)2998 xfs_log_ticket_put(
2999 struct xlog_ticket *ticket)
3000 {
3001 ASSERT(atomic_read(&ticket->t_ref) > 0);
3002 if (atomic_dec_and_test(&ticket->t_ref))
3003 kmem_cache_free(xfs_log_ticket_cache, ticket);
3004 }
3005
/* Take an additional reference on an already-referenced log ticket. */
struct xlog_ticket *
xfs_log_ticket_get(
	struct xlog_ticket	*ticket)
{
	ASSERT(atomic_read(&ticket->t_ref) > 0);
	atomic_inc(&ticket->t_ref);
	return ticket;
}
3014
/*
 * Figure out the total log space unit (in bytes) that would be
 * required for a log ticket.
 *
 * @unit_bytes is the raw transaction reservation; the returned value adds
 * all the log-format overhead (op headers, record headers, roundoff).
 * If @niclogs is non-NULL it is set to the number of iclog headers the
 * reservation may span.
 */
static int
xlog_calc_unit_res(
	struct xlog		*log,
	int			unit_bytes,
	int			*niclogs)
{
	int			iclog_space;
	uint			num_headers;

	/*
	 * Permanent reservations have up to 'cnt'-1 active log operations
	 * in the log.  A unit in this case is the amount of space for one
	 * of these log operations.  Normal reservations have a cnt of 1
	 * and their unit amount is the total amount of space required.
	 *
	 * The following lines of code account for non-transaction data
	 * which occupy space in the on-disk log.
	 *
	 * Normal form of a transaction is:
	 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
	 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
	 *
	 * We need to account for all the leadup data and trailer data
	 * around the transaction data.
	 * And then we need to account for the worst case in terms of using
	 * more space.
	 * The worst case will happen if:
	 * - the placement of the transaction happens to be such that the
	 *   roundoff is at its maximum
	 * - the transaction data is synced before the commit record is synced
	 *   i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
	 *   Therefore the commit record is in its own Log Record.
	 *   This can happen as the commit record is called with its
	 *   own region to xlog_write().
	 *   This then means that in the worst case, roundoff can happen for
	 *   the commit-rec as well.
	 *   The commit-rec is smaller than padding in this scenario and so it is
	 *   not added separately.
	 */

	/* for trans header */
	unit_bytes += sizeof(struct xlog_op_header);
	unit_bytes += sizeof(struct xfs_trans_header);

	/* for start-rec */
	unit_bytes += sizeof(struct xlog_op_header);

	/*
	 * for LR headers - the space for data in an iclog is the size minus
	 * the space used for the headers. If we use the iclog size, then we
	 * undercalculate the number of headers required.
	 *
	 * Furthermore - the addition of op headers for split-recs might
	 * increase the space required enough to require more log and op
	 * headers, so take that into account too.
	 *
	 * IMPORTANT: This reservation makes the assumption that if this
	 * transaction is the first in an iclog and hence has the LR headers
	 * accounted to it, then the remaining space in the iclog is
	 * exclusively for this transaction.  i.e. if the transaction is larger
	 * than the iclog, it will be the only thing in that iclog.
	 * Fundamentally, this means we must pass the entire log vector to
	 * xlog_write to guarantee this.
	 */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	num_headers = howmany(unit_bytes, iclog_space);

	/* for split-recs - ophdrs added when data split over LRs */
	unit_bytes += sizeof(struct xlog_op_header) * num_headers;

	/*
	 * add extra header reservations if we overrun - the split-rec ophdrs
	 * added above may themselves push us over another iclog boundary, so
	 * keep adding one op header per extra record header until the count
	 * converges.
	 */
	while (!num_headers ||
	       howmany(unit_bytes, iclog_space) > num_headers) {
		unit_bytes += sizeof(struct xlog_op_header);
		num_headers++;
	}
	unit_bytes += log->l_iclog_hsize * num_headers;

	/* for commit-rec LR header - note: padding will subsume the ophdr */
	unit_bytes += log->l_iclog_hsize;

	/* roundoff padding for transaction data and one for commit record */
	unit_bytes += 2 * log->l_iclog_roundoff;

	if (niclogs)
		*niclogs = num_headers;
	return unit_bytes;
}
3107
3108 int
xfs_log_calc_unit_res(struct xfs_mount * mp,int unit_bytes)3109 xfs_log_calc_unit_res(
3110 struct xfs_mount *mp,
3111 int unit_bytes)
3112 {
3113 return xlog_calc_unit_res(mp->m_log, unit_bytes, NULL);
3114 }
3115
3116 /*
3117 * Allocate and initialise a new log ticket.
3118 */
3119 struct xlog_ticket *
xlog_ticket_alloc(struct xlog * log,int unit_bytes,int cnt,bool permanent)3120 xlog_ticket_alloc(
3121 struct xlog *log,
3122 int unit_bytes,
3123 int cnt,
3124 bool permanent)
3125 {
3126 struct xlog_ticket *tic;
3127 int unit_res;
3128
3129 tic = kmem_cache_zalloc(xfs_log_ticket_cache,
3130 GFP_KERNEL | __GFP_NOFAIL);
3131
3132 unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
3133
3134 atomic_set(&tic->t_ref, 1);
3135 tic->t_task = current;
3136 INIT_LIST_HEAD(&tic->t_queue);
3137 tic->t_unit_res = unit_res;
3138 tic->t_curr_res = unit_res;
3139 tic->t_cnt = cnt;
3140 tic->t_ocnt = cnt;
3141 tic->t_tid = get_random_u32();
3142 if (permanent)
3143 tic->t_flags |= XLOG_TIC_PERM_RESERV;
3144
3145 return tic;
3146 }
3147
3148 #if defined(DEBUG)
/*
 * Dump the log head, tail and grant state to the system log when a debug
 * verification check detects that we have run out of log space.
 *
 * @iclog may be NULL; -1 is printed for the fields derived from it.
 */
static void
xlog_verify_dump_tail(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	xfs_alert(log->l_mp,
"ran out of log space tail 0x%llx/0x%llx, head lsn 0x%llx, head 0x%x/0x%x, prev head 0x%x/0x%x",
			iclog ? be64_to_cpu(iclog->ic_header->h_tail_lsn) : -1,
			atomic64_read(&log->l_tail_lsn),
			log->l_ailp->ail_head_lsn,
			log->l_curr_cycle, log->l_curr_block,
			log->l_prev_cycle, log->l_prev_block);
	xfs_alert(log->l_mp,
"write grant 0x%llx, reserve grant 0x%llx, tail_space 0x%llx, size 0x%x, iclog flags 0x%x",
			atomic64_read(&log->l_write_head.grant),
			atomic64_read(&log->l_reserve_head.grant),
			log->l_tail_space, log->l_logsize,
			iclog ? iclog->ic_flags : -1);
}
3168
/* Check if the new iclog will fit in the log. */
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	xfs_lsn_t	tail_lsn = be64_to_cpu(iclog->ic_header->h_tail_lsn);
	int		blocks;

	if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
		/*
		 * Head and tail are in the same cycle: the free space runs
		 * from the head to the end of the physical log and wraps
		 * around to the tail block.
		 */
		blocks = log->l_logBBsize -
				(log->l_prev_block - BLOCK_LSN(tail_lsn));
		if (blocks < BTOBB(iclog->ic_offset) +
					BTOBB(log->l_iclog_hsize)) {
			xfs_emerg(log->l_mp,
					"%s: ran out of log space", __func__);
			xlog_verify_dump_tail(log, iclog);
		}
		return;
	}

	/* Otherwise the head must be exactly one cycle ahead of the tail. */
	if (CYCLE_LSN(tail_lsn) + 1 != log->l_prev_cycle) {
		xfs_emerg(log->l_mp, "%s: head has wrapped tail.", __func__);
		xlog_verify_dump_tail(log, iclog);
		return;
	}
	if (BLOCK_LSN(tail_lsn) == log->l_prev_block) {
		xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
		xlog_verify_dump_tail(log, iclog);
		return;
	}

	/*
	 * Head has wrapped; free space is from the head up to the tail
	 * block. The +1 keeps at least one block of slack - presumably for
	 * the record header; TODO confirm.
	 */
	blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
	if (blocks < BTOBB(iclog->ic_offset) + 1) {
		xfs_emerg(log->l_mp, "%s: ran out of iclog space", __func__);
		xlog_verify_dump_tail(log, iclog);
	}
}
3207
/*
 * Perform a number of checks on the iclog before writing to disk.
 *
 * 1. Make sure the iclogs are still circular
 * 2. Make sure we have a good magic number
 * 3. Make sure we don't have magic numbers in the data
 * 4. Check fields of each log operation header for:
 *	A. Valid client identifier
 *	B. tid ptr value falls in valid ptr space (user space code)
 *	C. Length in log record header is correct according to the
 *		individual operation headers within record.
 * 5. When a bwrite will occur within 5 blocks of the front of the physical
 *	log, check the preceding blocks of the physical log to make sure all
 *	the cycle numbers agree with the current cycle number.
 */
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count)
{
	struct xlog_rec_header	*rhead = iclog->ic_header;
	struct xlog_in_core	*icptr;
	void			*base_ptr, *ptr;
	ptrdiff_t		field_offset;
	uint8_t			clientid;
	int			len, i, op_len;
	int			idx;

	/* check validity of iclog pointers */
	spin_lock(&log->l_icloglock);
	icptr = log->l_iclog;
	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
		ASSERT(icptr);

	/* after l_iclog_bufs hops we must be back where we started */
	if (icptr != log->l_iclog)
		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
	spin_unlock(&log->l_icloglock);

	/* check log magic numbers */
	if (rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);

	/*
	 * The record header magic must not reappear at the start of any
	 * later basic block within the first @count bytes of the record.
	 */
	base_ptr = ptr = rhead;
	for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			xfs_emerg(log->l_mp, "%s: unexpected magic num",
					__func__);
	}

	/* check fields of every op header the record claims to contain */
	len = be32_to_cpu(rhead->h_num_logops);
	base_ptr = ptr = iclog->ic_datap;
	for (i = 0; i < len; i++) {
		struct xlog_op_header *ophead = ptr;
		void *p = &ophead->oh_clientid;

		/* clientid is only 1 byte */
		field_offset = p - base_ptr;
		if (field_offset & 0x1ff) {
			/* field lies within a 512-byte block; read directly */
			clientid = ophead->oh_clientid;
		} else {
			/*
			 * Field sits at the start of a 512-byte basic block,
			 * so its real value has to be fetched from the record
			 * header's cycle data array via xlog_cycle_data().
			 */
			idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap);
			clientid = xlog_get_client_id(*xlog_cycle_data(rhead, idx));
		}
		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) {
			xfs_warn(log->l_mp,
				"%s: op %d invalid clientid %d op "PTR_FMT" offset 0x%lx",
				__func__, i, clientid, ophead,
				(unsigned long)field_offset);
		}

		/* check length, subject to the same cycle-data indirection */
		p = &ophead->oh_len;
		field_offset = p - base_ptr;
		if (field_offset & 0x1ff) {
			op_len = be32_to_cpu(ophead->oh_len);
		} else {
			idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap);
			op_len = be32_to_cpu(*xlog_cycle_data(rhead, idx));
		}
		ptr += sizeof(struct xlog_op_header) + op_len;
	}
}
3292 #endif
3293
/*
 * Perform a forced shutdown on the log.
 *
 * This can be called from low level log code to trigger a shutdown, or from the
 * high level mount shutdown code when the mount shuts down.
 *
 * Our main objectives here are to make sure that:
 *	a. if the shutdown was not due to a log IO error, flush the logs to
 *	   disk. Anything modified after this is ignored.
 *	b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
 *	   parties to find out. Nothing new gets queued after this is done.
 *	c. Tasks sleeping on log reservations, pinned objects and
 *	   other resources get woken up.
 *	d. The mount is also marked as shut down so that log triggered shutdowns
 *	   still behave the same as if they called xfs_forced_shutdown().
 *
 * Return true if the shutdown cause was a log IO error and we actually shut the
 * log down.
 */
bool
xlog_force_shutdown(
	struct xlog	*log,
	uint32_t	shutdown_flags)
{
	bool		log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);

	/* Nothing to shut down without a log. */
	if (!log)
		return false;

	/*
	 * Ensure that there is only ever one log shutdown being processed.
	 * If we allow the log force below on a second pass after shutting
	 * down the log, we risk deadlocking the CIL push as it may require
	 * locks on objects the current shutdown context holds (e.g. taking
	 * buffer locks to abort buffers on last unpin of buf log items).
	 */
	if (test_and_set_bit(XLOG_SHUTDOWN_STARTED, &log->l_opstate))
		return false;

	/*
	 * Flush all the completed transactions to disk before marking the log
	 * being shut down. We need to do this first as shutting down the log
	 * before the force will prevent the log force from flushing the iclogs
	 * to disk.
	 *
	 * When we are in recovery, there are no transactions to flush, and
	 * we don't want to touch the log because we don't want to perturb the
	 * current head/tail for future recovery attempts. Hence we need to
	 * avoid a log force in this case.
	 *
	 * If we are shutting down due to a log IO error, then we must avoid
	 * trying to write the log as that may just result in more IO errors and
	 * an endless shutdown/force loop.
	 */
	if (!log_error && !xlog_in_recovery(log))
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

	/*
	 * Atomically set the shutdown state. If the shutdown state is already
	 * set, then someone else is performing the shutdown and so we are done
	 * here. This should never happen because we should only ever get called
	 * once by the first shutdown caller.
	 *
	 * Much of the log state machine transitions assume that shutdown state
	 * cannot change once they hold the log->l_icloglock. Hence we need to
	 * hold that lock here, even though we use the atomic test_and_set_bit()
	 * operation to set the shutdown state.
	 */
	spin_lock(&log->l_icloglock);
	if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
		spin_unlock(&log->l_icloglock);
		ASSERT(0);
		return false;
	}
	spin_unlock(&log->l_icloglock);

	/*
	 * If this log shutdown also sets the mount shutdown state, issue a
	 * shutdown warning message.
	 */
	if (!xfs_set_shutdown(log->l_mp)) {
		xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
"Filesystem has been shut down due to log error (0x%x).",
				shutdown_flags);
		xfs_alert(log->l_mp,
"Please unmount the filesystem and rectify the problem(s).");
		if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
			xfs_stack_trace();
	}

	/*
	 * We don't want anybody waiting for log reservations after this. That
	 * means we have to wake up everybody queued up on reserveq as well as
	 * writeq. In addition, we make sure in xlog_{re}grant_log_space that
	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
	 * action is protected by the grant locks.
	 */
	xlog_grant_head_wake_all(&log->l_reserve_head);
	xlog_grant_head_wake_all(&log->l_write_head);

	/*
	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
	 * as if the log writes were completed. The abort handling in the log
	 * item committed callback functions will do this again under lock to
	 * avoid races.
	 */
	spin_lock(&log->l_cilp->xc_push_lock);
	wake_up_all(&log->l_cilp->xc_start_wait);
	wake_up_all(&log->l_cilp->xc_commit_wait);
	spin_unlock(&log->l_cilp->xc_push_lock);

	spin_lock(&log->l_icloglock);
	xlog_state_shutdown_callbacks(log);
	spin_unlock(&log->l_icloglock);

	/* Wake anyone waiting on the log opstate to change. */
	wake_up_var(&log->l_opstate);
	if (IS_ENABLED(CONFIG_XFS_RT) && xfs_has_zoned(log->l_mp))
		xfs_zoned_wake_all(log->l_mp);

	return log_error;
}
3415
3416 STATIC int
xlog_iclogs_empty(struct xlog * log)3417 xlog_iclogs_empty(
3418 struct xlog *log)
3419 {
3420 struct xlog_in_core *iclog = log->l_iclog;
3421
3422 do {
3423 /* endianness does not matter here, zero is zero in
3424 * any language.
3425 */
3426 if (iclog->ic_header->h_num_logops)
3427 return 0;
3428 iclog = iclog->ic_next;
3429 } while (iclog != log->l_iclog);
3430
3431 return 1;
3432 }
3433
3434 /*
3435 * Verify that an LSN stamped into a piece of metadata is valid. This is
3436 * intended for use in read verifiers on v5 superblocks.
3437 */
3438 bool
xfs_log_check_lsn(struct xfs_mount * mp,xfs_lsn_t lsn)3439 xfs_log_check_lsn(
3440 struct xfs_mount *mp,
3441 xfs_lsn_t lsn)
3442 {
3443 struct xlog *log = mp->m_log;
3444 bool valid;
3445
3446 /*
3447 * norecovery mode skips mount-time log processing and unconditionally
3448 * resets the in-core LSN. We can't validate in this mode, but
3449 * modifications are not allowed anyways so just return true.
3450 */
3451 if (xfs_has_norecovery(mp))
3452 return true;
3453
3454 /*
3455 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
3456 * handled by recovery and thus safe to ignore here.
3457 */
3458 if (lsn == NULLCOMMITLSN)
3459 return true;
3460
3461 valid = xlog_valid_lsn(mp->m_log, lsn);
3462
3463 /* warn the user about what's gone wrong before verifier failure */
3464 if (!valid) {
3465 spin_lock(&log->l_icloglock);
3466 xfs_warn(mp,
3467 "Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
3468 "Please unmount and run xfs_repair (>= v4.3) to resolve.",
3469 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
3470 log->l_curr_cycle, log->l_curr_block);
3471 spin_unlock(&log->l_icloglock);
3472 }
3473
3474 return valid;
3475 }
3476