xref: /linux/fs/xfs/xfs_log.c (revision 399af66228cfd7df79dc360810b6b673000f8090)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs_platform.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_errortag.h"
14 #include "xfs_error.h"
15 #include "xfs_trans.h"
16 #include "xfs_trans_priv.h"
17 #include "xfs_log.h"
18 #include "xfs_log_priv.h"
19 #include "xfs_trace.h"
20 #include "xfs_sysfs.h"
21 #include "xfs_sb.h"
22 #include "xfs_health.h"
23 #include "xfs_zone_alloc.h"
24 
25 struct xlog_write_data {
26 	struct xlog_ticket	*ticket;
27 	struct xlog_in_core	*iclog;
28 	uint32_t		bytes_left;
29 	uint32_t		record_cnt;
30 	uint32_t		data_cnt;
31 	int			log_offset;
32 };
33 
34 struct kmem_cache	*xfs_log_ticket_cache;
35 
36 /* Local miscellaneous function prototypes */
37 STATIC struct xlog *
38 xlog_alloc_log(
39 	struct xfs_mount	*mp,
40 	struct xfs_buftarg	*log_target,
41 	xfs_daddr_t		blk_offset,
42 	int			num_bblks);
43 STATIC void
44 xlog_dealloc_log(
45 	struct xlog		*log);
46 
47 /* local state machine functions */
48 STATIC void xlog_state_done_syncing(
49 	struct xlog_in_core	*iclog);
50 STATIC void xlog_state_do_callback(
51 	struct xlog		*log);
52 STATIC int
53 xlog_state_get_iclog_space(
54 	struct xlog		*log,
55 	struct xlog_write_data	*data);
56 STATIC void
57 xlog_sync(
58 	struct xlog		*log,
59 	struct xlog_in_core	*iclog,
60 	struct xlog_ticket	*ticket);
61 #if defined(DEBUG)
62 STATIC void
63 xlog_verify_iclog(
64 	struct xlog		*log,
65 	struct xlog_in_core	*iclog,
66 	int			count);
67 STATIC void
68 xlog_verify_tail_lsn(
69 	struct xlog		*log,
70 	struct xlog_in_core	*iclog);
71 #else
72 #define xlog_verify_iclog(a,b,c)
73 #define xlog_verify_tail_lsn(a,b)
74 #endif
75 
76 STATIC int
77 xlog_iclogs_empty(
78 	struct xlog		*log);
79 
80 static int
81 xfs_log_cover(struct xfs_mount *);
82 
83 static inline void
84 xlog_grant_sub_space(
85 	struct xlog_grant_head	*head,
86 	int64_t			bytes)
87 {
88 	atomic64_sub(bytes, &head->grant);
89 }
90 
91 static inline void
92 xlog_grant_add_space(
93 	struct xlog_grant_head	*head,
94 	int64_t			bytes)
95 {
96 	atomic64_add(bytes, &head->grant);
97 }
98 
99 static void
100 xlog_grant_head_init(
101 	struct xlog_grant_head	*head)
102 {
103 	atomic64_set(&head->grant, 0);
104 	INIT_LIST_HEAD(&head->waiters);
105 	spin_lock_init(&head->lock);
106 }
107 
108 void
109 xlog_grant_return_space(
110 	struct xlog	*log,
111 	xfs_lsn_t	old_head,
112 	xfs_lsn_t	new_head)
113 {
114 	int64_t		diff = xlog_lsn_sub(log, new_head, old_head);
115 
116 	xlog_grant_sub_space(&log->l_reserve_head, diff);
117 	xlog_grant_sub_space(&log->l_write_head, diff);
118 }
119 
120 /*
121  * Return the space in the log between the tail and the head.  In the case where
122  * we have overrun available reservation space, return 0. The memory barrier
123  * pairs with the smp_wmb() in xlog_cil_ail_insert() to ensure that grant head
124  * vs tail space updates are seen in the correct order and hence avoid
125  * transients as space is transferred from the grant heads to the AIL on commit
126  * completion.
127  */
128 static uint64_t
129 xlog_grant_space_left(
130 	struct xlog		*log,
131 	struct xlog_grant_head	*head)
132 {
133 	int64_t			free_bytes;
134 
135 	smp_rmb();	/* paired with smp_wmb in xlog_cil_ail_insert() */
136 	free_bytes = log->l_logsize - READ_ONCE(log->l_tail_space) -
137 			atomic64_read(&head->grant);
138 	if (free_bytes > 0)
139 		return free_bytes;
140 	return 0;
141 }
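
/*
 * A rough worked example with hypothetical numbers: for a 64 MiB log with
 * 16 MiB of tail space still owned by the AIL and 40 MiB already handed out
 * through this grant head, 64 - 16 - 40 = 8 MiB of grantable space remains.
 * If outstanding grants plus tail space transiently exceed l_logsize, the
 * clamp to zero simply stalls new reservations until the AIL hands space
 * back.
 */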
142 
143 STATIC void
144 xlog_grant_head_wake_all(
145 	struct xlog_grant_head	*head)
146 {
147 	struct xlog_ticket	*tic;
148 
149 	spin_lock(&head->lock);
150 	list_for_each_entry(tic, &head->waiters, t_queue)
151 		wake_up_process(tic->t_task);
152 	spin_unlock(&head->lock);
153 }
154 
155 static inline int
156 xlog_ticket_reservation(
157 	struct xlog		*log,
158 	struct xlog_grant_head	*head,
159 	struct xlog_ticket	*tic)
160 {
161 	if (head == &log->l_write_head) {
162 		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
163 		return tic->t_unit_res;
164 	}
165 
166 	if (tic->t_flags & XLOG_TIC_PERM_RESERV)
167 		return tic->t_unit_res * tic->t_cnt;
168 
169 	return tic->t_unit_res;
170 }
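
/*
 * A hypothetical example of the asymmetry above: a permanent ticket with
 * t_unit_res = 128 KiB and t_cnt = 8 needs 8 * 128 KiB = 1 MiB from the
 * reserve head (all rolls are paid for up front), but only a single 128 KiB
 * unit from the write head, because each roll regrants its write space
 * separately through xfs_log_regrant().
 */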
171 
172 STATIC bool
173 xlog_grant_head_wake(
174 	struct xlog		*log,
175 	struct xlog_grant_head	*head,
176 	int			*free_bytes)
177 {
178 	struct xlog_ticket	*tic;
179 	int			need_bytes;
180 
181 	list_for_each_entry(tic, &head->waiters, t_queue) {
182 		need_bytes = xlog_ticket_reservation(log, head, tic);
183 		if (*free_bytes < need_bytes)
184 			return false;
185 
186 		*free_bytes -= need_bytes;
187 		trace_xfs_log_grant_wake_up(log, tic);
188 		wake_up_process(tic->t_task);
189 	}
190 
191 	return true;
192 }
193 
194 STATIC int
195 xlog_grant_head_wait(
196 	struct xlog		*log,
197 	struct xlog_grant_head	*head,
198 	struct xlog_ticket	*tic,
199 	int			need_bytes) __releases(&head->lock)
200 					    __acquires(&head->lock)
201 {
202 	list_add_tail(&tic->t_queue, &head->waiters);
203 
204 	do {
205 		if (xlog_is_shutdown(log))
206 			goto shutdown;
207 
208 		__set_current_state(TASK_UNINTERRUPTIBLE);
209 		spin_unlock(&head->lock);
210 
211 		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
212 
213 		/* Push on the AIL to free up all the log space. */
214 		xfs_ail_push_all(log->l_ailp);
215 
216 		trace_xfs_log_grant_sleep(log, tic);
217 		schedule();
218 		trace_xfs_log_grant_wake(log, tic);
219 
220 		spin_lock(&head->lock);
221 		if (xlog_is_shutdown(log))
222 			goto shutdown;
223 	} while (xlog_grant_space_left(log, head) < need_bytes);
224 
225 	list_del_init(&tic->t_queue);
226 	return 0;
227 shutdown:
228 	list_del_init(&tic->t_queue);
229 	return -EIO;
230 }
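
/*
 * Note on the open-coded wait above: the task marks itself
 * TASK_UNINTERRUPTIBLE while still holding head->lock and is woken by
 * wake_up_process() from xlog_grant_head_wake() or
 * xlog_grant_head_wake_all(), which also run under head->lock. Setting the
 * task state before dropping the lock closes the window in which a wakeup
 * issued between the unlock and schedule() could otherwise be lost.
 */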
231 
232 /*
233  * Atomically get the log space required for a log ticket.
234  *
235  * Once a ticket gets put onto head->waiters, it will only return after the
236  * needed reservation is satisfied.
237  *
238  * This function is structured so that it has a lock free fast path. This is
239  * necessary because every new transaction reservation will come through this
240  * path. Hence any lock will be globally hot if we take it unconditionally on
241  * every pass.
242  *
243  * As tickets are only ever moved on and off head->waiters under head->lock, we
244  * only need to take that lock if we are going to add the ticket to the queue
245  * and sleep. We can avoid taking the lock if the ticket was never added to
246  * head->waiters because the t_queue list head will be empty and we hold the
247  * only reference to it so it can safely be checked unlocked.
248  */
249 STATIC int
250 xlog_grant_head_check(
251 	struct xlog		*log,
252 	struct xlog_grant_head	*head,
253 	struct xlog_ticket	*tic,
254 	int			*need_bytes)
255 {
256 	int			free_bytes;
257 	int			error = 0;
258 
259 	ASSERT(!xlog_in_recovery(log));
260 
261 	/*
262 	 * If there are other waiters on the queue then give them a chance at
263 	 * logspace before us.  Wake up the first waiters; if we do not wake
264 	 * up all the waiters then go to sleep waiting for more free space,
265 	 * otherwise try to get some space for this transaction.
266 	 */
267 	*need_bytes = xlog_ticket_reservation(log, head, tic);
268 	free_bytes = xlog_grant_space_left(log, head);
269 	if (!list_empty_careful(&head->waiters)) {
270 		spin_lock(&head->lock);
271 		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
272 		    free_bytes < *need_bytes) {
273 			error = xlog_grant_head_wait(log, head, tic,
274 						     *need_bytes);
275 		}
276 		spin_unlock(&head->lock);
277 	} else if (free_bytes < *need_bytes) {
278 		spin_lock(&head->lock);
279 		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
280 		spin_unlock(&head->lock);
281 	}
282 
283 	return error;
284 }
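
/*
 * Both xfs_log_reserve() (against l_reserve_head) and xfs_log_regrant()
 * (against l_write_head) funnel through xlog_grant_head_check(), so the
 * lock free fast path above is what every transaction reservation takes
 * when the log has space and nobody is queued.
 */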
285 
286 bool
287 xfs_log_writable(
288 	struct xfs_mount	*mp)
289 {
290 	/*
291 	 * Do not write to the log on norecovery mounts, if the data or log
292 	 * devices are read-only, or if the filesystem is shutdown. Read-only
293 	 * mounts allow internal writes for log recovery and unmount purposes,
294 	 * so don't restrict that case.
295 	 */
296 	if (xfs_has_norecovery(mp))
297 		return false;
298 	if (xfs_readonly_buftarg(mp->m_ddev_targp))
299 		return false;
300 	if (xfs_readonly_buftarg(mp->m_log->l_targ))
301 		return false;
302 	if (xlog_is_shutdown(mp->m_log))
303 		return false;
304 	return true;
305 }
306 
307 /*
308  * Replenish the byte reservation required by moving the grant write head.
309  */
310 int
311 xfs_log_regrant(
312 	struct xfs_mount	*mp,
313 	struct xlog_ticket	*tic)
314 {
315 	struct xlog		*log = mp->m_log;
316 	int			need_bytes;
317 	int			error = 0;
318 
319 	if (xlog_is_shutdown(log))
320 		return -EIO;
321 
322 	XFS_STATS_INC(mp, xs_try_logspace);
323 
324 	/*
325 	 * This is a new transaction on the ticket, so we need to change the
326 	 * transaction ID so that the next transaction has a different TID in
327 	 * the log. Just add one to the existing tid so that we can see chains
328 	 * of rolling transactions in the log easily.
329 	 */
330 	tic->t_tid++;
331 	tic->t_curr_res = tic->t_unit_res;
332 	if (tic->t_cnt > 0)
333 		return 0;
334 
335 	trace_xfs_log_regrant(log, tic);
336 
337 	error = xlog_grant_head_check(log, &log->l_write_head, tic,
338 				      &need_bytes);
339 	if (error)
340 		goto out_error;
341 
342 	xlog_grant_add_space(&log->l_write_head, need_bytes);
343 	trace_xfs_log_regrant_exit(log, tic);
344 	return 0;
345 
346 out_error:
347 	/*
348 	 * If we are failing, make sure the ticket doesn't have any current
349 	 * reservations.  We don't want to add this back when the ticket/
350 	 * transaction gets cancelled.
351 	 */
352 	tic->t_curr_res = 0;
353 	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
354 	return error;
355 }
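
/*
 * A minimal sketch of the intended caller, assuming the usual rolling
 * transaction sequence (not a verbatim copy of that code): a permanent
 * ticket is reserved once and then regranted on every roll, e.g.
 *
 *	error = xfs_log_reserve(mp, unit_bytes, cnt, &tic, true);
 *	...					(commit the first part)
 *	error = xfs_log_regrant(mp, tic);	(start the next roll)
 *
 * Each regrant bumps t_tid and refreshes t_curr_res; only once t_cnt has
 * been used up does it need to take new write grant space.
 */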
356 
357 /*
358  * Reserve log space and return a ticket corresponding to the reservation.
359  *
360  * Each reservation is going to reserve extra space for a log record header.
361  * When writes happen to the on-disk log, we don't subtract the length of the
362  * log record header from any reservation.  By wasting space in each
363  * reservation, we prevent over allocation problems.
364  */
365 int
366 xfs_log_reserve(
367 	struct xfs_mount	*mp,
368 	int			unit_bytes,
369 	int			cnt,
370 	struct xlog_ticket	**ticp,
371 	bool			permanent)
372 {
373 	struct xlog		*log = mp->m_log;
374 	struct xlog_ticket	*tic;
375 	int			need_bytes;
376 	int			error = 0;
377 
378 	if (xlog_is_shutdown(log))
379 		return -EIO;
380 
381 	XFS_STATS_INC(mp, xs_try_logspace);
382 
383 	ASSERT(*ticp == NULL);
384 	tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
385 	*ticp = tic;
386 	trace_xfs_log_reserve(log, tic);
387 	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
388 				      &need_bytes);
389 	if (error)
390 		goto out_error;
391 
392 	xlog_grant_add_space(&log->l_reserve_head, need_bytes);
393 	xlog_grant_add_space(&log->l_write_head, need_bytes);
394 	trace_xfs_log_reserve_exit(log, tic);
395 	return 0;
396 
397 out_error:
398 	/*
399 	 * If we are failing, make sure the ticket doesn't have any current
400 	 * reservations.  We don't want to add this back when the ticket/
401 	 * transaction gets cancelled.
402 	 */
403 	tic->t_curr_res = 0;
404 	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
405 	return error;
406 }
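
/*
 * A minimal usage sketch, assuming a standalone (non-transaction) caller
 * along the lines of the unmount record path later in this file:
 *
 *	struct xlog_ticket	*tic = NULL;
 *
 *	error = xfs_log_reserve(mp, unit_bytes, 1, &tic, false);
 *	if (!error) {
 *		... write log vectors against the ticket ...
 *		xfs_log_ticket_ungrant(log, tic);
 *	}
 *
 * Normal transactions reach here indirectly through xfs_trans_reserve().
 */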
407 
408 /*
409  * Run all the pending iclog callbacks and wake log force waiters and iclog
410  * space waiters so they can process the newly set shutdown state. We really
411  * don't care what order we process callbacks here because the log is shut down
412  * and so state cannot change on disk anymore. However, we cannot wake waiters
413  * until the callbacks have been processed because we may be in unmount and
414  * we must ensure that all AIL operations the callbacks perform have completed
415  * before we tear down the AIL.
416  *
417  * We avoid processing actively referenced iclogs so that we don't run callbacks
418  * while the iclog owner might still be preparing the iclog for IO submission.
419  * These will be caught by xlog_state_release_iclog() and call this function
420  * again to process any callbacks that may have been added to that iclog.
421  */
422 static void
423 xlog_state_shutdown_callbacks(
424 	struct xlog		*log)
425 {
426 	struct xlog_in_core	*iclog;
427 	LIST_HEAD(cb_list);
428 
429 	iclog = log->l_iclog;
430 	do {
431 		if (atomic_read(&iclog->ic_refcnt)) {
432 			/* Reference holder will re-run iclog callbacks. */
433 			continue;
434 		}
435 		list_splice_init(&iclog->ic_callbacks, &cb_list);
436 		spin_unlock(&log->l_icloglock);
437 
438 		xlog_cil_process_committed(&cb_list);
439 
440 		spin_lock(&log->l_icloglock);
441 		wake_up_all(&iclog->ic_write_wait);
442 		wake_up_all(&iclog->ic_force_wait);
443 	} while ((iclog = iclog->ic_next) != log->l_iclog);
444 
445 	wake_up_all(&log->l_flush_wait);
446 }
447 
448 /*
449  * Flush iclog to disk if this is the last reference to the given iclog and the
450  * it is in the WANT_SYNC state.
451  *
452  * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
453  * log tail is updated correctly. NEED_FUA indicates that the iclog will be
454  * written to stable storage, and implies that a commit record is contained
455  * within the iclog. We need to ensure that the log tail does not move beyond
456  * the tail that the first commit record in the iclog ordered against, otherwise
457  * correct recovery of that checkpoint becomes dependent on future operations
458  * performed on this iclog.
459  *
460  * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
461  * current tail into iclog. Once the iclog tail is set, future operations must
462  * not modify it, otherwise they potentially violate ordering constraints for
463  * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
464  * the iclog will get zeroed on activation of the iclog after sync, so we
465  * always capture the tail lsn on the iclog on the first NEED_FUA release
466  * regardless of the number of active reference counts on this iclog.
467  */
468 int
469 xlog_state_release_iclog(
470 	struct xlog		*log,
471 	struct xlog_in_core	*iclog,
472 	struct xlog_ticket	*ticket)
473 {
474 	bool			last_ref;
475 
476 	lockdep_assert_held(&log->l_icloglock);
477 
478 	trace_xlog_iclog_release(iclog, _RET_IP_);
479 	/*
480 	 * Grabbing the current log tail needs to be atomic w.r.t. the writing
481 	 * of the tail LSN into the iclog so we guarantee that the log tail does
482 	 * not move between the first time we know that the iclog needs to be
483 	 * made stable and when we eventually submit it.
484 	 */
485 	if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
486 	     (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
487 	    !iclog->ic_header->h_tail_lsn) {
488 		iclog->ic_header->h_tail_lsn =
489 				cpu_to_be64(atomic64_read(&log->l_tail_lsn));
490 	}
491 
492 	last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
493 
494 	if (xlog_is_shutdown(log)) {
495 		/*
496 		 * If there are no more references to this iclog, process the
497 		 * pending iclog callbacks that were waiting on the release of
498 		 * this iclog.
499 		 */
500 		if (last_ref)
501 			xlog_state_shutdown_callbacks(log);
502 		return -EIO;
503 	}
504 
505 	if (!last_ref)
506 		return 0;
507 
508 	if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
509 		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
510 		return 0;
511 	}
512 
513 	iclog->ic_state = XLOG_STATE_SYNCING;
514 	xlog_verify_tail_lsn(log, iclog);
515 	trace_xlog_iclog_syncing(iclog, _RET_IP_);
516 
517 	spin_unlock(&log->l_icloglock);
518 	xlog_sync(log, iclog, ticket);
519 	spin_lock(&log->l_icloglock);
520 	return 0;
521 }
522 
523 /*
524  * Mount a log filesystem
525  *
526  * mp		- ubiquitous xfs mount point structure
527  * log_target	- buftarg of on-disk log device
528  * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
529  * num_bblocks	- Number of BBSIZE blocks in on-disk log
530  *
531  * Return error or zero.
532  */
533 int
534 xfs_log_mount(
535 	xfs_mount_t		*mp,
536 	struct xfs_buftarg	*log_target,
537 	xfs_daddr_t		blk_offset,
538 	int			num_bblks)
539 {
540 	struct xlog		*log;
541 	int			error = 0;
542 	int			min_logfsbs;
543 
544 	if (!xfs_has_norecovery(mp)) {
545 		xfs_notice(mp, "Mounting V%d Filesystem %pU",
546 			   XFS_SB_VERSION_NUM(&mp->m_sb),
547 			   &mp->m_sb.sb_uuid);
548 	} else {
549 		xfs_notice(mp,
550 "Mounting V%d filesystem %pU in no-recovery mode. Filesystem will be inconsistent.",
551 			   XFS_SB_VERSION_NUM(&mp->m_sb),
552 			   &mp->m_sb.sb_uuid);
553 		ASSERT(xfs_is_readonly(mp));
554 	}
555 
556 	log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
557 	if (IS_ERR(log)) {
558 		error = PTR_ERR(log);
559 		goto out;
560 	}
561 	mp->m_log = log;
562 
563 	/*
564 	 * Now that we have set up the log and its internal geometry
565 	 * parameters, we can validate the given log space and drop a critical
566 	 * message via syslog if the log size is too small. A log that is too
567 	 * small can lead to unexpected situations in transaction log space
568 	 * reservation stage. The superblock verifier has already validated all
569 	 * the other log geometry constraints, so we don't have to check those
570 	 * here.
571 	 *
572 	 * Note: For v4 filesystems, we can't just reject the mount if the
573 	 * validation fails.  This would mean that people would have to
574 	 * downgrade their kernel just to remedy the situation as there is no
575 	 * way to grow the log (short of black magic surgery with xfs_db).
576 	 *
577 	 * We can, however, reject mounts for V5 format filesystems, as the
578 	 * mkfs binary being used to make the filesystem should never create a
579 	 * filesystem with a log that is too small.
580 	 */
581 	min_logfsbs = xfs_log_calc_minimum_size(mp);
582 	if (mp->m_sb.sb_logblocks < min_logfsbs) {
583 		xfs_warn(mp,
584 		"Log size %d blocks too small, minimum size is %d blocks",
585 			 mp->m_sb.sb_logblocks, min_logfsbs);
586 
587 		/*
588 		 * Log check errors are always fatal on v5; or whenever bad
589 		 * metadata leads to a crash.
590 		 */
591 		if (xfs_has_crc(mp)) {
592 			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
593 			ASSERT(0);
594 			error = -EINVAL;
595 			goto out_free_log;
596 		}
597 		xfs_crit(mp, "Log size out of supported range.");
598 		xfs_crit(mp,
599 "Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
600 	}
601 
602 	/*
603 	 * Initialize the AIL now we have a log.
604 	 */
605 	error = xfs_trans_ail_init(mp);
606 	if (error) {
607 		xfs_warn(mp, "AIL initialisation failed: error %d", error);
608 		goto out_free_log;
609 	}
610 	log->l_ailp = mp->m_ail;
611 
612 	/*
613 	 * skip log recovery on a norecovery mount.  pretend it all
614 	 * just worked.
615 	 */
616 	if (!xfs_has_norecovery(mp)) {
617 		error = xlog_recover(log);
618 		if (error) {
619 			xfs_warn(mp, "log mount/recovery failed: error %d",
620 				error);
621 			xlog_recover_cancel(log);
622 			goto out_destroy_ail;
623 		}
624 	}
625 
626 	error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
627 			       "log");
628 	if (error)
629 		goto out_destroy_ail;
630 
631 	/* Normal transactions can now occur */
632 	clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
633 
634 	/*
635 	 * Now the log has been fully initialised and we know where our
636 	 * space grant counters are, we can initialise the permanent ticket
637 	 * needed for delayed logging to work.
638 	 */
639 	xlog_cil_init_post_recovery(log);
640 
641 	return 0;
642 
643 out_destroy_ail:
644 	xfs_trans_ail_destroy(mp);
645 out_free_log:
646 	xlog_dealloc_log(log);
647 out:
648 	return error;
649 }
650 
651 /*
652  * Finish the recovery of the file system.  This is separate from the
653  * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
654  * in the root and real-time bitmap inodes between calling xfs_log_mount() and
655  * here.
656  *
657  * If we finish recovery successfully, start the background log work. If we are
658  * not doing recovery, then we have a RO filesystem and we don't need to start
659  * it.
660  */
661 int
662 xfs_log_mount_finish(
663 	struct xfs_mount	*mp)
664 {
665 	struct xlog		*log = mp->m_log;
666 	int			error = 0;
667 
668 	if (xfs_has_norecovery(mp)) {
669 		ASSERT(xfs_is_readonly(mp));
670 		return 0;
671 	}
672 
673 	/*
674 	 * During the second phase of log recovery, we need iget and
675 	 * iput to behave like they do for an active filesystem.
676 	 * xfs_fs_drop_inode needs to be able to prevent the deletion
677 	 * of inodes before we're done replaying log items on those
678 	 * inodes.  Turn it off immediately after recovery finishes
679 	 * so that we don't leak the quota inodes if subsequent mount
680 	 * activities fail.
681 	 *
682 	 * We let all inodes involved in redo item processing end up on
683 	 * the LRU instead of being evicted immediately so that if we do
684 	 * something to an unlinked inode, the irele won't cause
685 	 * premature truncation and freeing of the inode, which results
686 	 * in log recovery failure.  We have to evict the unreferenced
687 	 * lru inodes after clearing SB_ACTIVE because we don't
688 	 * otherwise clean up the lru if there's a subsequent failure in
689 	 * xfs_mountfs, which leads to us leaking the inodes if nothing
690 	 * else (e.g. quotacheck) references the inodes before the
691 	 * mount failure occurs.
692 	 */
693 	mp->m_super->s_flags |= SB_ACTIVE;
694 	xfs_log_work_queue(mp);
695 	if (xlog_recovery_needed(log))
696 		error = xlog_recover_finish(log);
697 	mp->m_super->s_flags &= ~SB_ACTIVE;
698 	evict_inodes(mp->m_super);
699 
700 	/*
701 	 * Drain the buffer LRU after log recovery. This is required for v4
702 	 * filesystems to avoid leaving around buffers with NULL verifier ops,
703 	 * but we do it unconditionally to make sure we're always in a clean
704 	 * cache state after mount.
705 	 *
706 	 * Don't push in the error case because the AIL may have pending intents
707 	 * that aren't removed until recovery is cancelled.
708 	 */
709 	if (xlog_recovery_needed(log)) {
710 		if (!error) {
711 			xfs_log_force(mp, XFS_LOG_SYNC);
712 			xfs_ail_push_all_sync(mp->m_ail);
713 		}
714 		xfs_notice(mp, "Ending recovery (logdev: %s)",
715 				mp->m_logname ? mp->m_logname : "internal");
716 	} else {
717 		xfs_info(mp, "Ending clean mount");
718 	}
719 	xfs_buftarg_drain(mp->m_ddev_targp);
720 
721 	clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
722 
723 	/* Make sure the log is dead if we're returning failure. */
724 	ASSERT(!error || xlog_is_shutdown(log));
725 
726 	return error;
727 }
728 
729 /*
730  * The mount has failed. Cancel the recovery if it hasn't completed and destroy
731  * the log.
732  */
733 void
734 xfs_log_mount_cancel(
735 	struct xfs_mount	*mp)
736 {
737 	xlog_recover_cancel(mp->m_log);
738 	xfs_log_unmount(mp);
739 }
740 
741 /*
742  * Flush out the iclog to disk ensuring that device caches are flushed and
743  * the iclog hits stable storage before any completion waiters are woken.
744  */
745 static inline int
746 xlog_force_iclog(
747 	struct xlog_in_core	*iclog)
748 {
749 	atomic_inc(&iclog->ic_refcnt);
750 	iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
751 	if (iclog->ic_state == XLOG_STATE_ACTIVE)
752 		xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
753 	return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
754 }
755 
756 /*
757  * Cycle all the iclogbuf locks to make sure all log IO completion
758  * is done before we tear down these buffers.
759  */
760 static void
761 xlog_wait_iclog_completion(struct xlog *log)
762 {
763 	int		i;
764 	struct xlog_in_core	*iclog = log->l_iclog;
765 
766 	for (i = 0; i < log->l_iclog_bufs; i++) {
767 		down(&iclog->ic_sema);
768 		up(&iclog->ic_sema);
769 		iclog = iclog->ic_next;
770 	}
771 }
772 
773 /*
774  * Wait for the iclog and all prior iclogs to be written to disk as required by the
775  * log force state machine. Waiting on ic_force_wait ensures iclog completions
776  * have been ordered and callbacks run before we are woken here, hence
777  * guaranteeing that all the iclogs up to this one are on stable storage.
778  */
779 int
780 xlog_wait_on_iclog(
781 	struct xlog_in_core	*iclog)
782 		__releases(iclog->ic_log->l_icloglock)
783 {
784 	struct xlog		*log = iclog->ic_log;
785 
786 	trace_xlog_iclog_wait_on(iclog, _RET_IP_);
787 	if (!xlog_is_shutdown(log) &&
788 	    iclog->ic_state != XLOG_STATE_ACTIVE &&
789 	    iclog->ic_state != XLOG_STATE_DIRTY) {
790 		XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
791 		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
792 	} else {
793 		spin_unlock(&log->l_icloglock);
794 	}
795 
796 	if (xlog_is_shutdown(log))
797 		return -EIO;
798 	return 0;
799 }
800 
801 int
802 xlog_write_one_vec(
803 	struct xlog		*log,
804 	struct xfs_cil_ctx	*ctx,
805 	struct xfs_log_iovec	*reg,
806 	struct xlog_ticket	*ticket)
807 {
808 	struct xfs_log_vec	lv = {
809 		.lv_niovecs	= 1,
810 		.lv_iovecp	= reg,
811 		.lv_bytes	= reg->i_len,
812 	};
813 	LIST_HEAD		(lv_chain);
814 
815 	/* account for space used by record data */
816 	ticket->t_curr_res -= lv.lv_bytes;
817 
818 	list_add(&lv.lv_list, &lv_chain);
819 	return xlog_write(log, ctx, &lv_chain, ticket, lv.lv_bytes);
820 }
821 
822 /*
823  * Write out an unmount record using the ticket provided. We have to account for
824  * the data space used in the unmount ticket as this write is not done from a
825  * transaction context that has already done the accounting for us.
826  */
827 static int
828 xlog_write_unmount_record(
829 	struct xlog		*log,
830 	struct xlog_ticket	*ticket)
831 {
832 	struct  {
833 		struct xlog_op_header ophdr;
834 		struct xfs_unmount_log_format ulf;
835 	} unmount_rec = {
836 		.ophdr = {
837 			.oh_clientid = XFS_LOG,
838 			.oh_tid = cpu_to_be32(ticket->t_tid),
839 			.oh_flags = XLOG_UNMOUNT_TRANS,
840 		},
841 		.ulf = {
842 			.magic = XLOG_UNMOUNT_TYPE,
843 		},
844 	};
845 	struct xfs_log_iovec reg = {
846 		.i_addr = &unmount_rec,
847 		.i_len = sizeof(unmount_rec),
848 		.i_type = XLOG_REG_TYPE_UNMOUNT,
849 	};
850 
851 	return xlog_write_one_vec(log, NULL, &reg, ticket);
852 }
853 
854 /*
855  * Mark the filesystem clean by writing an unmount record to the head of the
856  * log.
857  */
858 static void
859 xlog_unmount_write(
860 	struct xlog		*log)
861 {
862 	struct xfs_mount	*mp = log->l_mp;
863 	struct xlog_in_core	*iclog;
864 	struct xlog_ticket	*tic = NULL;
865 	int			error;
866 
867 	error = xfs_log_reserve(mp, 600, 1, &tic, 0);
868 	if (error)
869 		goto out_err;
870 
871 	error = xlog_write_unmount_record(log, tic);
872 	/*
873 	 * At this point, we're unmounting anyway, so there's no point in
874 	 * transitioning log state to shutdown. Just continue...
875 	 */
876 out_err:
877 	if (error)
878 		xfs_alert(mp, "%s: unmount record failed", __func__);
879 
880 	spin_lock(&log->l_icloglock);
881 	iclog = log->l_iclog;
882 	error = xlog_force_iclog(iclog);
883 	xlog_wait_on_iclog(iclog);
884 
885 	if (tic) {
886 		trace_xfs_log_umount_write(log, tic);
887 		xfs_log_ticket_ungrant(log, tic);
888 	}
889 }
890 
891 static void
892 xfs_log_unmount_verify_iclog(
893 	struct xlog		*log)
894 {
895 	struct xlog_in_core	*iclog = log->l_iclog;
896 
897 	do {
898 		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
899 		ASSERT(iclog->ic_offset == 0);
900 	} while ((iclog = iclog->ic_next) != log->l_iclog);
901 }
902 
903 /*
904  * Unmount record used to have a string "Unmount filesystem--" in the
905  * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
906  * We just write the magic number now since that particular field isn't
907  * currently architecture converted and "Unmount" is a bit foo.
908  * As far as I know, there weren't any dependencies on the old behaviour.
909  */
910 static void
911 xfs_log_unmount_write(
912 	struct xfs_mount	*mp)
913 {
914 	struct xlog		*log = mp->m_log;
915 
916 	if (!xfs_log_writable(mp))
917 		return;
918 
919 	xfs_log_force(mp, XFS_LOG_SYNC);
920 
921 	if (xlog_is_shutdown(log))
922 		return;
923 
924 	/*
925 	 * If we think the summary counters are bad, avoid writing the unmount
926 	 * record to force log recovery at next mount, after which the summary
927 	 * counters will be recalculated.  Refer to xlog_check_unmount_rec for
928 	 * more details.
929 	 */
930 	if (xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS) ||
931 	    XFS_TEST_ERROR(mp, XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
932 		xfs_alert(mp, "%s: will fix summary counters at next mount",
933 				__func__);
934 		return;
935 	}
936 
937 	xfs_log_unmount_verify_iclog(log);
938 	xlog_unmount_write(log);
939 }
940 
941 /*
942  * Empty the log for unmount/freeze.
943  *
944  * To do this, we first need to shut down the background log work so it is not
945  * trying to cover the log as we clean up. We then need to unpin all objects in
946  * the log so we can then flush them out. Once they have completed their IO and
947  * run the callbacks removing themselves from the AIL, we can cover the log.
948  */
949 int
950 xfs_log_quiesce(
951 	struct xfs_mount	*mp)
952 {
953 	/*
954 	 * Clear log incompat features since we're quiescing the log.  Report
955 	 * failures, though it's not fatal to have a higher log feature
956 	 * protection level than the log contents actually require.
957 	 */
958 	if (xfs_clear_incompat_log_features(mp)) {
959 		int error;
960 
961 		error = xfs_sync_sb(mp, false);
962 		if (error)
963 			xfs_warn(mp,
964 	"Failed to clear log incompat features on quiesce");
965 	}
966 
967 	cancel_delayed_work_sync(&mp->m_log->l_work);
968 	xfs_log_force(mp, XFS_LOG_SYNC);
969 
970 	/*
971 	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
972 	 * will push it, xfs_buftarg_wait() will not wait for it. Further,
973 	 * xfs_buf_iowait() cannot be used because it was pushed with the
974 	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
975 	 * the IO to complete.
976 	 */
977 	xfs_ail_push_all_sync(mp->m_ail);
978 	xfs_buftarg_wait(mp->m_ddev_targp);
979 	xfs_buf_lock(mp->m_sb_bp);
980 	xfs_buf_unlock(mp->m_sb_bp);
981 
982 	return xfs_log_cover(mp);
983 }
984 
985 void
986 xfs_log_clean(
987 	struct xfs_mount	*mp)
988 {
989 	xfs_log_quiesce(mp);
990 	xfs_log_unmount_write(mp);
991 }
992 
993 /*
994  * Shut down and release the AIL and Log.
995  *
996  * During unmount, we need to ensure we flush all the dirty metadata objects
997  * from the AIL so that the log is empty before we write the unmount record to
998  * the log. Once this is done, we can tear down the AIL and the log.
999  */
1000 void
1001 xfs_log_unmount(
1002 	struct xfs_mount	*mp)
1003 {
1004 	xfs_log_clean(mp);
1005 
1006 	/*
1007 	 * If shutdown has come from iclog IO context, the log
1008 	 * cleaning will have been skipped and so we need to wait
1009 	 * for the iclog to complete shutdown processing before we
1010 	 * tear anything down.
1011 	 */
1012 	xlog_wait_iclog_completion(mp->m_log);
1013 
1014 	xfs_buftarg_drain(mp->m_ddev_targp);
1015 
1016 	xfs_trans_ail_destroy(mp);
1017 
1018 	xfs_sysfs_del(&mp->m_log->l_kobj);
1019 
1020 	xlog_dealloc_log(mp->m_log);
1021 }
1022 
1023 void
1024 xfs_log_item_init(
1025 	struct xfs_mount	*mp,
1026 	struct xfs_log_item	*item,
1027 	int			type,
1028 	const struct xfs_item_ops *ops)
1029 {
1030 	item->li_log = mp->m_log;
1031 	item->li_ailp = mp->m_ail;
1032 	item->li_type = type;
1033 	item->li_ops = ops;
1034 	item->li_lv = NULL;
1035 
1036 	INIT_LIST_HEAD(&item->li_ail);
1037 	INIT_LIST_HEAD(&item->li_cil);
1038 	INIT_LIST_HEAD(&item->li_bio_list);
1039 	INIT_LIST_HEAD(&item->li_trans);
1040 }
1041 
1042 /*
1043  * Wake up processes waiting for log space after we have moved the log tail.
1044  */
1045 void
1046 xfs_log_space_wake(
1047 	struct xfs_mount	*mp)
1048 {
1049 	struct xlog		*log = mp->m_log;
1050 	int			free_bytes;
1051 
1052 	if (xlog_is_shutdown(log))
1053 		return;
1054 
1055 	if (!list_empty_careful(&log->l_write_head.waiters)) {
1056 		ASSERT(!xlog_in_recovery(log));
1057 
1058 		spin_lock(&log->l_write_head.lock);
1059 		free_bytes = xlog_grant_space_left(log, &log->l_write_head);
1060 		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
1061 		spin_unlock(&log->l_write_head.lock);
1062 	}
1063 
1064 	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
1065 		ASSERT(!xlog_in_recovery(log));
1066 
1067 		spin_lock(&log->l_reserve_head.lock);
1068 		free_bytes = xlog_grant_space_left(log, &log->l_reserve_head);
1069 		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
1070 		spin_unlock(&log->l_reserve_head.lock);
1071 	}
1072 }
1073 
1074 /*
1075  * Determine if we have a transaction that has gone to disk that needs to be
1076  * covered. To begin the transition to the idle state firstly the log needs to
1077  * be idle. That means the CIL, the AIL and the iclogs need to be empty before
1078  * we start attempting to cover the log.
1079  *
1080  * Only if we are then in a state where covering is needed, the caller is
1081  * informed that dummy transactions are required to move the log into the idle
1082  * state.
1083  *
1084  * If there are any items in the AIL or CIL, then we do not want to attempt to
1085  * cover the log as we may be in a situation where there isn't log space
1086  * available to run a dummy transaction and this can lead to deadlocks when the
1087  * tail of the log is pinned by an item that is modified in the CIL.  Hence
1088  * there's no point in running a dummy transaction at this point because we
1089  * can't start trying to idle the log until both the CIL and AIL are empty.
1090  */
1091 static bool
1092 xfs_log_need_covered(
1093 	struct xfs_mount	*mp)
1094 {
1095 	struct xlog		*log = mp->m_log;
1096 	bool			needed = false;
1097 
1098 	if (!xlog_cil_empty(log))
1099 		return false;
1100 
1101 	spin_lock(&log->l_icloglock);
1102 	switch (log->l_covered_state) {
1103 	case XLOG_STATE_COVER_DONE:
1104 	case XLOG_STATE_COVER_DONE2:
1105 	case XLOG_STATE_COVER_IDLE:
1106 		break;
1107 	case XLOG_STATE_COVER_NEED:
1108 	case XLOG_STATE_COVER_NEED2:
1109 		if (xfs_ail_min_lsn(log->l_ailp))
1110 			break;
1111 		if (!xlog_iclogs_empty(log))
1112 			break;
1113 
1114 		needed = true;
1115 		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
1116 			log->l_covered_state = XLOG_STATE_COVER_DONE;
1117 		else
1118 			log->l_covered_state = XLOG_STATE_COVER_DONE2;
1119 		break;
1120 	default:
1121 		needed = true;
1122 		break;
1123 	}
1124 	spin_unlock(&log->l_icloglock);
1125 	return needed;
1126 }
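
/*
 * Roughly, the covering state machine walks IDLE -> NEED -> DONE -> NEED2 ->
 * DONE2 -> IDLE: this function advances NEED/NEED2 to DONE/DONE2 when a
 * dummy superblock commit is about to be logged, and iclog activation
 * elsewhere moves DONE to NEED2 and DONE2 back to IDLE once those records
 * reach the disk. Two passes are required so that the tail LSN stamped by
 * the first dummy record is itself covered by the second.
 */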
1127 
1128 /*
1129  * Explicitly cover the log. This is similar to background log covering but
1130  * intended for usage in quiesce codepaths. The caller is responsible to ensure
1131  * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1132  * must all be empty.
1133  */
1134 static int
1135 xfs_log_cover(
1136 	struct xfs_mount	*mp)
1137 {
1138 	int			error = 0;
1139 	bool			need_covered;
1140 
1141 	if (!xlog_is_shutdown(mp->m_log)) {
1142 		ASSERT(xlog_cil_empty(mp->m_log));
1143 		ASSERT(xlog_iclogs_empty(mp->m_log));
1144 		ASSERT(!xfs_ail_min_lsn(mp->m_log->l_ailp));
1145 	}
1146 
1147 	if (!xfs_log_writable(mp))
1148 		return 0;
1149 
1150 	/*
1151 	 * xfs_log_need_covered() is not idempotent because it progresses the
1152 	 * state machine if the log requires covering. Therefore, we must call
1153 	 * this function once and use the result until we've issued an sb sync.
1154 	 * Do so first to make that abundantly clear.
1155 	 *
1156 	 * Fall into the covering sequence if the log needs covering or the
1157 	 * mount has lazy superblock accounting to sync to disk. The sb sync
1158 	 * used for covering accumulates the in-core counters, so covering
1159 	 * handles this for us.
1160 	 */
1161 	need_covered = xfs_log_need_covered(mp);
1162 	if (!need_covered && !xfs_has_lazysbcount(mp))
1163 		return 0;
1164 
1165 	/*
1166 	 * To cover the log, commit the superblock twice (at most) in
1167 	 * independent checkpoints. The first serves as a reference for the
1168 	 * tail pointer. The sync transaction and AIL push empties the AIL and
1169 	 * updates the in-core tail to the LSN of the first checkpoint. The
1170 	 * second commit updates the on-disk tail with the in-core LSN,
1171 	 * covering the log. Push the AIL one more time to leave it empty, as
1172 	 * we found it.
1173 	 */
1174 	do {
1175 		error = xfs_sync_sb(mp, true);
1176 		if (error)
1177 			break;
1178 		xfs_ail_push_all_sync(mp->m_ail);
1179 	} while (xfs_log_need_covered(mp));
1180 
1181 	return error;
1182 }
1183 
1184 static void
1185 xlog_ioend_work(
1186 	struct work_struct	*work)
1187 {
1188 	struct xlog_in_core     *iclog =
1189 		container_of(work, struct xlog_in_core, ic_end_io_work);
1190 	struct xlog		*log = iclog->ic_log;
1191 	int			error;
1192 
1193 	error = blk_status_to_errno(iclog->ic_bio.bi_status);
1194 #ifdef DEBUG
1195 	/* treat writes with injected CRC errors as failed */
1196 	if (iclog->ic_fail_crc)
1197 		error = -EIO;
1198 #endif
1199 
1200 	/*
1201 	 * Race to shutdown the filesystem if we see an error.
1202 	 */
1203 	if (error || XFS_TEST_ERROR(log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
1204 		xfs_alert(log->l_mp, "log I/O error %d", error);
1205 		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1206 	}
1207 
1208 	xlog_state_done_syncing(iclog);
1209 	bio_uninit(&iclog->ic_bio);
1210 
1211 	/*
1212 	 * Drop the lock to signal that we are done. Nothing references the
1213 	 * iclog after this, so an unmount waiting on this lock can now tear it
1214 	 * down safely. As such, it is unsafe to reference the iclog after the
1215 	 * unlock as we could race with it being freed.
1216 	 */
1217 	up(&iclog->ic_sema);
1218 }
1219 
1220 /*
1221  * Return size of each in-core log record buffer.
1222  *
1223  * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
1224  *
1225  * If the filesystem blocksize is too large, we may need to choose a
1226  * larger size since the directory code currently logs entire blocks.
1227  */
1228 STATIC void
1229 xlog_get_iclog_buffer_size(
1230 	struct xfs_mount	*mp,
1231 	struct xlog		*log)
1232 {
1233 	if (mp->m_logbufs <= 0)
1234 		mp->m_logbufs = XLOG_MAX_ICLOGS;
1235 	if (mp->m_logbsize <= 0)
1236 		mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;
1237 
1238 	log->l_iclog_bufs = mp->m_logbufs;
1239 	log->l_iclog_size = mp->m_logbsize;
1240 
1241 	/*
1242 	 * Combined size of the log record headers.  The first 32k cycles
1243 	 * are stored directly in the xlog_rec_header, the rest in the
1244 	 * variable number of xlog_rec_ext_headers at its end.
1245 	 */
1246 	log->l_iclog_hsize = struct_size(log->l_iclog->ic_header, h_ext,
1247 		DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE) - 1);
1248 }
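
/*
 * Worked example of the header sizing above (hypothetical geometry): with
 * the default 32 KiB log buffers, DIV_ROUND_UP(32k, XLOG_HEADER_CYCLE_SIZE)
 * minus one is zero, so the base xlog_rec_header alone holds all the cycle
 * data; a 256 KiB buffer would need 256k / 32k - 1 = 7 extension headers,
 * each carrying the cycle words for another 32 KiB worth of 512 byte basic
 * blocks.
 */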
1249 
1250 void
1251 xfs_log_work_queue(
1252 	struct xfs_mount        *mp)
1253 {
1254 	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
1255 				msecs_to_jiffies(xfs_syncd_centisecs * 10));
1256 }
1257 
1258 /*
1259  * Clear the log incompat flags if we have the opportunity.
1260  *
1261  * This only happens if we're about to log the second dummy transaction as part
1262  * of covering the log.
1263  */
1264 static inline void
1265 xlog_clear_incompat(
1266 	struct xlog		*log)
1267 {
1268 	struct xfs_mount	*mp = log->l_mp;
1269 
1270 	if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
1271 				XFS_SB_FEAT_INCOMPAT_LOG_ALL))
1272 		return;
1273 
1274 	if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
1275 		return;
1276 
1277 	xfs_clear_incompat_log_features(mp);
1278 }
1279 
1280 /*
1281  * Every sync period we need to unpin all items in the AIL and push them to
1282  * disk. If there is nothing dirty, then we might need to cover the log to
1283  * indicate that the filesystem is idle.
1284  */
1285 static void
1286 xfs_log_worker(
1287 	struct work_struct	*work)
1288 {
1289 	struct xlog		*log = container_of(to_delayed_work(work),
1290 						struct xlog, l_work);
1291 	struct xfs_mount	*mp = log->l_mp;
1292 
1293 	/* dgc: errors ignored - not fatal and nowhere to report them */
1294 	if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
1295 		/*
1296 		 * Dump a transaction into the log that contains no real change.
1297 		 * This is needed to stamp the current tail LSN into the log
1298 		 * during the covering operation.
1299 		 *
1300 		 * We cannot use an inode here for this - that will push dirty
1301 		 * state back up into the VFS and then periodic inode flushing
1302 		 * will prevent log covering from making progress. Hence we
1303 		 * synchronously log the superblock instead to ensure the
1304 		 * superblock is immediately unpinned and can be written back.
1305 		 */
1306 		xlog_clear_incompat(log);
1307 		xfs_sync_sb(mp, true);
1308 	} else
1309 		xfs_log_force(mp, 0);
1310 
1311 	/* start pushing all the metadata that is currently dirty */
1312 	xfs_ail_push_all(mp->m_ail);
1313 
1314 	/* queue us up again */
1315 	xfs_log_work_queue(mp);
1316 }
1317 
1318 /*
1319  * This routine initializes some of the log structure for a given mount point.
1320  * Its primary purpose is to fill in enough, so recovery can occur.  However,
1321  * some other stuff may be filled in too.
1322  */
1323 STATIC struct xlog *
1324 xlog_alloc_log(
1325 	struct xfs_mount	*mp,
1326 	struct xfs_buftarg	*log_target,
1327 	xfs_daddr_t		blk_offset,
1328 	int			num_bblks)
1329 {
1330 	struct xlog		*log;
1331 	struct xlog_in_core	**iclogp;
1332 	struct xlog_in_core	*iclog, *prev_iclog = NULL;
1333 	int			i;
1334 	int			error = -ENOMEM;
1335 	uint			log2_size = 0;
1336 
1337 	log = kzalloc_obj(struct xlog, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1338 	if (!log) {
1339 		xfs_warn(mp, "Log allocation failed: No memory!");
1340 		goto out;
1341 	}
1342 
1343 	log->l_mp	   = mp;
1344 	log->l_targ	   = log_target;
1345 	log->l_logsize     = BBTOB(num_bblks);
1346 	log->l_logBBstart  = blk_offset;
1347 	log->l_logBBsize   = num_bblks;
1348 	log->l_covered_state = XLOG_STATE_COVER_IDLE;
1349 	set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
1350 	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
1351 	INIT_LIST_HEAD(&log->r_dfops);
1352 
1353 	log->l_prev_block  = -1;
1354 	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1355 	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1356 	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
1357 
1358 	if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
1359 		log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
1360 	else if (mp->m_sb.sb_logsectsize > 0)
1361 		log->l_iclog_roundoff = mp->m_sb.sb_logsectsize;
1362 	else
1363 		log->l_iclog_roundoff = BBSIZE;
1364 
1365 	xlog_grant_head_init(&log->l_reserve_head);
1366 	xlog_grant_head_init(&log->l_write_head);
1367 
1368 	error = -EFSCORRUPTED;
1369 	if (xfs_has_sector(mp)) {
1370 	        log2_size = mp->m_sb.sb_logsectlog;
1371 		if (log2_size < BBSHIFT) {
1372 			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
1373 				log2_size, BBSHIFT);
1374 			goto out_free_log;
1375 		}
1376 
1377 	        log2_size -= BBSHIFT;
1378 		if (log2_size > mp->m_sectbb_log) {
1379 			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
1380 				log2_size, mp->m_sectbb_log);
1381 			goto out_free_log;
1382 		}
1383 
1384 		/* for larger sector sizes, must have v2 or external log */
1385 		if (log2_size && log->l_logBBstart > 0 &&
1386 			    !xfs_has_logv2(mp)) {
1387 			xfs_warn(mp,
1388 		"log sector size (0x%x) invalid for configuration.",
1389 				log2_size);
1390 			goto out_free_log;
1391 		}
1392 	}
1393 	log->l_sectBBsize = 1 << log2_size;
1394 
1395 	xlog_get_iclog_buffer_size(mp, log);
1396 
1397 	spin_lock_init(&log->l_icloglock);
1398 	init_waitqueue_head(&log->l_flush_wait);
1399 
1400 	iclogp = &log->l_iclog;
1401 	ASSERT(log->l_iclog_size >= 4096);
1402 	for (i = 0; i < log->l_iclog_bufs; i++) {
1403 		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
1404 				sizeof(struct bio_vec);
1405 
1406 		iclog = kzalloc(sizeof(*iclog) + bvec_size,
1407 				GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1408 		if (!iclog)
1409 			goto out_free_iclog;
1410 
1411 		*iclogp = iclog;
1412 		iclog->ic_prev = prev_iclog;
1413 		prev_iclog = iclog;
1414 
1415 		iclog->ic_header = kvzalloc(log->l_iclog_size,
1416 				GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1417 		if (!iclog->ic_header)
1418 			goto out_free_iclog;
1419 		iclog->ic_header->h_magicno =
1420 			cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1421 		iclog->ic_header->h_version = cpu_to_be32(
1422 			xfs_has_logv2(log->l_mp) ? 2 : 1);
1423 		iclog->ic_header->h_size = cpu_to_be32(log->l_iclog_size);
1424 		iclog->ic_header->h_fmt = cpu_to_be32(XLOG_FMT);
1425 		memcpy(&iclog->ic_header->h_fs_uuid, &mp->m_sb.sb_uuid,
1426 			sizeof(iclog->ic_header->h_fs_uuid));
1427 
1428 		iclog->ic_datap = (void *)iclog->ic_header + log->l_iclog_hsize;
1429 		iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1430 		iclog->ic_state = XLOG_STATE_ACTIVE;
1431 		iclog->ic_log = log;
1432 		atomic_set(&iclog->ic_refcnt, 0);
1433 		INIT_LIST_HEAD(&iclog->ic_callbacks);
1434 
1435 		init_waitqueue_head(&iclog->ic_force_wait);
1436 		init_waitqueue_head(&iclog->ic_write_wait);
1437 		INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
1438 		sema_init(&iclog->ic_sema, 1);
1439 
1440 		iclogp = &iclog->ic_next;
1441 	}
1442 	*iclogp = log->l_iclog;			/* complete ring */
1443 	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */
1444 
1445 	log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
1446 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU),
1447 			0, mp->m_super->s_id);
1448 	if (!log->l_ioend_workqueue)
1449 		goto out_free_iclog;
1450 
1451 	error = xlog_cil_init(log);
1452 	if (error)
1453 		goto out_destroy_workqueue;
1454 	return log;
1455 
1456 out_destroy_workqueue:
1457 	destroy_workqueue(log->l_ioend_workqueue);
1458 out_free_iclog:
1459 	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1460 		prev_iclog = iclog->ic_next;
1461 		kvfree(iclog->ic_header);
1462 		kfree(iclog);
1463 		if (prev_iclog == log->l_iclog)
1464 			break;
1465 	}
1466 out_free_log:
1467 	kfree(log);
1468 out:
1469 	return ERR_PTR(error);
1470 }	/* xlog_alloc_log */
1471 
1472 /*
1473  * Stamp cycle number in every block
1474  */
1475 STATIC void
1476 xlog_pack_data(
1477 	struct xlog		*log,
1478 	struct xlog_in_core	*iclog,
1479 	int			roundoff)
1480 {
1481 	struct xlog_rec_header	*rhead = iclog->ic_header;
1482 	__be32			cycle_lsn = CYCLE_LSN_DISK(rhead->h_lsn);
1483 	char			*dp = iclog->ic_datap;
1484 	int			i;
1485 
1486 	for (i = 0; i < BTOBB(iclog->ic_offset + roundoff); i++) {
1487 		*xlog_cycle_data(rhead, i) = *(__be32 *)dp;
1488 		*(__be32 *)dp = cycle_lsn;
1489 		dp += BBSIZE;
1490 	}
1491 
1492 	for (i = 0; i < (log->l_iclog_hsize >> BBSHIFT) - 1; i++)
1493 		rhead->h_ext[i].xh_cycle = cycle_lsn;
1494 }
1495 
1496 /*
1497  * Calculate the checksum for a log buffer.
1498  *
1499  * This is a little more complicated than it should be because the various
1500  * headers and the actual data are non-contiguous.
1501  */
1502 __le32
1503 xlog_cksum(
1504 	struct xlog		*log,
1505 	struct xlog_rec_header	*rhead,
1506 	char			*dp,
1507 	unsigned int		hdrsize,
1508 	unsigned int		size)
1509 {
1510 	uint32_t		crc;
1511 
1512 	/* first generate the crc for the record header ... */
1513 	crc = xfs_start_cksum_update((char *)rhead, hdrsize,
1514 			      offsetof(struct xlog_rec_header, h_crc));
1515 
1516 	/* ... then for additional cycle data for v2 logs ... */
1517 	if (xfs_has_logv2(log->l_mp)) {
1518 		int		xheads, i;
1519 
1520 		xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE) - 1;
1521 		for (i = 0; i < xheads; i++)
1522 			crc = crc32c(crc, &rhead->h_ext[i], XLOG_REC_EXT_SIZE);
1523 	}
1524 
1525 	/* ... and finally for the payload */
1526 	crc = crc32c(crc, dp, size);
1527 
1528 	return xfs_end_cksum(crc);
1529 }
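
/*
 * Note on the layering above: xfs_start_cksum_update() seeds the CRC over
 * the record header while substituting zero for the bytes at h_crc, so the
 * stored checksum never covers itself. Log recovery is expected to recompute
 * the CRC the same way over header, extension headers and payload and
 * compare it against h_crc before trusting the record.
 */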
1530 
1531 static void
1532 xlog_bio_end_io(
1533 	struct bio		*bio)
1534 {
1535 	struct xlog_in_core	*iclog = bio->bi_private;
1536 
1537 	queue_work(iclog->ic_log->l_ioend_workqueue,
1538 		   &iclog->ic_end_io_work);
1539 }
1540 
1541 STATIC void
1542 xlog_write_iclog(
1543 	struct xlog		*log,
1544 	struct xlog_in_core	*iclog,
1545 	uint64_t		bno,
1546 	unsigned int		count)
1547 {
1548 	ASSERT(bno < log->l_logBBsize);
1549 	trace_xlog_iclog_write(iclog, _RET_IP_);
1550 
1551 	/*
1552 	 * We lock the iclogbufs here so that we can serialise against I/O
1553 	 * completion during unmount.  We might be processing a shutdown
1554 	 * triggered during unmount, and that can occur asynchronously to the
1555 	 * unmount thread, and hence we need to ensure that completes before
1556 	 * tearing down the iclogbufs.  Hence we need to hold the buffer lock
1557 	 * across the log IO to achieve that.
1558 	 */
1559 	down(&iclog->ic_sema);
1560 	if (xlog_is_shutdown(log)) {
1561 		/*
1562 		 * It would seem logical to return EIO here, but we rely on
1563 		 * the log state machine to propagate I/O errors instead of
1564 		 * doing it here.  We kick off the state machine and unlock
1565 		 * the buffer manually; the code needs to be kept in sync
1566 		 * with the I/O completion path.
1567 		 */
1568 		goto sync;
1569 	}
1570 
1571 	/*
1572 	 * We use REQ_SYNC | REQ_IDLE here to tell the block layer there are more
1573 	 * IOs coming immediately after this one. This prevents the block layer
1574 	 * writeback throttle from throttling log writes behind background
1575 	 * metadata writeback and causing priority inversions.
1576 	 */
1577 	bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
1578 		 howmany(count, PAGE_SIZE),
1579 		 REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
1580 	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1581 	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
1582 	iclog->ic_bio.bi_private = iclog;
1583 
1584 	if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
1585 		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
1586 		/*
1587 		 * For external log devices, we also need to flush the data
1588 		 * device cache first to ensure all metadata writeback covered
1589 		 * by the LSN in this iclog is on stable storage. This is slow,
1590 		 * but it *must* complete before we issue the external log IO.
1591 		 *
1592 		 * If the flush fails, we cannot conclude that past metadata
1593 		 * writeback from the log succeeded.  Repeating the flush is
1594 		 * not possible, hence we must shut down with log IO error to
1595 		 * avoid shutdown re-entering this path and erroring out again.
1596 		 */
1597 		if (log->l_targ != log->l_mp->m_ddev_targp &&
1598 		    blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev))
1599 			goto shutdown;
1600 	}
1601 	if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
1602 		iclog->ic_bio.bi_opf |= REQ_FUA;
1603 
1604 	iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
1605 
1606 	if (is_vmalloc_addr(iclog->ic_header)) {
1607 		if (!bio_add_vmalloc(&iclog->ic_bio, iclog->ic_header, count))
1608 			goto shutdown;
1609 	} else {
1610 		bio_add_virt_nofail(&iclog->ic_bio, iclog->ic_header, count);
1611 	}
1612 
1613 	/*
1614 	 * If this log buffer would straddle the end of the log we will have
1615 	 * to split it up into two bios, so that we can continue at the start.
1616 	 */
1617 	if (bno + BTOBB(count) > log->l_logBBsize) {
1618 		struct bio *split;
1619 
1620 		split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1621 				  GFP_NOIO, &fs_bio_set);
1622 		bio_chain(split, &iclog->ic_bio);
1623 		submit_bio(split);
1624 
1625 		/* restart at logical offset zero for the remainder */
1626 		iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1627 	}
1628 
1629 	submit_bio(&iclog->ic_bio);
1630 	return;
1631 shutdown:
1632 	xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1633 sync:
1634 	xlog_state_done_syncing(iclog);
1635 	up(&iclog->ic_sema);
1636 }
1637 
1638 /*
1639  * We need to bump cycle number for the part of the iclog that is
1640  * written to the start of the log. Watch out for the header magic
1641  * number case, though.
1642  */
1643 static void
1644 xlog_split_iclog(
1645 	struct xlog		*log,
1646 	void			*data,
1647 	uint64_t		bno,
1648 	unsigned int		count)
1649 {
1650 	unsigned int		split_offset = BBTOB(log->l_logBBsize - bno);
1651 	unsigned int		i;
1652 
1653 	for (i = split_offset; i < count; i += BBSIZE) {
1654 		uint32_t cycle = get_unaligned_be32(data + i);
1655 
1656 		if (++cycle == XLOG_HEADER_MAGIC_NUM)
1657 			cycle++;
1658 		put_unaligned_be32(cycle, data + i);
1659 	}
1660 }
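
/*
 * Concretely (hypothetical values): if this record was stamped with cycle 7
 * and wraps past the physical end of the log, the blocks that land back at
 * block zero are restamped with cycle 8 so recovery can tell them apart from
 * stale cycle 7 data; the increment skips XLOG_HEADER_MAGIC_NUM so a data
 * block's cycle word can never masquerade as a record header.
 */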
1661 
1662 static int
1663 xlog_calc_iclog_size(
1664 	struct xlog		*log,
1665 	struct xlog_in_core	*iclog,
1666 	uint32_t		*roundoff)
1667 {
1668 	uint32_t		count_init, count;
1669 
1670 	/* Add for LR header */
1671 	count_init = log->l_iclog_hsize + iclog->ic_offset;
1672 	count = roundup(count_init, log->l_iclog_roundoff);
1673 
1674 	*roundoff = count - count_init;
1675 
1676 	ASSERT(count >= count_init);
1677 	ASSERT(*roundoff < log->l_iclog_roundoff);
1678 	return count;
1679 }
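
/*
 * Worked example with hypothetical numbers: a 512 byte record header plus
 * 10,000 bytes of iclog data gives count_init = 10,512; with a 2,048 byte
 * l_iclog_roundoff that rounds up to count = 12,288, leaving roundoff =
 * 1,776 bytes of padding that xlog_sync() must account for against the
 * ticket or the grant heads.
 */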
1680 
1681 /*
1682  * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
1683  * fashion.  Previously, we should have moved the current iclog
1684  * ptr in the log to point to the next available iclog.  This allows further
1685  * write to continue while this code syncs out an iclog ready to go.
1686  * Before an in-core log can be written out, the data section must be scanned
1687  * to save away the 1st word of each BBSIZE block into the header.  We replace
1688  * it with the current cycle count.  Each BBSIZE block is tagged with the
1689  * cycle count because there is an implicit assumption that drives will
1690  * guarantee that entire 512 byte blocks get written at once.  In other words,
1691  * we can't have part of a 512 byte block written and part not written.  By
1692  * tagging each block, we will know which blocks are valid when recovering
1693  * after an unclean shutdown.
1694  *
1695  * This routine is single threaded on the iclog.  No other thread can be in
1696  * this routine with the same iclog.  Changing contents of iclog can
1697  * therefore be done without grabbing the state machine lock.  Updating the global
1698  * log will require grabbing the lock though.
1699  *
1700  * The entire log manager uses a logical block numbering scheme.  Only
1701  * xlog_write_iclog knows about the fact that the log may not start with
1702  * block zero on a given device.
1703  */
1704 STATIC void
1705 xlog_sync(
1706 	struct xlog		*log,
1707 	struct xlog_in_core	*iclog,
1708 	struct xlog_ticket	*ticket)
1709 {
1710 	unsigned int		count;		/* byte count of bwrite */
1711 	unsigned int		roundoff;       /* roundoff to BB or stripe */
1712 	uint64_t		bno;
1713 	unsigned int		size;
1714 
1715 	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
1716 	trace_xlog_iclog_sync(iclog, _RET_IP_);
1717 
1718 	count = xlog_calc_iclog_size(log, iclog, &roundoff);
1719 
1720 	/*
1721 	 * If we have a ticket, account for the roundoff via the ticket
1722 	 * reservation to avoid touching the hot grant heads needlessly.
1723 	 * Otherwise, we have to move grant heads directly.
1724 	 */
1725 	if (ticket) {
1726 		ticket->t_curr_res -= roundoff;
1727 	} else {
1728 		xlog_grant_add_space(&log->l_reserve_head, roundoff);
1729 		xlog_grant_add_space(&log->l_write_head, roundoff);
1730 	}
1731 
1732 	/* put cycle number in every block */
1733 	xlog_pack_data(log, iclog, roundoff);
1734 
1735 	/* real byte length */
1736 	size = iclog->ic_offset;
1737 	if (xfs_has_logv2(log->l_mp))
1738 		size += roundoff;
1739 	iclog->ic_header->h_len = cpu_to_be32(size);
1740 
1741 	XFS_STATS_INC(log->l_mp, xs_log_writes);
1742 	XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
1743 
1744 	bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header->h_lsn));
1745 
1746 	/* Do we need to split this write into 2 parts? */
1747 	if (bno + BTOBB(count) > log->l_logBBsize)
1748 		xlog_split_iclog(log, iclog->ic_header, bno, count);
1749 
1750 	/* calculate the checksum */
1751 	iclog->ic_header->h_crc = xlog_cksum(log, iclog->ic_header,
1752 			iclog->ic_datap, XLOG_REC_SIZE, size);
1753 	/*
1754 	 * Intentionally corrupt the log record CRC based on the error injection
1755 	 * frequency, if defined. This facilitates testing log recovery in the
1756 	 * event of torn writes. Hence, set the IOABORT state to abort the log
1757 	 * write on I/O completion and shutdown the fs. The subsequent mount
1758 	 * detects the bad CRC and attempts to recover.
1759 	 */
1760 #ifdef DEBUG
1761 	if (XFS_TEST_ERROR(log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
1762 		iclog->ic_header->h_crc &= cpu_to_le32(0xAAAAAAAA);
1763 		iclog->ic_fail_crc = true;
1764 		xfs_warn(log->l_mp,
1765 	"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
1766 			 be64_to_cpu(iclog->ic_header->h_lsn));
1767 	}
1768 #endif
1769 	xlog_verify_iclog(log, iclog, count);
1770 	xlog_write_iclog(log, iclog, bno, count);
1771 }
1772 
1773 /*
1774  * Deallocate a log structure
1775  */
1776 STATIC void
1777 xlog_dealloc_log(
1778 	struct xlog		*log)
1779 {
1780 	struct xlog_in_core	*iclog, *next_iclog;
1781 	int			i;
1782 
1783 	/*
1784 	 * Destroy the CIL after waiting for iclog IO completion because an
1785 	 * iclog EIO error will try to shut down the log, which accesses the
1786 	 * CIL to wake up the waiters.
1787 	 */
1788 	xlog_cil_destroy(log);
1789 
1790 	iclog = log->l_iclog;
1791 	for (i = 0; i < log->l_iclog_bufs; i++) {
1792 		next_iclog = iclog->ic_next;
1793 		kvfree(iclog->ic_header);
1794 		kfree(iclog);
1795 		iclog = next_iclog;
1796 	}
1797 
1798 	log->l_mp->m_log = NULL;
1799 	destroy_workqueue(log->l_ioend_workqueue);
1800 	kfree(log);
1801 }
1802 
1803 /*
1804  * Update counters atomically now that memcpy is done.
1805  */
1806 static inline void
1807 xlog_state_finish_copy(
1808 	struct xlog		*log,
1809 	struct xlog_in_core	*iclog,
1810 	int			record_cnt,
1811 	int			copy_bytes)
1812 {
1813 	lockdep_assert_held(&log->l_icloglock);
1814 
1815 	be32_add_cpu(&iclog->ic_header->h_num_logops, record_cnt);
1816 	iclog->ic_offset += copy_bytes;
1817 }
1818 
1819 /*
1820  * print out info relating to regions written which consume
1821  * the reservation
1822  */
1823 void
1824 xlog_print_tic_res(
1825 	struct xfs_mount	*mp,
1826 	struct xlog_ticket	*ticket)
1827 {
1828 	xfs_warn(mp, "ticket reservation summary:");
1829 	xfs_warn(mp, "  unit res    = %d bytes", ticket->t_unit_res);
1830 	xfs_warn(mp, "  current res = %d bytes", ticket->t_curr_res);
1831 	xfs_warn(mp, "  original count  = %d", ticket->t_ocnt);
1832 	xfs_warn(mp, "  remaining count = %d", ticket->t_cnt);
1833 }
1834 
1835 /*
1836  * Print a summary of the transaction.
1837  */
1838 void
1839 xlog_print_trans(
1840 	struct xfs_trans	*tp)
1841 {
1842 	struct xfs_mount	*mp = tp->t_mountp;
1843 	struct xfs_log_item	*lip;
1844 
1845 	/* dump core transaction and ticket info */
1846 	xfs_warn(mp, "transaction summary:");
1847 	xfs_warn(mp, "  log res   = %d", tp->t_log_res);
1848 	xfs_warn(mp, "  log count = %d", tp->t_log_count);
1849 	xfs_warn(mp, "  flags     = 0x%x", tp->t_flags);
1850 
1851 	xlog_print_tic_res(mp, tp->t_ticket);
1852 
1853 	/* dump each log item */
1854 	list_for_each_entry(lip, &tp->t_items, li_trans) {
1855 		struct xfs_log_vec	*lv = lip->li_lv;
1856 		struct xfs_log_iovec	*vec;
1857 		int			i;
1858 
1859 		xfs_warn(mp, "log item: ");
1860 		xfs_warn(mp, "  type	= 0x%x", lip->li_type);
1861 		xfs_warn(mp, "  flags	= 0x%lx", lip->li_flags);
1862 		if (!lv)
1863 			continue;
1864 		xfs_warn(mp, "  niovecs	= %d", lv->lv_niovecs);
1865 		xfs_warn(mp, "  alloc_size = %d", lv->lv_alloc_size);
1866 		xfs_warn(mp, "  bytes	= %d", lv->lv_bytes);
1867 		xfs_warn(mp, "  buf used= %d", lv->lv_buf_used);
1868 
1869 		/* dump each iovec for the log item */
1870 		vec = lv->lv_iovecp;
1871 		for (i = 0; i < lv->lv_niovecs; i++) {
1872 			int dumplen = min(vec->i_len, 32);
1873 
1874 			xfs_warn(mp, "  iovec[%d]", i);
1875 			xfs_warn(mp, "    type	= 0x%x", vec->i_type);
1876 			xfs_warn(mp, "    len	= %d", vec->i_len);
1877 			xfs_warn(mp, "    first %d bytes of iovec[%d]:", dumplen, i);
1878 			xfs_hex_dump(vec->i_addr, dumplen);
1879 
1880 			vec++;
1881 		}
1882 	}
1883 }
1884 
1885 static inline uint32_t xlog_write_space_left(struct xlog_write_data *data)
1886 {
1887 	return data->iclog->ic_size - data->log_offset;
1888 }
1889 
1890 static void *
1891 xlog_write_space_advance(
1892 	struct xlog_write_data	*data,
1893 	unsigned int		len)
1894 {
1895 	void			*p = data->iclog->ic_datap + data->log_offset;
1896 
1897 	ASSERT(xlog_write_space_left(data) >= len);
1898 	ASSERT(data->log_offset % sizeof(int32_t) == 0);
1899 	ASSERT(len % sizeof(int32_t) == 0);
1900 
1901 	data->data_cnt += len;
1902 	data->log_offset += len;
1903 	data->bytes_left -= len;
1904 	return p;
1905 }
1906 
1907 static inline void
1908 xlog_write_iovec(
1909 	struct xlog_write_data	*data,
1910 	void			*buf,
1911 	uint32_t		buf_len)
1912 {
1913 	memcpy(xlog_write_space_advance(data, buf_len), buf, buf_len);
1914 	data->record_cnt++;
1915 }
1916 
1917 /*
1918  * Write log vectors into a single iclog which is guaranteed by the caller
1919  * to have enough space to write the entire log vector into.
1920  */
1921 static void
1922 xlog_write_full(
1923 	struct xfs_log_vec	*lv,
1924 	struct xlog_write_data	*data)
1925 {
1926 	int			index;
1927 
1928 	ASSERT(data->bytes_left <= xlog_write_space_left(data) ||
1929 		data->iclog->ic_state == XLOG_STATE_WANT_SYNC);
1930 
1931 	/*
1932 	 * Ordered log vectors have no regions to write so this
1933 	 * loop will naturally skip them.
1934 	 */
1935 	for (index = 0; index < lv->lv_niovecs; index++) {
1936 		struct xfs_log_iovec	*reg = &lv->lv_iovecp[index];
1937 		struct xlog_op_header	*ophdr = reg->i_addr;
1938 
1939 		ophdr->oh_tid = cpu_to_be32(data->ticket->t_tid);
1940 		xlog_write_iovec(data, reg->i_addr, reg->i_len);
1941 	}
1942 }
1943 
1944 static int
1945 xlog_write_get_more_iclog_space(
1946 	struct xlog_write_data	*data)
1947 {
1948 	struct xlog		*log = data->iclog->ic_log;
1949 	int			error;
1950 
1951 	spin_lock(&log->l_icloglock);
1952 	ASSERT(data->iclog->ic_state == XLOG_STATE_WANT_SYNC);
1953 	xlog_state_finish_copy(log, data->iclog, data->record_cnt,
1954 			data->data_cnt);
1955 	error = xlog_state_release_iclog(log, data->iclog, data->ticket);
1956 	spin_unlock(&log->l_icloglock);
1957 	if (error)
1958 		return error;
1959 
1960 	error = xlog_state_get_iclog_space(log, data);
1961 	if (error)
1962 		return error;
1963 	data->record_cnt = 0;
1964 	data->data_cnt = 0;
1965 	return 0;
1966 }
1967 
1968 /*
1969  * Write log vectors into a single iclog which is smaller than the current chain
1970  * length. We write until we cannot fit a full record into the remaining space
1971  * and then stop. We return the log vector that is to be written that cannot
1972  * wholly fit in the iclog.
1973  */
1974 static int
1975 xlog_write_partial(
1976 	struct xfs_log_vec	*lv,
1977 	struct xlog_write_data	*data)
1978 {
1979 	struct xlog_op_header	*ophdr;
1980 	int			index = 0;
1981 	uint32_t		rlen;
1982 	int			error;
1983 
1984 	/* walk the logvec, copying until we run out of space in the iclog */
1985 	for (index = 0; index < lv->lv_niovecs; index++) {
1986 		struct xfs_log_iovec	*reg = &lv->lv_iovecp[index];
1987 		uint32_t		reg_offset = 0;
1988 
1989 		/*
1990 		 * The first region of a continuation must have a non-zero
1991 		 * length otherwise log recovery will just skip over it and
1992 		 * start recovering from the next opheader it finds. Because we
1993 		 * mark the next opheader as a continuation, recovery will then
1994 		 * incorrectly add the continuation to the previous region and
1995 		 * that breaks stuff.
1996 		 *
1997 		 * Hence if there isn't space for region data after the
1998 		 * opheader, then we need to start afresh with a new iclog.
1999 		 */
2000 		if (xlog_write_space_left(data) <=
2001 					sizeof(struct xlog_op_header)) {
2002 			error = xlog_write_get_more_iclog_space(data);
2003 			if (error)
2004 				return error;
2005 		}
2006 
2007 		ophdr = reg->i_addr;
2008 		rlen = min_t(uint32_t, reg->i_len, xlog_write_space_left(data));
2009 
2010 		ophdr->oh_tid = cpu_to_be32(data->ticket->t_tid);
2011 		ophdr->oh_len = cpu_to_be32(rlen - sizeof(struct xlog_op_header));
2012 		if (rlen != reg->i_len)
2013 			ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2014 
2015 		xlog_write_iovec(data, reg->i_addr, rlen);
2016 
2017 		/* If we wrote the whole region, move to the next. */
2018 		if (rlen == reg->i_len)
2019 			continue;
2020 
2021 		/*
2022 		 * We now have a partially written iovec, but it can span
2023 		 * multiple iclogs so we loop here. First we release the iclog
2024 		 * we currently have, then we get a new iclog and add a new
2025 		 * opheader. Then we continue copying from where we were until
2026 		 * we either complete the iovec or fill the iclog. If we
2027 		 * complete the iovec, then we increment the index and go right
2028 		 * back to the top of the outer loop. If we fill the iclog, we
2029 		 * run the inner loop again.
2030 		 *
2031 		 * This is complicated by the tail of a region using all the
2032 		 * space in an iclog and hence requiring us to release the iclog
2033 		 * and get a new one before returning to the outer loop. We must
2034 		 * always guarantee that we exit this inner loop with at least
2035 		 * space for log transaction opheaders left in the current
2036 		 * iclog, hence we cannot just terminate the loop at the end
2037 		 * of the continuation. So we loop while there is no
2038 		 * space left in the current iclog, and check for the end of the
2039 		 * continuation after getting a new iclog.
2040 		 */
2041 		do {
2042 			/*
2043 			 * Ensure we include the continuation opheader in the
2044 			 * space we need in the new iclog by adding that size
2045 			 * to the length we require. This continuation opheader
2046 			 * needs to be accounted to the ticket as the space it
2047 			 * consumes hasn't been accounted to the lv we are
2048 			 * writing.
2049 			 */
2050 			data->bytes_left += sizeof(struct xlog_op_header);
2051 			error = xlog_write_get_more_iclog_space(data);
2052 			if (error)
2053 				return error;
2054 
2055 			ophdr = xlog_write_space_advance(data,
2056 					sizeof(struct xlog_op_header));
2057 			ophdr->oh_tid = cpu_to_be32(data->ticket->t_tid);
2058 			ophdr->oh_clientid = XFS_TRANSACTION;
2059 			ophdr->oh_res2 = 0;
2060 			ophdr->oh_flags = XLOG_WAS_CONT_TRANS;
2061 
2062 			data->ticket->t_curr_res -=
2063 				sizeof(struct xlog_op_header);
2064 
2065 			/*
2066 			 * If rlen fits in the iclog, then end the region
2067 			 * continuation. Otherwise we're going around again.
2068 			 */
2069 			reg_offset += rlen;
2070 			rlen = reg->i_len - reg_offset;
2071 			if (rlen <= xlog_write_space_left(data))
2072 				ophdr->oh_flags |= XLOG_END_TRANS;
2073 			else
2074 				ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2075 
2076 			rlen = min_t(uint32_t, rlen,
2077 					xlog_write_space_left(data));
2078 			ophdr->oh_len = cpu_to_be32(rlen);
2079 
2080 			xlog_write_iovec(data, reg->i_addr + reg_offset, rlen);
2081 		} while (ophdr->oh_flags & XLOG_CONTINUE_TRANS);
2082 	}
2083 
2084 	return 0;
2085 }
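
/*
 * Worked example (illustrative numbers only): a 6000 byte region whose
 * first opheader is embedded at reg->i_addr, written when only 2000
 * bytes are left in the current iclog, goes out as three pieces:
 *
 *	iclog A: the embedded opheader, oh_len covering the part that fits,
 *		 oh_flags |= XLOG_CONTINUE_TRANS
 *	iclog B: a new opheader charged to the ticket,
 *		 oh_flags = XLOG_WAS_CONT_TRANS | XLOG_CONTINUE_TRANS
 *		 (assuming the remaining bytes still do not fit)
 *	iclog C: a new opheader, oh_flags = XLOG_WAS_CONT_TRANS | XLOG_END_TRANS,
 *		 carrying the tail of the region
 *
 * Recovery stitches the pieces back together by walking these flags,
 * which is why the first piece must never have a zero length payload.
 */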
2086 
2087 /*
2088  * Write some region out to in-core log
2089  *
2090  * This will be called when writing externally provided regions or when
2091  * writing out a commit record for a given transaction.
2092  *
2093  * General algorithm:
2094  *	1. Find total length of this write.  This may include adding to the
2095  *		lengths passed in.
2096  *	2. Check whether we violate the ticket's reservation.
2097  *	3. While writing to this iclog
2098  *	    A. Reserve as much space in this iclog as can get
2099  *	    B. If this is first write, save away start lsn
2100  *	    C. While writing this region:
2101  *		1. If first write of transaction, write start record
2102  *		2. Write log operation header (header per region)
2103  *		3. Find out if we can fit entire region into this iclog
2104  *		4. Potentially, verify destination memcpy ptr
2105  *		5. Memcpy (partial) region
2106  *		6. If partial copy, release iclog; otherwise, continue
2107  *			copying more regions into current iclog
2108  *	4. Mark want sync bit (in simulation mode)
2109  *	5. Release iclog for potential flush to on-disk log.
2110  *
2111  * ERRORS:
2112  * 1.	Panic if reservation is overrun.  This should never happen since
2113  *	reservation amounts are generated internal to the filesystem.
2114  * NOTES:
2115  * 1. Tickets are single threaded data structures.
2116  * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2117  *	syncing routine.  When a single log_write region needs to span
2118  *	multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2119  *	on all log operation writes which don't contain the end of the
2120  *	region.  The XLOG_END_TRANS bit is used for the in-core log
2121  *	operation which contains the end of the continued log_write region.
2122  * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2123  *	we don't really know exactly how much space will be used.  As a result,
2124  *	we don't update ic_offset until the end when we know exactly how many
2125  *	bytes have been written out.
2126  */
2127 int
2128 xlog_write(
2129 	struct xlog		*log,
2130 	struct xfs_cil_ctx	*ctx,
2131 	struct list_head	*lv_chain,
2132 	struct xlog_ticket	*ticket,
2133 	uint32_t		len)
2134 
2135 {
2136 	struct xfs_log_vec	*lv;
2137 	struct xlog_write_data	data = {
2138 		.ticket		= ticket,
2139 		.bytes_left	= len,
2140 	};
2141 	int			error;
2142 
2143 	if (ticket->t_curr_res < 0) {
2144 		xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2145 		     "ctx ticket reservation ran out. Need to up reservation");
2146 		xlog_print_tic_res(log->l_mp, ticket);
2147 		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
2148 	}
2149 
2150 	error = xlog_state_get_iclog_space(log, &data);
2151 	if (error)
2152 		return error;
2153 
2154 	ASSERT(xlog_write_space_left(&data) > 0);
2155 
2156 	/*
2157 	 * If we have a context pointer, pass it the first iclog we are
2158 	 * writing to so it can record state needed for iclog write
2159 	 * ordering.
2160 	 */
2161 	if (ctx)
2162 		xlog_cil_set_ctx_write_state(ctx, data.iclog);
2163 
2164 	list_for_each_entry(lv, lv_chain, lv_list) {
2165 		/*
2166 		 * If the entire log vec does not fit in the iclog, punt it to
2167 		 * the partial copy loop which can handle this case.
2168 		 */
2169 		if (lv->lv_niovecs &&
2170 		    lv->lv_bytes > xlog_write_space_left(&data)) {
2171 			error = xlog_write_partial(lv, &data);
2172 			if (error) {
2173 				/*
2174 				 * We have no iclog to release, so just return
2175 				 * the error immediately.
2176 				 */
2177 				return error;
2178 			}
2179 		} else {
2180 			xlog_write_full(lv, &data);
2181 		}
2182 	}
2183 	ASSERT(data.bytes_left == 0);
2184 
2185 	/*
2186 	 * We've already been guaranteed that the last writes will fit inside
2187 	 * the current iclog, and hence it will already have the space used by
2188 	 * those writes accounted to it. Hence we do not need to update the
2189 	 * iclog with the number of bytes written here.
2190 	 */
2191 	spin_lock(&log->l_icloglock);
2192 	xlog_state_finish_copy(log, data.iclog, data.record_cnt, 0);
2193 	error = xlog_state_release_iclog(log, data.iclog, ticket);
2194 	spin_unlock(&log->l_icloglock);
2195 
2196 	return error;
2197 }
2198 
2199 static void
2200 xlog_state_activate_iclog(
2201 	struct xlog_in_core	*iclog,
2202 	int			*iclogs_changed)
2203 {
2204 	ASSERT(list_empty_careful(&iclog->ic_callbacks));
2205 	trace_xlog_iclog_activate(iclog, _RET_IP_);
2206 
2207 	/*
2208 	 * If the number of ops in this iclog indicates it just contains the
2209 	 * dummy transaction, we can change state into IDLE (the second time
2210 	 * around). Otherwise we should change the state into NEED a dummy.
2211 	 * We don't need to cover the dummy.
2212 	 */
2213 	if (*iclogs_changed == 0 &&
2214 	    iclog->ic_header->h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
2215 		*iclogs_changed = 1;
2216 	} else {
2217 		/*
2218 		 * We have two dirty iclogs so start over.  This could also be
2219 		 * num of ops indicating this is not the dummy going out.
2220 		 */
2221 		*iclogs_changed = 2;
2222 	}
2223 
2224 	iclog->ic_state	= XLOG_STATE_ACTIVE;
2225 	iclog->ic_offset = 0;
2226 	iclog->ic_header->h_num_logops = 0;
2227 	memset(iclog->ic_header->h_cycle_data, 0,
2228 		sizeof(iclog->ic_header->h_cycle_data));
2229 	iclog->ic_header->h_lsn = 0;
2230 	iclog->ic_header->h_tail_lsn = 0;
2231 }
2232 
2233 /*
2234  * Loop through all iclogs and mark all iclogs currently marked DIRTY as
2235  * ACTIVE after iclog I/O has completed.
2236  */
2237 static void
2238 xlog_state_activate_iclogs(
2239 	struct xlog		*log,
2240 	int			*iclogs_changed)
2241 {
2242 	struct xlog_in_core	*iclog = log->l_iclog;
2243 
2244 	do {
2245 		if (iclog->ic_state == XLOG_STATE_DIRTY)
2246 			xlog_state_activate_iclog(iclog, iclogs_changed);
2247 		/*
2248 		 * The ordering of marking iclogs ACTIVE must be maintained, so
2249 		 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
2250 		 */
2251 		else if (iclog->ic_state != XLOG_STATE_ACTIVE)
2252 			break;
2253 	} while ((iclog = iclog->ic_next) != log->l_iclog);
2254 }
2255 
2256 static int
2257 xlog_covered_state(
2258 	int			prev_state,
2259 	int			iclogs_changed)
2260 {
2261 	/*
2262 	 * We go to NEED for any non-covering writes. We go to NEED2 if we just
2263 	 * wrote the first covering record (DONE). We go to IDLE if we just
2264 	 * wrote the second covering record (DONE2) and remain in IDLE until a
2265 	 * non-covering write occurs.
2266 	 */
2267 	switch (prev_state) {
2268 	case XLOG_STATE_COVER_IDLE:
2269 		if (iclogs_changed == 1)
2270 			return XLOG_STATE_COVER_IDLE;
2271 		fallthrough;
2272 	case XLOG_STATE_COVER_NEED:
2273 	case XLOG_STATE_COVER_NEED2:
2274 		break;
2275 	case XLOG_STATE_COVER_DONE:
2276 		if (iclogs_changed == 1)
2277 			return XLOG_STATE_COVER_NEED2;
2278 		break;
2279 	case XLOG_STATE_COVER_DONE2:
2280 		if (iclogs_changed == 1)
2281 			return XLOG_STATE_COVER_IDLE;
2282 		break;
2283 	default:
2284 		ASSERT(0);
2285 	}
2286 
2287 	return XLOG_STATE_COVER_NEED;
2288 }
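
/*
 * Illustrative summary of the transitions above (iclogs_changed == 1
 * means only the dummy covering record went out, 2 means real changes
 * went out):
 *
 *	COVER_DONE,  iclogs_changed == 1  ->  COVER_NEED2
 *	COVER_DONE2, iclogs_changed == 1  ->  COVER_IDLE
 *	COVER_IDLE,  iclogs_changed == 1  ->  COVER_IDLE	(stay idle)
 *	any state,   iclogs_changed == 2  ->  COVER_NEED	(new work to cover)
 *
 * The DONE/DONE2 states themselves are set by the log covering code as
 * it commits the dummy records.
 */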
2289 
2290 STATIC void
2291 xlog_state_clean_iclog(
2292 	struct xlog		*log,
2293 	struct xlog_in_core	*dirty_iclog)
2294 {
2295 	int			iclogs_changed = 0;
2296 
2297 	trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);
2298 
2299 	dirty_iclog->ic_state = XLOG_STATE_DIRTY;
2300 
2301 	xlog_state_activate_iclogs(log, &iclogs_changed);
2302 	wake_up_all(&dirty_iclog->ic_force_wait);
2303 
2304 	if (iclogs_changed) {
2305 		log->l_covered_state = xlog_covered_state(log->l_covered_state,
2306 				iclogs_changed);
2307 	}
2308 }
2309 
2310 STATIC xfs_lsn_t
2311 xlog_get_lowest_lsn(
2312 	struct xlog		*log)
2313 {
2314 	struct xlog_in_core	*iclog = log->l_iclog;
2315 	xfs_lsn_t		lowest_lsn = 0, lsn;
2316 
2317 	do {
2318 		if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2319 		    iclog->ic_state == XLOG_STATE_DIRTY)
2320 			continue;
2321 
2322 		lsn = be64_to_cpu(iclog->ic_header->h_lsn);
2323 		if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
2324 			lowest_lsn = lsn;
2325 	} while ((iclog = iclog->ic_next) != log->l_iclog);
2326 
2327 	return lowest_lsn;
2328 }
2329 
2330 /*
2331  * Return true if we need to stop processing, false to continue to the next
2332  * iclog. The caller will need to run callbacks if the iclog is returned in the
2333  * XLOG_STATE_CALLBACK state.
2334  */
2335 static bool
2336 xlog_state_iodone_process_iclog(
2337 	struct xlog		*log,
2338 	struct xlog_in_core	*iclog)
2339 {
2340 	xfs_lsn_t		lowest_lsn;
2341 	xfs_lsn_t		header_lsn;
2342 
2343 	switch (iclog->ic_state) {
2344 	case XLOG_STATE_ACTIVE:
2345 	case XLOG_STATE_DIRTY:
2346 		/*
2347 		 * Skip all iclogs in the ACTIVE & DIRTY states:
2348 		 */
2349 		return false;
2350 	case XLOG_STATE_DONE_SYNC:
2351 		/*
2352 		 * Now that we have an iclog that is in the DONE_SYNC state, do
2353 		 * one more check here to see if we have chased our tail around.
2354 		 * If this is not the lowest lsn iclog, then we will leave it
2355 		 * for another completion to process.
2356 		 */
2357 		header_lsn = be64_to_cpu(iclog->ic_header->h_lsn);
2358 		lowest_lsn = xlog_get_lowest_lsn(log);
2359 		if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
2360 			return false;
2361 		/*
2362 		 * If there are no callbacks on this iclog, we can mark it clean
2363 		 * immediately and return. Otherwise we need to run the
2364 		 * callbacks.
2365 		 */
2366 		if (list_empty(&iclog->ic_callbacks)) {
2367 			xlog_state_clean_iclog(log, iclog);
2368 			return false;
2369 		}
2370 		trace_xlog_iclog_callback(iclog, _RET_IP_);
2371 		iclog->ic_state = XLOG_STATE_CALLBACK;
2372 		return false;
2373 	default:
2374 		/*
2375 		 * Can only perform callbacks in order.  Since this iclog is not
2376 		 * in the DONE_SYNC state, we skip the rest and just try to
2377 		 * clean up.
2378 		 */
2379 		return true;
2380 	}
2381 }
2382 
2383 /*
2384  * Loop over all the iclogs, running attached callbacks on them. Return true if
2385  * we ran any callbacks, indicating that we dropped the icloglock. We don't need
2386  * to handle transient shutdown state here at all because
2387  * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown
2388  * cleanup of the callbacks.
2389  */
2390 static bool
2391 xlog_state_do_iclog_callbacks(
2392 	struct xlog		*log)
2393 		__releases(&log->l_icloglock)
2394 		__acquires(&log->l_icloglock)
2395 {
2396 	struct xlog_in_core	*first_iclog = log->l_iclog;
2397 	struct xlog_in_core	*iclog = first_iclog;
2398 	bool			ran_callback = false;
2399 
2400 	do {
2401 		LIST_HEAD(cb_list);
2402 
2403 		if (xlog_state_iodone_process_iclog(log, iclog))
2404 			break;
2405 		if (iclog->ic_state != XLOG_STATE_CALLBACK) {
2406 			iclog = iclog->ic_next;
2407 			continue;
2408 		}
2409 		list_splice_init(&iclog->ic_callbacks, &cb_list);
2410 		spin_unlock(&log->l_icloglock);
2411 
2412 		trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
2413 		xlog_cil_process_committed(&cb_list);
2414 		trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
2415 		ran_callback = true;
2416 
2417 		spin_lock(&log->l_icloglock);
2418 		xlog_state_clean_iclog(log, iclog);
2419 		iclog = iclog->ic_next;
2420 	} while (iclog != first_iclog);
2421 
2422 	return ran_callback;
2423 }
2424 
2425 
2426 /*
2427  * Loop running iclog completion callbacks until there are no more iclogs in a
2428  * state that can run callbacks.
2429  */
2430 STATIC void
2431 xlog_state_do_callback(
2432 	struct xlog		*log)
2433 {
2434 	int			flushcnt = 0;
2435 	int			repeats = 0;
2436 
2437 	spin_lock(&log->l_icloglock);
2438 	while (xlog_state_do_iclog_callbacks(log)) {
2439 		if (xlog_is_shutdown(log))
2440 			break;
2441 
2442 		if (++repeats > 5000) {
2443 			flushcnt += repeats;
2444 			repeats = 0;
2445 			xfs_warn(log->l_mp,
2446 				"%s: possible infinite loop (%d iterations)",
2447 				__func__, flushcnt);
2448 		}
2449 	}
2450 
2451 	if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
2452 		wake_up_all(&log->l_flush_wait);
2453 
2454 	spin_unlock(&log->l_icloglock);
2455 }
2456 
2457 
2458 /*
2459  * Finish transitioning this iclog to the dirty state.
2460  *
2461  * Callbacks could take time, so they are done outside the scope of the
2462  * global state machine log lock.
2463  */
2464 STATIC void
2465 xlog_state_done_syncing(
2466 	struct xlog_in_core	*iclog)
2467 {
2468 	struct xlog		*log = iclog->ic_log;
2469 
2470 	spin_lock(&log->l_icloglock);
2471 	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2472 	trace_xlog_iclog_sync_done(iclog, _RET_IP_);
2473 
2474 	/*
2475 	 * If we got an error, either on the first buffer, or in the case of
2476 	 * split log writes, on the second, we shut down the file system and
2477 	 * no iclogs should ever be attempted to be written to disk again.
2478 	 */
2479 	if (!xlog_is_shutdown(log)) {
2480 		ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
2481 		iclog->ic_state = XLOG_STATE_DONE_SYNC;
2482 	}
2483 
2484 	/*
2485 	 * Someone could be sleeping prior to writing out the next
2486 	 * iclog buffer, we wake them all, one will get to do the
2487 	 * I/O, the others get to wait for the result.
2488 	 */
2489 	wake_up_all(&iclog->ic_write_wait);
2490 	spin_unlock(&log->l_icloglock);
2491 	xlog_state_do_callback(log);
2492 }
2493 
2494 /*
2495  * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2496  * sleep.  We wait on the flush queue on the head iclog as that should be
2497  * the first iclog to complete flushing. Hence if all iclogs are syncing,
2498  * we will wait here and all new writes will sleep until a sync completes.
2499  *
2500  * The in-core logs are used in a circular fashion. They are not used
2501  * out-of-order even when an iclog past the head is free.
2502  *
2503  * return:
2504  *	* data->log_offset set to where xlog_write() can start writing into
2505  *		the in-core log's data space.
2506  *	* data->iclog set to the in-core log xlog_write() should write to.
2507  *	* if the write will not wholly fit in this iclog, the iclog is
2508  *		switched to WANT_SYNC so it gets flushed; in that case the
2509  *		in-core log's offset field is only updated later, once we know
2510  *		exactly how many bytes have been copied.
2511  */
2512 STATIC int
2513 xlog_state_get_iclog_space(
2514 	struct xlog		*log,
2515 	struct xlog_write_data	*data)
2516 {
2517 	int			log_offset;
2518 	struct xlog_rec_header	*head;
2519 	struct xlog_in_core	*iclog;
2520 
2521 restart:
2522 	spin_lock(&log->l_icloglock);
2523 	if (xlog_is_shutdown(log)) {
2524 		spin_unlock(&log->l_icloglock);
2525 		return -EIO;
2526 	}
2527 
2528 	iclog = log->l_iclog;
2529 	if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2530 		XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
2531 
2532 		/* Wait for log writes to have flushed */
2533 		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2534 		goto restart;
2535 	}
2536 
2537 	head = iclog->ic_header;
2538 
2539 	atomic_inc(&iclog->ic_refcnt);	/* prevents sync */
2540 	log_offset = iclog->ic_offset;
2541 
2542 	trace_xlog_iclog_get_space(iclog, _RET_IP_);
2543 
2544 	/* On the 1st write to an iclog, figure out lsn.  This works
2545 	 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2546 	 * committing to.  If the offset is set, that's how many blocks
2547 	 * must be written.
2548 	 */
2549 	if (log_offset == 0) {
2550 		data->ticket->t_curr_res -= log->l_iclog_hsize;
2551 		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2552 		head->h_lsn = cpu_to_be64(
2553 			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2554 		ASSERT(log->l_curr_block >= 0);
2555 	}
2556 
2557 	/* If there is enough room to write everything, then do it.  Otherwise,
2558 	 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2559 	 * bit is on, so this will get flushed out.  Don't update ic_offset
2560 	 * until you know exactly how many bytes get copied.  Therefore, wait
2561 	 * until later to update ic_offset.
2562 	 *
2563 	 * xlog_write() algorithm assumes that at least 2 xlog_op_header's
2564 	 * can fit into remaining data section.
2565 	 */
2566 	if (iclog->ic_size - iclog->ic_offset <
2567 	    2 * sizeof(struct xlog_op_header)) {
2568 		int		error = 0;
2569 
2570 		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2571 
2572 		/*
2573 		 * If we are the only one writing to this iclog, sync it to
2574 		 * disk.  We need to do an atomic compare and decrement here to
2575 		 * avoid racing with concurrent atomic_dec_and_lock() calls in
2576 		 * xlog_state_release_iclog() when there is more than one
2577 		 * reference to the iclog.
2578 		 */
2579 		if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
2580 			error = xlog_state_release_iclog(log, iclog,
2581 					data->ticket);
2582 		spin_unlock(&log->l_icloglock);
2583 		if (error)
2584 			return error;
2585 		goto restart;
2586 	}
2587 
2588 	/* Do we have enough room to write the full amount in the remainder
2589 	 * of this iclog?  Or must we continue a write on the next iclog and
2590 	 * mark this iclog as completely taken?  In the case where we switch
2591 	 * iclogs (to mark it taken), this particular iclog will release/sync
2592 	 * to disk in xlog_write().
2593 	 */
2594 	if (data->bytes_left <= iclog->ic_size - iclog->ic_offset)
2595 		iclog->ic_offset += data->bytes_left;
2596 	else
2597 		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2598 	data->iclog = iclog;
2599 
2600 	ASSERT(iclog->ic_offset <= iclog->ic_size);
2601 	spin_unlock(&log->l_icloglock);
2602 
2603 	data->log_offset = log_offset;
2604 	return 0;
2605 }
2606 
2607 /*
2608  * The first cnt-1 times a ticket goes through here we don't need to move the
2609  * grant write head because the permanent reservation has reserved cnt times the
2610  * unit amount.  Release part of current permanent unit reservation and reset
2611  * current reservation to be one units worth.  Also move grant reservation head
2612  * forward.
2613  */
2614 void
2615 xfs_log_ticket_regrant(
2616 	struct xlog		*log,
2617 	struct xlog_ticket	*ticket)
2618 {
2619 	trace_xfs_log_ticket_regrant(log, ticket);
2620 
2621 	if (ticket->t_cnt > 0)
2622 		ticket->t_cnt--;
2623 
2624 	xlog_grant_sub_space(&log->l_reserve_head, ticket->t_curr_res);
2625 	xlog_grant_sub_space(&log->l_write_head, ticket->t_curr_res);
2626 	ticket->t_curr_res = ticket->t_unit_res;
2627 
2628 	trace_xfs_log_ticket_regrant_sub(log, ticket);
2629 
2630 	/* just return if we still have some of the pre-reserved space */
2631 	if (!ticket->t_cnt) {
2632 		xlog_grant_add_space(&log->l_reserve_head, ticket->t_unit_res);
2633 		trace_xfs_log_ticket_regrant_exit(log, ticket);
2634 	}
2635 
2636 	xfs_log_ticket_put(ticket);
2637 }
2638 
2639 /*
2640  * Give back the space left from a reservation.
2641  *
2642  * All the information we need to make a correct determination of space left
2643  * is present.  For non-permanent reservations, things are quite easy.  The
2644  * count should have been decremented to zero.  We only need to deal with the
2645  * space remaining in the current reservation part of the ticket.  If the
2646  * ticket contains a permanent reservation, there may be left over space which
2647  * needs to be released.  A count of N means that N-1 refills of the current
2648  * reservation can be done before we need to ask for more space.  The first
2649  * one goes to fill up the first current reservation.  Once we run out of
2650  * space, the count will stay at zero and the only space remaining will be
2651  * in the current reservation field.
2652  */
2653 void
2654 xfs_log_ticket_ungrant(
2655 	struct xlog		*log,
2656 	struct xlog_ticket	*ticket)
2657 {
2658 	int			bytes;
2659 
2660 	trace_xfs_log_ticket_ungrant(log, ticket);
2661 
2662 	if (ticket->t_cnt > 0)
2663 		ticket->t_cnt--;
2664 
2665 	trace_xfs_log_ticket_ungrant_sub(log, ticket);
2666 
2667 	/*
2668 	 * If this is a permanent reservation ticket, we may be able to free
2669 	 * up more space based on the remaining count.
2670 	 */
2671 	bytes = ticket->t_curr_res;
2672 	if (ticket->t_cnt > 0) {
2673 		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
2674 		bytes += ticket->t_unit_res*ticket->t_cnt;
2675 	}
2676 
2677 	xlog_grant_sub_space(&log->l_reserve_head, bytes);
2678 	xlog_grant_sub_space(&log->l_write_head, bytes);
2679 
2680 	trace_xfs_log_ticket_ungrant_exit(log, ticket);
2681 
2682 	xfs_log_space_wake(log->l_mp);
2683 	xfs_log_ticket_put(ticket);
2684 }
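
/*
 * Worked example (illustrative numbers only): a permanent ticket with
 * t_unit_res = 100000, t_curr_res = 40000 and t_cnt = 3 on entry is
 * ungranted as follows: t_cnt is decremented to 2, then
 *
 *	bytes = 40000 + 100000 * 2 = 240000
 *
 * is handed back to both grant heads, i.e. the unused part of the
 * current reservation plus the two untouched refills.
 */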
2685 
2686 /*
2687  * This routine will mark the current iclog in the ring as WANT_SYNC and move
2688  * the current iclog pointer to the next iclog in the ring.
2689  */
2690 void
2691 xlog_state_switch_iclogs(
2692 	struct xlog		*log,
2693 	struct xlog_in_core	*iclog,
2694 	int			eventual_size)
2695 {
2696 	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
2697 	assert_spin_locked(&log->l_icloglock);
2698 	trace_xlog_iclog_switch(iclog, _RET_IP_);
2699 
2700 	if (!eventual_size)
2701 		eventual_size = iclog->ic_offset;
2702 	iclog->ic_state = XLOG_STATE_WANT_SYNC;
2703 	iclog->ic_header->h_prev_block = cpu_to_be32(log->l_prev_block);
2704 	log->l_prev_block = log->l_curr_block;
2705 	log->l_prev_cycle = log->l_curr_cycle;
2706 
2707 	/* roll log?: ic_offset changed later */
2708 	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
2709 
2710 	/* Round up to next log-sunit */
2711 	if (log->l_iclog_roundoff > BBSIZE) {
2712 		uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
2713 		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
2714 	}
2715 
2716 	if (log->l_curr_block >= log->l_logBBsize) {
2717 		/*
2718 		 * Rewind the current block before the cycle is bumped to make
2719 		 * sure that the combined LSN never transiently moves forward
2720 		 * when the log wraps to the next cycle. This is to support the
2721 		 * unlocked sample of these fields from xlog_valid_lsn(). Most
2722 		 * other cases should acquire l_icloglock.
2723 		 */
2724 		log->l_curr_block -= log->l_logBBsize;
2725 		ASSERT(log->l_curr_block >= 0);
2726 		smp_wmb();
2727 		log->l_curr_cycle++;
2728 		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
2729 			log->l_curr_cycle++;
2730 	}
2731 	ASSERT(iclog == log->l_iclog);
2732 	log->l_iclog = iclog->ic_next;
2733 }
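
/*
 * Worked example (illustrative numbers only): with l_curr_block = 90,
 * an iclog carrying 60000 bytes of data, a 512 byte header and a
 * 32768 byte stripe unit roundoff (l_iclog_roundoff):
 *
 *	l_curr_block += BTOBB(60000) + BTOBB(512) = 90 + 118 + 1 = 209
 *	l_curr_block  = roundup(209, BTOBB(32768) = 64) = 256
 *
 * and if that result had reached l_logBBsize the block number would
 * wrap back by l_logBBsize and l_curr_cycle would be bumped, skipping
 * XLOG_HEADER_MAGIC_NUM.
 */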
2734 
2735 /*
2736  * Force the iclog to disk and check if the iclog has been completed before
2737  * xlog_force_iclog() returns. This can happen on synchronous (e.g.
2738  * pmem) or fast async storage because we drop the icloglock to issue the IO.
2739  * If completion has already occurred, tell the caller so that it can avoid an
2740  * unnecessary wait on the iclog.
2741  */
2742 static int
2743 xlog_force_and_check_iclog(
2744 	struct xlog_in_core	*iclog,
2745 	bool			*completed)
2746 {
2747 	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header->h_lsn);
2748 	int			error;
2749 
2750 	*completed = false;
2751 	error = xlog_force_iclog(iclog);
2752 	if (error)
2753 		return error;
2754 
2755 	/*
2756 	 * If the iclog has already been completed and reused, the header LSN
2757 	 * will have been rewritten by completion.
2758 	 */
2759 	if (be64_to_cpu(iclog->ic_header->h_lsn) != lsn)
2760 		*completed = true;
2761 	return 0;
2762 }
2763 
2764 /*
2765  * Write out all data in the in-core log as of this exact moment in time.
2766  *
2767  * Data may be written to the in-core log during this call.  However,
2768  * we don't guarantee this data will be written out.  A change from past
2769  * implementation means this routine will *not* write out zero length LRs.
2770  *
2771  * Basically, we try and perform an intelligent scan of the in-core logs.
2772  * If we determine there is no flushable data, we just return.  There is no
2773  * flushable data if:
2774  *
2775  *	1. the current iclog is active and has no data; the previous iclog
2776  *		is in the active or dirty state.
2777  *	2. the current iclog is dirty, and the previous iclog is in the
2778  *		active or dirty state.
2779  *
2780  * We may sleep if:
2781  *
2782  *	1. the current iclog is not in the active nor dirty state.
2783  *	2. the current iclog dirty, and the previous iclog is not in the
2784  *		active nor dirty state.
2785  *	3. the current iclog is active, and there is another thread writing
2786  *		to this particular iclog.
2787  *	4. a) the current iclog is active and has no other writers
2788  *	   b) when we return from flushing out this iclog, it is still
2789  *		not in the active nor dirty state.
2790  */
2791 int
2792 xfs_log_force(
2793 	struct xfs_mount	*mp,
2794 	uint			flags)
2795 {
2796 	struct xlog		*log = mp->m_log;
2797 	struct xlog_in_core	*iclog;
2798 
2799 	XFS_STATS_INC(mp, xs_log_force);
2800 	trace_xfs_log_force(mp, 0, _RET_IP_);
2801 
2802 	xlog_cil_force(log);
2803 
2804 	spin_lock(&log->l_icloglock);
2805 	if (xlog_is_shutdown(log))
2806 		goto out_error;
2807 
2808 	iclog = log->l_iclog;
2809 	trace_xlog_iclog_force(iclog, _RET_IP_);
2810 
2811 	if (iclog->ic_state == XLOG_STATE_DIRTY ||
2812 	    (iclog->ic_state == XLOG_STATE_ACTIVE &&
2813 	     atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
2814 		/*
2815 		 * If the head is dirty or (active and empty), then we need to
2816 		 * look at the previous iclog.
2817 		 *
2818 		 * If the previous iclog is active or dirty we are done.  There
2819 		 * is nothing to sync out. Otherwise, we attach ourselves to the
2820 		 * previous iclog and go to sleep.
2821 		 */
2822 		iclog = iclog->ic_prev;
2823 	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
2824 		if (atomic_read(&iclog->ic_refcnt) == 0) {
2825 			/* We have exclusive access to this iclog. */
2826 			bool	completed;
2827 
2828 			if (xlog_force_and_check_iclog(iclog, &completed))
2829 				goto out_error;
2830 
2831 			if (completed)
2832 				goto out_unlock;
2833 		} else {
2834 			/*
2835 			 * Someone else is still writing to this iclog, so we
2836 			 * need to ensure that when they release the iclog it
2837 			 * gets synced immediately as we may be waiting on it.
2838 			 */
2839 			xlog_state_switch_iclogs(log, iclog, 0);
2840 		}
2841 	}
2842 
2843 	/*
2844 	 * The iclog we are about to wait on may contain the checkpoint pushed
2845 	 * by the above xlog_cil_force() call, but it may not have been pushed
2846 	 * to disk yet. Like the ACTIVE case above, we need to make sure caches
2847 	 * are flushed when this iclog is written.
2848 	 */
2849 	if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
2850 		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
2851 
2852 	if (flags & XFS_LOG_SYNC)
2853 		return xlog_wait_on_iclog(iclog);
2854 out_unlock:
2855 	spin_unlock(&log->l_icloglock);
2856 	return 0;
2857 out_error:
2858 	spin_unlock(&log->l_icloglock);
2859 	return -EIO;
2860 }
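
/*
 * Typical usage (illustrative): callers that must know their changes are
 * on stable storage before proceeding pass XFS_LOG_SYNC and block in
 * xlog_wait_on_iclog() until the relevant iclog has been written, e.g.
 *
 *	error = xfs_log_force(mp, XFS_LOG_SYNC);
 *
 * Callers that merely want to kick the log along pass 0 and return as
 * soon as the force has been started.
 */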
2861 
2862 /*
2863  * Force the log to a specific LSN.
2864  *
2865  * If an iclog with that lsn can be found:
2866  *	If it is in the DIRTY state, just return.
2867  *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
2868  *		state and go to sleep or return.
2869  *	If it is in any other state, go to sleep or return.
2870  *
2871  * Synchronous forces are implemented with a wait queue.  All callers trying
2872  * to force a given lsn to disk must wait on the queue attached to the
2873  * specific in-core log.  When given in-core log finally completes its write
2874  * to disk, that thread will wake up all threads waiting on the queue.
2875  */
2876 static int
2877 xlog_force_lsn(
2878 	struct xlog		*log,
2879 	xfs_lsn_t		lsn,
2880 	uint			flags,
2881 	int			*log_flushed,
2882 	bool			already_slept)
2883 {
2884 	struct xlog_in_core	*iclog;
2885 	bool			completed;
2886 
2887 	spin_lock(&log->l_icloglock);
2888 	if (xlog_is_shutdown(log))
2889 		goto out_error;
2890 
2891 	iclog = log->l_iclog;
2892 	while (be64_to_cpu(iclog->ic_header->h_lsn) != lsn) {
2893 		trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
2894 		iclog = iclog->ic_next;
2895 		if (iclog == log->l_iclog)
2896 			goto out_unlock;
2897 	}
2898 
2899 	switch (iclog->ic_state) {
2900 	case XLOG_STATE_ACTIVE:
2901 		/*
2902 		 * We sleep here if we haven't already slept (e.g. this is the
2903 		 * first time we've looked at the correct iclog buf) and the
2904 		 * buffer before us is going to be sync'ed.  The reason for this
2905 		 * is that if we are doing sync transactions here, by waiting
2906 		 * for the previous I/O to complete, we can allow a few more
2907 		 * transactions into this iclog before we close it down.
2908 		 *
2909 		 * Otherwise, we mark the buffer WANT_SYNC, and bump up the
2910 		 * refcnt so we can release the log (which drops the ref count).
2911 		 * The state switch keeps new transaction commits from using
2912 		 * this buffer.  When the current commits finish writing into
2913 		 * the buffer, the refcount will drop to zero and the buffer
2914 		 * will go out then.
2915 		 */
2916 		if (!already_slept &&
2917 		    (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
2918 		     iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
2919 			xlog_wait(&iclog->ic_prev->ic_write_wait,
2920 					&log->l_icloglock);
2921 			return -EAGAIN;
2922 		}
2923 		if (xlog_force_and_check_iclog(iclog, &completed))
2924 			goto out_error;
2925 		if (log_flushed)
2926 			*log_flushed = 1;
2927 		if (completed)
2928 			goto out_unlock;
2929 		break;
2930 	case XLOG_STATE_WANT_SYNC:
2931 		/*
2932 		 * This iclog may contain the checkpoint pushed by the
2933 		 * xlog_cil_force_seq() call, but there are other writers still
2934 		 * accessing it so it hasn't been pushed to disk yet. Like the
2935 		 * ACTIVE case above, we need to make sure caches are flushed
2936 		 * when this iclog is written.
2937 		 */
2938 		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
2939 		break;
2940 	default:
2941 		/*
2942 		 * The entire checkpoint was written by the CIL force and is on
2943 		 * its way to disk already. It will be stable when it
2944 		 * completes, so we don't need to manipulate caches here at all.
2945 		 * We just need to wait for completion if necessary.
2946 		 */
2947 		break;
2948 	}
2949 
2950 	if (flags & XFS_LOG_SYNC)
2951 		return xlog_wait_on_iclog(iclog);
2952 out_unlock:
2953 	spin_unlock(&log->l_icloglock);
2954 	return 0;
2955 out_error:
2956 	spin_unlock(&log->l_icloglock);
2957 	return -EIO;
2958 }
2959 
2960 /*
2961  * Force the log to a specific checkpoint sequence.
2962  *
2963  * First force the CIL so that all the required changes have been flushed to the
2964  * iclogs. If the CIL force completed it will return a commit LSN that indicates
2965  * the iclog that needs to be flushed to stable storage. If the caller needs
2966  * a synchronous log force, we will wait on the iclog with the LSN returned by
2967  * xlog_cil_force_seq() to be completed.
2968  */
2969 int
2970 xfs_log_force_seq(
2971 	struct xfs_mount	*mp,
2972 	xfs_csn_t		seq,
2973 	uint			flags,
2974 	int			*log_flushed)
2975 {
2976 	struct xlog		*log = mp->m_log;
2977 	xfs_lsn_t		lsn;
2978 	int			ret;
2979 	ASSERT(seq != 0);
2980 
2981 	XFS_STATS_INC(mp, xs_log_force);
2982 	trace_xfs_log_force(mp, seq, _RET_IP_);
2983 
2984 	lsn = xlog_cil_force_seq(log, seq);
2985 	if (lsn == NULLCOMMITLSN)
2986 		return 0;
2987 
2988 	ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
2989 	if (ret == -EAGAIN) {
2990 		XFS_STATS_INC(mp, xs_log_force_sleep);
2991 		ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
2992 	}
2993 	return ret;
2994 }
2995 
2996 /*
2997  * Free a used ticket when its refcount falls to zero.
2998  */
2999 void
3000 xfs_log_ticket_put(
3001 	struct xlog_ticket	*ticket)
3002 {
3003 	ASSERT(atomic_read(&ticket->t_ref) > 0);
3004 	if (atomic_dec_and_test(&ticket->t_ref))
3005 		kmem_cache_free(xfs_log_ticket_cache, ticket);
3006 }
3007 
3008 struct xlog_ticket *
3009 xfs_log_ticket_get(
3010 	struct xlog_ticket	*ticket)
3011 {
3012 	ASSERT(atomic_read(&ticket->t_ref) > 0);
3013 	atomic_inc(&ticket->t_ref);
3014 	return ticket;
3015 }
3016 
3017 /*
3018  * Figure out the total log space unit (in bytes) that would be
3019  * required for a log ticket.
3020  */
3021 static int
3022 xlog_calc_unit_res(
3023 	struct xlog		*log,
3024 	int			unit_bytes,
3025 	int			*niclogs)
3026 {
3027 	int			iclog_space;
3028 	uint			num_headers;
3029 
3030 	/*
3031 	 * Permanent reservations have up to 'cnt'-1 active log operations
3032 	 * in the log.  A unit in this case is the amount of space for one
3033 	 * of these log operations.  Normal reservations have a cnt of 1
3034 	 * and their unit amount is the total amount of space required.
3035 	 *
3036 	 * The following lines of code account for non-transaction data
3037 	 * which occupy space in the on-disk log.
3038 	 *
3039 	 * Normal form of a transaction is:
3040 	 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3041 	 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3042 	 *
3043 	 * We need to account for all the leadup data and trailer data
3044 	 * around the transaction data.
3045 	 * And then we need to account for the worst case in terms of using
3046 	 * more space.
3047 	 * The worst case will happen if:
3048 	 * - the placement of the transaction happens to be such that the
3049 	 *   roundoff is at its maximum
3050 	 * - the transaction data is synced before the commit record is synced
3051 	 *   i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3052 	 *   Therefore the commit record is in its own Log Record.
3053 	 *   This can happen as the commit record is called with its
3054 	 *   own region to xlog_write().
3055 	 *   This then means that in the worst case, roundoff can happen for
3056 	 *   the commit-rec as well.
3057 	 *   The commit-rec is smaller than padding in this scenario and so it is
3058 	 *   not added separately.
3059 	 */
3060 
3061 	/* for trans header */
3062 	unit_bytes += sizeof(struct xlog_op_header);
3063 	unit_bytes += sizeof(struct xfs_trans_header);
3064 
3065 	/* for start-rec */
3066 	unit_bytes += sizeof(struct xlog_op_header);
3067 
3068 	/*
3069 	 * for LR headers - the space for data in an iclog is the size minus
3070 	 * the space used for the headers. If we use the iclog size, then we
3071 	 * undercalculate the number of headers required.
3072 	 *
3073 	 * Furthermore - the addition of op headers for split-recs might
3074 	 * increase the space required enough to require more log and op
3075 	 * headers, so take that into account too.
3076 	 *
3077 	 * IMPORTANT: This reservation makes the assumption that if this
3078 	 * transaction is the first in an iclog and hence has the LR headers
3079 	 * accounted to it, then the remaining space in the iclog is
3080 	 * exclusively for this transaction.  i.e. if the transaction is larger
3081 	 * than the iclog, it will be the only thing in that iclog.
3082 	 * Fundamentally, this means we must pass the entire log vector to
3083 	 * xlog_write to guarantee this.
3084 	 */
3085 	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3086 	num_headers = howmany(unit_bytes, iclog_space);
3087 
3088 	/* for split-recs - ophdrs added when data split over LRs */
3089 	unit_bytes += sizeof(struct xlog_op_header) * num_headers;
3090 
3091 	/* add extra header reservations if we overrun */
3092 	while (!num_headers ||
3093 	       howmany(unit_bytes, iclog_space) > num_headers) {
3094 		unit_bytes += sizeof(struct xlog_op_header);
3095 		num_headers++;
3096 	}
3097 	unit_bytes += log->l_iclog_hsize * num_headers;
3098 
3099 	/* for commit-rec LR header - note: padding will subsume the ophdr */
3100 	unit_bytes += log->l_iclog_hsize;
3101 
3102 	/* roundoff padding for transaction data and one for commit record */
3103 	unit_bytes += 2 * log->l_iclog_roundoff;
3104 
3105 	if (niclogs)
3106 		*niclogs = num_headers;
3107 	return unit_bytes;
3108 }
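
/*
 * Worked example (illustrative numbers only, assuming 32KB iclogs with
 * a 512 byte header and a 512 byte roundoff): for a unit_bytes of
 * roughly 100000,
 *
 *	iclog_space = 32768 - 512 = 32256
 *	num_headers = howmany(unit_bytes, 32256) = 4
 *
 * so the ticket picks up four split-rec opheaders, four 512 byte LR
 * headers, one more 512 byte LR header for the commit record and
 * 2 * 512 bytes of worst case roundoff - a few KB of fixed overhead on
 * top of the dirty log item data itself.
 */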
3109 
3110 int
3111 xfs_log_calc_unit_res(
3112 	struct xfs_mount	*mp,
3113 	int			unit_bytes)
3114 {
3115 	return xlog_calc_unit_res(mp->m_log, unit_bytes, NULL);
3116 }
3117 
3118 /*
3119  * Allocate and initialise a new log ticket.
3120  */
3121 struct xlog_ticket *
3122 xlog_ticket_alloc(
3123 	struct xlog		*log,
3124 	int			unit_bytes,
3125 	int			cnt,
3126 	bool			permanent)
3127 {
3128 	struct xlog_ticket	*tic;
3129 	int			unit_res;
3130 
3131 	tic = kmem_cache_zalloc(xfs_log_ticket_cache,
3132 			GFP_KERNEL | __GFP_NOFAIL);
3133 
3134 	unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
3135 
3136 	atomic_set(&tic->t_ref, 1);
3137 	tic->t_task		= current;
3138 	INIT_LIST_HEAD(&tic->t_queue);
3139 	tic->t_unit_res		= unit_res;
3140 	tic->t_curr_res		= unit_res;
3141 	tic->t_cnt		= cnt;
3142 	tic->t_ocnt		= cnt;
3143 	tic->t_tid		= get_random_u32();
3144 	if (permanent)
3145 		tic->t_flags |= XLOG_TIC_PERM_RESERV;
3146 
3147 	return tic;
3148 }
3149 
3150 #if defined(DEBUG)
3151 static void
3152 xlog_verify_dump_tail(
3153 	struct xlog		*log,
3154 	struct xlog_in_core	*iclog)
3155 {
3156 	xfs_alert(log->l_mp,
3157 "ran out of log space tail 0x%llx/0x%llx, head lsn 0x%llx, head 0x%x/0x%x, prev head 0x%x/0x%x",
3158 			iclog ? be64_to_cpu(iclog->ic_header->h_tail_lsn) : -1,
3159 			atomic64_read(&log->l_tail_lsn),
3160 			log->l_ailp->ail_head_lsn,
3161 			log->l_curr_cycle, log->l_curr_block,
3162 			log->l_prev_cycle, log->l_prev_block);
3163 	xfs_alert(log->l_mp,
3164 "write grant 0x%llx, reserve grant 0x%llx, tail_space 0x%llx, size 0x%x, iclog flags 0x%x",
3165 			atomic64_read(&log->l_write_head.grant),
3166 			atomic64_read(&log->l_reserve_head.grant),
3167 			log->l_tail_space, log->l_logsize,
3168 			iclog ? iclog->ic_flags : -1);
3169 }
3170 
3171 /* Check if the new iclog will fit in the log. */
3172 STATIC void
3173 xlog_verify_tail_lsn(
3174 	struct xlog		*log,
3175 	struct xlog_in_core	*iclog)
3176 {
3177 	xfs_lsn_t	tail_lsn = be64_to_cpu(iclog->ic_header->h_tail_lsn);
3178 	int		blocks;
3179 
3180 	if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3181 		blocks = log->l_logBBsize -
3182 				(log->l_prev_block - BLOCK_LSN(tail_lsn));
3183 		if (blocks < BTOBB(iclog->ic_offset) +
3184 					BTOBB(log->l_iclog_hsize)) {
3185 			xfs_emerg(log->l_mp,
3186 					"%s: ran out of log space", __func__);
3187 			xlog_verify_dump_tail(log, iclog);
3188 		}
3189 		return;
3190 	}
3191 
3192 	if (CYCLE_LSN(tail_lsn) + 1 != log->l_prev_cycle) {
3193 		xfs_emerg(log->l_mp, "%s: head has wrapped tail.", __func__);
3194 		xlog_verify_dump_tail(log, iclog);
3195 		return;
3196 	}
3197 	if (BLOCK_LSN(tail_lsn) == log->l_prev_block) {
3198 		xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3199 		xlog_verify_dump_tail(log, iclog);
3200 		return;
3201 	}
3202 
3203 	blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3204 	if (blocks < BTOBB(iclog->ic_offset) + 1) {
3205 		xfs_emerg(log->l_mp, "%s: ran out of iclog space", __func__);
3206 		xlog_verify_dump_tail(log, iclog);
3207 	}
3208 }
3209 
3210 /*
3211  * Perform a number of checks on the iclog before writing to disk.
3212  *
3213  * 1. Make sure the iclogs are still circular
3214  * 2. Make sure we have a good magic number
3215  * 3. Make sure we don't have magic numbers in the data
3216  * 4. Check fields of each log operation header for:
3217  *	A. Valid client identifier
3218  *	B. tid ptr value falls in valid ptr space (user space code)
3219  *	C. Length in log record header is correct according to the
3220  *		individual operation headers within record.
3221  * 5. When a bwrite will occur within 5 blocks of the front of the physical
3222  *	log, check the preceding blocks of the physical log to make sure all
3223  *	the cycle numbers agree with the current cycle number.
3224  */
3225 STATIC void
3226 xlog_verify_iclog(
3227 	struct xlog		*log,
3228 	struct xlog_in_core	*iclog,
3229 	int			count)
3230 {
3231 	struct xlog_rec_header	*rhead = iclog->ic_header;
3232 	struct xlog_in_core	*icptr;
3233 	void			*base_ptr, *ptr;
3234 	ptrdiff_t		field_offset;
3235 	uint8_t			clientid;
3236 	int			len, i, op_len;
3237 	int			idx;
3238 
3239 	/* check validity of iclog pointers */
3240 	spin_lock(&log->l_icloglock);
3241 	icptr = log->l_iclog;
3242 	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
3243 		ASSERT(icptr);
3244 
3245 	if (icptr != log->l_iclog)
3246 		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3247 	spin_unlock(&log->l_icloglock);
3248 
3249 	/* check log magic numbers */
3250 	if (rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3251 		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3252 
3253 	base_ptr = ptr = rhead;
3254 	for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
3255 		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3256 			xfs_emerg(log->l_mp, "%s: unexpected magic num",
3257 				__func__);
3258 	}
3259 
3260 	/* check fields */
3261 	len = be32_to_cpu(rhead->h_num_logops);
3262 	base_ptr = ptr = iclog->ic_datap;
3263 	for (i = 0; i < len; i++) {
3264 		struct xlog_op_header	*ophead = ptr;
3265 		void			*p = &ophead->oh_clientid;
3266 
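		/*
		 * Note (added for explanation): by this point the record has
		 * been packed for writing - the cycle number is stamped into
		 * the first word of every 512 byte basic block and the
		 * overwritten words are stashed in the record header's cycle
		 * data. A field whose offset from the start of the data
		 * region is a multiple of 512 therefore has to be read back
		 * from the saved cycle data instead of from the block itself.
		 */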
3267 		/* clientid is only 1 byte */
3268 		field_offset = p - base_ptr;
3269 		if (field_offset & 0x1ff) {
3270 			clientid = ophead->oh_clientid;
3271 		} else {
3272 			idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap);
3273 			clientid = xlog_get_client_id(*xlog_cycle_data(rhead, idx));
3274 		}
3275 		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) {
3276 			xfs_warn(log->l_mp,
3277 				"%s: op %d invalid clientid %d op "PTR_FMT" offset 0x%lx",
3278 				__func__, i, clientid, ophead,
3279 				(unsigned long)field_offset);
3280 		}
3281 
3282 		/* check length */
3283 		p = &ophead->oh_len;
3284 		field_offset = p - base_ptr;
3285 		if (field_offset & 0x1ff) {
3286 			op_len = be32_to_cpu(ophead->oh_len);
3287 		} else {
3288 			idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap);
3289 			op_len = be32_to_cpu(*xlog_cycle_data(rhead, idx));
3290 		}
3291 		ptr += sizeof(struct xlog_op_header) + op_len;
3292 	}
3293 }
3294 #endif
3295 
3296 /*
3297  * Perform a forced shutdown on the log.
3298  *
3299  * This can be called from low level log code to trigger a shutdown, or from the
3300  * high level mount shutdown code when the mount shuts down.
3301  *
3302  * Our main objectives here are to make sure that:
3303  *	a. if the shutdown was not due to a log IO error, flush the logs to
3304  *	   disk. Anything modified after this is ignored.
3305  *	b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
3306  *	   parties to find out. Nothing new gets queued after this is done.
3307  *	c. Tasks sleeping on log reservations, pinned objects and
3308  *	   other resources get woken up.
3309  *	d. The mount is also marked as shut down so that log triggered shutdowns
3310  *	   still behave the same as if they called xfs_forced_shutdown().
3311  *
3312  * Return true if the shutdown cause was a log IO error and we actually shut the
3313  * log down.
3314  */
3315 bool
3316 xlog_force_shutdown(
3317 	struct xlog	*log,
3318 	uint32_t	shutdown_flags)
3319 {
3320 	bool		log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);
3321 
3322 	if (!log)
3323 		return false;
3324 
3325 	/*
3326 	 * Ensure that there is only ever one log shutdown being processed.
3327 	 * If we allow the log force below on a second pass after shutting
3328 	 * down the log, we risk deadlocking the CIL push as it may require
3329 	 * locks on objects the current shutdown context holds (e.g. taking
3330 	 * buffer locks to abort buffers on last unpin of buf log items).
3331 	 */
3332 	if (test_and_set_bit(XLOG_SHUTDOWN_STARTED, &log->l_opstate))
3333 		return false;
3334 
3335 	/*
3336 	 * Flush all the completed transactions to disk before marking the log
3337 	 * as being shut down. We need to do this first as shutting down the log
3338 	 * before the force will prevent the log force from flushing the iclogs
3339 	 * to disk.
3340 	 *
3341 	 * When we are in recovery, there are no transactions to flush, and
3342 	 * we don't want to touch the log because we don't want to perturb the
3343 	 * current head/tail for future recovery attempts. Hence we need to
3344 	 * avoid a log force in this case.
3345 	 *
3346 	 * If we are shutting down due to a log IO error, then we must avoid
3347 	 * trying to write the log as that may just result in more IO errors and
3348 	 * an endless shutdown/force loop.
3349 	 */
3350 	if (!log_error && !xlog_in_recovery(log))
3351 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3352 
3353 	/*
3354 	 * Atomically set the shutdown state. If the shutdown state is already
3355 	 * set, then someone else is performing the shutdown and so we are done
3356 	 * here. This should never happen because we should only ever get called
3357 	 * once by the first shutdown caller.
3358 	 *
3359 	 * Much of the log state machine transitions assume that shutdown state
3360 	 * cannot change once they hold the log->l_icloglock. Hence we need to
3361 	 * hold that lock here, even though we use the atomic test_and_set_bit()
3362 	 * operation to set the shutdown state.
3363 	 */
3364 	spin_lock(&log->l_icloglock);
3365 	if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
3366 		spin_unlock(&log->l_icloglock);
3367 		ASSERT(0);
3368 		return false;
3369 	}
3370 	spin_unlock(&log->l_icloglock);
3371 
3372 	/*
3373 	 * If this log shutdown also sets the mount shutdown state, issue a
3374 	 * shutdown warning message.
3375 	 */
3376 	if (!xfs_set_shutdown(log->l_mp)) {
3377 		xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
3378 "Filesystem has been shut down due to log error (0x%x).",
3379 				shutdown_flags);
3380 		xfs_alert(log->l_mp,
3381 "Please unmount the filesystem and rectify the problem(s).");
3382 		if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
3383 			xfs_stack_trace();
3384 	}
3385 
3386 	/*
3387 	 * We don't want anybody waiting for log reservations after this. That
3388 	 * means we have to wake up everybody queued up on reserveq as well as
3389 	 * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
3390 	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
3391 	 * action is protected by the grant locks.
3392 	 */
3393 	xlog_grant_head_wake_all(&log->l_reserve_head);
3394 	xlog_grant_head_wake_all(&log->l_write_head);
3395 
3396 	/*
3397 	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
3398 	 * as if the log writes were completed. The abort handling in the log
3399 	 * item committed callback functions will do this again under lock to
3400 	 * avoid races.
3401 	 */
3402 	spin_lock(&log->l_cilp->xc_push_lock);
3403 	wake_up_all(&log->l_cilp->xc_start_wait);
3404 	wake_up_all(&log->l_cilp->xc_commit_wait);
3405 	spin_unlock(&log->l_cilp->xc_push_lock);
3406 
3407 	spin_lock(&log->l_icloglock);
3408 	xlog_state_shutdown_callbacks(log);
3409 	spin_unlock(&log->l_icloglock);
3410 
3411 	wake_up_var(&log->l_opstate);
3412 	if (IS_ENABLED(CONFIG_XFS_RT) && xfs_has_zoned(log->l_mp))
3413 		xfs_zoned_wake_all(log->l_mp);
3414 
3415 	return log_error;
3416 }
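
/*
 * Illustrative call site (a sketch, not code from this file): the high level
 * shutdown path simply passes its reason flags through, e.g.
 *
 *	if (xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR))
 *		xfs_alert(mp, "shut down due to log I/O error");
 *
 * Only the call that both requested a log I/O error shutdown and actually
 * performed it sees a true return, so duplicate notifications can be
 * avoided on repeated shutdown attempts.
 */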
3417 
3418 STATIC int
3419 xlog_iclogs_empty(
3420 	struct xlog		*log)
3421 {
3422 	struct xlog_in_core	*iclog = log->l_iclog;
3423 
3424 	do {
3425 		/* endianness does not matter here, zero is zero in
3426 		 * any language.
3427 		 */
3428 		if (iclog->ic_header->h_num_logops)
3429 			return 0;
3430 		iclog = iclog->ic_next;
3431 	} while (iclog != log->l_iclog);
3432 
3433 	return 1;
3434 }
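
/*
 * Note (added for explanation, hedged): this only reports whether every
 * in-core log buffer currently carries zero logged operations; it is the
 * sort of check the idle-log covering code can assert on before deciding
 * the log is clean enough to cover.
 */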
3435 
3436 /*
3437  * Verify that an LSN stamped into a piece of metadata is valid. This is
3438  * intended for use in read verifiers on v5 superblocks.
3439  */
3440 bool
3441 xfs_log_check_lsn(
3442 	struct xfs_mount	*mp,
3443 	xfs_lsn_t		lsn)
3444 {
3445 	struct xlog		*log = mp->m_log;
3446 	bool			valid;
3447 
3448 	/*
3449 	 * norecovery mode skips mount-time log processing and unconditionally
3450 	 * resets the in-core LSN. We can't validate in this mode, but
3451 	 * modifications are not allowed anyway, so just return true.
3452 	 */
3453 	if (xfs_has_norecovery(mp))
3454 		return true;
3455 
3456 	/*
3457 	 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
3458 	 * handled by recovery and thus safe to ignore here.
3459 	 */
3460 	if (lsn == NULLCOMMITLSN)
3461 		return true;
3462 
3463 	valid = xlog_valid_lsn(mp->m_log, lsn);
3464 
3465 	/* warn the user about what's gone wrong before verifier failure */
3466 	if (!valid) {
3467 		spin_lock(&log->l_icloglock);
3468 		xfs_warn(mp,
3469 "Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
3470 "Please unmount and run xfs_repair (>= v4.3) to resolve.",
3471 			 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
3472 			 log->l_curr_cycle, log->l_curr_block);
3473 		spin_unlock(&log->l_icloglock);
3474 	}
3475 
3476 	return valid;
3477 }
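
/*
 * Illustrative use in a v5 read verifier (a sketch; the AGF field name is an
 * example and not taken from this file):
 *
 *	if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
 *		return __this_address;
 *
 * A false return becomes a verifier failure, after the warning above has
 * already pointed the administrator at the LSN that is ahead of the current
 * log head.
 */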
3478