// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

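	/*
	 * A zero length, single use, non-permanent ticket - consistent with
	 * the "steal space as transactions commit" model described above.
	 */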
	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push.  This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
}

static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
					niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}
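
/*
 * For illustration, the buffer this helper sizes is laid out as:
 *
 *	| struct xfs_log_vec | niovecs * struct xfs_log_iovec | data |
 *
 * so xlog_cil_iovec_space() returns the 64-bit aligned size of the first two
 * components, which is also the offset of the data area within the buffer
 * (see the lv_buf assignments below).
 */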

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * as it is needed, and this is the buffer that is reallocated to match the
 * size of the incoming modification. Then during the formatting of the item we
 * can swap the active buffer with the new one if we can't reuse the existing
 * buffer. We don't free the old buffer as it may be reused on the next
 * modification if its size is right, otherwise we'll free and reallocate it at
 * that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned.  We'll need to
		 * account for that slack space here. Then round nbytes up
		 * to 64-bit alignment so that the initial buffer alignment is
		 * easy to calculate and verify.
		 */
		nbytes += niovecs * sizeof(uint64_t);
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);
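		/*
		 * Worked example: a two-vector item with 100 bytes of data
		 * needs 100 + 2 * sizeof(uint64_t) = 116 bytes of copy space,
		 * rounded up to 120 bytes, plus xlog_cil_iovec_space(2) for
		 * the log vector header and iovec array in front of it.
		 */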

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {

			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kmem_zalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);

			lv = kmem_alloc_large(buf_size, KM_NOFS);
			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_next = NULL;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}

}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_bytes;
		*diff_iovecs += lv->lv_niovecs;
	}

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		*diff_iovecs -= old_lv->lv_niovecs;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
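
/*
 * An illustrative relog case for the accounting above: when an item already in
 * the CIL is committed again and formatting switched to its shadow buffer,
 * old_lv != lv, so the old buffer's bytes and iovecs are removed from the
 * running deltas and the old buffer is parked as the shadow for lazy freeing.
 */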

/*
 * Format log items into flat buffers
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer.  Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item.  */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

	spin_lock(&cil->xc_cil_lock);

	/* account for space used by new iovec headers  */
	iovhdr_res = diff_iovecs * sizeof(xlog_op_header_t);
	len += iovhdr_res;
	ctx->nvecs += diff_iovecs;

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx_res = ctx->ticket->t_unit_res;
		ctx->ticket->t_curr_res = ctx_res;
		tp->t_ticket->t_curr_res -= ctx_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		split_res = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += split_res;
		ctx->ticket->t_curr_res += split_res;
		tp->t_ticket->t_curr_res -= split_res;
		ASSERT(tp->t_ticket->t_curr_res >= len);
	}
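	/*
	 * For illustration: with ~32k of usable iclog space and a checkpoint
	 * that grows by 70k across an iclog boundary, the rounded-up division
	 * above yields 3, so we steal room for up to three extra record
	 * headers and their split-region op headers.
	 */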
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
	}

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	list_for_each_entry(lip, &tp->t_items, li_trans) {

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * Only move the item if it isn't already at the tail. This is
		 * to prevent a transient list_empty() state when reinserting
		 * an item that is already the only item in the CIL.
		 */
		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
			list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	spin_unlock(&cil->xc_cil_lock);

	if (tp->t_ticket->t_curr_res < 0)
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
}

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}

static void
xlog_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock.  Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
	struct bio		*bio)
{
	struct xfs_cil_ctx	*ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
	bio_put(bio);
}

static void
xlog_discard_busy_extents(
	struct xfs_mount	*mp,
	struct xfs_cil_ctx	*ctx)
{
	struct list_head	*list = &ctx->busy_extents;
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, 0, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
	 "discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

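	/*
	 * Any discard bios issued above were chained together, so attaching
	 * the context to the final bio defers the busy extent clearing (and
	 * the freeing of the context) until the whole chain has completed.
	 * If nothing was issued, complete the work inline.
	 */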
	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	struct xfs_cil_ctx	*ctx)
{
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
	bool			abort = XLOG_FORCED_SHUTDOWN(ctx->cil->xc_log);

	/*
	 * If the I/O failed, we're aborting the commit and already shutdown.
	 * Wake any commit waiters before aborting the log items so we don't
	 * block async log pushers on callbacks. Async log pushers explicitly do
	 * not wait on log force completion because they may be holding locks
	 * required to unpin items.
	 */
	if (abort) {
		spin_lock(&ctx->cil->xc_push_lock);
		wake_up_all(&ctx->cil->xc_commit_wait);
		spin_unlock(&ctx->cil->xc_push_lock);
	}

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}

void
xlog_cil_process_committed(
	struct list_head	*list)
{
	struct xfs_cil_ctx	*ctx;

	while ((ctx = list_first_entry_or_null(list,
			struct xfs_cil_ctx, iclog_entry))) {
		list_del(&ctx->iclog_entry);
		xlog_cil_committed(ctx);
	}
}

/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice.  If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 */
static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil		*cil =
		container_of(work, struct xfs_cil, xc_push_work);
	struct xlog		*log = cil->xc_log;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_iovecs;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;
	xfs_lsn_t		push_seq;

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);

	/*
	 * Wake up any background push waiters now that this context is being
	 * pushed.
	 */
	if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
		wake_up_all(&cil->xc_push_wait);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < cil->xc_ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the waiter makes to
	 * wait on the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_iovecs = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI.  Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_lsn requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	cil->xc_current_sequence = new_ctx->sequence;
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

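	/*
	 * The stack-allocated header vector is chained in front of the
	 * checkpoint's log vector chain so that xlog_write() emits the
	 * transaction header as the first region of the checkpoint.
	 */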
	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0, true);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log)) {
			spin_unlock(&cil->xc_push_lock);
			goto out_abort_free_ticket;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_push_lock);

	error = xlog_commit_record(log, tic, &commit_iclog, &commit_lsn);
	if (error)
		goto out_abort_free_ticket;

	xfs_log_ticket_ungrant(log, tic);

	spin_lock(&commit_iclog->ic_callback_lock);
	if (commit_iclog->ic_state == XLOG_STATE_IOERROR) {
		spin_unlock(&commit_iclog->ic_callback_lock);
		goto out_abort;
	}
	ASSERT_ALWAYS(commit_iclog->ic_state == XLOG_STATE_ACTIVE ||
		      commit_iclog->ic_state == XLOG_STATE_WANT_SYNC);
	list_add_tail(&ctx->iclog_entry, &commit_iclog->ic_callbacks);
	spin_unlock(&commit_iclog->ic_callback_lock);

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);

	/* release the hounds! */
	xfs_log_release_iclog(commit_iclog);
	return;

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return;

out_abort_free_ticket:
	xfs_log_ticket_ungrant(log, tic);
out_abort:
	ASSERT(XLOG_FORCED_SHUTDOWN(log));
	xlog_cil_committed(ctx);
}

/*
 * We need to push the CIL every so often so we don't cache more than we can
 * fit in the log. The limit really is that a checkpoint can't be more than
 * half the log (the current checkpoint is not allowed to overwrite the
 * previous checkpoint), but commit latency and memory usage limit this to a
 * smaller size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log) __releases(cil->xc_ctx_lock)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock, so whatever we added to the CIL will still be there.
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
		up_read(&cil->xc_ctx_lock);
		return;
	}

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	}

	/*
	 * Drop the context lock now, we can't hold that if we need to sleep
	 * because we are over the blocking threshold. The push_lock is still
	 * held, so blocking threshold sleep/wakeup is still correctly
	 * serialised here.
	 */
	up_read(&cil->xc_ctx_lock);

	/*
	 * If we are well over the space limit, throttle the work that is being
	 * done until the push work on this context has begun.
	 */
	if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) {
		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
		ASSERT(cil->xc_ctx->space_used < log->l_logsize);
		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
		return;
	}

	spin_unlock(&cil->xc_push_lock);
}

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	flush_work(&cil->xc_push_work);

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * The context lock is taken here in read mode to lock out background commit;
 * xlog_cil_push_background() releases it once background commits are allowed
 * again.
 */
void
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_lsn_t		*commit_lsn,
	bool			regrant)
{
	struct xlog		*log = mp->m_log;
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_item	*lip, *next;
	xfs_lsn_t		xc_commit_lsn;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);

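	/*
	 * The "commit lsn" the CIL hands back to callers is really the
	 * checkpoint sequence number; a log force against it is translated
	 * back to a real LSN by xlog_cil_force_lsn().
	 */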
	xc_commit_lsn = cil->xc_ctx->sequence;
	if (commit_lsn)
		*commit_lsn = xc_commit_lsn;

	if (regrant && !XLOG_FORCED_SHUTDOWN(log))
		xfs_log_ticket_regrant(log, tp->t_ticket);
	else
		xfs_log_ticket_ungrant(log, tp->t_ticket);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and possibly freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	trace_xfs_trans_commit_items(tp, _RET_IP_);
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_ops->iop_committing)
			lip->li_ops->iop_committing(lip, xc_commit_lsn);
	}

	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
	xlog_cil_push_background(log);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant.  If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not empty,
	 * it means we haven't yet started the push, because if it had started
	 * we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !list_empty(&cil->xc_cil)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	struct xfs_cil_ctx *ctx;

	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return -ENOMEM;
	}

	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_waitqueue_head(&cil->xc_push_wait);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}