1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
4 */
5
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_shared.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_extent_busy.h"
14 #include "xfs_trans.h"
15 #include "xfs_trans_priv.h"
16 #include "xfs_log.h"
17 #include "xfs_log_priv.h"
18 #include "xfs_trace.h"
19 #include "xfs_discard.h"
20
21 /*
22 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
23 * recover, so we don't allow failure here. Also, we allocate in a context that
24 * we don't want to be issuing transactions from, so we need to tell the
25 * allocation code this as well.
26 *
27 * We don't reserve any space for the ticket - we are going to steal whatever
28 * space we require from transactions as they commit. To ensure we reserve all
29 * the space required, we need to set the current reservation of the ticket to
30 * zero so that we know to steal the initial transaction overhead from the
31 * first transaction commit.
32 */
33 static struct xlog_ticket *
34 xlog_cil_ticket_alloc(
35 struct xlog *log)
36 {
37 struct xlog_ticket *tic;
38
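	/*
	 * Zero length, single iteration, non-permanent ticket - all of the
	 * space it ends up holding is stolen from transactions as they commit.
	 */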
39 tic = xlog_ticket_alloc(log, 0, 1, 0);
40
41 /*
42 * set the current reservation to zero so we know to steal the basic
43 * transaction overhead reservation from the first transaction commit.
44 */
45 tic->t_curr_res = 0;
46 tic->t_iclog_hdrs = 0;
47 return tic;
48 }
49
50 static inline void
51 xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
52 {
53 struct xlog *log = cil->xc_log;
54
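	/*
	 * Each iclog can carry (l_iclog_size - l_iclog_hsize) bytes of
	 * checkpoint data, so this is the number of iclog headers a CIL that
	 * has grown to the blocking space limit could require. Commits steal
	 * header reservation from this count as they add to the CIL.
	 */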
55 atomic_set(&cil->xc_iclog_hdrs,
56 (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
57 (log->l_iclog_size - log->l_iclog_hsize)));
58 }
59
60 /*
61 * Check if the current log item was first committed in this sequence.
62 * We can't rely on just the log item being in the CIL, we have to check
63 * the recorded commit sequence number.
64 *
65 * Note: for this to be used in a non-racy manner, it has to be called with
66 * CIL flushing locked out. As a result, it should only be used during the
67 * transaction commit process when deciding what to format into the item.
68 */
69 static bool
70 xlog_item_in_current_chkpt(
71 struct xfs_cil *cil,
72 struct xfs_log_item *lip)
73 {
74 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
75 return false;
76
77 /*
78 * li_seq is written on the first commit of a log item to record the
79 * first checkpoint it is written to. Hence if it is different to the
80 * current sequence, we're in a new checkpoint.
81 */
82 return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
83 }
84
85 bool
86 xfs_log_item_in_current_chkpt(
87 struct xfs_log_item *lip)
88 {
89 return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
90 }
91
92 /*
93 * Unavoidable forward declaration - xlog_cil_push_work() calls
94 * xlog_cil_ctx_alloc() itself.
95 */
96 static void xlog_cil_push_work(struct work_struct *work);
97
98 static struct xfs_cil_ctx *
99 xlog_cil_ctx_alloc(void)
100 {
101 struct xfs_cil_ctx *ctx;
102
103 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
104 INIT_LIST_HEAD(&ctx->committing);
105 INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
106 INIT_LIST_HEAD(&ctx->log_items);
107 INIT_LIST_HEAD(&ctx->lv_chain);
108 INIT_WORK(&ctx->push_work, xlog_cil_push_work);
109 return ctx;
110 }
111
112 /*
113 * Aggregate the CIL per cpu structures into global counts, lists, etc and
114 * clear the percpu state ready for the next context to use. This is called
115 * from the push code with the context lock held exclusively, hence nothing else
116 * will be accessing or modifying the per-cpu counters.
117 */
118 static void
119 xlog_cil_push_pcp_aggregate(
120 struct xfs_cil *cil,
121 struct xfs_cil_ctx *ctx)
122 {
123 struct xlog_cil_pcp *cilpcp;
124 int cpu;
125
126 for_each_cpu(cpu, &ctx->cil_pcpmask) {
127 cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
128
129 ctx->ticket->t_curr_res += cilpcp->space_reserved;
130 cilpcp->space_reserved = 0;
131
132 if (!list_empty(&cilpcp->busy_extents)) {
133 list_splice_init(&cilpcp->busy_extents,
134 &ctx->busy_extents.extent_list);
135 }
136 if (!list_empty(&cilpcp->log_items))
137 list_splice_init(&cilpcp->log_items, &ctx->log_items);
138
139 /*
140 * We're in the middle of switching cil contexts. Reset the
141 * counter we use to detect when the current context is nearing
142 * full.
143 */
144 cilpcp->space_used = 0;
145 }
146 }
147
148 /*
149 * Aggregate the CIL per-cpu space used counters into the global atomic value.
150 * This is called when the per-cpu counter aggregation will first pass the soft
151 * limit threshold so we can switch to atomic counter aggregation for accurate
152 * detection of hard limit traversal.
153 */
154 static void
155 xlog_cil_insert_pcp_aggregate(
156 struct xfs_cil *cil,
157 struct xfs_cil_ctx *ctx)
158 {
159 int cpu;
160 int count = 0;
161
162 /* Trigger atomic updates then aggregate only for the first caller */
163 if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
164 return;
165
166 /*
167 * We can race with other cpus setting cil_pcpmask. However, we've
168 * atomically cleared PCP_SPACE which forces other threads to add to
169 * the global space used count. cil_pcpmask is a superset of cilpcp
170 * structures that could have a nonzero space_used.
171 */
172 for_each_cpu(cpu, &ctx->cil_pcpmask) {
173 struct xlog_cil_pcp *cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
174
175 count += xchg(&cilpcp->space_used, 0);
176 }
177 atomic_add(count, &ctx->space_used);
178 }
179
180 static void
181 xlog_cil_ctx_switch(
182 struct xfs_cil *cil,
183 struct xfs_cil_ctx *ctx)
184 {
185 xlog_cil_set_iclog_hdr_count(cil);
186 set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
187 set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
188 ctx->sequence = ++cil->xc_current_sequence;
189 ctx->cil = cil;
190 cil->xc_ctx = ctx;
191 }
192
193 /*
194 * After the first stage of log recovery is done, we know where the head and
195 * tail of the log are. We need this log initialisation done before we can
196 * initialise the first CIL checkpoint context.
197 *
198 * Here we allocate a log ticket to track space usage during a CIL push. This
199 * ticket is passed to xlog_write() directly so that we don't slowly leak log
200 * space by failing to account for space used by log headers and additional
201 * region headers for split regions.
202 */
203 void
204 xlog_cil_init_post_recovery(
205 struct xlog *log)
206 {
207 log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
208 log->l_cilp->xc_ctx->sequence = 1;
209 xlog_cil_set_iclog_hdr_count(log->l_cilp);
210 }
211
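/*
 * Space needed for the log vector header plus the iovec array, rounded up to
 * a uint64_t boundary so the data region that follows stays 64-bit aligned.
 */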
212 static inline int
213 xlog_cil_iovec_space(
214 uint niovecs)
215 {
216 return round_up((sizeof(struct xfs_log_vec) +
217 niovecs * sizeof(struct xfs_log_iovec)),
218 sizeof(uint64_t));
219 }
220
221 /*
222 * Allocate or pin log vector buffers for CIL insertion.
223 *
224 * The CIL currently uses disposable buffers for copying a snapshot of the
225 * modified items into the log during a push. The biggest problem with this is
226 * the requirement to allocate the disposable buffer during the commit if:
227 * a) it does not exist; or
228 * b) it is too small
229 *
230 * If we do this allocation within xlog_cil_insert_format_items(), it is done
231 * under the xc_ctx_lock, which means that a CIL push cannot occur during
232 * the memory allocation. This means that we have a potential deadlock situation
233 * under low memory conditions when we have lots of dirty metadata pinned in
234 * the CIL and we need a CIL commit to occur to free memory.
235 *
236 * To avoid this, we need to move the memory allocation outside the
237 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
238 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
239 * vector buffers between the check and the formatting of the item into the
240 * log vector buffer within the xc_ctx_lock.
241 *
242 * Because the log vector buffer needs to be unchanged during the CIL push
243 * process, we cannot share the buffer between the transaction commit (which
244 * modifies the buffer) and the CIL push context that is writing the changes
245 * into the log. This means skipping preallocation of buffer space is
246 * unreliable, but we most definitely do not want to be allocating and freeing
247 * buffers unnecessarily during commits when overwrites can be done safely.
248 *
249 * The simplest solution to this problem is to allocate a shadow buffer when a
250 * log item is committed for the second time, and then to only use this buffer
251 * if necessary. The buffer can remain attached to the log item until such time
252 * it is needed, and this is the buffer that is reallocated to match the size of
253 * the incoming modification. Then during the formatting of the item we can swap
254 * the active buffer with the new one if we can't reuse the existing buffer. We
255 * don't free the old buffer as it may be reused on the next modification if
256 * its size is right, otherwise we'll free and reallocate it at that point.
257 *
258 * This function builds a vector for the changes in each log item in the
259 * transaction. It then works out the length of the buffer needed for each log
260 * item, allocates them and attaches the vector to the log item in preparation
261 * for the formatting step which occurs under the xc_ctx_lock.
262 *
263 * While this means the memory footprint goes up, it avoids the repeated
264 * alloc/free pattern that repeated modifications of an item would otherwise
265 * cause, and hence minimises the CPU overhead of such behaviour.
266 */
267 static void
268 xlog_cil_alloc_shadow_bufs(
269 struct xlog *log,
270 struct xfs_trans *tp)
271 {
272 struct xfs_log_item *lip;
273
274 list_for_each_entry(lip, &tp->t_items, li_trans) {
275 struct xfs_log_vec *lv;
276 int niovecs = 0;
277 int nbytes = 0;
278 int buf_size;
279 bool ordered = false;
280
281 /* Skip items which aren't dirty in this transaction. */
282 if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
283 continue;
284
285 /* get number of vecs and size of data to be stored */
286 lip->li_ops->iop_size(lip, &niovecs, &nbytes);
287
288 /*
289 * Ordered items need to be tracked but we do not wish to write
290 * them. We need a logvec to track the object, but we do not
291 * need an iovec or buffer to be allocated for copying data.
292 */
293 if (niovecs == XFS_LOG_VEC_ORDERED) {
294 ordered = true;
295 niovecs = 0;
296 nbytes = 0;
297 }
298
299 /*
300 * We 64-bit align the length of each iovec so that the start of
301 * the next one is naturally aligned. We'll need to account for
302 * that slack space here.
303 *
304 * We also add the xlog_op_header to each region when
305 * formatting, but that's not accounted to the size of the item
306 * at this point. Hence we'll need an additional number of bytes
307 * for each vector to hold an opheader.
308 *
309 * Then round nbytes up to 64-bit alignment so that the initial
310 * buffer alignment is easy to calculate and verify.
311 */
312 nbytes += niovecs *
313 (sizeof(uint64_t) + sizeof(struct xlog_op_header));
314 nbytes = round_up(nbytes, sizeof(uint64_t));
315
316 /*
317 * The data buffer needs to start 64-bit aligned, so round up
318 * that space to ensure we can align it appropriately and not
319 * overrun the buffer.
320 */
321 buf_size = nbytes + xlog_cil_iovec_space(niovecs);
322
323 /*
324 * if we have no shadow buffer, or it is too small, we need to
325 * reallocate it.
326 */
327 if (!lip->li_lv_shadow ||
328 buf_size > lip->li_lv_shadow->lv_size) {
329 /*
330 * We free and allocate here as a realloc would copy
331 * unnecessary data. We don't use kvzalloc() for the
332 * same reason - we don't need to zero the data area in
333 * the buffer, only the log vector header and the iovec
334 * storage.
335 */
336 kvfree(lip->li_lv_shadow);
337 lv = xlog_kvmalloc(buf_size);
338
339 memset(lv, 0, xlog_cil_iovec_space(niovecs));
340
341 INIT_LIST_HEAD(&lv->lv_list);
342 lv->lv_item = lip;
343 lv->lv_size = buf_size;
344 if (ordered)
345 lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
346 else
347 lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
348 lip->li_lv_shadow = lv;
349 } else {
350 /* same or smaller, optimise common overwrite case */
351 lv = lip->li_lv_shadow;
352 if (ordered)
353 lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
354 else
355 lv->lv_buf_len = 0;
356 lv->lv_bytes = 0;
357 }
358
359 /* Ensure the lv is set up according to ->iop_size */
360 lv->lv_niovecs = niovecs;
361
362 /* The allocated data region lies beyond the iovec region */
363 lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
364 }
365
366 }
367
368 /*
369 * Prepare the log item for insertion into the CIL. Calculate the difference in
370 * log space it will consume, and if it is a new item pin it as well.
371 */
372 STATIC void
373 xfs_cil_prepare_item(
374 struct xlog *log,
375 struct xfs_log_vec *lv,
376 struct xfs_log_vec *old_lv,
377 int *diff_len)
378 {
379 /* Account for the new LV being passed in */
380 if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
381 *diff_len += lv->lv_bytes;
382
383 /*
384 * If there is no old LV, this is the first time we've seen the item in
385 * this CIL context and so we need to pin it. If we are replacing the
386 * old_lv, then remove the space it accounts for and make it the shadow
387 * buffer for later freeing. In both cases we are now switching to the
388 * shadow buffer, so update the pointer to it appropriately.
389 */
390 if (!old_lv) {
391 if (lv->lv_item->li_ops->iop_pin)
392 lv->lv_item->li_ops->iop_pin(lv->lv_item);
393 lv->lv_item->li_lv_shadow = NULL;
394 } else if (old_lv != lv) {
395 ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);
396
397 *diff_len -= old_lv->lv_bytes;
398 lv->lv_item->li_lv_shadow = old_lv;
399 }
400
401 /* attach new log vector to log item */
402 lv->lv_item->li_lv = lv;
403
404 /*
405 * If this is the first time the item is being committed to the
406 * CIL, store the sequence number on the log item so we can
407 * tell in future commits whether this is the first checkpoint
408 * the item is being committed into.
409 */
410 if (!lv->lv_item->li_seq)
411 lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
412 }
413
414 /*
415 * Format log items into flat buffers
416 *
417 * For delayed logging, we need to hold a formatted buffer containing all the
418 * changes on the log item. This enables us to relog the item in memory and
419 * write it out asynchronously without needing to relock the object that was
420 * modified at the time it gets written into the iclog.
421 *
422 * This function takes the prepared log vectors attached to each log item, and
423 * formats the changes into the log vector buffer. The buffer it uses is
424 * dependent on the current state of the vector in the CIL - the shadow lv is
425 * guaranteed to be large enough for the current modification, but we will only
426 * use that if we can't reuse the existing lv. If we can't reuse the existing
427 * lv, then simply swap it out for the shadow lv. We don't free it - that is
428 * done lazily either by the next modification or the freeing of the log item.
429 *
430 * We don't set up region headers during this process; we simply copy the
431 * regions into the flat buffer. We can do this because we still have to do a
432 * formatting step to write the regions into the iclog buffer. Writing the
433 * ophdrs during the iclog write means that we can support splitting large
434 * regions across iclog boundaries without needing a change in the format of the
435 * item/region encapsulation.
436 *
437 * Hence what we need to do now is rewrite the vector array to point
438 * to the copied region inside the buffer we just allocated. This allows us to
439 * format the regions into the iclog as though they are being formatted
440 * directly out of the objects themselves.
441 */
442 static void
443 xlog_cil_insert_format_items(
444 struct xlog *log,
445 struct xfs_trans *tp,
446 int *diff_len)
447 {
448 struct xfs_log_item *lip;
449
450 /* Bail out if we didn't find a log item. */
451 if (list_empty(&tp->t_items)) {
452 ASSERT(0);
453 return;
454 }
455
456 list_for_each_entry(lip, &tp->t_items, li_trans) {
457 struct xfs_log_vec *lv;
458 struct xfs_log_vec *old_lv = NULL;
459 struct xfs_log_vec *shadow;
460 bool ordered = false;
461
462 /* Skip items which aren't dirty in this transaction. */
463 if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
464 continue;
465
466 /*
467 * The formatting size information is already attached to
468 * the shadow lv on the log item.
469 */
470 shadow = lip->li_lv_shadow;
471 if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
472 ordered = true;
473
474 /* Skip items that do not have any vectors for writing */
475 if (!shadow->lv_niovecs && !ordered)
476 continue;
477
478 /* compare to existing item size */
479 old_lv = lip->li_lv;
480 if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
481 /* same or smaller, optimise common overwrite case */
482 lv = lip->li_lv;
483
484 if (ordered)
485 goto insert;
486
487 /*
488 * set the item up as though it is a new insertion so
489 * that the space reservation accounting is correct.
490 */
491 *diff_len -= lv->lv_bytes;
492
493 /* Ensure the lv is set up according to ->iop_size */
494 lv->lv_niovecs = shadow->lv_niovecs;
495
496 /* reset the lv buffer information for new formatting */
497 lv->lv_buf_len = 0;
498 lv->lv_bytes = 0;
499 lv->lv_buf = (char *)lv +
500 xlog_cil_iovec_space(lv->lv_niovecs);
501 } else {
502 /* switch to shadow buffer! */
503 lv = shadow;
504 lv->lv_item = lip;
505 if (ordered) {
506 /* track as an ordered logvec */
507 ASSERT(lip->li_lv == NULL);
508 goto insert;
509 }
510 }
511
512 ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
513 lip->li_ops->iop_format(lip, lv);
514 insert:
515 xfs_cil_prepare_item(log, lv, old_lv, diff_len);
516 }
517 }
518
519 /*
520 * The use of lockless waitqueue_active() requires that the caller has
521 * serialised itself against the wakeup call in xlog_cil_push_work(). That
522 * can be done by either holding the push lock or the context lock.
523 */
524 static inline bool
525 xlog_cil_over_hard_limit(
526 struct xlog *log,
527 int32_t space_used)
528 {
529 if (waitqueue_active(&log->l_cilp->xc_push_wait))
530 return true;
531 if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
532 return true;
533 return false;
534 }
535
536 /*
537 * Insert the log items into the CIL and calculate the difference in space
538 * consumed by the item. Add the space to the checkpoint ticket and calculate
539 * if the change requires additional log metadata. If it does, take that space
540 * as well. Remove the amount of space we added to the checkpoint ticket from
541 * the current transaction ticket so that the accounting works out correctly.
542 */
543 static void
544 xlog_cil_insert_items(
545 struct xlog *log,
546 struct xfs_trans *tp,
547 uint32_t released_space)
548 {
549 struct xfs_cil *cil = log->l_cilp;
550 struct xfs_cil_ctx *ctx = cil->xc_ctx;
551 struct xfs_log_item *lip;
552 int len = 0;
553 int iovhdr_res = 0, split_res = 0, ctx_res = 0;
554 int space_used;
555 int order;
556 unsigned int cpu_nr;
557 struct xlog_cil_pcp *cilpcp;
558
559 ASSERT(tp);
560
561 /*
562 * We can do this safely because the context can't checkpoint until we
563 * are done so it doesn't matter exactly how we update the CIL.
564 */
565 xlog_cil_insert_format_items(log, tp, &len);
566
567 /*
568 * Subtract the space released by intent cancelation from the space we
569 * consumed so that we remove it from the CIL space and add it back to
570 * the current transaction reservation context.
571 */
572 len -= released_space;
573
574 /*
575 * Grab the per-cpu pointer for the CIL before we start any accounting.
576 * That ensures that we are running with pre-emption disabled and so we
577 * can't be scheduled away between split sample/update operations that
578 * are done without outside locking to serialise them.
579 */
580 cpu_nr = get_cpu();
581 cilpcp = this_cpu_ptr(cil->xc_pcp);
582
583 /* Tell the future push that there was work added by this CPU. */
584 if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
585 cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);
586
587 /*
588 * We need to take the CIL checkpoint unit reservation on the first
589 * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
590 * unnecessarily do an atomic op in the fast path here. We can clear the
591 * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
592 * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
593 */
594 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
595 test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
596 ctx_res = ctx->ticket->t_unit_res;
597
598 /*
599 * Check if we need to steal iclog headers. atomic_read() is not a
600 * locked atomic operation, so we can check the value before we do any
601 * real atomic ops in the fast path. If we've already taken the CIL unit
602 * reservation from this commit, we've already got one iclog header
603 * space reserved so we have to account for that otherwise we risk
604 * overrunning the reservation on this ticket.
605 *
606 * If the CIL is already at the hard limit, we might need more header
607 * space than originally reserved. So steal more header space from every
608 * commit that occurs once we are over the hard limit to ensure the CIL
609 * push won't run out of reservation space.
610 *
611 * This can steal more than we need, but that's OK.
612 *
613 * The cil->xc_ctx_lock provides the serialisation necessary for safely
614 * calling xlog_cil_over_hard_limit() in this context.
615 */
616 space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
617 if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
618 xlog_cil_over_hard_limit(log, space_used)) {
619 split_res = log->l_iclog_hsize +
620 sizeof(struct xlog_op_header);
621 if (ctx_res)
622 ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
623 else
624 ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
625 atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
626 }
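	/*
	 * The reservation stolen here is accumulated per-cpu and folded into
	 * the checkpoint ticket by xlog_cil_push_pcp_aggregate() at push time.
	 */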
627 cilpcp->space_reserved += ctx_res;
628
629 /*
630 * Accurately account when over the soft limit, otherwise fold the
631 * percpu count into the global count if over the per-cpu threshold.
632 */
633 if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
634 atomic_add(len, &ctx->space_used);
635 } else if (cilpcp->space_used + len >
636 (XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
637 space_used = atomic_add_return(cilpcp->space_used + len,
638 &ctx->space_used);
639 cilpcp->space_used = 0;
640
641 /*
642 * If we just transitioned over the soft limit, we need to
643 * transition to the global atomic counter.
644 */
645 if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
646 xlog_cil_insert_pcp_aggregate(cil, ctx);
647 } else {
648 cilpcp->space_used += len;
649 }
650 /* attach the transaction to the CIL if it has any busy extents */
651 if (!list_empty(&tp->t_busy))
652 list_splice_init(&tp->t_busy, &cilpcp->busy_extents);
653
654 /*
655 * Now update the order of everything modified in the transaction
656 * and insert items into the CIL if they aren't already there.
657 * We do this here so we only need to take the CIL lock once during
658 * the transaction commit.
659 */
660 order = atomic_inc_return(&ctx->order_id);
661 list_for_each_entry(lip, &tp->t_items, li_trans) {
662 /* Skip items which aren't dirty in this transaction. */
663 if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
664 continue;
665
666 lip->li_order_id = order;
667 if (!list_empty(&lip->li_cil))
668 continue;
669 list_add_tail(&lip->li_cil, &cilpcp->log_items);
670 }
671 put_cpu();
672
673 /*
674 * If we've overrun the reservation, dump the tx details before we move
675 * the log items. Shutdown is imminent...
676 */
677 tp->t_ticket->t_curr_res -= ctx_res + len;
678 if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
679 xfs_warn(log->l_mp, "Transaction log reservation overrun:");
680 xfs_warn(log->l_mp,
681 " log items: %d bytes (iov hdrs: %d bytes)",
682 len, iovhdr_res);
683 xfs_warn(log->l_mp, " split region headers: %d bytes",
684 split_res);
685 xfs_warn(log->l_mp, " ctx ticket: %d bytes", ctx_res);
686 xlog_print_trans(tp);
687 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
688 }
689 }
690
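/*
 * Insert a batch of log items into the AIL with a single AIL lock round trip,
 * then unpin them. xfs_trans_ail_update_bulk() drops the AIL lock for us, so
 * the unpin calls run without the lock held.
 */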
691 static inline void
692 xlog_cil_ail_insert_batch(
693 struct xfs_ail *ailp,
694 struct xfs_ail_cursor *cur,
695 struct xfs_log_item **log_items,
696 int nr_items,
697 xfs_lsn_t commit_lsn)
698 {
699 int i;
700
701 spin_lock(&ailp->ail_lock);
702 /* xfs_trans_ail_update_bulk drops ailp->ail_lock */
703 xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
704
705 for (i = 0; i < nr_items; i++) {
706 struct xfs_log_item *lip = log_items[i];
707
708 if (lip->li_ops->iop_unpin)
709 lip->li_ops->iop_unpin(lip, 0);
710 }
711 }
712
713 /*
714 * Take the checkpoint's log vector chain of items and insert the attached log
715 * items into the AIL. This uses bulk insertion techniques to minimise AIL lock
716 * traffic.
717 *
718 * The AIL tracks log items via the start record LSN of the checkpoint,
719 * not the commit record LSN. This is because we can pipeline multiple
720 * checkpoints, and so the start record of checkpoint N+1 can be
721 * written before the commit record of checkpoint N. i.e:
722 *
723 *	start N			commit N
724 *	+-------------+------------+----------------+
725 *		  start N+1			commit N+1
726 *
727 * The tail of the log cannot be moved to the LSN of commit N when all
728 * the items of that checkpoint are written back, because then the
729 * start record for N+1 is no longer in the active portion of the log
730 * and recovery will fail/corrupt the filesystem.
731 *
732 * Hence when all the log items in checkpoint N are written back, the
733 * tail of the log must now only move as far forwards as the start LSN
734 * of checkpoint N+1.
735 *
736 * If we are called with the aborted flag set, it is because a log write during
737 * a CIL checkpoint commit has failed. In this case, all the items in the
738 * checkpoint have already gone through iop_committed and iop_committing, which
739 * means that checkpoint commit abort handling is treated exactly the same as an
740 * iclog write error even though we haven't started any IO yet. Hence in this
741 * case all we need to do is iop_committed processing, followed by an
742 * iop_unpin(aborted) call.
743 *
744 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
745 * at the end of the AIL, the insert cursor avoids the need to walk the AIL to
746 * find the insertion point on every xlog_cil_ail_insert_batch() call. This
747 * saves a lot of needless list walking and is a net win, even though it
748 * slightly increases the amount of AIL lock traffic to set it up and tear it
749 * down.
750 */
751 static void
752 xlog_cil_ail_insert(
753 struct xfs_cil_ctx *ctx,
754 bool aborted)
755 {
756 #define LOG_ITEM_BATCH_SIZE 32
757 struct xfs_ail *ailp = ctx->cil->xc_log->l_ailp;
758 struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE];
759 struct xfs_log_vec *lv;
760 struct xfs_ail_cursor cur;
761 xfs_lsn_t old_head;
762 int i = 0;
763
764 /*
765 * Update the AIL head LSN with the commit record LSN of this
766 * checkpoint. As iclogs are always completed in order, this should
767 * always be the same (as iclogs can contain multiple commit records) or
768 * higher LSN than the current head. We do this before insertion of the
769 * items so that log space checks during insertion will reflect the
770 * space that this checkpoint has already consumed. We call
771 * xfs_ail_update_finish() so that tail space and space-based wakeups
772 * will be recalculated appropriately.
773 */
774 ASSERT(XFS_LSN_CMP(ctx->commit_lsn, ailp->ail_head_lsn) >= 0 ||
775 aborted);
776 spin_lock(&ailp->ail_lock);
777 xfs_trans_ail_cursor_last(ailp, &cur, ctx->start_lsn);
778 old_head = ailp->ail_head_lsn;
779 ailp->ail_head_lsn = ctx->commit_lsn;
780 /* xfs_ail_update_finish() drops the ail_lock */
781 xfs_ail_update_finish(ailp, NULLCOMMITLSN);
782
783 /*
784 * We move the AIL head forwards to account for the space used in the
785 * log before we remove that space from the grant heads. This prevents a
786 * transient condition where reservation space appears to become
787 * available on return, only for it to disappear again immediately as
788 * the AIL head update accounts in the log tail space.
789 */
790 smp_wmb(); /* paired with smp_rmb in xlog_grant_space_left */
791 xlog_grant_return_space(ailp->ail_log, old_head, ailp->ail_head_lsn);
792
793 /* unpin all the log items */
794 list_for_each_entry(lv, &ctx->lv_chain, lv_list) {
795 struct xfs_log_item *lip = lv->lv_item;
796 xfs_lsn_t item_lsn;
797
798 if (aborted)
799 set_bit(XFS_LI_ABORTED, &lip->li_flags);
800
801 if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
802 lip->li_ops->iop_release(lip);
803 continue;
804 }
805
806 if (lip->li_ops->iop_committed)
807 item_lsn = lip->li_ops->iop_committed(lip,
808 ctx->start_lsn);
809 else
810 item_lsn = ctx->start_lsn;
811
812 /* item_lsn of -1 means the item needs no further processing */
813 if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
814 continue;
815
816 /*
817 * if we are aborting the operation, no point in inserting the
818 * object into the AIL as we are in a shutdown situation.
819 */
820 if (aborted) {
821 ASSERT(xlog_is_shutdown(ailp->ail_log));
822 if (lip->li_ops->iop_unpin)
823 lip->li_ops->iop_unpin(lip, 1);
824 continue;
825 }
826
827 if (item_lsn != ctx->start_lsn) {
828
829 /*
830 * Not a bulk update option due to unusual item_lsn.
831 * Push into AIL immediately, rechecking the lsn once
832 * we have the ail lock. Then unpin the item. This does
833 * not affect the AIL cursor the bulk insert path is
834 * using.
835 */
836 spin_lock(&ailp->ail_lock);
837 if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
838 xfs_trans_ail_update(ailp, lip, item_lsn);
839 else
840 spin_unlock(&ailp->ail_lock);
841 if (lip->li_ops->iop_unpin)
842 lip->li_ops->iop_unpin(lip, 0);
843 continue;
844 }
845
846 /* Item is a candidate for bulk AIL insert. */
847 log_items[i++] = lv->lv_item;
848 if (i >= LOG_ITEM_BATCH_SIZE) {
849 xlog_cil_ail_insert_batch(ailp, &cur, log_items,
850 LOG_ITEM_BATCH_SIZE, ctx->start_lsn);
851 i = 0;
852 }
853 }
854
855 /* make sure we insert the remainder! */
856 if (i)
857 xlog_cil_ail_insert_batch(ailp, &cur, log_items, i,
858 ctx->start_lsn);
859
860 spin_lock(&ailp->ail_lock);
861 xfs_trans_ail_cursor_done(&cur);
862 spin_unlock(&ailp->ail_lock);
863 }
864
865 static void
866 xlog_cil_free_logvec(
867 struct list_head *lv_chain)
868 {
869 struct xfs_log_vec *lv;
870
871 while (!list_empty(lv_chain)) {
872 lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
873 list_del_init(&lv->lv_list);
874 kvfree(lv);
875 }
876 }
877
878 /*
879 * Mark all items committed and clear busy extents. We free the log vector
880 * chains in a separate pass so that we unpin the log items as quickly as
881 * possible.
882 */
883 static void
884 xlog_cil_committed(
885 struct xfs_cil_ctx *ctx)
886 {
887 struct xfs_mount *mp = ctx->cil->xc_log->l_mp;
888 bool abort = xlog_is_shutdown(ctx->cil->xc_log);
889
890 /*
891 * If the I/O failed, we're aborting the commit and already shutdown.
892 * Wake any commit waiters before aborting the log items so we don't
893 * block async log pushers on callbacks. Async log pushers explicitly do
894 * not wait on log force completion because they may be holding locks
895 * required to unpin items.
896 */
897 if (abort) {
898 spin_lock(&ctx->cil->xc_push_lock);
899 wake_up_all(&ctx->cil->xc_start_wait);
900 wake_up_all(&ctx->cil->xc_commit_wait);
901 spin_unlock(&ctx->cil->xc_push_lock);
902 }
903
904 xlog_cil_ail_insert(ctx, abort);
905
906 xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
907 xfs_extent_busy_clear(&ctx->busy_extents.extent_list,
908 xfs_has_discard(mp) && !abort);
909
910 spin_lock(&ctx->cil->xc_push_lock);
911 list_del(&ctx->committing);
912 spin_unlock(&ctx->cil->xc_push_lock);
913
914 xlog_cil_free_logvec(&ctx->lv_chain);
915
916 if (!list_empty(&ctx->busy_extents.extent_list)) {
917 ctx->busy_extents.owner = ctx;
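		/*
		 * Ownership of the ctx passes to the discard code here via
		 * busy_extents.owner; it is freed when the discards complete,
		 * so don't free it on this path.
		 */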
918 xfs_discard_extents(mp, &ctx->busy_extents);
919 return;
920 }
921
922 kfree(ctx);
923 }
924
925 void
926 xlog_cil_process_committed(
927 struct list_head *list)
928 {
929 struct xfs_cil_ctx *ctx;
930
931 while ((ctx = list_first_entry_or_null(list,
932 struct xfs_cil_ctx, iclog_entry))) {
933 list_del(&ctx->iclog_entry);
934 xlog_cil_committed(ctx);
935 }
936 }
937
938 /*
939 * Record the LSN of the iclog we were just granted space to start writing into.
940 * If the context doesn't have a start_lsn recorded, then this iclog will
941 * contain the start record for the checkpoint. Otherwise this write contains
942 * the commit record for the checkpoint.
943 */
944 void
945 xlog_cil_set_ctx_write_state(
946 struct xfs_cil_ctx *ctx,
947 struct xlog_in_core *iclog)
948 {
949 struct xfs_cil *cil = ctx->cil;
950 xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn);
951
952 ASSERT(!ctx->commit_lsn);
953 if (!ctx->start_lsn) {
954 spin_lock(&cil->xc_push_lock);
955 /*
956 * The LSN we need to pass to the log items on transaction
957 * commit is the LSN reported by the first log vector write, not
958 * the commit lsn. If we use the commit record lsn then we can
959 * move the grant write head beyond the tail LSN and overwrite
960 * it.
961 */
962 ctx->start_lsn = lsn;
963 wake_up_all(&cil->xc_start_wait);
964 spin_unlock(&cil->xc_push_lock);
965
966 /*
967 * Make sure the metadata we are about to overwrite in the log
968 * has been flushed to stable storage before this iclog is
969 * issued.
970 */
971 spin_lock(&cil->xc_log->l_icloglock);
972 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
973 spin_unlock(&cil->xc_log->l_icloglock);
974 return;
975 }
976
977 /*
978 * Take a reference to the iclog for the context so that we still hold
979 * it when xlog_write is done and has released it. This means the
980 * context controls when the iclog is released for IO.
981 */
982 atomic_inc(&iclog->ic_refcnt);
983
984 /*
985 * xlog_state_get_iclog_space() guarantees there is enough space in the
986 * iclog for an entire commit record, so we can attach the context
987 * callbacks now. This needs to be done before we make the commit_lsn
988 * visible to waiters so that checkpoints with commit records in the
989 * same iclog order their IO completion callbacks in the same order that
990 * the commit records appear in the iclog.
991 */
992 spin_lock(&cil->xc_log->l_icloglock);
993 list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
994 spin_unlock(&cil->xc_log->l_icloglock);
995
996 /*
997 * Now we can record the commit LSN and wake anyone waiting for this
998 * sequence to have the ordered commit record assigned to a physical
999 * location in the log.
1000 */
1001 spin_lock(&cil->xc_push_lock);
1002 ctx->commit_iclog = iclog;
1003 ctx->commit_lsn = lsn;
1004 wake_up_all(&cil->xc_commit_wait);
1005 spin_unlock(&cil->xc_push_lock);
1006 }
1007
1008
1009 /*
1010 * Ensure that the order of log writes follows checkpoint sequence order. This
1011 * relies on the context LSN being zero until the log write has guaranteed the
1012 * LSN that the log write will start at via xlog_state_get_iclog_space().
1013 */
1014 enum _record_type {
1015 _START_RECORD,
1016 _COMMIT_RECORD,
1017 };
1018
1019 static int
1020 xlog_cil_order_write(
1021 struct xfs_cil *cil,
1022 xfs_csn_t sequence,
1023 enum _record_type record)
1024 {
1025 struct xfs_cil_ctx *ctx;
1026
1027 restart:
1028 spin_lock(&cil->xc_push_lock);
1029 list_for_each_entry(ctx, &cil->xc_committing, committing) {
1030 /*
1031 * Avoid getting stuck in this loop because we were woken by the
1032 * shutdown, but then went back to sleep once already in the
1033 * shutdown state.
1034 */
1035 if (xlog_is_shutdown(cil->xc_log)) {
1036 spin_unlock(&cil->xc_push_lock);
1037 return -EIO;
1038 }
1039
1040 /*
1041 * Higher sequences will wait for this one so skip them.
1042 * Don't wait for our own sequence, either.
1043 */
1044 if (ctx->sequence >= sequence)
1045 continue;
1046
1047 /* Wait until the LSN for the record has been recorded. */
1048 switch (record) {
1049 case _START_RECORD:
1050 if (!ctx->start_lsn) {
1051 xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
1052 goto restart;
1053 }
1054 break;
1055 case _COMMIT_RECORD:
1056 if (!ctx->commit_lsn) {
1057 xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
1058 goto restart;
1059 }
1060 break;
1061 }
1062 }
1063 spin_unlock(&cil->xc_push_lock);
1064 return 0;
1065 }
1066
1067 /*
1068 * Write out the log vector change now attached to the CIL context. This will
1069 * write a start record that needs to be strictly ordered in ascending CIL
1070 * sequence order so that log recovery will always use in-order start LSNs when
1071 * replaying checkpoints.
1072 */
1073 static int
1074 xlog_cil_write_chain(
1075 struct xfs_cil_ctx *ctx,
1076 uint32_t chain_len)
1077 {
1078 struct xlog *log = ctx->cil->xc_log;
1079 int error;
1080
1081 error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
1082 if (error)
1083 return error;
1084 return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
1085 }
1086
1087 /*
1088 * Write out the commit record of a checkpoint transaction to close off a
1089 * running log write. These commit records are strictly ordered in ascending CIL
1090 * sequence order so that log recovery will always replay the checkpoints in the
1091 * correct order.
1092 */
1093 static int
1094 xlog_cil_write_commit_record(
1095 struct xfs_cil_ctx *ctx)
1096 {
1097 struct xlog *log = ctx->cil->xc_log;
1098 struct xlog_op_header ophdr = {
1099 .oh_clientid = XFS_TRANSACTION,
1100 .oh_tid = cpu_to_be32(ctx->ticket->t_tid),
1101 .oh_flags = XLOG_COMMIT_TRANS,
1102 };
1103 struct xfs_log_iovec reg = {
1104 .i_addr = &ophdr,
1105 .i_len = sizeof(struct xlog_op_header),
1106 .i_type = XLOG_REG_TYPE_COMMIT,
1107 };
1108 struct xfs_log_vec vec = {
1109 .lv_niovecs = 1,
1110 .lv_iovecp = &reg,
1111 };
1112 int error;
1113 LIST_HEAD(lv_chain);
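	/*
	 * The commit record log vector and iovec live on the stack and are
	 * chained on a local list head, so nothing here is freed through the
	 * normal lv_chain teardown.
	 */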
1114 list_add(&vec.lv_list, &lv_chain);
1115
1116 if (xlog_is_shutdown(log))
1117 return -EIO;
1118
1119 error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
1120 if (error)
1121 return error;
1122
1123 /* account for space used by record data */
1124 ctx->ticket->t_curr_res -= reg.i_len;
1125 error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
1126 if (error)
1127 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1128 return error;
1129 }
1130
1131 struct xlog_cil_trans_hdr {
1132 struct xlog_op_header oph[2];
1133 struct xfs_trans_header thdr;
1134 struct xfs_log_iovec lhdr[2];
1135 };
1136
1137 /*
1138 * Build a checkpoint transaction header to begin the journal transaction. We
1139 * need to account for the space used by the transaction header here as it is
1140 * not accounted for in xlog_write().
1141 *
1142 * This is the only place we write a transaction header, so we also build the
1143 * log opheaders that indicate the start of a log transaction and wrap the
1144 * transaction header. We keep the start record in its own log vector rather
1145 * than compacting them into a single region as this ends up making the logic
1146 * in xlog_write() for handling empty opheaders for start, commit and unmount
1147 * records much simpler.
1148 */
1149 static void
1150 xlog_cil_build_trans_hdr(
1151 struct xfs_cil_ctx *ctx,
1152 struct xlog_cil_trans_hdr *hdr,
1153 struct xfs_log_vec *lvhdr,
1154 int num_iovecs)
1155 {
1156 struct xlog_ticket *tic = ctx->ticket;
1157 __be32 tid = cpu_to_be32(tic->t_tid);
1158
1159 memset(hdr, 0, sizeof(*hdr));
1160
1161 /* Log start record */
1162 hdr->oph[0].oh_tid = tid;
1163 hdr->oph[0].oh_clientid = XFS_TRANSACTION;
1164 hdr->oph[0].oh_flags = XLOG_START_TRANS;
1165
1166 /* log iovec region pointer */
1167 hdr->lhdr[0].i_addr = &hdr->oph[0];
1168 hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
1169 hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;
1170
1171 /* log opheader */
1172 hdr->oph[1].oh_tid = tid;
1173 hdr->oph[1].oh_clientid = XFS_TRANSACTION;
1174 hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));
1175
1176 /* transaction header in host byte order format */
1177 hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
1178 hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
1179 hdr->thdr.th_tid = tic->t_tid;
1180 hdr->thdr.th_num_items = num_iovecs;
1181
1182 /* log iovec region pointer */
1183 hdr->lhdr[1].i_addr = &hdr->oph[1];
1184 hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
1185 sizeof(struct xfs_trans_header);
1186 hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;
1187
1188 lvhdr->lv_niovecs = 2;
1189 lvhdr->lv_iovecp = &hdr->lhdr[0];
1190 lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;
1191
1192 tic->t_curr_res -= lvhdr->lv_bytes;
1193 }
1194
1195 /*
1196 * CIL item reordering compare function. We want to order in ascending ID order,
1197 * but we want to leave items with the same ID in the order they were added to
1198 * the list. This is important for operations like reflink where we log 4 order
1199 * dependent intents in a single transaction when we overwrite an existing
1200 * shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop),
1201 * CUI (inc), BUI(remap)...
1202 */
1203 static int
1204 xlog_cil_order_cmp(
1205 void *priv,
1206 const struct list_head *a,
1207 const struct list_head *b)
1208 {
1209 struct xfs_log_vec *l1 = container_of(a, struct xfs_log_vec, lv_list);
1210 struct xfs_log_vec *l2 = container_of(b, struct xfs_log_vec, lv_list);
1211
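	/*
	 * list_sort() is stable, so returning 0 for equal order ids keeps
	 * items in the order they were added to the CIL.
	 */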
1212 return l1->lv_order_id > l2->lv_order_id;
1213 }
1214
1215 /*
1216 * Pull all the log vectors off the items in the CIL, and remove the items from
1217 * the CIL. We don't need the CIL lock here because it's only needed on the
1218 * transaction commit side which is currently locked out by the flush lock.
1219 *
1220 * If a log item is marked with a whiteout, we do not need to write it to the
1221 * journal and so we just move them to the whiteout list for the caller to
1222 * dispose of appropriately.
1223 */
1224 static void
1225 xlog_cil_build_lv_chain(
1226 struct xfs_cil_ctx *ctx,
1227 struct list_head *whiteouts,
1228 uint32_t *num_iovecs,
1229 uint32_t *num_bytes)
1230 {
1231 while (!list_empty(&ctx->log_items)) {
1232 struct xfs_log_item *item;
1233 struct xfs_log_vec *lv;
1234
1235 item = list_first_entry(&ctx->log_items,
1236 struct xfs_log_item, li_cil);
1237
1238 if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
1239 list_move(&item->li_cil, whiteouts);
1240 trace_xfs_cil_whiteout_skip(item);
1241 continue;
1242 }
1243
1244 lv = item->li_lv;
1245 lv->lv_order_id = item->li_order_id;
1246
1247 /* we don't write ordered log vectors */
1248 if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
1249 *num_bytes += lv->lv_bytes;
1250 *num_iovecs += lv->lv_niovecs;
1251 list_add_tail(&lv->lv_list, &ctx->lv_chain);
1252
1253 list_del_init(&item->li_cil);
1254 item->li_order_id = 0;
1255 item->li_lv = NULL;
1256 }
1257 }
1258
1259 static void
1260 xlog_cil_cleanup_whiteouts(
1261 struct list_head *whiteouts)
1262 {
1263 while (!list_empty(whiteouts)) {
1264 struct xfs_log_item *item = list_first_entry(whiteouts,
1265 struct xfs_log_item, li_cil);
1266 list_del_init(&item->li_cil);
1267 trace_xfs_cil_whiteout_unpin(item);
1268 item->li_ops->iop_unpin(item, 1);
1269 }
1270 }
1271
1272 /*
1273 * Push the Committed Item List to the log.
1274 *
1275 * If the current sequence is the same as xc_push_seq we need to do a flush. If
1276 * xc_push_seq is less than the current sequence, then it has already been
1277 * flushed and we don't need to do anything - the caller will wait for it to
1278 * complete if necessary.
1279 *
1280 * xc_push_seq is checked unlocked against the sequence number for a match.
1281 * Hence we can allow log forces to run racily and not issue pushes for the
1282 * same sequence twice. If we get a race between multiple pushes for the same
1283 * sequence they will block on the first one and then abort, hence avoiding
1284 * needless pushes.
1285 *
1286 * This runs from a workqueue so it does not inherit any specific memory
1287 * allocation context. However, we do not want to block on memory reclaim
1288 * recursing back into the filesystem because this push may have been triggered
1289 * by memory reclaim itself. Hence we really need to run under full GFP_NOFS
1290 * constraints here.
1291 */
1292 static void
1293 xlog_cil_push_work(
1294 struct work_struct *work)
1295 {
1296 unsigned int nofs_flags = memalloc_nofs_save();
1297 struct xfs_cil_ctx *ctx =
1298 container_of(work, struct xfs_cil_ctx, push_work);
1299 struct xfs_cil *cil = ctx->cil;
1300 struct xlog *log = cil->xc_log;
1301 struct xfs_cil_ctx *new_ctx;
1302 int num_iovecs = 0;
1303 int num_bytes = 0;
1304 int error = 0;
1305 struct xlog_cil_trans_hdr thdr;
1306 struct xfs_log_vec lvhdr = {};
1307 xfs_csn_t push_seq;
1308 bool push_commit_stable;
1309 LIST_HEAD(whiteouts);
1310 struct xlog_ticket *ticket;
1311
1312 new_ctx = xlog_cil_ctx_alloc();
1313 new_ctx->ticket = xlog_cil_ticket_alloc(log);
1314
1315 down_write(&cil->xc_ctx_lock);
1316
1317 spin_lock(&cil->xc_push_lock);
1318 push_seq = cil->xc_push_seq;
1319 ASSERT(push_seq <= ctx->sequence);
1320 push_commit_stable = cil->xc_push_commit_stable;
1321 cil->xc_push_commit_stable = false;
1322
1323 /*
1324 * As we are about to switch to a new, empty CIL context, we no longer
1325 * need to throttle tasks on CIL space overruns. Wake any waiters that
1326 * the hard push throttle may have caught so they can start committing
1327 * to the new context. The cil->xc_push_lock provides the serialisation
1328 * necessary for safely using the lockless waitqueue_active() check in
1329 * this context.
1330 */
1331 if (waitqueue_active(&cil->xc_push_wait))
1332 wake_up_all(&cil->xc_push_wait);
1333
1334 xlog_cil_push_pcp_aggregate(cil, ctx);
1335
1336 /*
1337 * Check if we've anything to push. If there is nothing, then we don't
1338 * move on to a new sequence number and so we have to be able to push
1339 * this sequence again later.
1340 */
1341 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
1342 cil->xc_push_seq = 0;
1343 spin_unlock(&cil->xc_push_lock);
1344 goto out_skip;
1345 }
1346
1347
1348 /* check for a previously pushed sequence */
1349 if (push_seq < ctx->sequence) {
1350 spin_unlock(&cil->xc_push_lock);
1351 goto out_skip;
1352 }
1353
1354 /*
1355 * We are now going to push this context, so add it to the committing
1356 * list before we do anything else. This ensures that anyone waiting on
1357 * this push can easily detect the difference between a "push in
1358 * progress" and "CIL is empty, nothing to do".
1359 *
1360 * IOWs, a wait loop can now check for:
1361 * the current sequence not being found on the committing list;
1362 * an empty CIL; and
1363 * an unchanged sequence number
1364 * to detect a push that had nothing to do and therefore does not need
1365 * waiting on. If the CIL is not empty, we get put on the committing
1366 * list before emptying the CIL and bumping the sequence number. Hence
1367 * an empty CIL and an unchanged sequence number means we jumped out
1368 * above after doing nothing.
1369 *
1370 * Hence the waiter will either find the commit sequence on the
1371 * committing list or the sequence number will be unchanged and the CIL
1372 * still dirty. In that latter case, the push has not yet started, and
1373 * so the waiter will have to continue trying to check the CIL
1374 * committing list until it is found. In extreme cases of delay, the
1375 * sequence may fully commit between the attempts the waiter makes to wait
1376 * on the commit sequence.
1377 */
1378 list_add(&ctx->committing, &cil->xc_committing);
1379 spin_unlock(&cil->xc_push_lock);
1380
1381 xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);
1382
1383 /*
1384 * Switch the contexts so we can drop the context lock and move out
1385 * of a shared context. We can't just go straight to the commit record,
1386 * though - we need to synchronise with previous and future commits so
1387 * that the commit records are correctly ordered in the log to ensure
1388 * that we process items during log IO completion in the correct order.
1389 *
1390 * For example, if we get an EFI in one checkpoint and the EFD in the
1391 * next (e.g. due to log forces), we do not want the checkpoint with
1392 * the EFD to be committed before the checkpoint with the EFI. Hence
1393 * we must strictly order the commit records of the checkpoints so
1394 * that: a) the checkpoint callbacks are attached to the iclogs in the
1395 * correct order; and b) the checkpoints are replayed in correct order
1396 * in log recovery.
1397 *
1398 * Hence we need to add this context to the committing context list so
1399 * that higher sequences will wait for us to write out a commit record
1400 * before they do.
1401 *
1402 * xfs_log_force_seq requires us to mirror the new sequence into the cil
1403 * structure atomically with the addition of this sequence to the
1404 * committing list. This also ensures that we can do unlocked checks
1405 * against the current sequence in log forces without risking
1406 * dereferencing a freed context pointer.
1407 */
1408 spin_lock(&cil->xc_push_lock);
1409 xlog_cil_ctx_switch(cil, new_ctx);
1410 spin_unlock(&cil->xc_push_lock);
1411 up_write(&cil->xc_ctx_lock);
1412
1413 /*
1414 * Sort the log vector chain before we add the transaction headers.
1415 * This ensures we always have the transaction headers at the start
1416 * of the chain.
1417 */
1418 list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);
1419
1420 /*
1421 * Build a checkpoint transaction header and write it to the log to
1422 * begin the transaction. We need to account for the space used by the
1423 * transaction header here as it is not accounted for in xlog_write().
1424 * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
1425 * it gets written into the iclog first.
1426 */
1427 xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
1428 num_bytes += lvhdr.lv_bytes;
1429 list_add(&lvhdr.lv_list, &ctx->lv_chain);
1430
1431 /*
1432 * Take the lvhdr back off the lv_chain immediately after calling
1433 * xlog_cil_write_chain() as it should not be passed to log IO
1434 * completion.
1435 */
1436 error = xlog_cil_write_chain(ctx, num_bytes);
1437 list_del(&lvhdr.lv_list);
1438 if (error)
1439 goto out_abort_free_ticket;
1440
1441 error = xlog_cil_write_commit_record(ctx);
1442 if (error)
1443 goto out_abort_free_ticket;
1444
1445 /*
1446 * Grab the ticket from the ctx so we can ungrant it after releasing the
1447 * commit_iclog. The ctx may be freed by the time we return from
1448 * releasing the commit_iclog (i.e. checkpoint has been completed and
1449 * callback run) so we can't reference the ctx after the call to
1450 * xlog_state_release_iclog().
1451 */
1452 ticket = ctx->ticket;
1453
1454 /*
1455 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
1456 * to complete before we submit the commit_iclog. We can't use state
1457 * checks for this - ACTIVE can be either a past completed iclog or a
1458 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
1459 * past or future iclog awaiting IO or ordered IO completion to be run.
1460 * In the latter case, if it's a future iclog and we wait on it, then we
1461 * will hang because it won't get processed through to ic_force_wait
1462 * wakeup until this commit_iclog is written to disk. Hence we use the
1463 * iclog header lsn and compare it to the commit lsn to determine if we
1464 * need to wait on iclogs or not.
1465 */
1466 spin_lock(&log->l_icloglock);
1467 if (ctx->start_lsn != ctx->commit_lsn) {
1468 xfs_lsn_t plsn;
1469
1470 plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
1471 if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
1472 /*
1473 * Waiting on ic_force_wait orders the completion of
1474 * iclogs older than ic_prev. Hence we only need to wait
1475 * on the most recent older iclog here.
1476 */
1477 xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
1478 spin_lock(&log->l_icloglock);
1479 }
1480
1481 /*
1482 * We need to issue a pre-flush so that the ordering for this
1483 * checkpoint is correctly preserved down to stable storage.
1484 */
1485 ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
1486 }
1487
1488 /*
1489 * The commit iclog must be written to stable storage to guarantee
1490 * journal IO vs metadata writeback IO is correctly ordered on stable
1491 * storage.
1492 *
1493 * If the push caller needs the commit to be immediately stable and the
1494 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
1495 * will be written when released, switch its state to WANT_SYNC right
1496 * now.
1497 */
1498 ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
1499 if (push_commit_stable &&
1500 ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
1501 xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
1502 ticket = ctx->ticket;
1503 xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
1504
1505 /* Not safe to reference ctx now! */
1506
1507 spin_unlock(&log->l_icloglock);
1508 xlog_cil_cleanup_whiteouts(&whiteouts);
1509 xfs_log_ticket_ungrant(log, ticket);
1510 memalloc_nofs_restore(nofs_flags);
1511 return;
1512
1513 out_skip:
1514 up_write(&cil->xc_ctx_lock);
1515 xfs_log_ticket_put(new_ctx->ticket);
1516 kfree(new_ctx);
1517 memalloc_nofs_restore(nofs_flags);
1518 return;
1519
1520 out_abort_free_ticket:
1521 ASSERT(xlog_is_shutdown(log));
1522 xlog_cil_cleanup_whiteouts(&whiteouts);
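	/*
	 * If we never got an iclog for the commit record there is nothing for
	 * log IO completion to process, so ungrant the ticket and run the
	 * completion processing directly. Otherwise release the commit iclog
	 * and let the (shut down) log IO completion clean the context up.
	 */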
1523 if (!ctx->commit_iclog) {
1524 xfs_log_ticket_ungrant(log, ctx->ticket);
1525 xlog_cil_committed(ctx);
1526 memalloc_nofs_restore(nofs_flags);
1527 return;
1528 }
1529 spin_lock(&log->l_icloglock);
1530 ticket = ctx->ticket;
1531 xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
1532 /* Not safe to reference ctx now! */
1533 spin_unlock(&log->l_icloglock);
1534 xfs_log_ticket_ungrant(log, ticket);
1535 memalloc_nofs_restore(nofs_flags);
1536 }
1537
1538 /*
1539 * We need to push CIL every so often so we don't cache more than we can fit in
1540 * the log. The limit really is that a checkpoint can't be more than half the
1541 * log (the current checkpoint is not allowed to overwrite the previous
1542 * checkpoint), but commit latency and memory usage limit this to a smaller
1543 * size.
1544 */
1545 static void
1546 xlog_cil_push_background(
1547 struct xlog *log)
1548 {
1549 struct xfs_cil *cil = log->l_cilp;
1550 int space_used = atomic_read(&cil->xc_ctx->space_used);
1551
1552 /*
1553 * The cil won't be empty because we are called while holding the
1554 * context lock so whatever we added to the CIL will still be there.
1555 */
1556 ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
1557
1558 /*
1559 * We are done if:
1560 * - we haven't used up all the space available yet; or
1561 * - a push has already been queued for this sequence; and
1562 * - we're not over the hard (blocking) limit; and
1563 * - nothing is currently throttled waiting on the hard limit.
1564 *
1565 * If so, we don't need to take the push lock as there's nothing to do.
1566 */
1567 if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
1568 (cil->xc_push_seq == cil->xc_current_sequence &&
1569 space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
1570 !waitqueue_active(&cil->xc_push_wait))) {
1571 up_read(&cil->xc_ctx_lock);
1572 return;
1573 }
1574
1575 spin_lock(&cil->xc_push_lock);
1576 if (cil->xc_push_seq < cil->xc_current_sequence) {
1577 cil->xc_push_seq = cil->xc_current_sequence;
1578 queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
1579 }
1580
1581 /*
1582 * Drop the context lock now, we can't hold that if we need to sleep
1583 * because we are over the blocking threshold. The push_lock is still
1584 * held, so blocking threshold sleep/wakeup is still correctly
1585 * serialised here.
1586 */
1587 up_read(&cil->xc_ctx_lock);
1588
1589 /*
1590 * If we are well over the space limit, throttle the work that is being
1591 * done until the push work on this context has begun. Enforce the hard
1592 * throttle on all transaction commits once it has been activated, even
1593 * if the committing transactions have resulted in the space usage
1594 * dipping back down under the hard limit.
1595 *
1596 * The cil->xc_push_lock provides the serialisation necessary for safely
1597 * calling xlog_cil_over_hard_limit() in this context.
1598 */
1599 if (xlog_cil_over_hard_limit(log, space_used)) {
1600 trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
1601 ASSERT(space_used < log->l_logsize);
1602 xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
1603 return;
1604 }
1605
1606 spin_unlock(&cil->xc_push_lock);
1607
1608 }
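
/*
 * Illustrative sketch only: the background push decision above reduces to
 * the outcomes below. soft_limit and hard_limit stand in for
 * XLOG_CIL_SPACE_LIMIT() and XLOG_CIL_BLOCKING_SPACE_LIMIT(); the helper
 * itself is hypothetical and slightly simplified (the real code also avoids
 * taking the push lock at all in the "nothing to do" case):
 *
 *	#define CIL_BG_QUEUE	(1 << 0)	// queue the push work
 *	#define CIL_BG_THROTTLE	(1 << 1)	// sleep until the push starts
 *
 *	static unsigned int
 *	cil_bg_actions(int space_used, int soft_limit, int hard_limit,
 *		       bool push_queued, bool throttled_waiters)
 *	{
 *		unsigned int	actions = 0;
 *
 *		if (space_used < soft_limit)
 *			return 0;
 *		if (!push_queued)
 *			actions |= CIL_BG_QUEUE;
 *		if (space_used >= hard_limit || throttled_waiters)
 *			actions |= CIL_BG_THROTTLE;
 *		return actions;
 *	}
 */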
1609
1610 /*
1611 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
1612 * number that is passed. When it returns, the work will be queued for
1613 * @push_seq, but it won't be completed.
1614 *
1615 * If the caller is performing a synchronous force, we will flush the workqueue
1616 * to get previously queued work moving and so minimise the time the caller
1617 * spends waiting for all outstanding pushes to complete. The caller is
1618 * expected to do the required waiting for push_seq to complete.
1619 *
1620 * If the caller is performing an async push, we need to ensure that the
1621 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
1622 * don't do this, then the commit record may remain sitting in memory in an
1623 * ACTIVE iclog. This then requires another full log force to push to disk,
1624 * which defeats the purpose of having an async, non-blocking CIL force
1625 * mechanism. Hence in this case we need to pass a flag to the push work to
1626 * indicate it needs to flush the commit record itself.
1627 */
1628 static void
1629 xlog_cil_push_now(
1630 struct xlog *log,
1631 xfs_lsn_t push_seq,
1632 bool async)
1633 {
1634 struct xfs_cil *cil = log->l_cilp;
1635
1636 if (!cil)
1637 return;
1638
1639 ASSERT(push_seq && push_seq <= cil->xc_current_sequence);
1640
1641 /* start on any pending background push to minimise wait time on it */
1642 if (!async)
1643 flush_workqueue(cil->xc_push_wq);
1644
1645 spin_lock(&cil->xc_push_lock);
1646
1647 /*
1648 * If this is an async flush request, we always need to set the
1649 * xc_push_commit_stable flag even if something else has already queued
1650 * a push. The flush caller is asking for the CIL to be on stable
1651 * storage when the next push completes, so regardless of who has queued
1652 * the push, the flush requires stable semantics from it.
1653 */
1654 cil->xc_push_commit_stable = async;
1655
1656 /*
1657 * If the CIL is empty or we've already pushed the sequence then
1658 * there's no more work that we need to do.
1659 */
1660 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
1661 push_seq <= cil->xc_push_seq) {
1662 spin_unlock(&cil->xc_push_lock);
1663 return;
1664 }
1665
1666 cil->xc_push_seq = push_seq;
1667 queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
1668 spin_unlock(&cil->xc_push_lock);
1669 }
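
/*
 * Illustrative only: the two ways this function is typically driven. A
 * blocking log force pushes synchronously and then waits on the sequence
 * itself, while an async CIL flush asks for the commit record to reach
 * stable storage without the caller waiting. This mirrors
 * xlog_cil_force_seq() and xlog_cil_flush() below; treat the bare calls as
 * a simplification of those call sites:
 *
 *	xlog_cil_push_now(log, seq, false);	// sync force: flush the wq,
 *						// caller waits for seq itself
 *	xlog_cil_push_now(log, seq, true);	// async flush: commit record is
 *						// not left in an ACTIVE iclog
 */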
1670
1671 bool
1672 xlog_cil_empty(
1673 struct xlog *log)
1674 {
1675 struct xfs_cil *cil = log->l_cilp;
1676 bool empty = false;
1677
1678 spin_lock(&cil->xc_push_lock);
1679 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
1680 empty = true;
1681 spin_unlock(&cil->xc_push_lock);
1682 return empty;
1683 }
1684
1685 /*
1686 * If there are intent done items in this transaction and the related intent was
1687 * committed in the current (same) CIL checkpoint, we don't need to write either
1688 * the intent or intent done item to the journal as the change will be
1689 * journalled atomically within this checkpoint. As we cannot remove items from
1690 * the CIL here, mark the related intent with a whiteout so that the CIL push
1691 * can remove it rather than writing it to the journal. Then remove the intent
1692 * done item from the current transaction and release it so it doesn't get put
1693 * into the CIL at all.
1694 */
1695 static uint32_t
1696 xlog_cil_process_intents(
1697 struct xfs_cil *cil,
1698 struct xfs_trans *tp)
1699 {
1700 struct xfs_log_item *lip, *ilip, *next;
1701 uint32_t len = 0;
1702
1703 list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1704 if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
1705 continue;
1706
1707 ilip = lip->li_ops->iop_intent(lip);
1708 if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
1709 continue;
1710 set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
1711 trace_xfs_cil_whiteout_mark(ilip);
1712 len += ilip->li_lv->lv_bytes;
1713 kvfree(ilip->li_lv);
1714 ilip->li_lv = NULL;
1715
1716 xfs_trans_del_item(lip);
1717 lip->li_ops->iop_release(lip);
1718 }
1719 return len;
1720 }
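
/*
 * Illustrative only: the shape of an ->iop_intent implementation that the
 * loop above relies on. An intent done item simply hands back the log item
 * of the intent it completes so we can check whether that intent is still
 * in the current checkpoint. The type and field names below are
 * hypothetical placeholders for a real pairing such as EFI/EFD:
 *
 *	static struct xfs_log_item *
 *	xfs_foo_done_intent(struct xfs_log_item *lip)
 *	{
 *		// the done item carries a pointer to its paired intent
 *		return &FOO_DONE_ITEM(lip)->fdi_intent->fi_item;
 *	}
 *
 * with .iop_intent = xfs_foo_done_intent and XFS_ITEM_INTENT_DONE set in
 * the item ops flags so the loop above considers the item at all.
 */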
1721
1722 /*
1723 * Commit a transaction with the given vector to the Committed Item List.
1724 *
1725 * To do this, we need to format the item, pin it in memory if required and
1726 * account for the space used by the transaction. Once we have done that we
1727 * need to release the unused reservation for the transaction, attach the
1728 * transaction to the checkpoint context so we carry the busy extents through
1729 * to checkpoint completion, and then unlock all the items in the transaction.
1730 *
1731 * Called with the context lock already held in read mode to lock out
1732 * background commit, returns without it held once background commits are
1733 * allowed again.
1734 */
1735 void
1736 xlog_cil_commit(
1737 struct xlog *log,
1738 struct xfs_trans *tp,
1739 xfs_csn_t *commit_seq,
1740 bool regrant)
1741 {
1742 struct xfs_cil *cil = log->l_cilp;
1743 struct xfs_log_item *lip, *next;
1744 uint32_t released_space = 0;
1745
1746 /*
1747 * Do all necessary memory allocation before we lock the CIL.
1748 * This ensures the allocation does not deadlock with a CIL
1749 * push in memory reclaim (e.g. from kswapd).
1750 */
1751 xlog_cil_alloc_shadow_bufs(log, tp);
1752
1753 /* lock out background commit */
1754 down_read(&cil->xc_ctx_lock);
1755
1756 if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
1757 released_space = xlog_cil_process_intents(cil, tp);
1758
1759 xlog_cil_insert_items(log, tp, released_space);
1760
1761 if (regrant && !xlog_is_shutdown(log))
1762 xfs_log_ticket_regrant(log, tp->t_ticket);
1763 else
1764 xfs_log_ticket_ungrant(log, tp->t_ticket);
1765 tp->t_ticket = NULL;
1766 xfs_trans_unreserve_and_mod_sb(tp);
1767
1768 /*
1769 * Once all the items of the transaction have been copied to the CIL,
1770 * the items can be unlocked and possibly freed.
1771 *
1772 * This needs to be done before we drop the CIL context lock because we
1773 * have to update state in the log items and unlock them before they go
1774 * to disk. If we don't, then the CIL checkpoint can race with us and
1775 * we can run checkpoint completion before we've updated and unlocked
1776 * the log items. This affects (at least) processing of stale buffers,
1777 * inodes and EFIs.
1778 */
1779 trace_xfs_trans_commit_items(tp, _RET_IP_);
1780 list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1781 xfs_trans_del_item(lip);
1782 if (lip->li_ops->iop_committing)
1783 lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
1784 }
1785 if (commit_seq)
1786 *commit_seq = cil->xc_ctx->sequence;
1787
1788 /* xlog_cil_push_background() releases cil->xc_ctx_lock */
1789 xlog_cil_push_background(log);
1790 }
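
/*
 * Illustrative only: how a caller typically consumes the returned commit
 * sequence. A synchronous transaction commit records the sequence its items
 * were committed to and then forces that CIL sequence to stable storage.
 * This is a simplified sketch of that flow, not a verbatim copy of the
 * transaction commit path:
 *
 *	xfs_csn_t	seq = 0;
 *
 *	xlog_cil_commit(log, tp, &seq, regrant);
 *	if (sync)
 *		error = xfs_log_force_seq(mp, seq, XFS_LOG_SYNC, NULL);
 */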
1791
1792 /*
1793 * Flush the CIL to stable storage but don't wait for it to complete. This
1794 * requires the CIL push to ensure the commit record for the push hits the disk,
1795 * but otherwise is no different to a push done from a log force.
1796 */
1797 void
1798 xlog_cil_flush(
1799 struct xlog *log)
1800 {
1801 xfs_csn_t seq = log->l_cilp->xc_current_sequence;
1802
1803 trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
1804 xlog_cil_push_now(log, seq, true);
1805
1806 /*
1807 * If the CIL is empty, make sure that any previous checkpoint that may
1808 * still be in an active iclog is pushed to stable storage.
1809 */
1810 if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
1811 xfs_log_force(log->l_mp, 0);
1812 }
1813
1814 /*
1815 * Conditionally push the CIL based on the sequence passed in.
1816 *
1817 * We only need to push if we haven't already pushed the sequence number given.
1818 * Hence the only time we will trigger a push here is if the push sequence is
1819 * the same as the current context.
1820 *
1821 * We return the current commit lsn to allow the callers to determine if an
1822 * iclog flush is necessary following this call.
1823 */
1824 xfs_lsn_t
1825 xlog_cil_force_seq(
1826 struct xlog *log,
1827 xfs_csn_t sequence)
1828 {
1829 struct xfs_cil *cil = log->l_cilp;
1830 struct xfs_cil_ctx *ctx;
1831 xfs_lsn_t commit_lsn = NULLCOMMITLSN;
1832
1833 ASSERT(sequence <= cil->xc_current_sequence);
1834
1835 if (!sequence)
1836 sequence = cil->xc_current_sequence;
1837 trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);
1838
1839 /*
1840 * check to see if we need to force out the current context.
1841 * xlog_cil_push() handles racing pushes for the same sequence,
1842 * so no need to deal with it here.
1843 */
1844 restart:
1845 xlog_cil_push_now(log, sequence, false);
1846
1847 /*
1848 * See if we can find a previous sequence still committing.
1849 * We need to wait for all previous sequence commits to complete
1850 * before allowing the force of push_seq to go ahead. Hence block
1851 * on commits for those as well.
1852 */
1853 spin_lock(&cil->xc_push_lock);
1854 list_for_each_entry(ctx, &cil->xc_committing, committing) {
1855 /*
1856 * Avoid getting stuck in this loop because we were woken by the
1857 * shutdown, but then went back to sleep once already in the
1858 * shutdown state.
1859 */
1860 if (xlog_is_shutdown(log))
1861 goto out_shutdown;
1862 if (ctx->sequence > sequence)
1863 continue;
1864 if (!ctx->commit_lsn) {
1865 /*
1866 * It is still being pushed! Wait for the push to
1867 * complete, then start again from the beginning.
1868 */
1869 XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
1870 xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
1871 goto restart;
1872 }
1873 if (ctx->sequence != sequence)
1874 continue;
1875 /* found it! */
1876 commit_lsn = ctx->commit_lsn;
1877 }
1878
1879 /*
1880 * The call to xlog_cil_push_now() executes the push in the background.
1881 * Hence by the time we have got here our sequence may not have been
1882 * pushed yet. This is true if the current sequence still matches the
1883 * push sequence after the above wait loop and the CIL still contains
1884 * dirty objects. This is guaranteed by the push code first adding the
1885 * context to the committing list before emptying the CIL.
1886 *
1887 * Hence if we don't find the context in the committing list and the
1888 * current sequence number is unchanged then the CIL contents are
1889 * significant. If the CIL is empty, it means there was nothing to push
1890 * and that means there is nothing to wait for. If the CIL is not empty,
1891 * it means we haven't yet started the push, because if it had started
1892 * we would have found the context on the committing list.
1893 */
1894 if (sequence == cil->xc_current_sequence &&
1895 !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
1896 spin_unlock(&cil->xc_push_lock);
1897 goto restart;
1898 }
1899
1900 spin_unlock(&cil->xc_push_lock);
1901 return commit_lsn;
1902
1903 /*
1904 * We detected a shutdown in progress. We need to trigger the log force
1905 * to pass through its iclog state machine error handling, even though
1906 * we are already in a shutdown state. Hence we can't return
1907 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
1908 * LSN is already stable), so we return a zero LSN instead.
1909 */
1910 out_shutdown:
1911 spin_unlock(&cil->xc_push_lock);
1912 return 0;
1913 }
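
/*
 * Illustrative only: how the returned commit LSN is consumed by the log
 * force path. If we report NULLCOMMITLSN the checkpoint commit record is
 * already stable (or there was nothing to push) and no iclog flush is
 * needed; otherwise the caller flushes iclogs up to that LSN. A simplified
 * approximation of the caller side, not a verbatim copy of
 * xfs_log_force_seq():
 *
 *	lsn = xlog_cil_force_seq(log, seq);
 *	if (lsn == NULLCOMMITLSN)
 *		return 0;		// nothing further to flush
 *	return xlog_force_lsn(log, lsn, flags, log_flushed, false);
 */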
1914
1915 /*
1916 * Perform initial CIL structure initialisation.
1917 */
1918 int
1919 xlog_cil_init(
1920 struct xlog *log)
1921 {
1922 struct xfs_cil *cil;
1923 struct xfs_cil_ctx *ctx;
1924 struct xlog_cil_pcp *cilpcp;
1925 int cpu;
1926
1927 cil = kzalloc(sizeof(*cil), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1928 if (!cil)
1929 return -ENOMEM;
1930 /*
1931 * Limit the CIL pipeline depth to 4 concurrent works to bound the
1932 * concurrency the log spinlocks will be exposed to.
1933 */
1934 cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
1935 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
1936 4, log->l_mp->m_super->s_id);
1937 if (!cil->xc_push_wq)
1938 goto out_destroy_cil;
1939
1940 cil->xc_log = log;
1941 cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
1942 if (!cil->xc_pcp)
1943 goto out_destroy_wq;
1944
1945 for_each_possible_cpu(cpu) {
1946 cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
1947 INIT_LIST_HEAD(&cilpcp->busy_extents);
1948 INIT_LIST_HEAD(&cilpcp->log_items);
1949 }
1950
1951 INIT_LIST_HEAD(&cil->xc_committing);
1952 spin_lock_init(&cil->xc_push_lock);
1953 init_waitqueue_head(&cil->xc_push_wait);
1954 init_rwsem(&cil->xc_ctx_lock);
1955 init_waitqueue_head(&cil->xc_start_wait);
1956 init_waitqueue_head(&cil->xc_commit_wait);
1957 log->l_cilp = cil;
1958
1959 ctx = xlog_cil_ctx_alloc();
1960 xlog_cil_ctx_switch(cil, ctx);
1961 return 0;
1962
1963 out_destroy_wq:
1964 destroy_workqueue(cil->xc_push_wq);
1965 out_destroy_cil:
1966 kfree(cil);
1967 return -ENOMEM;
1968 }
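
/*
 * Illustrative only: the CIL life cycle is tied to the log itself. A
 * simplified sketch of the pairing with xlog_cil_destroy() below (treat the
 * exact call sites as an assumption; they live in the log allocation and
 * teardown code, not in this file):
 *
 *	log = xlog_alloc_log(mp, ...);	// calls xlog_cil_init(log)
 *	...				// mount completes, transactions run
 *	xlog_cil_destroy(log);		// from the log teardown path at
 *					// unmount, once the CIL is empty
 */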
1969
1970 void
1971 xlog_cil_destroy(
1972 struct xlog *log)
1973 {
1974 struct xfs_cil *cil = log->l_cilp;
1975
1976 if (cil->xc_ctx) {
1977 if (cil->xc_ctx->ticket)
1978 xfs_log_ticket_put(cil->xc_ctx->ticket);
1979 kfree(cil->xc_ctx);
1980 }
1981
1982 ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
1983 free_percpu(cil->xc_pcp);
1984 destroy_workqueue(cil->xc_push_wq);
1985 kfree(cil);
1986 }
1987
1988