1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved. 4 */ 5 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_format.h" 9 #include "xfs_log_format.h" 10 #include "xfs_shared.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_mount.h" 13 #include "xfs_extent_busy.h" 14 #include "xfs_trans.h" 15 #include "xfs_trans_priv.h" 16 #include "xfs_log.h" 17 #include "xfs_log_priv.h" 18 #include "xfs_trace.h" 19 #include "xfs_discard.h" 20 21 /* 22 * Allocate a new ticket. Failing to get a new ticket makes it really hard to 23 * recover, so we don't allow failure here. Also, we allocate in a context that 24 * we don't want to be issuing transactions from, so we need to tell the 25 * allocation code this as well. 26 * 27 * We don't reserve any space for the ticket - we are going to steal whatever 28 * space we require from transactions as they commit. To ensure we reserve all 29 * the space required, we need to set the current reservation of the ticket to 30 * zero so that we know to steal the initial transaction overhead from the 31 * first transaction commit. 32 */ 33 static struct xlog_ticket * 34 xlog_cil_ticket_alloc( 35 struct xlog *log) 36 { 37 struct xlog_ticket *tic; 38 39 tic = xlog_ticket_alloc(log, 0, 1, 0); 40 41 /* 42 * set the current reservation to zero so we know to steal the basic 43 * transaction overhead reservation from the first transaction commit. 44 */ 45 tic->t_curr_res = 0; 46 tic->t_iclog_hdrs = 0; 47 return tic; 48 } 49 50 static inline void 51 xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil) 52 { 53 struct xlog *log = cil->xc_log; 54 55 atomic_set(&cil->xc_iclog_hdrs, 56 (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) / 57 (log->l_iclog_size - log->l_iclog_hsize))); 58 } 59 60 /* 61 * Check if the current log item was first committed in this sequence. 62 * We can't rely on just the log item being in the CIL, we have to check 63 * the recorded commit sequence number. 64 * 65 * Note: for this to be used in a non-racy manner, it has to be called with 66 * CIL flushing locked out. As a result, it should only be used during the 67 * transaction commit process when deciding what to format into the item. 68 */ 69 static bool 70 xlog_item_in_current_chkpt( 71 struct xfs_cil *cil, 72 struct xfs_log_item *lip) 73 { 74 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) 75 return false; 76 77 /* 78 * li_seq is written on the first commit of a log item to record the 79 * first checkpoint it is written to. Hence if it is different to the 80 * current sequence, we're in a new checkpoint. 81 */ 82 return lip->li_seq == READ_ONCE(cil->xc_current_sequence); 83 } 84 85 bool 86 xfs_log_item_in_current_chkpt( 87 struct xfs_log_item *lip) 88 { 89 return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip); 90 } 91 92 /* 93 * Unavoidable forward declaration - xlog_cil_push_work() calls 94 * xlog_cil_ctx_alloc() itself. 95 */ 96 static void xlog_cil_push_work(struct work_struct *work); 97 98 static struct xfs_cil_ctx * 99 xlog_cil_ctx_alloc(void) 100 { 101 struct xfs_cil_ctx *ctx; 102 103 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL); 104 INIT_LIST_HEAD(&ctx->committing); 105 INIT_LIST_HEAD(&ctx->busy_extents.extent_list); 106 INIT_LIST_HEAD(&ctx->log_items); 107 INIT_LIST_HEAD(&ctx->lv_chain); 108 INIT_WORK(&ctx->push_work, xlog_cil_push_work); 109 return ctx; 110 } 111 112 /* 113 * Aggregate the CIL per cpu structures into global counts, lists, etc and 114 * clear the percpu state ready for the next context to use. 
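 * (Concretely: each CPU's space_reserved is folded back into the push
 * ticket's t_curr_res, the per-cpu busy extent and log item lists are
 * spliced onto the context's lists, and space_used is reset to zero.)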
This is called 115 * from the push code with the context lock held exclusively, hence nothing else 116 * will be accessing or modifying the per-cpu counters. 117 */ 118 static void 119 xlog_cil_push_pcp_aggregate( 120 struct xfs_cil *cil, 121 struct xfs_cil_ctx *ctx) 122 { 123 struct xlog_cil_pcp *cilpcp; 124 int cpu; 125 126 for_each_cpu(cpu, &ctx->cil_pcpmask) { 127 cilpcp = per_cpu_ptr(cil->xc_pcp, cpu); 128 129 ctx->ticket->t_curr_res += cilpcp->space_reserved; 130 cilpcp->space_reserved = 0; 131 132 if (!list_empty(&cilpcp->busy_extents)) { 133 list_splice_init(&cilpcp->busy_extents, 134 &ctx->busy_extents.extent_list); 135 } 136 if (!list_empty(&cilpcp->log_items)) 137 list_splice_init(&cilpcp->log_items, &ctx->log_items); 138 139 /* 140 * We're in the middle of switching cil contexts. Reset the 141 * counter we use to detect when the current context is nearing 142 * full. 143 */ 144 cilpcp->space_used = 0; 145 } 146 } 147 148 /* 149 * Aggregate the CIL per-cpu space used counters into the global atomic value. 150 * This is called when the per-cpu counter aggregation will first pass the soft 151 * limit threshold so we can switch to atomic counter aggregation for accurate 152 * detection of hard limit traversal. 153 */ 154 static void 155 xlog_cil_insert_pcp_aggregate( 156 struct xfs_cil *cil, 157 struct xfs_cil_ctx *ctx) 158 { 159 int cpu; 160 int count = 0; 161 162 /* Trigger atomic updates then aggregate only for the first caller */ 163 if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) 164 return; 165 166 /* 167 * We can race with other cpus setting cil_pcpmask. However, we've 168 * atomically cleared PCP_SPACE which forces other threads to add to 169 * the global space used count. cil_pcpmask is a superset of cilpcp 170 * structures that could have a nonzero space_used. 171 */ 172 for_each_cpu(cpu, &ctx->cil_pcpmask) { 173 struct xlog_cil_pcp *cilpcp = per_cpu_ptr(cil->xc_pcp, cpu); 174 int old = READ_ONCE(cilpcp->space_used); 175 176 while (!try_cmpxchg(&cilpcp->space_used, &old, 0)) 177 ; 178 count += old; 179 } 180 atomic_add(count, &ctx->space_used); 181 } 182 183 static void 184 xlog_cil_ctx_switch( 185 struct xfs_cil *cil, 186 struct xfs_cil_ctx *ctx) 187 { 188 xlog_cil_set_iclog_hdr_count(cil); 189 set_bit(XLOG_CIL_EMPTY, &cil->xc_flags); 190 set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags); 191 ctx->sequence = ++cil->xc_current_sequence; 192 ctx->cil = cil; 193 cil->xc_ctx = ctx; 194 } 195 196 /* 197 * After the first stage of log recovery is done, we know where the head and 198 * tail of the log are. We need this log initialisation done before we can 199 * initialise the first CIL checkpoint context. 200 * 201 * Here we allocate a log ticket to track space usage during a CIL push. This 202 * ticket is passed to xlog_write() directly so that we don't slowly leak log 203 * space by failing to account for space used by log headers and additional 204 * region headers for split regions. 205 */ 206 void 207 xlog_cil_init_post_recovery( 208 struct xlog *log) 209 { 210 log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); 211 log->l_cilp->xc_ctx->sequence = 1; 212 xlog_cil_set_iclog_hdr_count(log->l_cilp); 213 } 214 215 static inline int 216 xlog_cil_iovec_space( 217 uint niovecs) 218 { 219 return round_up((sizeof(struct xfs_log_vec) + 220 niovecs * sizeof(struct xfs_log_iovec)), 221 sizeof(uint64_t)); 222 } 223 224 /* 225 * Allocate or pin log vector buffers for CIL insertion. 
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * as it is needed, and this is the buffer that is reallocated to match the
 * size of the incoming modification. Then during the formatting of the item we
 * can swap the active buffer with the new one if we can't reuse the existing
 * buffer. We don't free the old buffer as it may be reused on the next
 * modification if its size is right, otherwise we'll free and reallocate it at
 * that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
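		 * (An ordered buffer logged via xfs_trans_ordered_buf() is
		 * the typical case: the buffer has to follow the checkpoint
		 * through to AIL insertion, but its contents are never copied
		 * into the journal.)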
295 */ 296 if (niovecs == XFS_LOG_VEC_ORDERED) { 297 ordered = true; 298 niovecs = 0; 299 nbytes = 0; 300 } 301 302 /* 303 * We 64-bit align the length of each iovec so that the start of 304 * the next one is naturally aligned. We'll need to account for 305 * that slack space here. 306 * 307 * We also add the xlog_op_header to each region when 308 * formatting, but that's not accounted to the size of the item 309 * at this point. Hence we'll need an addition number of bytes 310 * for each vector to hold an opheader. 311 * 312 * Then round nbytes up to 64-bit alignment so that the initial 313 * buffer alignment is easy to calculate and verify. 314 */ 315 nbytes += niovecs * 316 (sizeof(uint64_t) + sizeof(struct xlog_op_header)); 317 nbytes = round_up(nbytes, sizeof(uint64_t)); 318 319 /* 320 * The data buffer needs to start 64-bit aligned, so round up 321 * that space to ensure we can align it appropriately and not 322 * overrun the buffer. 323 */ 324 buf_size = nbytes + xlog_cil_iovec_space(niovecs); 325 326 /* 327 * if we have no shadow buffer, or it is too small, we need to 328 * reallocate it. 329 */ 330 if (!lip->li_lv_shadow || 331 buf_size > lip->li_lv_shadow->lv_size) { 332 /* 333 * We free and allocate here as a realloc would copy 334 * unnecessary data. We don't use kvzalloc() for the 335 * same reason - we don't need to zero the data area in 336 * the buffer, only the log vector header and the iovec 337 * storage. 338 */ 339 kvfree(lip->li_lv_shadow); 340 lv = xlog_kvmalloc(buf_size); 341 342 memset(lv, 0, xlog_cil_iovec_space(niovecs)); 343 344 INIT_LIST_HEAD(&lv->lv_list); 345 lv->lv_item = lip; 346 lv->lv_size = buf_size; 347 if (ordered) 348 lv->lv_buf_len = XFS_LOG_VEC_ORDERED; 349 else 350 lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1]; 351 lip->li_lv_shadow = lv; 352 } else { 353 /* same or smaller, optimise common overwrite case */ 354 lv = lip->li_lv_shadow; 355 if (ordered) 356 lv->lv_buf_len = XFS_LOG_VEC_ORDERED; 357 else 358 lv->lv_buf_len = 0; 359 lv->lv_bytes = 0; 360 } 361 362 /* Ensure the lv is set up according to ->iop_size */ 363 lv->lv_niovecs = niovecs; 364 365 /* The allocated data region lies beyond the iovec region */ 366 lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs); 367 } 368 369 } 370 371 /* 372 * Prepare the log item for insertion into the CIL. Calculate the difference in 373 * log space it will consume, and if it is a new item pin it as well. 374 */ 375 STATIC void 376 xfs_cil_prepare_item( 377 struct xlog *log, 378 struct xfs_log_vec *lv, 379 struct xfs_log_vec *old_lv, 380 int *diff_len) 381 { 382 /* Account for the new LV being passed in */ 383 if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) 384 *diff_len += lv->lv_bytes; 385 386 /* 387 * If there is no old LV, this is the first time we've seen the item in 388 * this CIL context and so we need to pin it. If we are replacing the 389 * old_lv, then remove the space it accounts for and make it the shadow 390 * buffer for later freeing. In both cases we are now switching to the 391 * shadow buffer, so update the pointer to it appropriately. 
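	 * (The remaining case - old_lv == lv - means the existing buffer was
	 * reused in place by the formatting code, so there is nothing to
	 * re-account or swap here.)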
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format log item into a flat buffer
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
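		 * (xlog_cil_alloc_shadow_bufs() already called ->iop_size()
		 * and sized the shadow buffer before the xc_ctx_lock was
		 * taken, so no memory allocation is needed at this point.)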
472 */ 473 shadow = lip->li_lv_shadow; 474 if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED) 475 ordered = true; 476 477 /* Skip items that do not have any vectors for writing */ 478 if (!shadow->lv_niovecs && !ordered) 479 continue; 480 481 /* compare to existing item size */ 482 old_lv = lip->li_lv; 483 if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) { 484 /* same or smaller, optimise common overwrite case */ 485 lv = lip->li_lv; 486 487 if (ordered) 488 goto insert; 489 490 /* 491 * set the item up as though it is a new insertion so 492 * that the space reservation accounting is correct. 493 */ 494 *diff_len -= lv->lv_bytes; 495 496 /* Ensure the lv is set up according to ->iop_size */ 497 lv->lv_niovecs = shadow->lv_niovecs; 498 499 /* reset the lv buffer information for new formatting */ 500 lv->lv_buf_len = 0; 501 lv->lv_bytes = 0; 502 lv->lv_buf = (char *)lv + 503 xlog_cil_iovec_space(lv->lv_niovecs); 504 } else { 505 /* switch to shadow buffer! */ 506 lv = shadow; 507 lv->lv_item = lip; 508 if (ordered) { 509 /* track as an ordered logvec */ 510 ASSERT(lip->li_lv == NULL); 511 goto insert; 512 } 513 } 514 515 ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t))); 516 lip->li_ops->iop_format(lip, lv); 517 insert: 518 xfs_cil_prepare_item(log, lv, old_lv, diff_len); 519 } 520 } 521 522 /* 523 * The use of lockless waitqueue_active() requires that the caller has 524 * serialised itself against the wakeup call in xlog_cil_push_work(). That 525 * can be done by either holding the push lock or the context lock. 526 */ 527 static inline bool 528 xlog_cil_over_hard_limit( 529 struct xlog *log, 530 int32_t space_used) 531 { 532 if (waitqueue_active(&log->l_cilp->xc_push_wait)) 533 return true; 534 if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) 535 return true; 536 return false; 537 } 538 539 /* 540 * Insert the log items into the CIL and calculate the difference in space 541 * consumed by the item. Add the space to the checkpoint ticket and calculate 542 * if the change requires additional log metadata. If it does, take that space 543 * as well. Remove the amount of space we added to the checkpoint ticket from 544 * the current transaction ticket so that the accounting works out correctly. 545 */ 546 static void 547 xlog_cil_insert_items( 548 struct xlog *log, 549 struct xfs_trans *tp, 550 uint32_t released_space) 551 { 552 struct xfs_cil *cil = log->l_cilp; 553 struct xfs_cil_ctx *ctx = cil->xc_ctx; 554 struct xfs_log_item *lip; 555 int len = 0; 556 int iovhdr_res = 0, split_res = 0, ctx_res = 0; 557 int space_used; 558 int order; 559 unsigned int cpu_nr; 560 struct xlog_cil_pcp *cilpcp; 561 562 ASSERT(tp); 563 564 /* 565 * We can do this safely because the context can't checkpoint until we 566 * are done so it doesn't matter exactly how we update the CIL. 567 */ 568 xlog_cil_insert_format_items(log, tp, &len); 569 570 /* 571 * Subtract the space released by intent cancelation from the space we 572 * consumed so that we remove it from the CIL space and add it back to 573 * the current transaction reservation context. 574 */ 575 len -= released_space; 576 577 /* 578 * Grab the per-cpu pointer for the CIL before we start any accounting. 579 * That ensures that we are running with pre-emption disabled and so we 580 * can't be scheduled away between split sample/update operations that 581 * are done without outside locking to serialise them. 
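	 * (get_cpu() disables preemption until the matching put_cpu() below,
	 * so the non-atomic read-modify-write updates of the cilpcp counters
	 * cannot race with this task migrating to another CPU.)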
582 */ 583 cpu_nr = get_cpu(); 584 cilpcp = this_cpu_ptr(cil->xc_pcp); 585 586 /* Tell the future push that there was work added by this CPU. */ 587 if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask)) 588 cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask); 589 590 /* 591 * We need to take the CIL checkpoint unit reservation on the first 592 * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't 593 * unnecessarily do an atomic op in the fast path here. We can clear the 594 * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that 595 * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit. 596 */ 597 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) && 598 test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) 599 ctx_res = ctx->ticket->t_unit_res; 600 601 /* 602 * Check if we need to steal iclog headers. atomic_read() is not a 603 * locked atomic operation, so we can check the value before we do any 604 * real atomic ops in the fast path. If we've already taken the CIL unit 605 * reservation from this commit, we've already got one iclog header 606 * space reserved so we have to account for that otherwise we risk 607 * overrunning the reservation on this ticket. 608 * 609 * If the CIL is already at the hard limit, we might need more header 610 * space that originally reserved. So steal more header space from every 611 * commit that occurs once we are over the hard limit to ensure the CIL 612 * push won't run out of reservation space. 613 * 614 * This can steal more than we need, but that's OK. 615 * 616 * The cil->xc_ctx_lock provides the serialisation necessary for safely 617 * calling xlog_cil_over_hard_limit() in this context. 618 */ 619 space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len; 620 if (atomic_read(&cil->xc_iclog_hdrs) > 0 || 621 xlog_cil_over_hard_limit(log, space_used)) { 622 split_res = log->l_iclog_hsize + 623 sizeof(struct xlog_op_header); 624 if (ctx_res) 625 ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1); 626 else 627 ctx_res = split_res * tp->t_ticket->t_iclog_hdrs; 628 atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs); 629 } 630 cilpcp->space_reserved += ctx_res; 631 632 /* 633 * Accurately account when over the soft limit, otherwise fold the 634 * percpu count into the global count if over the per-cpu threshold. 635 */ 636 if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) { 637 atomic_add(len, &ctx->space_used); 638 } else if (cilpcp->space_used + len > 639 (XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) { 640 space_used = atomic_add_return(cilpcp->space_used + len, 641 &ctx->space_used); 642 cilpcp->space_used = 0; 643 644 /* 645 * If we just transitioned over the soft limit, we need to 646 * transition to the global atomic counter. 647 */ 648 if (space_used >= XLOG_CIL_SPACE_LIMIT(log)) 649 xlog_cil_insert_pcp_aggregate(cil, ctx); 650 } else { 651 cilpcp->space_used += len; 652 } 653 /* attach the transaction to the CIL if it has any busy extents */ 654 if (!list_empty(&tp->t_busy)) 655 list_splice_init(&tp->t_busy, &cilpcp->busy_extents); 656 657 /* 658 * Now update the order of everything modified in the transaction 659 * and insert items into the CIL if they aren't already there. 660 * We do this here so we only need to take the CIL lock once during 661 * the transaction commit. 662 */ 663 order = atomic_inc_return(&ctx->order_id); 664 list_for_each_entry(lip, &tp->t_items, li_trans) { 665 /* Skip items which aren't dirty in this transaction. 
		 */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		lip->li_order_id = order;
		if (!list_empty(&lip->li_cil))
			continue;
		list_add_tail(&lip->li_cil, &cilpcp->log_items);
	}
	put_cpu();

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	tp->t_ticket->t_curr_res -= ctx_res + len;
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 " log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, " split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, " ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	}
}

static inline void
xlog_cil_ail_insert_batch(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->ail_lock);
	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		if (lip->li_ops->iop_unpin)
			lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Take the checkpoint's log vector chain of items and insert the attached log
 * items into the AIL. This uses bulk insertion techniques to minimise AIL lock
 * traffic.
 *
 * The AIL tracks log items via the start record LSN of the checkpoint,
 * not the commit record LSN. This is because we can pipeline multiple
 * checkpoints, and so the start record of checkpoint N+1 can be
 * written before the commit record of checkpoint N. i.e:
 *
 *	start N			commit N
 *	+-------------+------------+----------------+
 *		      start N+1			  commit N+1
 *
 * The tail of the log cannot be moved to the LSN of commit N when all
 * the items of that checkpoint are written back, because then the
 * start record for N+1 is no longer in the active portion of the log
 * and recovery will fail/corrupt the filesystem.
 *
 * Hence when all the log items in checkpoint N are written back, the
 * tail of the log must now only move as far forwards as the start LSN
 * of checkpoint N+1.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same as an
 * iclog write error even though we haven't started any IO yet. Hence in this
 * case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk the AIL to
 * find the insertion point on every xfs_log_item_batch_insert() call. This
 * saves a lot of needless list walking and is a net win, even though it
 * slightly increases the amount of AIL lock traffic to set it up and tear it
 * down.
753 */ 754 static void 755 xlog_cil_ail_insert( 756 struct xfs_cil_ctx *ctx, 757 bool aborted) 758 { 759 #define LOG_ITEM_BATCH_SIZE 32 760 struct xfs_ail *ailp = ctx->cil->xc_log->l_ailp; 761 struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE]; 762 struct xfs_log_vec *lv; 763 struct xfs_ail_cursor cur; 764 xfs_lsn_t old_head; 765 int i = 0; 766 767 /* 768 * Update the AIL head LSN with the commit record LSN of this 769 * checkpoint. As iclogs are always completed in order, this should 770 * always be the same (as iclogs can contain multiple commit records) or 771 * higher LSN than the current head. We do this before insertion of the 772 * items so that log space checks during insertion will reflect the 773 * space that this checkpoint has already consumed. We call 774 * xfs_ail_update_finish() so that tail space and space-based wakeups 775 * will be recalculated appropriately. 776 */ 777 ASSERT(XFS_LSN_CMP(ctx->commit_lsn, ailp->ail_head_lsn) >= 0 || 778 aborted); 779 spin_lock(&ailp->ail_lock); 780 xfs_trans_ail_cursor_last(ailp, &cur, ctx->start_lsn); 781 old_head = ailp->ail_head_lsn; 782 ailp->ail_head_lsn = ctx->commit_lsn; 783 /* xfs_ail_update_finish() drops the ail_lock */ 784 xfs_ail_update_finish(ailp, NULLCOMMITLSN); 785 786 /* 787 * We move the AIL head forwards to account for the space used in the 788 * log before we remove that space from the grant heads. This prevents a 789 * transient condition where reservation space appears to become 790 * available on return, only for it to disappear again immediately as 791 * the AIL head update accounts in the log tail space. 792 */ 793 smp_wmb(); /* paired with smp_rmb in xlog_grant_space_left */ 794 xlog_grant_return_space(ailp->ail_log, old_head, ailp->ail_head_lsn); 795 796 /* unpin all the log items */ 797 list_for_each_entry(lv, &ctx->lv_chain, lv_list) { 798 struct xfs_log_item *lip = lv->lv_item; 799 xfs_lsn_t item_lsn; 800 801 if (aborted) 802 set_bit(XFS_LI_ABORTED, &lip->li_flags); 803 804 if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) { 805 lip->li_ops->iop_release(lip); 806 continue; 807 } 808 809 if (lip->li_ops->iop_committed) 810 item_lsn = lip->li_ops->iop_committed(lip, 811 ctx->start_lsn); 812 else 813 item_lsn = ctx->start_lsn; 814 815 /* item_lsn of -1 means the item needs no further processing */ 816 if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) 817 continue; 818 819 /* 820 * if we are aborting the operation, no point in inserting the 821 * object into the AIL as we are in a shutdown situation. 822 */ 823 if (aborted) { 824 ASSERT(xlog_is_shutdown(ailp->ail_log)); 825 if (lip->li_ops->iop_unpin) 826 lip->li_ops->iop_unpin(lip, 1); 827 continue; 828 } 829 830 if (item_lsn != ctx->start_lsn) { 831 832 /* 833 * Not a bulk update option due to unusual item_lsn. 834 * Push into AIL immediately, rechecking the lsn once 835 * we have the ail lock. Then unpin the item. This does 836 * not affect the AIL cursor the bulk insert path is 837 * using. 838 */ 839 spin_lock(&ailp->ail_lock); 840 if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) 841 xfs_trans_ail_update(ailp, lip, item_lsn); 842 else 843 spin_unlock(&ailp->ail_lock); 844 if (lip->li_ops->iop_unpin) 845 lip->li_ops->iop_unpin(lip, 0); 846 continue; 847 } 848 849 /* Item is a candidate for bulk AIL insert. */ 850 log_items[i++] = lv->lv_item; 851 if (i >= LOG_ITEM_BATCH_SIZE) { 852 xlog_cil_ail_insert_batch(ailp, &cur, log_items, 853 LOG_ITEM_BATCH_SIZE, ctx->start_lsn); 854 i = 0; 855 } 856 } 857 858 /* make sure we insert the remainder! 
*/ 859 if (i) 860 xlog_cil_ail_insert_batch(ailp, &cur, log_items, i, 861 ctx->start_lsn); 862 863 spin_lock(&ailp->ail_lock); 864 xfs_trans_ail_cursor_done(&cur); 865 spin_unlock(&ailp->ail_lock); 866 } 867 868 static void 869 xlog_cil_free_logvec( 870 struct list_head *lv_chain) 871 { 872 struct xfs_log_vec *lv; 873 874 while (!list_empty(lv_chain)) { 875 lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list); 876 list_del_init(&lv->lv_list); 877 kvfree(lv); 878 } 879 } 880 881 /* 882 * Mark all items committed and clear busy extents. We free the log vector 883 * chains in a separate pass so that we unpin the log items as quickly as 884 * possible. 885 */ 886 static void 887 xlog_cil_committed( 888 struct xfs_cil_ctx *ctx) 889 { 890 struct xfs_mount *mp = ctx->cil->xc_log->l_mp; 891 bool abort = xlog_is_shutdown(ctx->cil->xc_log); 892 893 /* 894 * If the I/O failed, we're aborting the commit and already shutdown. 895 * Wake any commit waiters before aborting the log items so we don't 896 * block async log pushers on callbacks. Async log pushers explicitly do 897 * not wait on log force completion because they may be holding locks 898 * required to unpin items. 899 */ 900 if (abort) { 901 spin_lock(&ctx->cil->xc_push_lock); 902 wake_up_all(&ctx->cil->xc_start_wait); 903 wake_up_all(&ctx->cil->xc_commit_wait); 904 spin_unlock(&ctx->cil->xc_push_lock); 905 } 906 907 xlog_cil_ail_insert(ctx, abort); 908 909 xfs_extent_busy_sort(&ctx->busy_extents.extent_list); 910 xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list, 911 xfs_has_discard(mp) && !abort); 912 913 spin_lock(&ctx->cil->xc_push_lock); 914 list_del(&ctx->committing); 915 spin_unlock(&ctx->cil->xc_push_lock); 916 917 xlog_cil_free_logvec(&ctx->lv_chain); 918 919 if (!list_empty(&ctx->busy_extents.extent_list)) { 920 ctx->busy_extents.mount = mp; 921 ctx->busy_extents.owner = ctx; 922 xfs_discard_extents(mp, &ctx->busy_extents); 923 return; 924 } 925 926 kfree(ctx); 927 } 928 929 void 930 xlog_cil_process_committed( 931 struct list_head *list) 932 { 933 struct xfs_cil_ctx *ctx; 934 935 while ((ctx = list_first_entry_or_null(list, 936 struct xfs_cil_ctx, iclog_entry))) { 937 list_del(&ctx->iclog_entry); 938 xlog_cil_committed(ctx); 939 } 940 } 941 942 /* 943 * Record the LSN of the iclog we were just granted space to start writing into. 944 * If the context doesn't have a start_lsn recorded, then this iclog will 945 * contain the start record for the checkpoint. Otherwise this write contains 946 * the commit record for the checkpoint. 947 */ 948 void 949 xlog_cil_set_ctx_write_state( 950 struct xfs_cil_ctx *ctx, 951 struct xlog_in_core *iclog) 952 { 953 struct xfs_cil *cil = ctx->cil; 954 xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); 955 956 ASSERT(!ctx->commit_lsn); 957 if (!ctx->start_lsn) { 958 spin_lock(&cil->xc_push_lock); 959 /* 960 * The LSN we need to pass to the log items on transaction 961 * commit is the LSN reported by the first log vector write, not 962 * the commit lsn. If we use the commit record lsn then we can 963 * move the grant write head beyond the tail LSN and overwrite 964 * it. 965 */ 966 ctx->start_lsn = lsn; 967 wake_up_all(&cil->xc_start_wait); 968 spin_unlock(&cil->xc_push_lock); 969 970 /* 971 * Make sure the metadata we are about to overwrite in the log 972 * has been flushed to stable storage before this iclog is 973 * issued. 
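		 * (XLOG_ICL_NEED_FLUSH should result in the iclog being
		 * submitted with a preceding cache flush; the mapping to the
		 * actual bio flags is done in the iclog write path in
		 * xfs_log.c.)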
974 */ 975 spin_lock(&cil->xc_log->l_icloglock); 976 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; 977 spin_unlock(&cil->xc_log->l_icloglock); 978 return; 979 } 980 981 /* 982 * Take a reference to the iclog for the context so that we still hold 983 * it when xlog_write is done and has released it. This means the 984 * context controls when the iclog is released for IO. 985 */ 986 atomic_inc(&iclog->ic_refcnt); 987 988 /* 989 * xlog_state_get_iclog_space() guarantees there is enough space in the 990 * iclog for an entire commit record, so we can attach the context 991 * callbacks now. This needs to be done before we make the commit_lsn 992 * visible to waiters so that checkpoints with commit records in the 993 * same iclog order their IO completion callbacks in the same order that 994 * the commit records appear in the iclog. 995 */ 996 spin_lock(&cil->xc_log->l_icloglock); 997 list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks); 998 spin_unlock(&cil->xc_log->l_icloglock); 999 1000 /* 1001 * Now we can record the commit LSN and wake anyone waiting for this 1002 * sequence to have the ordered commit record assigned to a physical 1003 * location in the log. 1004 */ 1005 spin_lock(&cil->xc_push_lock); 1006 ctx->commit_iclog = iclog; 1007 ctx->commit_lsn = lsn; 1008 wake_up_all(&cil->xc_commit_wait); 1009 spin_unlock(&cil->xc_push_lock); 1010 } 1011 1012 1013 /* 1014 * Ensure that the order of log writes follows checkpoint sequence order. This 1015 * relies on the context LSN being zero until the log write has guaranteed the 1016 * LSN that the log write will start at via xlog_state_get_iclog_space(). 1017 */ 1018 enum _record_type { 1019 _START_RECORD, 1020 _COMMIT_RECORD, 1021 }; 1022 1023 static int 1024 xlog_cil_order_write( 1025 struct xfs_cil *cil, 1026 xfs_csn_t sequence, 1027 enum _record_type record) 1028 { 1029 struct xfs_cil_ctx *ctx; 1030 1031 restart: 1032 spin_lock(&cil->xc_push_lock); 1033 list_for_each_entry(ctx, &cil->xc_committing, committing) { 1034 /* 1035 * Avoid getting stuck in this loop because we were woken by the 1036 * shutdown, but then went back to sleep once already in the 1037 * shutdown state. 1038 */ 1039 if (xlog_is_shutdown(cil->xc_log)) { 1040 spin_unlock(&cil->xc_push_lock); 1041 return -EIO; 1042 } 1043 1044 /* 1045 * Higher sequences will wait for this one so skip them. 1046 * Don't wait for our own sequence, either. 1047 */ 1048 if (ctx->sequence >= sequence) 1049 continue; 1050 1051 /* Wait until the LSN for the record has been recorded. */ 1052 switch (record) { 1053 case _START_RECORD: 1054 if (!ctx->start_lsn) { 1055 xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock); 1056 goto restart; 1057 } 1058 break; 1059 case _COMMIT_RECORD: 1060 if (!ctx->commit_lsn) { 1061 xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); 1062 goto restart; 1063 } 1064 break; 1065 } 1066 } 1067 spin_unlock(&cil->xc_push_lock); 1068 return 0; 1069 } 1070 1071 /* 1072 * Write out the log vector change now attached to the CIL context. This will 1073 * write a start record that needs to be strictly ordered in ascending CIL 1074 * sequence order so that log recovery will always use in-order start LSNs when 1075 * replaying checkpoints. 
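 * (The ordering is provided by xlog_cil_order_write() above: we sleep on
 * xc_start_wait until every lower sequence on the committing list has a
 * start LSN recorded.)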
1076 */ 1077 static int 1078 xlog_cil_write_chain( 1079 struct xfs_cil_ctx *ctx, 1080 uint32_t chain_len) 1081 { 1082 struct xlog *log = ctx->cil->xc_log; 1083 int error; 1084 1085 error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD); 1086 if (error) 1087 return error; 1088 return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len); 1089 } 1090 1091 /* 1092 * Write out the commit record of a checkpoint transaction to close off a 1093 * running log write. These commit records are strictly ordered in ascending CIL 1094 * sequence order so that log recovery will always replay the checkpoints in the 1095 * correct order. 1096 */ 1097 static int 1098 xlog_cil_write_commit_record( 1099 struct xfs_cil_ctx *ctx) 1100 { 1101 struct xlog *log = ctx->cil->xc_log; 1102 struct xlog_op_header ophdr = { 1103 .oh_clientid = XFS_TRANSACTION, 1104 .oh_tid = cpu_to_be32(ctx->ticket->t_tid), 1105 .oh_flags = XLOG_COMMIT_TRANS, 1106 }; 1107 struct xfs_log_iovec reg = { 1108 .i_addr = &ophdr, 1109 .i_len = sizeof(struct xlog_op_header), 1110 .i_type = XLOG_REG_TYPE_COMMIT, 1111 }; 1112 struct xfs_log_vec vec = { 1113 .lv_niovecs = 1, 1114 .lv_iovecp = ®, 1115 }; 1116 int error; 1117 LIST_HEAD(lv_chain); 1118 list_add(&vec.lv_list, &lv_chain); 1119 1120 if (xlog_is_shutdown(log)) 1121 return -EIO; 1122 1123 error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD); 1124 if (error) 1125 return error; 1126 1127 /* account for space used by record data */ 1128 ctx->ticket->t_curr_res -= reg.i_len; 1129 error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len); 1130 if (error) 1131 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); 1132 return error; 1133 } 1134 1135 struct xlog_cil_trans_hdr { 1136 struct xlog_op_header oph[2]; 1137 struct xfs_trans_header thdr; 1138 struct xfs_log_iovec lhdr[2]; 1139 }; 1140 1141 /* 1142 * Build a checkpoint transaction header to begin the journal transaction. We 1143 * need to account for the space used by the transaction header here as it is 1144 * not accounted for in xlog_write(). 1145 * 1146 * This is the only place we write a transaction header, so we also build the 1147 * log opheaders that indicate the start of a log transaction and wrap the 1148 * transaction header. We keep the start record in it's own log vector rather 1149 * than compacting them into a single region as this ends up making the logic 1150 * in xlog_write() for handling empty opheaders for start, commit and unmount 1151 * records much simpler. 
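 *
 * The two regions that result are, roughly:
 *
 *	lhdr[0]: xlog_op_header (XLOG_START_TRANS start record,
 *		 XLOG_REG_TYPE_LRHEADER)
 *	lhdr[1]: xlog_op_header + xfs_trans_header (XLOG_REG_TYPE_TRANSHDR)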
1152 */ 1153 static void 1154 xlog_cil_build_trans_hdr( 1155 struct xfs_cil_ctx *ctx, 1156 struct xlog_cil_trans_hdr *hdr, 1157 struct xfs_log_vec *lvhdr, 1158 int num_iovecs) 1159 { 1160 struct xlog_ticket *tic = ctx->ticket; 1161 __be32 tid = cpu_to_be32(tic->t_tid); 1162 1163 memset(hdr, 0, sizeof(*hdr)); 1164 1165 /* Log start record */ 1166 hdr->oph[0].oh_tid = tid; 1167 hdr->oph[0].oh_clientid = XFS_TRANSACTION; 1168 hdr->oph[0].oh_flags = XLOG_START_TRANS; 1169 1170 /* log iovec region pointer */ 1171 hdr->lhdr[0].i_addr = &hdr->oph[0]; 1172 hdr->lhdr[0].i_len = sizeof(struct xlog_op_header); 1173 hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER; 1174 1175 /* log opheader */ 1176 hdr->oph[1].oh_tid = tid; 1177 hdr->oph[1].oh_clientid = XFS_TRANSACTION; 1178 hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header)); 1179 1180 /* transaction header in host byte order format */ 1181 hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC; 1182 hdr->thdr.th_type = XFS_TRANS_CHECKPOINT; 1183 hdr->thdr.th_tid = tic->t_tid; 1184 hdr->thdr.th_num_items = num_iovecs; 1185 1186 /* log iovec region pointer */ 1187 hdr->lhdr[1].i_addr = &hdr->oph[1]; 1188 hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) + 1189 sizeof(struct xfs_trans_header); 1190 hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR; 1191 1192 lvhdr->lv_niovecs = 2; 1193 lvhdr->lv_iovecp = &hdr->lhdr[0]; 1194 lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len; 1195 1196 tic->t_curr_res -= lvhdr->lv_bytes; 1197 } 1198 1199 /* 1200 * CIL item reordering compare function. We want to order in ascending ID order, 1201 * but we want to leave items with the same ID in the order they were added to 1202 * the list. This is important for operations like reflink where we log 4 order 1203 * dependent intents in a single transaction when we overwrite an existing 1204 * shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop), 1205 * CUI (inc), BUI(remap)... 1206 */ 1207 static int 1208 xlog_cil_order_cmp( 1209 void *priv, 1210 const struct list_head *a, 1211 const struct list_head *b) 1212 { 1213 struct xfs_log_vec *l1 = container_of(a, struct xfs_log_vec, lv_list); 1214 struct xfs_log_vec *l2 = container_of(b, struct xfs_log_vec, lv_list); 1215 1216 return l1->lv_order_id > l2->lv_order_id; 1217 } 1218 1219 /* 1220 * Pull all the log vectors off the items in the CIL, and remove the items from 1221 * the CIL. We don't need the CIL lock here because it's only needed on the 1222 * transaction commit side which is currently locked out by the flush lock. 1223 * 1224 * If a log item is marked with a whiteout, we do not need to write it to the 1225 * journal and so we just move them to the whiteout list for the caller to 1226 * dispose of appropriately. 
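 *
 * (The caller, xlog_cil_push_work(), hands the whiteout list to
 * xlog_cil_cleanup_whiteouts() once the checkpoint has been written; that
 * unpin drops the reference the whiteout items have held since they were
 * first committed.)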
 */
static void
xlog_cil_build_lv_chain(
	struct xfs_cil_ctx	*ctx,
	struct list_head	*whiteouts,
	uint32_t		*num_iovecs,
	uint32_t		*num_bytes)
{
	while (!list_empty(&ctx->log_items)) {
		struct xfs_log_item	*item;
		struct xfs_log_vec	*lv;

		item = list_first_entry(&ctx->log_items,
					struct xfs_log_item, li_cil);

		if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
			list_move(&item->li_cil, whiteouts);
			trace_xfs_cil_whiteout_skip(item);
			continue;
		}

		lv = item->li_lv;
		lv->lv_order_id = item->li_order_id;

		/* we don't write ordered log vectors */
		if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
			*num_bytes += lv->lv_bytes;
		*num_iovecs += lv->lv_niovecs;
		list_add_tail(&lv->lv_list, &ctx->lv_chain);

		list_del_init(&item->li_cil);
		item->li_order_id = 0;
		item->li_lv = NULL;
	}
}

static void
xlog_cil_cleanup_whiteouts(
	struct list_head	*whiteouts)
{
	while (!list_empty(whiteouts)) {
		struct xfs_log_item *item = list_first_entry(whiteouts,
						struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		trace_xfs_cil_whiteout_unpin(item);
		item->li_ops->iop_unpin(item, 1);
	}
}

/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 *
 * This runs from a workqueue so it does not inherit any specific memory
 * allocation context. However, we do not want to block on memory reclaim
 * recursing back into the filesystem because this push may have been triggered
 * by memory reclaim itself. Hence we really need to run under full GFP_NOFS
 * constraints here.
 */
static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	unsigned int		nofs_flags = memalloc_nofs_save();
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, push_work);
	struct xfs_cil		*cil = ctx->cil;
	struct xlog		*log = cil->xc_log;
	struct xfs_cil_ctx	*new_ctx;
	int			num_iovecs = 0;
	int			num_bytes = 0;
	int			error = 0;
	struct xlog_cil_trans_hdr thdr;
	struct xfs_log_vec	lvhdr = {};
	xfs_csn_t		push_seq;
	bool			push_commit_stable;
	LIST_HEAD		(whiteouts);
	struct xlog_ticket	*ticket;

	new_ctx = xlog_cil_ctx_alloc();
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);
	push_commit_stable = cil->xc_push_commit_stable;
	cil->xc_push_commit_stable = false;

	/*
	 * As we are about to switch to a new, empty CIL context, we no longer
	 * need to throttle tasks on CIL space overruns. Wake any waiters that
	 * the hard push throttle may have caught so they can start committing
	 * to the new context.
The ctx->xc_push_lock provides the serialisation 1332 * necessary for safely using the lockless waitqueue_active() check in 1333 * this context. 1334 */ 1335 if (waitqueue_active(&cil->xc_push_wait)) 1336 wake_up_all(&cil->xc_push_wait); 1337 1338 xlog_cil_push_pcp_aggregate(cil, ctx); 1339 1340 /* 1341 * Check if we've anything to push. If there is nothing, then we don't 1342 * move on to a new sequence number and so we have to be able to push 1343 * this sequence again later. 1344 */ 1345 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) { 1346 cil->xc_push_seq = 0; 1347 spin_unlock(&cil->xc_push_lock); 1348 goto out_skip; 1349 } 1350 1351 1352 /* check for a previously pushed sequence */ 1353 if (push_seq < ctx->sequence) { 1354 spin_unlock(&cil->xc_push_lock); 1355 goto out_skip; 1356 } 1357 1358 /* 1359 * We are now going to push this context, so add it to the committing 1360 * list before we do anything else. This ensures that anyone waiting on 1361 * this push can easily detect the difference between a "push in 1362 * progress" and "CIL is empty, nothing to do". 1363 * 1364 * IOWs, a wait loop can now check for: 1365 * the current sequence not being found on the committing list; 1366 * an empty CIL; and 1367 * an unchanged sequence number 1368 * to detect a push that had nothing to do and therefore does not need 1369 * waiting on. If the CIL is not empty, we get put on the committing 1370 * list before emptying the CIL and bumping the sequence number. Hence 1371 * an empty CIL and an unchanged sequence number means we jumped out 1372 * above after doing nothing. 1373 * 1374 * Hence the waiter will either find the commit sequence on the 1375 * committing list or the sequence number will be unchanged and the CIL 1376 * still dirty. In that latter case, the push has not yet started, and 1377 * so the waiter will have to continue trying to check the CIL 1378 * committing list until it is found. In extreme cases of delay, the 1379 * sequence may fully commit between the attempts the wait makes to wait 1380 * on the commit sequence. 1381 */ 1382 list_add(&ctx->committing, &cil->xc_committing); 1383 spin_unlock(&cil->xc_push_lock); 1384 1385 xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes); 1386 1387 /* 1388 * Switch the contexts so we can drop the context lock and move out 1389 * of a shared context. We can't just go straight to the commit record, 1390 * though - we need to synchronise with previous and future commits so 1391 * that the commit records are correctly ordered in the log to ensure 1392 * that we process items during log IO completion in the correct order. 1393 * 1394 * For example, if we get an EFI in one checkpoint and the EFD in the 1395 * next (e.g. due to log forces), we do not want the checkpoint with 1396 * the EFD to be committed before the checkpoint with the EFI. Hence 1397 * we must strictly order the commit records of the checkpoints so 1398 * that: a) the checkpoint callbacks are attached to the iclogs in the 1399 * correct order; and b) the checkpoints are replayed in correct order 1400 * in log recovery. 1401 * 1402 * Hence we need to add this context to the committing context list so 1403 * that higher sequences will wait for us to write out a commit record 1404 * before they do. 1405 * 1406 * xfs_log_force_seq requires us to mirror the new sequence into the cil 1407 * structure atomically with the addition of this sequence to the 1408 * committing list. 
	 * This also ensures that we can do unlocked checks against the
	 * current sequence in log forces without risking dereferencing a
	 * freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	xlog_cil_ctx_switch(cil, new_ctx);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Sort the log vector chain before we add the transaction headers.
	 * This ensures we always have the transaction headers at the start
	 * of the chain.
	 */
	list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
	 * it gets written into the iclog first.
	 */
	xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
	num_bytes += lvhdr.lv_bytes;
	list_add(&lvhdr.lv_list, &ctx->lv_chain);

	/*
	 * Take the lvhdr back off the lv_chain immediately after calling
	 * xlog_cil_write_chain() as it should not be passed to log IO
	 * completion.
	 */
	error = xlog_cil_write_chain(ctx, num_bytes);
	list_del(&lvhdr.lv_list);
	if (error)
		goto out_abort_free_ticket;

	error = xlog_cil_write_commit_record(ctx);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * Grab the ticket from the ctx so we can ungrant it after releasing the
	 * commit_iclog. The ctx may be freed by the time we return from
	 * releasing the commit_iclog (i.e. checkpoint has been completed and
	 * callback run) so we can't reference the ctx after the call to
	 * xlog_state_release_iclog().
	 */
	ticket = ctx->ticket;

	/*
	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
	 * to complete before we submit the commit_iclog. We can't use state
	 * checks for this - ACTIVE can be either a past completed iclog or a
	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
	 * past or future iclog awaiting IO or ordered IO completion to be run.
	 * In the latter case, if it's a future iclog and we wait on it, then we
	 * will hang because it won't get processed through to ic_force_wait
	 * wakeup until this commit_iclog is written to disk. Hence we use the
	 * iclog header lsn and compare it to the commit lsn to determine if we
	 * need to wait on iclogs or not.
	 */
	spin_lock(&log->l_icloglock);
	if (ctx->start_lsn != ctx->commit_lsn) {
		xfs_lsn_t	plsn;

		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
			/*
			 * Waiting on ic_force_wait orders the completion of
			 * iclogs older than ic_prev. Hence we only need to wait
			 * on the most recent older iclog here.
			 */
			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
			spin_lock(&log->l_icloglock);
		}

		/*
		 * We need to issue a pre-flush so that the ordering for this
		 * checkpoint is correctly preserved down to stable storage.
		 */
		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
	}

	/*
	 * The commit iclog must be written to stable storage to guarantee
	 * journal IO vs metadata writeback IO is correctly ordered on stable
	 * storage.
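	 * (This is requested via the XLOG_ICL_NEED_FUA flag set below; the
	 * iclog write path is expected to issue the commit iclog with FUA
	 * semantics, or an equivalent post-flush, so it is stable by the
	 * time the bio completes.)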
1496 * 1497 * If the push caller needs the commit to be immediately stable and the 1498 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it 1499 * will be written when released, switch it's state to WANT_SYNC right 1500 * now. 1501 */ 1502 ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA; 1503 if (push_commit_stable && 1504 ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE) 1505 xlog_state_switch_iclogs(log, ctx->commit_iclog, 0); 1506 ticket = ctx->ticket; 1507 xlog_state_release_iclog(log, ctx->commit_iclog, ticket); 1508 1509 /* Not safe to reference ctx now! */ 1510 1511 spin_unlock(&log->l_icloglock); 1512 xlog_cil_cleanup_whiteouts(&whiteouts); 1513 xfs_log_ticket_ungrant(log, ticket); 1514 memalloc_nofs_restore(nofs_flags); 1515 return; 1516 1517 out_skip: 1518 up_write(&cil->xc_ctx_lock); 1519 xfs_log_ticket_put(new_ctx->ticket); 1520 kfree(new_ctx); 1521 memalloc_nofs_restore(nofs_flags); 1522 return; 1523 1524 out_abort_free_ticket: 1525 ASSERT(xlog_is_shutdown(log)); 1526 xlog_cil_cleanup_whiteouts(&whiteouts); 1527 if (!ctx->commit_iclog) { 1528 xfs_log_ticket_ungrant(log, ctx->ticket); 1529 xlog_cil_committed(ctx); 1530 memalloc_nofs_restore(nofs_flags); 1531 return; 1532 } 1533 spin_lock(&log->l_icloglock); 1534 ticket = ctx->ticket; 1535 xlog_state_release_iclog(log, ctx->commit_iclog, ticket); 1536 /* Not safe to reference ctx now! */ 1537 spin_unlock(&log->l_icloglock); 1538 xfs_log_ticket_ungrant(log, ticket); 1539 memalloc_nofs_restore(nofs_flags); 1540 } 1541 1542 /* 1543 * We need to push CIL every so often so we don't cache more than we can fit in 1544 * the log. The limit really is that a checkpoint can't be more than half the 1545 * log (the current checkpoint is not allowed to overwrite the previous 1546 * checkpoint), but commit latency and memory usage limit this to a smaller 1547 * size. 1548 */ 1549 static void 1550 xlog_cil_push_background( 1551 struct xlog *log) 1552 { 1553 struct xfs_cil *cil = log->l_cilp; 1554 int space_used = atomic_read(&cil->xc_ctx->space_used); 1555 1556 /* 1557 * The cil won't be empty because we are called while holding the 1558 * context lock so whatever we added to the CIL will still be there. 1559 */ 1560 ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)); 1561 1562 /* 1563 * We are done if: 1564 * - we haven't used up all the space available yet; or 1565 * - we've already queued up a push; and 1566 * - we're not over the hard limit; and 1567 * - nothing has been over the hard limit. 1568 * 1569 * If so, we don't need to take the push lock as there's nothing to do. 1570 */ 1571 if (space_used < XLOG_CIL_SPACE_LIMIT(log) || 1572 (cil->xc_push_seq == cil->xc_current_sequence && 1573 space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) && 1574 !waitqueue_active(&cil->xc_push_wait))) { 1575 up_read(&cil->xc_ctx_lock); 1576 return; 1577 } 1578 1579 spin_lock(&cil->xc_push_lock); 1580 if (cil->xc_push_seq < cil->xc_current_sequence) { 1581 cil->xc_push_seq = cil->xc_current_sequence; 1582 queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work); 1583 } 1584 1585 /* 1586 * Drop the context lock now, we can't hold that if we need to sleep 1587 * because we are over the blocking threshold. The push_lock is still 1588 * held, so blocking threshold sleep/wakeup is still correctly 1589 * serialised here. 1590 */ 1591 up_read(&cil->xc_ctx_lock); 1592 1593 /* 1594 * If we are well over the space limit, throttle the work that is being 1595 * done until the push work on this context has begun. 
Enforce the hard 1596 * throttle on all transaction commits once it has been activated, even 1597 * if the committing transactions have resulted in the space usage 1598 * dipping back down under the hard limit. 1599 * 1600 * The ctx->xc_push_lock provides the serialisation necessary for safely 1601 * calling xlog_cil_over_hard_limit() in this context. 1602 */ 1603 if (xlog_cil_over_hard_limit(log, space_used)) { 1604 trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket); 1605 ASSERT(space_used < log->l_logsize); 1606 xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock); 1607 return; 1608 } 1609 1610 spin_unlock(&cil->xc_push_lock); 1611 1612 } 1613 1614 /* 1615 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence 1616 * number that is passed. When it returns, the work will be queued for 1617 * @push_seq, but it won't be completed. 1618 * 1619 * If the caller is performing a synchronous force, we will flush the workqueue 1620 * to get previously queued work moving to minimise the wait time they will 1621 * undergo waiting for all outstanding pushes to complete. The caller is 1622 * expected to do the required waiting for push_seq to complete. 1623 * 1624 * If the caller is performing an async push, we need to ensure that the 1625 * checkpoint is fully flushed out of the iclogs when we finish the push. If we 1626 * don't do this, then the commit record may remain sitting in memory in an 1627 * ACTIVE iclog. This then requires another full log force to push to disk, 1628 * which defeats the purpose of having an async, non-blocking CIL force 1629 * mechanism. Hence in this case we need to pass a flag to the push work to 1630 * indicate it needs to flush the commit record itself. 1631 */ 1632 static void 1633 xlog_cil_push_now( 1634 struct xlog *log, 1635 xfs_lsn_t push_seq, 1636 bool async) 1637 { 1638 struct xfs_cil *cil = log->l_cilp; 1639 1640 if (!cil) 1641 return; 1642 1643 ASSERT(push_seq && push_seq <= cil->xc_current_sequence); 1644 1645 /* start on any pending background push to minimise wait time on it */ 1646 if (!async) 1647 flush_workqueue(cil->xc_push_wq); 1648 1649 spin_lock(&cil->xc_push_lock); 1650 1651 /* 1652 * If this is an async flush request, we always need to set the 1653 * xc_push_commit_stable flag even if something else has already queued 1654 * a push. The flush caller is asking for the CIL to be on stable 1655 * storage when the next push completes, so regardless of who has queued 1656 * the push, the flush requires stable semantics from it. 1657 */ 1658 cil->xc_push_commit_stable = async; 1659 1660 /* 1661 * If the CIL is empty or we've already pushed the sequence then 1662 * there's no more work that we need to do. 
1675 bool
1676 xlog_cil_empty(
1677 struct xlog *log)
1678 {
1679 struct xfs_cil *cil = log->l_cilp;
1680 bool empty = false;
1681
1682 spin_lock(&cil->xc_push_lock);
1683 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
1684 empty = true;
1685 spin_unlock(&cil->xc_push_lock);
1686 return empty;
1687 }
1688
1689 /*
1690 * If there are intent done items in this transaction and the related intent was
1691 * committed in the current (same) CIL checkpoint, we don't need to write either
1692 * the intent or intent done item to the journal as the change will be
1693 * journalled atomically within this checkpoint. As we cannot remove items from
1694 * the CIL here, mark the related intent with a whiteout so that the CIL push
1695 * can remove it rather than writing it to the journal. Then remove the intent
1696 * done item from the current transaction and release it so it doesn't get put
1697 * into the CIL at all.
1698 */
1699 static uint32_t
1700 xlog_cil_process_intents(
1701 struct xfs_cil *cil,
1702 struct xfs_trans *tp)
1703 {
1704 struct xfs_log_item *lip, *ilip, *next;
1705 uint32_t len = 0;
1706
1707 list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1708 if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
1709 continue;
1710
1711 ilip = lip->li_ops->iop_intent(lip);
1712 if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
1713 continue;
1714 set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
1715 trace_xfs_cil_whiteout_mark(ilip);
1716 len += ilip->li_lv->lv_bytes;
1717 kvfree(ilip->li_lv);
1718 ilip->li_lv = NULL;
1719
1720 xfs_trans_del_item(lip);
1721 lip->li_ops->iop_release(lip);
1722 }
1723 return len;
1724 }
1725
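/*
 * Illustrative example (not part of the upstream source): a typical case for
 * the whiteout optimisation above is an extent free where the intent (EFI) and
 * the intent done (EFD) items both land in the same checkpoint. The EFI gets
 * marked XFS_LI_WHITEOUT so the push discards it instead of writing it, the
 * EFD is removed from the transaction and released, and the formatted size of
 * the EFI (li_lv->lv_bytes) is returned so xlog_cil_commit() can pass it to
 * xlog_cil_insert_items() as space already released from the CIL.
 */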
1726 /*
1727 * Commit a transaction with the given vector to the Committed Item List.
1728 *
1729 * To do this, we need to format the items, pin them in memory if required and
1730 * account for the space used by the transaction. Once we have done that we
1731 * need to release the unused reservation for the transaction, attach the
1732 * transaction to the checkpoint context so we carry the busy extents through
1733 * to checkpoint completion, and then unlock all the items in the transaction.
1734 *
1735 * Called with the context lock already held in read mode to lock out
1736 * background commit, returns without it held once background commits are
1737 * allowed again.
1738 */
1739 void
1740 xlog_cil_commit(
1741 struct xlog *log,
1742 struct xfs_trans *tp,
1743 xfs_csn_t *commit_seq,
1744 bool regrant)
1745 {
1746 struct xfs_cil *cil = log->l_cilp;
1747 struct xfs_log_item *lip, *next;
1748 uint32_t released_space = 0;
1749
1750 /*
1751 * Do all necessary memory allocation before we lock the CIL.
1752 * This ensures the allocation does not deadlock with a CIL
1753 * push in memory reclaim (e.g. from kswapd).
1754 */
1755 xlog_cil_alloc_shadow_bufs(log, tp);
1756
1757 /* lock out background commit */
1758 down_read(&cil->xc_ctx_lock);
1759
1760 if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
1761 released_space = xlog_cil_process_intents(cil, tp);
1762
1763 xlog_cil_insert_items(log, tp, released_space);
1764
1765 if (regrant && !xlog_is_shutdown(log))
1766 xfs_log_ticket_regrant(log, tp->t_ticket);
1767 else
1768 xfs_log_ticket_ungrant(log, tp->t_ticket);
1769 tp->t_ticket = NULL;
1770 xfs_trans_unreserve_and_mod_sb(tp);
1771
1772 /*
1773 * Once all the items of the transaction have been copied to the CIL,
1774 * the items can be unlocked and possibly freed.
1775 *
1776 * This needs to be done before we drop the CIL context lock because we
1777 * have to update state in the log items and unlock them before they go
1778 * to disk. If we don't, then the CIL checkpoint can race with us and
1779 * we can run checkpoint completion before we've updated and unlocked
1780 * the log items. This affects (at least) processing of stale buffers,
1781 * inodes and EFIs.
1782 */
1783 trace_xfs_trans_commit_items(tp, _RET_IP_);
1784 list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1785 xfs_trans_del_item(lip);
1786 if (lip->li_ops->iop_committing)
1787 lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
1788 }
1789 if (commit_seq)
1790 *commit_seq = cil->xc_ctx->sequence;
1791
1792 /* xlog_cil_push_background() releases cil->xc_ctx_lock */
1793 xlog_cil_push_background(log);
1794 }
1795
1796 /*
1797 * Flush the CIL to stable storage but don't wait for it to complete. This
1798 * requires the CIL push to ensure the commit record for the push hits the disk,
1799 * but otherwise is no different to a push done from a log force.
1800 */
1801 void
1802 xlog_cil_flush(
1803 struct xlog *log)
1804 {
1805 xfs_csn_t seq = log->l_cilp->xc_current_sequence;
1806
1807 trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
1808 xlog_cil_push_now(log, seq, true);
1809
1810 /*
1811 * If the CIL is empty, make sure that any previous checkpoint that may
1812 * still be in an active iclog is pushed to stable storage.
1813 */
1814 if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
1815 xfs_log_force(log->l_mp, 0);
1816 }
1817
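/*
 * Rough usage sketch (illustrative only; the callers named here live outside
 * this file): a synchronous transaction commit is built from xlog_cil_commit()
 * above and xlog_cil_force_seq() below. The commit path records the checkpoint
 * sequence the transaction went into and the force path later pushes and waits
 * on it, approximately:
 *
 *     xfs_csn_t       seq;
 *
 *     xlog_cil_commit(log, tp, &seq, regrant);
 *     ...
 *     commit_lsn = xlog_cil_force_seq(log, seq);
 *     if (commit_lsn != NULLCOMMITLSN && commit_lsn != 0)
 *             (force the iclog containing commit_lsn to disk)
 *
 * In the kernel the equivalent logic sits in the transaction commit and log
 * force paths in xfs_trans.c and xfs_log.c.
 */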
1818 /*
1819 * Conditionally push the CIL based on the sequence passed in.
1820 *
1821 * We only need to push if we haven't already pushed the sequence number given.
1822 * Hence the only time we will trigger a push here is if the push sequence is
1823 * the same as the current context.
1824 *
1825 * We return the current commit lsn to allow the callers to determine if an
1826 * iclog flush is necessary following this call.
1827 */
1828 xfs_lsn_t
1829 xlog_cil_force_seq(
1830 struct xlog *log,
1831 xfs_csn_t sequence)
1832 {
1833 struct xfs_cil *cil = log->l_cilp;
1834 struct xfs_cil_ctx *ctx;
1835 xfs_lsn_t commit_lsn = NULLCOMMITLSN;
1836
1837 ASSERT(sequence <= cil->xc_current_sequence);
1838
1839 if (!sequence)
1840 sequence = cil->xc_current_sequence;
1841 trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);
1842
1843 /*
1844 * Check to see if we need to force out the current context.
1845 * xlog_cil_push_now() handles racing pushes for the same sequence,
1846 * so no need to deal with it here.
1847 */
1848 restart:
1849 xlog_cil_push_now(log, sequence, false);
1850
1851 /*
1852 * See if we can find a previous sequence still committing.
1853 * We need to wait for all previous sequence commits to complete
1854 * before allowing the force of push_seq to go ahead. Hence block
1855 * on commits for those as well.
1856 */
1857 spin_lock(&cil->xc_push_lock);
1858 list_for_each_entry(ctx, &cil->xc_committing, committing) {
1859 /*
1860 * Avoid getting stuck in this loop because we were woken by the
1861 * shutdown, but then went back to sleep once already in the
1862 * shutdown state.
1863 */
1864 if (xlog_is_shutdown(log))
1865 goto out_shutdown;
1866 if (ctx->sequence > sequence)
1867 continue;
1868 if (!ctx->commit_lsn) {
1869 /*
1870 * It is still being pushed! Wait for the push to
1871 * complete, then start again from the beginning.
1872 */
1873 XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
1874 xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
1875 goto restart;
1876 }
1877 if (ctx->sequence != sequence)
1878 continue;
1879 /* found it! */
1880 commit_lsn = ctx->commit_lsn;
1881 }
1882
1883 /*
1884 * The call to xlog_cil_push_now() executes the push in the background.
1885 * Hence by the time we have got here, our sequence may not have been
1886 * pushed yet. This is true if the current sequence still matches the
1887 * push sequence after the above wait loop and the CIL still contains
1888 * dirty objects. This is guaranteed by the push code first adding the
1889 * context to the committing list before emptying the CIL.
1890 *
1891 * Hence if we don't find the context in the committing list and the
1892 * current sequence number is unchanged then the CIL contents are
1893 * significant. If the CIL is empty, it means there was nothing to push
1894 * and that means there is nothing to wait for. If the CIL is not empty,
1895 * it means we haven't yet started the push, because if it had started
1896 * we would have found the context on the committing list.
1897 */
1898 if (sequence == cil->xc_current_sequence &&
1899 !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
1900 spin_unlock(&cil->xc_push_lock);
1901 goto restart;
1902 }
1903
1904 spin_unlock(&cil->xc_push_lock);
1905 return commit_lsn;
1906
1907 /*
1908 * We detected a shutdown in progress. We need to trigger the log force
1909 * to pass through its iclog state machine error handling, even though
1910 * we are already in a shutdown state. Hence we can't return
1911 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
1912 * LSN is already stable), so we return a zero LSN instead.
1913 */
1914 out_shutdown:
1915 spin_unlock(&cil->xc_push_lock);
1916 return 0;
1917 }
1918
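/*
 * Note for illustration (summarising the function above, not upstream
 * commentary): callers of xlog_cil_force_seq() have to distinguish three
 * return values. A real commit LSN means the checkpoint was found on the
 * committing list and the caller may still need to flush the iclog containing
 * that LSN; NULLCOMMITLSN means there was nothing to push and nothing further
 * to wait for; and 0 means the log is shutting down and the caller should
 * still run its normal iclog error handling.
 */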
1919 /*
1920 * Perform initial CIL structure initialisation.
1921 */
1922 int
1923 xlog_cil_init(
1924 struct xlog *log)
1925 {
1926 struct xfs_cil *cil;
1927 struct xfs_cil_ctx *ctx;
1928 struct xlog_cil_pcp *cilpcp;
1929 int cpu;
1930
1931 cil = kzalloc(sizeof(*cil), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1932 if (!cil)
1933 return -ENOMEM;
1934 /*
1935 * Limit the CIL pipeline depth to 4 concurrent work items to bound the
1936 * concurrency the log spinlocks will be exposed to.
1937 */
1938 cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
1939 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
1940 4, log->l_mp->m_super->s_id);
1941 if (!cil->xc_push_wq)
1942 goto out_destroy_cil;
1943
1944 cil->xc_log = log;
1945 cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
1946 if (!cil->xc_pcp)
1947 goto out_destroy_wq;
1948
1949 for_each_possible_cpu(cpu) {
1950 cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
1951 INIT_LIST_HEAD(&cilpcp->busy_extents);
1952 INIT_LIST_HEAD(&cilpcp->log_items);
1953 }
1954
1955 INIT_LIST_HEAD(&cil->xc_committing);
1956 spin_lock_init(&cil->xc_push_lock);
1957 init_waitqueue_head(&cil->xc_push_wait);
1958 init_rwsem(&cil->xc_ctx_lock);
1959 init_waitqueue_head(&cil->xc_start_wait);
1960 init_waitqueue_head(&cil->xc_commit_wait);
1961 log->l_cilp = cil;
1962
1963 ctx = xlog_cil_ctx_alloc();
1964 xlog_cil_ctx_switch(cil, ctx);
1965 return 0;
1966
1967 out_destroy_wq:
1968 destroy_workqueue(cil->xc_push_wq);
1969 out_destroy_cil:
1970 kfree(cil);
1971 return -ENOMEM;
1972 }
1973
1974 void
1975 xlog_cil_destroy(
1976 struct xlog *log)
1977 {
1978 struct xfs_cil *cil = log->l_cilp;
1979
1980 if (cil->xc_ctx) {
1981 if (cil->xc_ctx->ticket)
1982 xfs_log_ticket_put(cil->xc_ctx->ticket);
1983 kfree(cil->xc_ctx);
1984 }
1985
1986 ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
1987 free_percpu(cil->xc_pcp);
1988 destroy_workqueue(cil->xc_push_wq);
1989 kfree(cil);
1990 }
1991
1992
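/*
 * Lifecycle sketch (illustrative only; the call sites named here are
 * assumptions about code outside this file): xlog_cil_init() and
 * xlog_cil_destroy() are expected to be paired by the log allocation and
 * teardown paths in xfs_log.c, with xlog_cil_init_post_recovery() run once log
 * recovery has found the head and tail of the log. xlog_cil_destroy() asserts
 * that the CIL is empty, so all pending pushes must have completed before the
 * log is torn down.
 */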