/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
        struct xlog             *log)
{
        struct xlog_ticket      *tic;

        tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
                                KM_SLEEP|KM_NOFS);

        /*
         * set the current reservation to zero so we know to steal the basic
         * transaction overhead reservation from the first transaction commit.
         */
        tic->t_curr_res = 0;
        return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
        struct xlog             *log)
{
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
}
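
/*
 * Illustrative sketch of the "steal on first commit" accounting described
 * above. This helper is hypothetical and not used anywhere in this file; it
 * only demonstrates the scheme: the checkpoint context ticket starts with a
 * zero current reservation, and the first transaction to commit into the
 * context donates the unit reservation out of its own transaction ticket.
 */
static inline void
example_steal_unit_reservation(
        int                     *ctx_curr_res,
        int                     ctx_unit_res,
        int                     *trans_curr_res)
{
        if (*ctx_curr_res == 0) {
                /* first commit into this context pays the overhead */
                *ctx_curr_res = ctx_unit_res;
                *trans_curr_res -= ctx_unit_res;
        }
}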

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
        struct xlog             *log,
        struct xfs_log_vec      *lv,
        struct xfs_log_vec      *old_lv,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        /* Account for the new LV being passed in */
        if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
                *diff_len += lv->lv_bytes;
                *diff_iovecs += lv->lv_niovecs;
        }

        /*
         * If there is no old LV, this is the first time we've seen the item in
         * this CIL context and so we need to pin it. If we are replacing the
         * old_lv, then remove the space it accounts for and free it.
         */
        if (!old_lv)
                lv->lv_item->li_ops->iop_pin(lv->lv_item);
        else if (old_lv != lv) {
                ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

                *diff_len -= old_lv->lv_bytes;
                *diff_iovecs -= old_lv->lv_niovecs;
                kmem_free(old_lv);
        }

        /* attach new log vector to log item */
        lv->lv_item->li_lv = lv;

        /*
         * If this is the first time the item is being committed to the
         * CIL, store the sequence number on the log item so we can
         * tell in future commits whether this is the first checkpoint
         * the item is being committed into.
         */
        if (!lv->lv_item->li_seq)
                lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
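
/*
 * Illustrative sketch (hypothetical helper, not used by this file) of the
 * net delta accounting xfs_cil_prepare_item() performs when an item is
 * relogged: the new log vector's size is added and the replaced vector's
 * size is subtracted, so the diff counters end up holding only the net
 * change in CIL space. Simplified - it ignores ordered vectors and the
 * first-insertion (no old vector) case.
 */
static inline void
example_relog_delta(
        int                     new_bytes,
        int                     new_iovecs,
        int                     old_bytes,
        int                     old_iovecs,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        *diff_len += new_bytes - old_bytes;
        *diff_iovecs += new_iovecs - old_iovecs;
}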

/*
 * Format log items into flat buffers.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and formats the vector for the item into the buffer.
 * The buffer is then attached to the log item, which is then inserted into the
 * Committed Item List for tracking until the next checkpoint is written out.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
        struct xlog             *log,
        struct xfs_trans        *tp,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        struct xfs_log_item_desc *lidp;

        /* Bail out if we didn't find a log item. */
        if (list_empty(&tp->t_items)) {
                ASSERT(0);
                return;
        }

        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
                struct xfs_log_item *lip = lidp->lid_item;
                struct xfs_log_vec *lv;
                struct xfs_log_vec *old_lv;
                int     niovecs = 0;
                int     nbytes = 0;
                int     buf_size;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!(lidp->lid_flags & XFS_LID_DIRTY))
                        continue;

                /* get number of vecs and size of data to be stored */
                lip->li_ops->iop_size(lip, &niovecs, &nbytes);

                /* Skip items that do not have any vectors for writing */
                if (!niovecs)
                        continue;

                /*
                 * Ordered items need to be tracked but we do not wish to write
                 * them. We need a logvec to track the object, but we do not
                 * need an iovec or buffer to be allocated for copying data.
                 */
                if (niovecs == XFS_LOG_VEC_ORDERED) {
                        ordered = true;
                        niovecs = 0;
                        nbytes = 0;
                }

                /*
                 * We 64-bit align the length of each iovec so that the start
                 * of the next one is naturally aligned. We'll need to
                 * account for that slack space here. Then round nbytes up
                 * to 64-bit alignment so that the initial buffer alignment is
                 * easy to calculate and verify.
                 */
                nbytes += niovecs * sizeof(uint64_t);
                nbytes = round_up(nbytes, sizeof(uint64_t));

                /* grab the old item if it exists for reservation accounting */
                old_lv = lip->li_lv;

                /*
                 * The data buffer needs to start 64-bit aligned, so round up
                 * that space to ensure we can align it appropriately and not
                 * overrun the buffer.
                 */
                buf_size = nbytes +
                           round_up((sizeof(struct xfs_log_vec) +
                                     niovecs * sizeof(struct xfs_log_iovec)),
                                    sizeof(uint64_t));

                /* compare to existing item size */
                if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv;
                        lv->lv_next = NULL;

                        if (ordered)
                                goto insert;

                        /*
                         * set the item up as though it is a new insertion so
                         * that the space reservation accounting is correct.
                         */
                        *diff_iovecs -= lv->lv_niovecs;
                        *diff_len -= lv->lv_bytes;
                } else {
                        /* allocate new data chunk */
                        lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
                        lv->lv_item = lip;
                        lv->lv_size = buf_size;
                        if (ordered) {
                                /* track as an ordered logvec */
                                ASSERT(lip->li_lv == NULL);
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                                goto insert;
                        }
                        lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
                }

                /* Ensure the lv is set up according to ->iop_size */
                lv->lv_niovecs = niovecs;

                /* The allocated data region lies beyond the iovec region */
                lv->lv_buf_len = 0;
                lv->lv_bytes = 0;
                lv->lv_buf = (char *)lv + buf_size - nbytes;
                ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));

                lip->li_ops->iop_format(lip, lv);
insert:
                ASSERT(lv->lv_buf_len <= nbytes);
                xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
        }
}
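
/*
 * Illustrative sketch (hypothetical helper, not used by this file) of the
 * buffer sizing above. For example, an item reporting niovecs = 2 and
 * nbytes = 100 grows by 2 * 8 bytes of per-iovec padding to 116 and rounds
 * up to 120; the xfs_log_vec header plus the iovec array is then rounded up
 * to an 8 byte boundary so that the data region, placed at the end of the
 * allocation, starts naturally 64-bit aligned.
 */
static inline int
example_lv_buf_size(
        int                     niovecs,
        int                     nbytes)
{
        /* account for 64-bit padding of each iovec's data */
        nbytes += niovecs * sizeof(uint64_t);
        nbytes = round_up(nbytes, sizeof(uint64_t));

        /* header (xfs_log_vec + iovec array), rounded for data alignment */
        return nbytes + round_up(sizeof(struct xfs_log_vec) +
                                 niovecs * sizeof(struct xfs_log_iovec),
                                 sizeof(uint64_t));
}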

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the items. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx = cil->xc_ctx;
        struct xfs_log_item_desc *lidp;
        int                     len = 0;
        int                     diff_iovecs = 0;
        int                     iclog_space;

        ASSERT(tp);

        /*
         * We can do this safely because the context can't checkpoint until we
         * are done so it doesn't matter exactly how we update the CIL.
         */
        xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

        /*
         * Now (re-)position everything modified at the tail of the CIL.
         * We do this here so we only need to take the CIL lock once during
         * the transaction commit.
         */
        spin_lock(&cil->xc_cil_lock);
        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
                struct xfs_log_item *lip = lidp->lid_item;

                /* Skip items which aren't dirty in this transaction. */
                if (!(lidp->lid_flags & XFS_LID_DIRTY))
                        continue;

                /*
                 * Only move the item if it isn't already at the tail. This is
                 * to prevent a transient list_empty() state when reinserting
                 * an item that is already the only item in the CIL.
                 */
                if (!list_is_last(&lip->li_cil, &cil->xc_cil))
                        list_move_tail(&lip->li_cil, &cil->xc_cil);
        }

        /* account for space used by new iovec headers */
        len += diff_iovecs * sizeof(xlog_op_header_t);
        ctx->nvecs += diff_iovecs;

        /* attach the transaction to the CIL if it has any busy extents */
        if (!list_empty(&tp->t_busy))
                list_splice_init(&tp->t_busy, &ctx->busy_extents);

        /*
         * Now transfer enough transaction reservation to the context ticket
         * for the checkpoint. The context ticket is special - the unit
         * reservation has to grow as well as the current reservation as we
         * steal from tickets so we can correctly determine the space used
         * during the transaction commit.
         */
        if (ctx->ticket->t_curr_res == 0) {
                ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
                tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res;
        }

        /* do we need space for more log record headers? */
        iclog_space = log->l_iclog_size - log->l_iclog_hsize;
        if (len > 0 && (ctx->space_used / iclog_space !=
                        (ctx->space_used + len) / iclog_space)) {
                int hdrs;

                hdrs = (len + iclog_space - 1) / iclog_space;
                /* need to take into account split region headers, too */
                hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
                ctx->ticket->t_unit_res += hdrs;
                ctx->ticket->t_curr_res += hdrs;
                tp->t_ticket->t_curr_res -= hdrs;
                ASSERT(tp->t_ticket->t_curr_res >= len);
        }
        tp->t_ticket->t_curr_res -= len;
        ctx->space_used += len;

        spin_unlock(&cil->xc_cil_lock);
}
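
/*
 * Illustrative sketch (hypothetical helper, not used by this file) of the
 * log record header check in xlog_cil_insert_items() above: if adding @len
 * bytes makes the checkpoint cross one or more iclog boundaries, reserve
 * one extra record header per boundary crossed, plus an op header for each
 * split region, e.g. growing from 30k to 40k of 32k usable iclog space
 * crosses one boundary and so reserves one extra header.
 */
static inline int
example_extra_hdr_space(
        struct xlog             *log,
        int                     space_used,
        int                     len)
{
        int     iclog_space = log->l_iclog_size - log->l_iclog_hsize;
        int     hdrs = 0;

        if (len > 0 && (space_used / iclog_space !=
                        (space_used + len) / iclog_space)) {
                hdrs = (len + iclog_space - 1) / iclog_space;
                /* each header also costs an op header for the split region */
                hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
        }
        return hdrs;
}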

static void
xlog_cil_free_logvec(
        struct xfs_log_vec      *log_vector)
{
        struct xfs_log_vec      *lv;

        for (lv = log_vector; lv; ) {
                struct xfs_log_vec *next = lv->lv_next;
                kmem_free(lv);
                lv = next;
        }
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
        void    *args,
        int     abort)
{
        struct xfs_cil_ctx      *ctx = args;
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;

        xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
                                        ctx->start_lsn, abort);

        xfs_extent_busy_sort(&ctx->busy_extents);
        xfs_extent_busy_clear(mp, &ctx->busy_extents,
                             (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

        /*
         * If we are aborting the commit, wake up anyone waiting on the
         * committing list. If we don't, then during a shutdown we can leave
         * processes in xlog_cil_force_lsn() waiting on a sequence commit that
         * will never happen because we aborted it.
         */
        spin_lock(&ctx->cil->xc_push_lock);
        if (abort)
                wake_up_all(&ctx->cil->xc_commit_wait);
        list_del(&ctx->committing);
        spin_unlock(&ctx->cil->xc_push_lock);

        xlog_cil_free_logvec(ctx->lv_chain);

        if (!list_empty(&ctx->busy_extents)) {
                ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

                xfs_discard_extents(mp, &ctx->busy_extents);
                xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
        }

        kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it
 * is a background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
        struct xlog             *log)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_log_vec      *lv;
        struct xfs_cil_ctx      *ctx;
        struct xfs_cil_ctx      *new_ctx;
        struct xlog_in_core     *commit_iclog;
        struct xlog_ticket      *tic;
        int                     num_iovecs;
        int                     error = 0;
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr;
        struct xfs_log_vec      lvhdr = { NULL };
        xfs_lsn_t               commit_lsn;
        xfs_lsn_t               push_seq;

        if (!cil)
                return 0;

        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
        new_ctx->ticket = xlog_cil_ticket_alloc(log);

        down_write(&cil->xc_ctx_lock);
        ctx = cil->xc_ctx;

        spin_lock(&cil->xc_push_lock);
        push_seq = cil->xc_push_seq;
        ASSERT(push_seq <= ctx->sequence);

        /*
         * Check if we've anything to push. If there is nothing, then we don't
         * move on to a new sequence number and so we have to be able to push
         * this sequence again later.
         */
        if (list_empty(&cil->xc_cil)) {
                cil->xc_push_seq = 0;
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /* check for a previously pushed sequence */
        if (push_seq < cil->xc_ctx->sequence) {
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /*
         * We are now going to push this context, so add it to the committing
         * list before we do anything else. This ensures that anyone waiting on
         * this push can easily detect the difference between a "push in
         * progress" and "CIL is empty, nothing to do".
         *
         * IOWs, a wait loop can now check for:
         *      the current sequence not being found on the committing list;
         *      an empty CIL; and
         *      an unchanged sequence number
         * to detect a push that had nothing to do and therefore does not need
         * waiting on. If the CIL is not empty, we get put on the committing
         * list before emptying the CIL and bumping the sequence number. Hence
         * an empty CIL and an unchanged sequence number means we jumped out
         * above after doing nothing.
         *
         * Hence the waiter will either find the commit sequence on the
         * committing list or the sequence number will be unchanged and the CIL
         * still dirty. In that latter case, the push has not yet started, and
         * so the waiter will have to continue trying to check the CIL
         * committing list until it is found. In extreme cases of delay, the
         * sequence may fully commit between the attempts the waiter makes to
         * check the commit sequence.
         */
        list_add(&ctx->committing, &cil->xc_committing);
        spin_unlock(&cil->xc_push_lock);

        /*
         * pull all the log vectors off the items in the CIL, and
         * remove the items from the CIL. We don't need the CIL lock
         * here because it's only needed on the transaction commit
         * side which is currently locked out by the flush lock.
         */
        lv = NULL;
        num_iovecs = 0;
        while (!list_empty(&cil->xc_cil)) {
                struct xfs_log_item *item;

                item = list_first_entry(&cil->xc_cil,
                                        struct xfs_log_item, li_cil);
                list_del_init(&item->li_cil);
                if (!ctx->lv_chain)
                        ctx->lv_chain = item->li_lv;
                else
                        lv->lv_next = item->li_lv;
                lv = item->li_lv;
                item->li_lv = NULL;
                num_iovecs += lv->lv_niovecs;
        }

        /*
         * initialise the new context and attach it to the CIL. Then attach
         * the current context to the CIL committing list so it can be found
         * during log forces to extract the commit lsn of the sequence that
         * needs to be forced.
         */
        INIT_LIST_HEAD(&new_ctx->committing);
        INIT_LIST_HEAD(&new_ctx->busy_extents);
        new_ctx->sequence = ctx->sequence + 1;
        new_ctx->cil = cil;
        cil->xc_ctx = new_ctx;

        /*
         * The switch is now done, so we can drop the context lock and move out
         * of a shared context. We can't just go straight to the commit record,
         * though - we need to synchronise with previous and future commits so
         * that the commit records are correctly ordered in the log to ensure
         * that we process items during log IO completion in the correct order.
         *
         * For example, if we get an EFI in one checkpoint and the EFD in the
         * next (e.g. due to log forces), we do not want the checkpoint with
         * the EFD to be committed before the checkpoint with the EFI. Hence
         * we must strictly order the commit records of the checkpoints so
         * that: a) the checkpoint callbacks are attached to the iclogs in the
         * correct order; and b) the checkpoints are replayed in correct order
         * in log recovery.
         *
         * Hence we need to add this context to the committing context list so
         * that higher sequences will wait for us to write out a commit record
         * before they do.
         *
         * xfs_log_force_lsn requires us to mirror the new sequence into the
         * cil structure atomically with the addition of this sequence to the
         * committing list. This also ensures that we can do unlocked checks
         * against the current sequence in log forces without risking
         * dereferencing a freed context pointer.
         */
        spin_lock(&cil->xc_push_lock);
        cil->xc_current_sequence = new_ctx->sequence;
        spin_unlock(&cil->xc_push_lock);
        up_write(&cil->xc_ctx_lock);

        /*
         * Build a checkpoint transaction header and write it to the log to
         * begin the transaction. We need to account for the space used by the
         * transaction header here as it is not accounted for in xlog_write().
         *
         * The LSN we need to pass to the log items on transaction commit is
         * the LSN reported by the first log vector write. If we use the commit
         * record lsn then we can move the tail beyond the grant write head.
         */
        tic = ctx->ticket;
        thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
        thdr.th_type = XFS_TRANS_CHECKPOINT;
        thdr.th_tid = tic->t_tid;
        thdr.th_num_items = num_iovecs;
        lhdr.i_addr = &thdr;
        lhdr.i_len = sizeof(xfs_trans_header_t);
        lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
        tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

        lvhdr.lv_niovecs = 1;
        lvhdr.lv_iovecp = &lhdr;
        lvhdr.lv_next = ctx->lv_chain;

        error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
        if (error)
                goto out_abort_free_ticket;

        /*
         * now that we've written the checkpoint into the log, strictly
         * order the commit records so replay will get them in the right order.
         */
restart:
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by
                 * the shutdown, but then went back to sleep once already in
                 * the shutdown state.
                 */
                if (XLOG_FORCED_SHUTDOWN(log)) {
                        spin_unlock(&cil->xc_push_lock);
                        goto out_abort_free_ticket;
                }

                /*
                 * Higher sequences will wait for this one so skip them.
                 * Don't wait for our own sequence, either.
                 */
                if (new_ctx->sequence >= ctx->sequence)
                        continue;
                if (!new_ctx->commit_lsn) {
                        /*
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
        }
        spin_unlock(&cil->xc_push_lock);

        /* xfs_log_done always frees the ticket on error. */
        commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, false);
        if (commit_lsn == -1)
                goto out_abort;

        /* attach all the transactions w/ busy extents to iclog */
        ctx->log_cb.cb_func = xlog_cil_committed;
        ctx->log_cb.cb_arg = ctx;
        error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
        if (error)
                goto out_abort;

        /*
         * now the checkpoint commit is complete and we've attached the
         * callbacks to the iclog we can assign the commit LSN to the context
         * and wake up anyone who is waiting for the commit to complete.
         */
        spin_lock(&cil->xc_push_lock);
        ctx->commit_lsn = commit_lsn;
        wake_up_all(&cil->xc_commit_wait);
        spin_unlock(&cil->xc_push_lock);

        /* release the hounds! */
        return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
        up_write(&cil->xc_ctx_lock);
        xfs_log_ticket_put(new_ctx->ticket);
        kmem_free(new_ctx);
        return 0;

out_abort_free_ticket:
        xfs_log_ticket_put(tic);
out_abort:
        xlog_cil_committed(ctx, XFS_LI_ABORTED);
        return -EIO;
}

static void
xlog_cil_push_work(
        struct work_struct      *work)
{
        struct xfs_cil *cil = container_of(work, struct xfs_cil,
                                                        xc_push_work);
        xlog_cil_push(cil->xc_log);
}
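
/*
 * Illustrative sketch (hypothetical helper, not used by this file) of the
 * strict commit record ordering implemented by the restart loop in
 * xlog_cil_push() above: don't write our commit record until every earlier
 * sequence on the committing list has published its commit lsn. Simplified -
 * the real loop also bails out on a forced log shutdown.
 */
static inline void
example_wait_for_earlier_commits(
        struct xfs_cil          *cil,
        struct xfs_cil_ctx      *ctx)
{
        struct xfs_cil_ctx      *pctx;

restart:
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(pctx, &cil->xc_committing, committing) {
                /* skip later sequences and our own sequence */
                if (pctx->sequence >= ctx->sequence)
                        continue;
                if (!pctx->commit_lsn) {
                        /* earlier sequence not committed yet: sleep, retry */
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
        }
        spin_unlock(&cil->xc_push_lock);
}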

/*
 * We need to push the CIL every so often so we don't cache more than we can
 * fit in the log. The limit really is that a checkpoint can't be more than
 * half the log (the current checkpoint is not allowed to overwrite the
 * previous checkpoint), but commit latency and memory usage limit this to a
 * smaller size.
 */
static void
xlog_cil_push_background(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;

        /*
         * The CIL won't be empty because we are called while holding the
         * context lock, so whatever we added to the CIL will still be there.
         */
        ASSERT(!list_empty(&cil->xc_cil));

        /*
         * don't do a background push if we haven't used up all the
         * space available yet.
         */
        if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
                return;

        spin_lock(&cil->xc_push_lock);
        if (cil->xc_push_seq < cil->xc_current_sequence) {
                cil->xc_push_seq = cil->xc_current_sequence;
                queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
        }
        spin_unlock(&cil->xc_push_lock);
}

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
        struct xlog     *log,
        xfs_lsn_t       push_seq)
{
        struct xfs_cil  *cil = log->l_cilp;

        if (!cil)
                return;

        ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

        /* start on any pending background push to minimise wait time on it */
        flush_work(&cil->xc_push_work);

        /*
         * If the CIL is empty or we've already pushed the sequence then
         * there's no work we need to do.
         */
        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
                spin_unlock(&cil->xc_push_lock);
                return;
        }

        cil->xc_push_seq = push_seq;
        queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
        spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;
        bool            empty = false;

        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil))
                empty = true;
        spin_unlock(&cil->xc_push_lock);
        return empty;
}
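
/*
 * Illustrative sketch (hypothetical helper, not used by this file): the
 * background push above triggers purely on the space consumed by the
 * current checkpoint context. The threshold policy itself lives in
 * XLOG_CIL_SPACE_LIMIT() in xfs_log_priv.h; this only shows how the check
 * is phrased against it.
 */
static inline bool
example_over_background_limit(
        struct xlog     *log,
        int             space_used)
{
        return space_used >= XLOG_CIL_SPACE_LIMIT(log);
}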

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Takes the CIL context lock in read mode to lock out background commit and
 * releases it once the items have been inserted and background commits are
 * allowed again.
 */
void
xfs_log_commit_cil(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_lsn_t               *commit_lsn,
        bool                    regrant)
{
        struct xlog             *log = mp->m_log;
        struct xfs_cil          *cil = log->l_cilp;

        /* lock out background commit */
        down_read(&cil->xc_ctx_lock);

        xlog_cil_insert_items(log, tp);

        /* check we didn't blow the reservation */
        if (tp->t_ticket->t_curr_res < 0)
                xlog_print_tic_res(mp, tp->t_ticket);

        tp->t_commit_lsn = cil->xc_ctx->sequence;
        if (commit_lsn)
                *commit_lsn = tp->t_commit_lsn;

        xfs_log_done(mp, tp->t_ticket, NULL, regrant);
        xfs_trans_unreserve_and_mod_sb(tp);

        /*
         * Once all the items of the transaction have been copied to the CIL,
         * the items can be unlocked and freed.
         *
         * This needs to be done before we drop the CIL context lock because we
         * have to update state in the log items and unlock them before they go
         * to disk. If we don't, then the CIL checkpoint can race with us and
         * we can run checkpoint completion before we've updated and unlocked
         * the log items. This affects (at least) processing of stale buffers,
         * inodes and EFIs.
         */
        xfs_trans_free_items(tp, tp->t_commit_lsn, false);

        xlog_cil_push_background(log);

        up_read(&cil->xc_ctx_lock);
}
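
/*
 * Illustrative caller sketch (hypothetical; the real caller is the
 * transaction commit path): note that the "commit lsn" returned by
 * xfs_log_commit_cil() is really the checkpoint sequence number the
 * transaction was committed to, which is exactly what
 * xlog_cil_force_lsn() below takes as its @sequence argument. A
 * synchronous commit can therefore push the CIL with it and then use the
 * returned commit record lsn to flush the iclogs.
 */
static inline xfs_lsn_t
example_sync_commit(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp)
{
        xfs_lsn_t               seq;

        xfs_log_commit_cil(mp, tp, &seq, false);

        /* push the CIL; returns the commit record lsn to force to disk */
        return xlog_cil_force_lsn(mp->m_log, seq);
}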

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
        struct xlog     *log,
        xfs_lsn_t       sequence)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx;
        xfs_lsn_t               commit_lsn = NULLCOMMITLSN;

        ASSERT(sequence <= cil->xc_current_sequence);

        /*
         * check to see if we need to force out the current context.
         * xlog_cil_push() handles racing pushes for the same sequence,
         * so no need to deal with it here.
         */
restart:
        xlog_cil_push_now(log, sequence);

        /*
         * See if we can find a previous sequence still committing.
         * We need to wait for all previous sequence commits to complete
         * before allowing the force of push_seq to go ahead. Hence block
         * on commits for those as well.
         */
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by
                 * the shutdown, but then went back to sleep once already in
                 * the shutdown state.
                 */
                if (XLOG_FORCED_SHUTDOWN(log))
                        goto out_shutdown;
                if (ctx->sequence > sequence)
                        continue;
                if (!ctx->commit_lsn) {
                        /*
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
                if (ctx->sequence != sequence)
                        continue;
                /* found it! */
                commit_lsn = ctx->commit_lsn;
        }

        /*
         * The call to xlog_cil_push_now() executes the push in the background.
         * Hence by the time we have got here our sequence may not have been
         * pushed yet. This is true if the current sequence still matches the
         * push sequence after the above wait loop and the CIL still contains
         * dirty objects. This is guaranteed by the push code first adding the
         * context to the committing list before emptying the CIL.
         *
         * Hence if we don't find the context in the committing list and the
         * current sequence number is unchanged then the CIL contents are
         * significant. If the CIL is empty, it means there was nothing to push
         * and that means there is nothing to wait for. If the CIL is not
         * empty, it means we haven't yet started the push, because if it had
         * started we would have found the context on the committing list.
         */
        if (sequence == cil->xc_current_sequence &&
            !list_empty(&cil->xc_cil)) {
                spin_unlock(&cil->xc_push_lock);
                goto restart;
        }

        spin_unlock(&cil->xc_push_lock);
        return commit_lsn;

        /*
         * We detected a shutdown in progress. We need to trigger the log force
         * to pass through its iclog state machine error handling, even though
         * we are already in a shutdown state. Hence we can't return
         * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
         * LSN is already stable), so we return a zero LSN instead.
         */
out_shutdown:
        spin_unlock(&cil->xc_push_lock);
        return 0;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
        struct xfs_log_item     *lip)
{
        struct xfs_cil_ctx      *ctx;

        if (list_empty(&lip->li_cil))
                return false;

        ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

        /*
         * li_seq is written on the first commit of a log item to record the
         * first checkpoint it is written to. Hence if it is different to the
         * current sequence, we're in a new checkpoint.
         */
        if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
                return false;
        return true;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
        struct xlog     *log)
{
        struct xfs_cil          *cil;
        struct xfs_cil_ctx      *ctx;

        cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
        if (!cil)
                return -ENOMEM;

        ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
        if (!ctx) {
                kmem_free(cil);
                return -ENOMEM;
        }

        INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
        INIT_LIST_HEAD(&cil->xc_cil);
        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_cil_lock);
        spin_lock_init(&cil->xc_push_lock);
        init_rwsem(&cil->xc_ctx_lock);
        init_waitqueue_head(&cil->xc_commit_wait);

        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents);
        ctx->sequence = 1;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
        cil->xc_current_sequence = ctx->sequence;

        cil->xc_log = log;
        log->l_cilp = cil;
        return 0;
}

void
xlog_cil_destroy(
        struct xlog     *log)
{
        if (log->l_cilp->xc_ctx) {
                if (log->l_cilp->xc_ctx->ticket)
                        xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
                kmem_free(log->l_cilp->xc_ctx);
        }

        ASSERT(list_empty(&log->l_cilp->xc_cil));
        kmem_free(log->l_cilp);
}
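
/*
 * CIL lifecycle sketch (illustrative only; the real call sites live in the
 * log mount and unmount paths): the CIL is allocated when the log is set
 * up, gets its first checkpoint ticket once log recovery has found the head
 * and tail of the log, and is torn down when the log goes away.
 */
static inline int
example_cil_lifecycle(
        struct xlog     *log)
{
        int             error;

        error = xlog_cil_init(log);             /* at log allocation */
        if (error)
                return error;

        xlog_cil_init_post_recovery(log);       /* after log recovery */

        /* ... transactions commit through xfs_log_commit_cil() ... */

        xlog_cil_destroy(log);                  /* at log teardown */
        return 0;
}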