xref: /linux/fs/jbd2/transaction.c (revision ac8b422838046ffc26be4874a3cbae0d313f4209)
1 /*
2  * linux/fs/jbd2/transaction.c
3  *
4  * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
5  *
6  * Copyright 1998 Red Hat corp --- All Rights Reserved
7  *
8  * This file is part of the Linux kernel and is made available under
9  * the terms of the GNU General Public License, version 2, or at your
10  * option, any later version, incorporated herein by reference.
11  *
12  * Generic filesystem transaction handling code; part of the ext2fs
13  * journaling system.
14  *
15  * This file manages transactions (compound commits managed by the
16  * journaling code) and handles (individual atomic operations by the
17  * filesystem).
18  */
19 
20 #include <linux/time.h>
21 #include <linux/fs.h>
22 #include <linux/jbd2.h>
23 #include <linux/errno.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/hrtimer.h>
29 
30 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
31 
32 /*
33  * jbd2_get_transaction: obtain a new transaction_t object.
34  *
35  * Simply allocate and initialise a new transaction.  Create it in
36  * RUNNING state and add it to the current journal (which should not
37  * have an existing running transaction: we only make a new transaction
38  * once we have started to commit the old one).
39  *
40  * Preconditions:
41  *	The journal MUST be locked.  We don't perform atomic mallocs on the
42  *	new transaction and we can't block without protecting against other
43  *	processes trying to touch the journal while it is in transition.
44  *
45  */
46 
47 static transaction_t *
48 jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
49 {
50 	transaction->t_journal = journal;
51 	transaction->t_state = T_RUNNING;
52 	transaction->t_start_time = ktime_get();
53 	transaction->t_tid = journal->j_transaction_sequence++;
54 	transaction->t_expires = jiffies + journal->j_commit_interval;
55 	spin_lock_init(&transaction->t_handle_lock);
56 	INIT_LIST_HEAD(&transaction->t_inode_list);
57 	INIT_LIST_HEAD(&transaction->t_private_list);
58 
59 	/* Set up the commit timer for the new transaction. */
60 	journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
61 	add_timer(&journal->j_commit_timer);
62 
63 	J_ASSERT(journal->j_running_transaction == NULL);
64 	journal->j_running_transaction = transaction;
65 	transaction->t_max_wait = 0;
66 	transaction->t_start = jiffies;
67 
68 	return transaction;
69 }
70 
71 /*
72  * Handle management.
73  *
74  * A handle_t is an object which represents a single atomic update to a
75  * filesystem, and which tracks all of the modifications which form part
76  * of that one update.
77  */
78 
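/*
 * An illustrative sketch of handle nesting (names are assumed caller
 * context, not code from this file): a second jbd2_journal_start() on
 * the same task returns the existing handle with h_ref bumped, and the
 * nested nblocks request is ignored; the stops unwind the refcount.
 *
 *	handle_t *h1 = jbd2_journal_start(journal, 4);
 *	handle_t *h2 = jbd2_journal_start(journal, 2);	(h2 == h1, h_ref == 2)
 *	jbd2_journal_stop(h2);				(h_ref back to 1)
 *	jbd2_journal_stop(h1);				(handle really completes)
 */
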
79 /*
80  * start_this_handle: Given a handle, deal with any locking or stalling
81  * needed to make sure that there is enough journal space for the handle
82  * to begin.  Attach the handle to a transaction and set up the
83  * transaction's buffer credits.
84  */
85 
86 static int start_this_handle(journal_t *journal, handle_t *handle)
87 {
88 	transaction_t *transaction;
89 	int needed;
90 	int nblocks = handle->h_buffer_credits;
91 	transaction_t *new_transaction = NULL;
92 	int ret = 0;
93 	unsigned long ts = jiffies;
94 
95 	if (nblocks > journal->j_max_transaction_buffers) {
96 		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
97 		       current->comm, nblocks,
98 		       journal->j_max_transaction_buffers);
99 		ret = -ENOSPC;
100 		goto out;
101 	}
102 
103 alloc_transaction:
104 	if (!journal->j_running_transaction) {
105 		new_transaction = kzalloc(sizeof(*new_transaction),
106 						GFP_NOFS|__GFP_NOFAIL);
107 		if (!new_transaction) {
108 			ret = -ENOMEM;
109 			goto out;
110 		}
111 	}
112 
113 	jbd_debug(3, "New handle %p going live.\n", handle);
114 
115 repeat:
116 
117 	/*
118 	 * We need to hold j_state_lock until t_updates has been incremented,
119 	 * for proper journal barrier handling
120 	 */
121 	spin_lock(&journal->j_state_lock);
122 repeat_locked:
123 	if (is_journal_aborted(journal) ||
124 	    (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
125 		spin_unlock(&journal->j_state_lock);
126 		ret = -EROFS;
127 		goto out;
128 	}
129 
130 	/* Wait on the journal's transaction barrier if necessary */
131 	if (journal->j_barrier_count) {
132 		spin_unlock(&journal->j_state_lock);
133 		wait_event(journal->j_wait_transaction_locked,
134 				journal->j_barrier_count == 0);
135 		goto repeat;
136 	}
137 
138 	if (!journal->j_running_transaction) {
139 		if (!new_transaction) {
140 			spin_unlock(&journal->j_state_lock);
141 			goto alloc_transaction;
142 		}
143 		jbd2_get_transaction(journal, new_transaction);
144 		new_transaction = NULL;
145 	}
146 
147 	transaction = journal->j_running_transaction;
148 
149 	/*
150 	 * If the current transaction is locked down for commit, wait for the
151 	 * lock to be released.
152 	 */
153 	if (transaction->t_state == T_LOCKED) {
154 		DEFINE_WAIT(wait);
155 
156 		prepare_to_wait(&journal->j_wait_transaction_locked,
157 					&wait, TASK_UNINTERRUPTIBLE);
158 		spin_unlock(&journal->j_state_lock);
159 		schedule();
160 		finish_wait(&journal->j_wait_transaction_locked, &wait);
161 		goto repeat;
162 	}
163 
164 	/*
165 	 * If there is not enough space left in the log to write all potential
166 	 * buffers requested by this operation, we need to stall pending a log
167 	 * checkpoint to free some more log space.
168 	 */
169 	spin_lock(&transaction->t_handle_lock);
170 	needed = transaction->t_outstanding_credits + nblocks;
171 
172 	if (needed > journal->j_max_transaction_buffers) {
173 		/*
174 		 * If the current transaction is already too large, then start
175 		 * to commit it: we can then go back and attach this handle to
176 		 * a new transaction.
177 		 */
178 		DEFINE_WAIT(wait);
179 
180 		jbd_debug(2, "Handle %p starting new commit...\n", handle);
181 		spin_unlock(&transaction->t_handle_lock);
182 		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
183 				TASK_UNINTERRUPTIBLE);
184 		__jbd2_log_start_commit(journal, transaction->t_tid);
185 		spin_unlock(&journal->j_state_lock);
186 		schedule();
187 		finish_wait(&journal->j_wait_transaction_locked, &wait);
188 		goto repeat;
189 	}
190 
191 	/*
192 	 * The commit code assumes that it can get enough log space
193 	 * without forcing a checkpoint.  This is *critical* for
194 	 * correctness: a checkpoint of a buffer which is also
195 	 * associated with a committing transaction creates a deadlock,
196 	 * so commit simply cannot force through checkpoints.
197 	 *
198 	 * We must therefore ensure the necessary space in the journal
199 	 * *before* starting to dirty potentially checkpointed buffers
200 	 * in the new transaction.
201 	 *
202 	 * The worst part is, any transaction currently committing can
203 	 * reduce the free space arbitrarily.  Be careful to account for
204 	 * those buffers when checkpointing.
205 	 */
206 
207 	/*
208 	 * @@@ AKPM: This seems rather over-defensive.  We're giving commit
209 	 * a _lot_ of headroom: 1/4 of the journal plus the size of
210 	 * the committing transaction.  Really, we only need to give it
211 	 * committing_transaction->t_outstanding_credits plus "enough" for
212 	 * the log control blocks.
213  * Also, this test is inconsistent with the matching one in
214 	 * jbd2_journal_extend().
215 	 */
216 	if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
217 		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
218 		spin_unlock(&transaction->t_handle_lock);
219 		__jbd2_log_wait_for_space(journal);
220 		goto repeat_locked;
221 	}
222 
223 	/* OK, account for the buffers that this operation expects to
224 	 * use and add the handle to the running transaction. */
225 
226 	if (time_after(transaction->t_start, ts)) {
227 		ts = jbd2_time_diff(ts, transaction->t_start);
228 		if (ts > transaction->t_max_wait)
229 			transaction->t_max_wait = ts;
230 	}
231 
232 	handle->h_transaction = transaction;
233 	transaction->t_outstanding_credits += nblocks;
234 	transaction->t_updates++;
235 	transaction->t_handle_count++;
236 	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
237 		  handle, nblocks, transaction->t_outstanding_credits,
238 		  __jbd2_log_space_left(journal));
239 	spin_unlock(&transaction->t_handle_lock);
240 	spin_unlock(&journal->j_state_lock);
241 
242 	lock_map_acquire(&handle->h_lockdep_map);
243 out:
244 	if (unlikely(new_transaction))		/* It's usually NULL */
245 		kfree(new_transaction);
246 	return ret;
247 }
248 
249 static struct lock_class_key jbd2_handle_key;
250 
251 /* Allocate a new handle.  This should probably be in a slab... */
252 static handle_t *new_handle(int nblocks)
253 {
254 	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
255 	if (!handle)
256 		return NULL;
257 	memset(handle, 0, sizeof(*handle));
258 	handle->h_buffer_credits = nblocks;
259 	handle->h_ref = 1;
260 
261 	lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
262 						&jbd2_handle_key, 0);
263 
264 	return handle;
265 }
266 
267 /**
268  * handle_t *jbd2_journal_start() - Obtain a new handle.
269  * @journal: Journal to start transaction on.
270  * @nblocks: number of block buffers we might modify
271  *
272  * We make sure that the transaction can guarantee at least nblocks of
273  * modified buffers in the log.  We block until the log can guarantee
274  * that much space.
275  *
276  * This function is visible to journal users (like ext3fs), so is not
277  * called with the journal already locked.
278  *
279  * Return a pointer to a newly allocated handle, or NULL on failure
280  */
281 handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
282 {
283 	handle_t *handle = journal_current_handle();
284 	int err;
285 
286 	if (!journal)
287 		return ERR_PTR(-EROFS);
288 
289 	if (handle) {
290 		J_ASSERT(handle->h_transaction->t_journal == journal);
291 		handle->h_ref++;
292 		return handle;
293 	}
294 
295 	handle = new_handle(nblocks);
296 	if (!handle)
297 		return ERR_PTR(-ENOMEM);
298 
299 	current->journal_info = handle;
300 
301 	err = start_this_handle(journal, handle);
302 	if (err < 0) {
303 		jbd2_free_handle(handle);
304 		current->journal_info = NULL;
305 		handle = ERR_PTR(err);
306 		goto out;
307 	}
308 out:
309 	return handle;
310 }
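
/*
 * An illustrative sketch of the usual calling pattern (bh, err and the
 * surrounding error handling are assumed caller context, not code from
 * this file):
 *
 *	handle_t *handle = jbd2_journal_start(journal, 1);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (!err) {
 *		... modify the metadata in bh ...
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *	}
 *	ret = jbd2_journal_stop(handle);
 *	return err ? err : ret;
 */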
311 
312 /**
313  * int jbd2_journal_extend() - extend buffer credits.
314  * @handle:  handle to 'extend'
315  * @nblocks: nr blocks to try to extend by.
316  *
317  * Some transactions, such as large extends and truncates, can be done
318  * atomically all at once or in several stages.  The operation requests
319  * a credit for a number of buffer modifications in advance, but can
320  * extend its credit if it needs more.
321  *
322  * jbd2_journal_extend tries to give the running handle more buffer credits.
323  * It does not guarantee the allocation; this is best-effort only.
324  * The calling process MUST be able to deal cleanly with a failure to
325  * extend here.
326  *
327  * Return 0 on success, non-zero on failure.
328  *
329  * return code < 0 implies an error
330  * return code > 0 implies normal transaction-full status.
331  */
332 int jbd2_journal_extend(handle_t *handle, int nblocks)
333 {
334 	transaction_t *transaction = handle->h_transaction;
335 	journal_t *journal = transaction->t_journal;
336 	int result;
337 	int wanted;
338 
339 	result = -EIO;
340 	if (is_handle_aborted(handle))
341 		goto out;
342 
343 	result = 1;
344 
345 	spin_lock(&journal->j_state_lock);
346 
347 	/* Don't extend a locked-down transaction! */
348 	if (handle->h_transaction->t_state != T_RUNNING) {
349 		jbd_debug(3, "denied handle %p %d blocks: "
350 			  "transaction not running\n", handle, nblocks);
351 		goto error_out;
352 	}
353 
354 	spin_lock(&transaction->t_handle_lock);
355 	wanted = transaction->t_outstanding_credits + nblocks;
356 
357 	if (wanted > journal->j_max_transaction_buffers) {
358 		jbd_debug(3, "denied handle %p %d blocks: "
359 			  "transaction too large\n", handle, nblocks);
360 		goto unlock;
361 	}
362 
363 	if (wanted > __jbd2_log_space_left(journal)) {
364 		jbd_debug(3, "denied handle %p %d blocks: "
365 			  "insufficient log space\n", handle, nblocks);
366 		goto unlock;
367 	}
368 
369 	handle->h_buffer_credits += nblocks;
370 	transaction->t_outstanding_credits += nblocks;
371 	result = 0;
372 
373 	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
374 unlock:
375 	spin_unlock(&transaction->t_handle_lock);
376 error_out:
377 	spin_unlock(&journal->j_state_lock);
378 out:
379 	return result;
380 }
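
/*
 * An illustrative sketch of the usual fallback (see
 * jbd2_journal_restart() below); "needed" is an assumed caller-side
 * credit estimate, not code from this file:
 *
 *	if (jbd2_journal_extend(handle, needed) != 0) {
 *		err = jbd2_journal_restart(handle, needed);
 *		if (err)
 *			return err;
 *	}
 */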
381 
382 
383 /**
384  * int jbd2_journal_restart() - restart a handle.
385  * @handle:  handle to restart
386  * @nblocks: nr credits requested
387  *
388  * Restart a handle for a multi-transaction filesystem
389  * operation.
390  *
391  * If the jbd2_journal_extend() call above fails to grant new buffer credits
392  * to a running handle, a call to jbd2_journal_restart will commit the
393  * handle's transaction so far and reattach the handle to a new
394  * transaction capable of guaranteeing the requested number of
395  * credits.
396  */
397 
398 int jbd2_journal_restart(handle_t *handle, int nblocks)
399 {
400 	transaction_t *transaction = handle->h_transaction;
401 	journal_t *journal = transaction->t_journal;
402 	int ret;
403 
404 	/* If we've had an abort of any type, don't even think about
405 	 * actually doing the restart! */
406 	if (is_handle_aborted(handle))
407 		return 0;
408 
409 	/*
410 	 * First unlink the handle from its current transaction, and start the
411 	 * commit on that.
412 	 */
413 	J_ASSERT(transaction->t_updates > 0);
414 	J_ASSERT(journal_current_handle() == handle);
415 
416 	spin_lock(&journal->j_state_lock);
417 	spin_lock(&transaction->t_handle_lock);
418 	transaction->t_outstanding_credits -= handle->h_buffer_credits;
419 	transaction->t_updates--;
420 
421 	if (!transaction->t_updates)
422 		wake_up(&journal->j_wait_updates);
423 	spin_unlock(&transaction->t_handle_lock);
424 
425 	jbd_debug(2, "restarting handle %p\n", handle);
426 	__jbd2_log_start_commit(journal, transaction->t_tid);
427 	spin_unlock(&journal->j_state_lock);
428 
429 	lock_map_release(&handle->h_lockdep_map);
430 	handle->h_buffer_credits = nblocks;
431 	ret = start_this_handle(journal, handle);
432 	return ret;
433 }
434 
435 
436 /**
437  * void jbd2_journal_lock_updates() - establish a transaction barrier.
438  * @journal:  Journal to establish a barrier on.
439  *
440  * This locks out any further updates from being started, and blocks
441  * until all existing updates have completed, returning only once the
442  * journal is in a quiescent state with no updates running.
443  *
444  * The journal lock should not be held on entry.
445  */
446 void jbd2_journal_lock_updates(journal_t *journal)
447 {
448 	DEFINE_WAIT(wait);
449 
450 	spin_lock(&journal->j_state_lock);
451 	++journal->j_barrier_count;
452 
453 	/* Wait until there are no running updates */
454 	while (1) {
455 		transaction_t *transaction = journal->j_running_transaction;
456 
457 		if (!transaction)
458 			break;
459 
460 		spin_lock(&transaction->t_handle_lock);
461 		if (!transaction->t_updates) {
462 			spin_unlock(&transaction->t_handle_lock);
463 			break;
464 		}
465 		prepare_to_wait(&journal->j_wait_updates, &wait,
466 				TASK_UNINTERRUPTIBLE);
467 		spin_unlock(&transaction->t_handle_lock);
468 		spin_unlock(&journal->j_state_lock);
469 		schedule();
470 		finish_wait(&journal->j_wait_updates, &wait);
471 		spin_lock(&journal->j_state_lock);
472 	}
473 	spin_unlock(&journal->j_state_lock);
474 
475 	/*
476 	 * We have now established a barrier against other normal updates, but
477 	 * we also need to barrier against other jbd2_journal_lock_updates() calls
478 	 * to make sure that we serialise special journal-locked operations
479 	 * too.
480 	 */
481 	mutex_lock(&journal->j_barrier);
482 }
483 
484 /**
485  * void jbd2_journal_unlock_updates() - release barrier
486  * @journal:  Journal to release the barrier on.
487  *
488  * Release a transaction barrier obtained with jbd2_journal_lock_updates().
489  *
490  * Should be called without the journal lock held.
491  */
492 void jbd2_journal_unlock_updates(journal_t *journal)
493 {
494 	J_ASSERT(journal->j_barrier_count != 0);
495 
496 	mutex_unlock(&journal->j_barrier);
497 	spin_lock(&journal->j_state_lock);
498 	--journal->j_barrier_count;
499 	spin_unlock(&journal->j_state_lock);
500 	wake_up(&journal->j_wait_transaction_locked);
501 }
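
/*
 * An illustrative sketch of the barrier pair, e.g. while quiescing the
 * journal for a journal-wide operation such as a filesystem freeze:
 *
 *	jbd2_journal_lock_updates(journal);
 *	... no new handles can start and all updates have completed ...
 *	jbd2_journal_unlock_updates(journal);
 */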
502 
503 static void warn_dirty_buffer(struct buffer_head *bh)
504 {
505 	char b[BDEVNAME_SIZE];
506 
507 	printk(KERN_WARNING
508 	       "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
509 	       "There's a risk of filesystem corruption in case of system "
510 	       "crash.\n",
511 	       bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
512 }
513 
514 /*
515  * If the buffer is already part of the current transaction, then there
516  * is nothing we need to do.  If it is already part of a prior
517  * transaction which we are still committing to disk, then we need to
518  * make sure that we do not overwrite the old copy: we do copy-out to
519  * preserve the copy going to disk.  We also account the buffer against
520  * the handle's metadata buffer credits (unless the buffer is already
521  * part of the transaction, that is).
522  *
523  */
524 static int
525 do_get_write_access(handle_t *handle, struct journal_head *jh,
526 			int force_copy)
527 {
528 	struct buffer_head *bh;
529 	transaction_t *transaction;
530 	journal_t *journal;
531 	int error;
532 	char *frozen_buffer = NULL;
533 	int need_copy = 0;
534 
535 	if (is_handle_aborted(handle))
536 		return -EROFS;
537 
538 	transaction = handle->h_transaction;
539 	journal = transaction->t_journal;
540 
541 	jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
542 
543 	JBUFFER_TRACE(jh, "entry");
544 repeat:
545 	bh = jh2bh(jh);
546 
547 	/* @@@ Need to check for errors here at some point. */
548 
549 	lock_buffer(bh);
550 	jbd_lock_bh_state(bh);
551 
552 	/* We now hold the buffer lock so it is safe to query the buffer
553 	 * state.  Is the buffer dirty?
554 	 *
555 	 * If so, there are two possibilities.  The buffer may be
556 	 * non-journaled, and undergoing a quite legitimate writeback.
557 	 * Otherwise, it is journaled, and we don't expect dirty buffers
558 	 * in that state (the buffers should be marked JBD_Dirty
559 	 * instead.)  So either the IO is being done under our own
560 	 * control and this is a bug, or it's a third party IO such as
561 	 * dump(8) (which may leave the buffer scheduled for read ---
562 	 * ie. locked but not dirty) or tune2fs (which may actually have
563 	 * the buffer dirtied, ugh.)  */
564 
565 	if (buffer_dirty(bh)) {
566 		/*
567 		 * First question: is this buffer already part of the current
568 		 * transaction or the existing committing transaction?
569 		 */
570 		if (jh->b_transaction) {
571 			J_ASSERT_JH(jh,
572 				jh->b_transaction == transaction ||
573 				jh->b_transaction ==
574 					journal->j_committing_transaction);
575 			if (jh->b_next_transaction)
576 				J_ASSERT_JH(jh, jh->b_next_transaction ==
577 							transaction);
578 			warn_dirty_buffer(bh);
579 		}
580 		/*
581 		 * In any case we need to clean the dirty flag and we must
582 		 * do it under the buffer lock to be sure we don't race
583 		 * with running write-out.
584 		 */
585 		JBUFFER_TRACE(jh, "Journalling dirty buffer");
586 		clear_buffer_dirty(bh);
587 		set_buffer_jbddirty(bh);
588 	}
589 
590 	unlock_buffer(bh);
591 
592 	error = -EROFS;
593 	if (is_handle_aborted(handle)) {
594 		jbd_unlock_bh_state(bh);
595 		goto out;
596 	}
597 	error = 0;
598 
599 	/*
600 	 * The buffer is already part of this transaction if b_transaction or
601 	 * b_next_transaction points to it
602 	 */
603 	if (jh->b_transaction == transaction ||
604 	    jh->b_next_transaction == transaction)
605 		goto done;
606 
607 	/*
608 	 * this is the first time this transaction is touching this buffer,
609 	 * reset the modified flag
610 	 */
611 	jh->b_modified = 0;
612 
613 	/*
614 	 * If there is already a copy-out version of this buffer, then we don't
615 	 * need to make another one
616 	 */
617 	if (jh->b_frozen_data) {
618 		JBUFFER_TRACE(jh, "has frozen data");
619 		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
620 		jh->b_next_transaction = transaction;
621 		goto done;
622 	}
623 
624 	/* Is there data here we need to preserve? */
625 
626 	if (jh->b_transaction && jh->b_transaction != transaction) {
627 		JBUFFER_TRACE(jh, "owned by older transaction");
628 		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
629 		J_ASSERT_JH(jh, jh->b_transaction ==
630 					journal->j_committing_transaction);
631 
632 		/* There is one case we have to be very careful about.
633 		 * If the committing transaction is currently writing
634 		 * this buffer out to disk and has NOT made a copy-out,
635 		 * then we cannot modify the buffer contents at all
636 		 * right now.  The essence of copy-out is that it is the
637 		 * extra copy, not the primary copy, which gets
638 		 * journaled.  If the primary copy is already going to
639 		 * disk then we cannot do copy-out here. */
640 
641 		if (jh->b_jlist == BJ_Shadow) {
642 			DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
643 			wait_queue_head_t *wqh;
644 
645 			wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);
646 
647 			JBUFFER_TRACE(jh, "on shadow: sleep");
648 			jbd_unlock_bh_state(bh);
649 			/* commit wakes up all shadow buffers after IO */
650 			for ( ; ; ) {
651 				prepare_to_wait(wqh, &wait.wait,
652 						TASK_UNINTERRUPTIBLE);
653 				if (jh->b_jlist != BJ_Shadow)
654 					break;
655 				schedule();
656 			}
657 			finish_wait(wqh, &wait.wait);
658 			goto repeat;
659 		}
660 
661 		/* Only do the copy if the currently-owning transaction
662 		 * still needs it.  If it is on the Forget list, the
663 		 * committing transaction is past that stage.  The
664 		 * buffer had better remain locked during the kmalloc,
665 		 * but that should be true --- we hold the journal lock
666 		 * still and the buffer is already on the BUF_JOURNAL
667 		 * list so won't be flushed.
668 		 *
669 		 * Subtle point, though: if this is a get_undo_access,
670 		 * then we will be relying on the frozen_data to contain
671 		 * the new value of the committed_data record after the
672 		 * transaction, so we HAVE to force the frozen_data copy
673 		 * in that case. */
674 
675 		if (jh->b_jlist != BJ_Forget || force_copy) {
676 			JBUFFER_TRACE(jh, "generate frozen data");
677 			if (!frozen_buffer) {
678 				JBUFFER_TRACE(jh, "allocate memory for buffer");
679 				jbd_unlock_bh_state(bh);
680 				frozen_buffer =
681 					jbd2_alloc(jh2bh(jh)->b_size,
682 							 GFP_NOFS);
683 				if (!frozen_buffer) {
684 					printk(KERN_EMERG
685 					       "%s: OOM for frozen_buffer\n",
686 					       __func__);
687 					JBUFFER_TRACE(jh, "oom!");
688 					error = -ENOMEM;
689 					jbd_lock_bh_state(bh);
690 					goto done;
691 				}
692 				goto repeat;
693 			}
694 			jh->b_frozen_data = frozen_buffer;
695 			frozen_buffer = NULL;
696 			need_copy = 1;
697 		}
698 		jh->b_next_transaction = transaction;
699 	}
700 
701 
702 	/*
703 	 * Finally, if the buffer is not journaled right now, we need to make
704 	 * sure it doesn't get written to disk before the caller actually
705 	 * commits the new data
706 	 */
707 	if (!jh->b_transaction) {
708 		JBUFFER_TRACE(jh, "no transaction");
709 		J_ASSERT_JH(jh, !jh->b_next_transaction);
710 		jh->b_transaction = transaction;
711 		JBUFFER_TRACE(jh, "file as BJ_Reserved");
712 		spin_lock(&journal->j_list_lock);
713 		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
714 		spin_unlock(&journal->j_list_lock);
715 	}
716 
717 done:
718 	if (need_copy) {
719 		struct page *page;
720 		int offset;
721 		char *source;
722 
723 		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
724 			    "Possible IO failure.\n");
725 		page = jh2bh(jh)->b_page;
726 		offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
727 		source = kmap_atomic(page, KM_USER0);
728 		/* Fire data frozen trigger just before we copy the data */
729 		jbd2_buffer_frozen_trigger(jh, source + offset,
730 					   jh->b_triggers);
731 		memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
732 		kunmap_atomic(source, KM_USER0);
733 
734 		/*
735 		 * Now that the frozen data is saved off, we need to store
736 		 * any matching triggers.
737 		 */
738 		jh->b_frozen_triggers = jh->b_triggers;
739 	}
740 	jbd_unlock_bh_state(bh);
741 
742 	/*
743 	 * If we are about to journal a buffer, then any revoke pending on it is
744 	 * no longer valid
745 	 */
746 	jbd2_journal_cancel_revoke(handle, jh);
747 
748 out:
749 	if (unlikely(frozen_buffer))	/* It's usually NULL */
750 		jbd2_free(frozen_buffer, bh->b_size);
751 
752 	JBUFFER_TRACE(jh, "exit");
753 	return error;
754 }
755 
756 /**
757  * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
758  * @handle: transaction to add buffer modifications to
759  * @bh:     bh to be used for metadata writes
761  *
762  * Returns an error code or 0 on success.
766  */
767 
768 int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
769 {
770 	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
771 	int rc;
772 
773 	/* We do not want to get caught playing with fields which the
774 	 * log thread also manipulates.  Make sure that the buffer
775 	 * completes any outstanding IO before proceeding. */
776 	rc = do_get_write_access(handle, jh, 0);
777 	jbd2_journal_put_journal_head(jh);
778 	return rc;
779 }
780 
781 
782 /*
783  * When the user wants to journal a newly created buffer_head
784  * (ie. getblk() returned a new buffer and we are going to populate it
785  * manually rather than reading off disk), then we need to keep the
786  * buffer_head locked until it has been completely filled with new
787  * data.  In this case, we should be able to make the assertion that
788  * the bh is not already part of an existing transaction.
789  *
790  * The buffer should already be locked by the caller by this point.
791  * There is no lock ranking violation: it was a newly created,
792  * unlocked buffer beforehand. */
793 
794 /**
795  * int jbd2_journal_get_create_access () - notify intent to use newly created bh
796  * @handle: transaction to add the new buffer to
797  * @bh: new buffer.
798  *
799  * Call this if you create a new bh.
800  */
801 int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
802 {
803 	transaction_t *transaction = handle->h_transaction;
804 	journal_t *journal = transaction->t_journal;
805 	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
806 	int err;
807 
808 	jbd_debug(5, "journal_head %p\n", jh);
809 	err = -EROFS;
810 	if (is_handle_aborted(handle))
811 		goto out;
812 	err = 0;
813 
814 	JBUFFER_TRACE(jh, "entry");
815 	/*
816 	 * The buffer may already belong to this transaction due to pre-zeroing
817 	 * in the filesystem's new_block code.  It may also be on the previous,
818 	 * committing transaction's lists, but it HAS to be in Forget state in
819 	 * that case: the transaction must have deleted the buffer for it to be
820 	 * reused here.
821 	 */
822 	jbd_lock_bh_state(bh);
823 	spin_lock(&journal->j_list_lock);
824 	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
825 		jh->b_transaction == NULL ||
826 		(jh->b_transaction == journal->j_committing_transaction &&
827 			  jh->b_jlist == BJ_Forget)));
828 
829 	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
830 	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
831 
832 	if (jh->b_transaction == NULL) {
833 		/*
834 		 * Previous jbd2_journal_forget() could have left the buffer
835 		 * with jbddirty bit set because it was being committed. When
836 		 * the commit finished, we've filed the buffer for
837 		 * checkpointing and marked it dirty. Now we are reallocating
838 		 * the buffer so the transaction freeing it must have
839 		 * committed and so it's safe to clear the dirty bit.
840 		 */
841 		clear_buffer_dirty(jh2bh(jh));
842 		jh->b_transaction = transaction;
843 
844 		/* first access by this transaction */
845 		jh->b_modified = 0;
846 
847 		JBUFFER_TRACE(jh, "file as BJ_Reserved");
848 		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
849 	} else if (jh->b_transaction == journal->j_committing_transaction) {
850 		/* first access by this transaction */
851 		jh->b_modified = 0;
852 
853 		JBUFFER_TRACE(jh, "set next transaction");
854 		jh->b_next_transaction = transaction;
855 	}
856 	spin_unlock(&journal->j_list_lock);
857 	jbd_unlock_bh_state(bh);
858 
859 	/*
860 	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
861 	 * blocks which contain freed but then revoked metadata.  We need
862 	 * to cancel the revoke in case we end up freeing it yet again
863  * and then reallocating it as data - this would cause a second revoke,
864 	 * which hits an assertion error.
865 	 */
866 	JBUFFER_TRACE(jh, "cancelling revoke");
867 	jbd2_journal_cancel_revoke(handle, jh);
868 	jbd2_journal_put_journal_head(jh);
869 out:
870 	return err;
871 }
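
/*
 * An illustrative sketch of journaling a newly allocated block; "sb"
 * and "blocknr" are assumed caller context, not code from this file:
 *
 *	bh = sb_getblk(sb, blocknr);
 *	lock_buffer(bh);
 *	err = jbd2_journal_get_create_access(handle, bh);
 *	if (!err) {
 *		memset(bh->b_data, 0, sb->s_blocksize);
 *		set_buffer_uptodate(bh);
 *	}
 *	unlock_buffer(bh);
 *	if (!err)
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 */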
872 
873 /**
874  * int jbd2_journal_get_undo_access() -  Notify intent to modify metadata with
875  *     non-rewindable consequences
876  * @handle: transaction
877  * @bh: buffer to undo
879  *
880  * Sometimes there is a need to distinguish between metadata which has
881  * been committed to disk and that which has not.  The ext3fs code uses
882  * this for freeing and allocating space, we have to make sure that we
883  * do not reuse freed space until the deallocation has been committed,
884  * since if we overwrote that space we would make the delete
885  * un-rewindable in case of a crash.
886  *
887  * To deal with that, jbd2_journal_get_undo_access requests write access to a
888  * buffer for parts of non-rewindable operations such as delete
889  * operations on the bitmaps.  The journaling code must keep a copy of
890  * the buffer's contents prior to the undo_access call until such time
891  * as we know that the buffer has definitely been committed to disk.
892  *
893  * We never need to know which transaction the committed data is part
894  * of, buffers touched here are guaranteed to be dirtied later and so
895  * will be committed to a new transaction in due course, at which point
896  * we can discard the old committed data pointer.
897  *
898  * Returns error number or 0 on success.
899  */
900 int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
901 {
902 	int err;
903 	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
904 	char *committed_data = NULL;
905 
906 	JBUFFER_TRACE(jh, "entry");
907 
908 	/*
909 	 * Do this first --- it can drop the journal lock, so we want to
910 	 * make sure that obtaining the committed_data is done
911 	 * atomically wrt. completion of any outstanding commits.
912 	 */
913 	err = do_get_write_access(handle, jh, 1);
914 	if (err)
915 		goto out;
916 
917 repeat:
918 	if (!jh->b_committed_data) {
919 		committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
920 		if (!committed_data) {
921 			printk(KERN_EMERG "%s: No memory for committed data\n",
922 				__func__);
923 			err = -ENOMEM;
924 			goto out;
925 		}
926 	}
927 
928 	jbd_lock_bh_state(bh);
929 	if (!jh->b_committed_data) {
930 		/* Copy out the current buffer contents into the
931 		 * preserved, committed copy. */
932 		JBUFFER_TRACE(jh, "generate b_committed data");
933 		if (!committed_data) {
934 			jbd_unlock_bh_state(bh);
935 			goto repeat;
936 		}
937 
938 		jh->b_committed_data = committed_data;
939 		committed_data = NULL;
940 		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
941 	}
942 	jbd_unlock_bh_state(bh);
943 out:
944 	jbd2_journal_put_journal_head(jh);
945 	if (unlikely(committed_data))
946 		jbd2_free(committed_data, bh->b_size);
947 	return err;
948 }
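
/*
 * An illustrative sketch of the bitmap-freeing case described above;
 * "bitmap_bh" and "bit" are assumed caller context, not code from this
 * file:
 *
 *	err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *	if (err)
 *		return err;
 *	... clear bit "bit" in bitmap_bh->b_data ...
 *	err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 */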
949 
950 /**
951  * void jbd2_journal_set_triggers() - Add triggers for commit writeout
952  * @bh: buffer to trigger on
953  * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
954  *
955  * Set any triggers on this journal_head.  This is always safe, because
956  * triggers for a committing buffer will be saved off, and triggers for
957  * a running transaction will match the buffer in that transaction.
958  *
959  * Call with NULL to clear the triggers.
960  */
961 void jbd2_journal_set_triggers(struct buffer_head *bh,
962 			       struct jbd2_buffer_trigger_type *type)
963 {
964 	struct journal_head *jh = bh2jh(bh);
965 
966 	jh->b_triggers = type;
967 }
968 
969 void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
970 				struct jbd2_buffer_trigger_type *triggers)
971 {
972 	struct buffer_head *bh = jh2bh(jh);
973 
974 	if (!triggers || !triggers->t_frozen)
975 		return;
976 
977 	triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
978 }
979 
980 void jbd2_buffer_abort_trigger(struct journal_head *jh,
981 			       struct jbd2_buffer_trigger_type *triggers)
982 {
983 	if (!triggers || !triggers->t_abort)
984 		return;
985 
986 	triggers->t_abort(triggers, jh2bh(jh));
987 }
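
/*
 * An illustrative sketch of a client-supplied trigger; "my_frozen" is
 * a hypothetical callback (not code from this file) that recomputes an
 * on-disk checksum just before the frozen copy is written:
 *
 *	static void my_frozen(struct jbd2_buffer_trigger_type *type,
 *			      struct buffer_head *bh, void *data,
 *			      size_t size)
 *	{
 *		... recompute the checksum over data[0..size) ...
 *	}
 *
 *	static struct jbd2_buffer_trigger_type my_triggers = {
 *		.t_frozen = my_frozen,
 *	};
 *
 *	jbd2_journal_set_triggers(bh, &my_triggers);
 */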
988 
989 
990 
991 /**
992  * int jbd2_journal_dirty_metadata() -  mark a buffer as containing dirty metadata
993  * @handle: transaction to add buffer to.
994  * @bh: buffer to mark
995  *
996  * mark dirty metadata which needs to be journaled as part of the current
997  * transaction.
998  *
999  * The buffer is placed on the transaction's metadata list and is marked
1000  * as belonging to the transaction.
1001  *
1002  * Returns error number or 0 on success.
1003  *
1004  * Special care needs to be taken if the buffer already belongs to the
1005  * current committing transaction (in which case we should have frozen
1006  * data present for that commit).  In that case, we don't relink the
1007  * buffer: that only gets done when the old transaction finally
1008  * completes its commit.
1009  */
1010 int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1011 {
1012 	transaction_t *transaction = handle->h_transaction;
1013 	journal_t *journal = transaction->t_journal;
1014 	struct journal_head *jh = bh2jh(bh);
1015 
1016 	jbd_debug(5, "journal_head %p\n", jh);
1017 	JBUFFER_TRACE(jh, "entry");
1018 	if (is_handle_aborted(handle))
1019 		goto out;
1020 
1021 	jbd_lock_bh_state(bh);
1022 
1023 	if (jh->b_modified == 0) {
1024 		/*
1025 		 * This buffer has been modified and is becoming
1026 		 * part of the transaction. This needs to be done
1027 		 * once per transaction -bzzz
1028 		 */
1029 		jh->b_modified = 1;
1030 		J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
1031 		handle->h_buffer_credits--;
1032 	}
1033 
1034 	/*
1035 	 * fastpath, to avoid expensive locking.  If this buffer is already
1036 	 * on the running transaction's metadata list there is nothing to do.
1037 	 * Nobody can take it off again because there is a handle open.
1038 	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
1039 	 * result in this test being false, so we go in and take the locks.
1040 	 */
1041 	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
1042 		JBUFFER_TRACE(jh, "fastpath");
1043 		J_ASSERT_JH(jh, jh->b_transaction ==
1044 					journal->j_running_transaction);
1045 		goto out_unlock_bh;
1046 	}
1047 
1048 	set_buffer_jbddirty(bh);
1049 
1050 	/*
1051 	 * Metadata already on the current transaction list doesn't
1052 	 * need to be filed.  Metadata on another transaction's list must
1053 	 * be committing, and will be refiled once the commit completes:
1054 	 * leave it alone for now.
1055 	 */
1056 	if (jh->b_transaction != transaction) {
1057 		JBUFFER_TRACE(jh, "already on other transaction");
1058 		J_ASSERT_JH(jh, jh->b_transaction ==
1059 					journal->j_committing_transaction);
1060 		J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
1061 		/* And this case is illegal: we can't reuse another
1062 		 * transaction's data buffer, ever. */
1063 		goto out_unlock_bh;
1064 	}
1065 
1066 	/* That test should have eliminated the following case: */
1067 	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);
1068 
1069 	JBUFFER_TRACE(jh, "file as BJ_Metadata");
1070 	spin_lock(&journal->j_list_lock);
1071 	__jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
1072 	spin_unlock(&journal->j_list_lock);
1073 out_unlock_bh:
1074 	jbd_unlock_bh_state(bh);
1075 out:
1076 	JBUFFER_TRACE(jh, "exit");
1077 	return 0;
1078 }
1079 
1080 /*
1081  * jbd2_journal_release_buffer: undo a get_write_access without any buffer
1082  * updates, if the update decided in the end that it didn't need access.
1083  *
1084  */
1085 void
1086 jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
1087 {
1088 	BUFFER_TRACE(bh, "entry");
1089 }
1090 
1091 /**
1092  * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
1093  * @handle: transaction handle
1094  * @bh:     bh to 'forget'
1095  *
1096  * We can only do the bforget if there are no commits pending against the
1097  * buffer.  If the buffer is dirty in the current running transaction we
1098  * can safely unlink it.
1099  *
1100  * bh may not be a journalled buffer at all - it may be a non-JBD
1101  * buffer which came off the hashtable.  Check for this.
1102  *
1103  * Decrements bh->b_count by one.
1104  *
1105  * Allow this call even if the handle has aborted --- it may be part of
1106  * the caller's cleanup after an abort.
1107  */
1108 int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
1109 {
1110 	transaction_t *transaction = handle->h_transaction;
1111 	journal_t *journal = transaction->t_journal;
1112 	struct journal_head *jh;
1113 	int drop_reserve = 0;
1114 	int err = 0;
1115 	int was_modified = 0;
1116 
1117 	BUFFER_TRACE(bh, "entry");
1118 
1119 	jbd_lock_bh_state(bh);
1120 	spin_lock(&journal->j_list_lock);
1121 
1122 	if (!buffer_jbd(bh))
1123 		goto not_jbd;
1124 	jh = bh2jh(bh);
1125 
1126 	/* Critical error: attempting to delete a bitmap buffer, maybe?
1127 	 * Don't do any jbd operations, and return an error. */
1128 	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
1129 			 "inconsistent data on disk")) {
1130 		err = -EIO;
1131 		goto not_jbd;
1132 	}
1133 
1134 	/* keep track of whether or not this transaction modified us */
1135 	was_modified = jh->b_modified;
1136 
1137 	/*
1138 	 * The buffer's going from the transaction, we must drop
1139 	 * all references -bzzz
1140 	 */
1141 	jh->b_modified = 0;
1142 
1143 	if (jh->b_transaction == handle->h_transaction) {
1144 		J_ASSERT_JH(jh, !jh->b_frozen_data);
1145 
1146 		/* If we are forgetting a buffer which is already part
1147 		 * of this transaction, then we can just drop it from
1148 		 * the transaction immediately. */
1149 		clear_buffer_dirty(bh);
1150 		clear_buffer_jbddirty(bh);
1151 
1152 		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
1153 
1154 		/*
1155 		 * we only want to drop a reference if this transaction
1156 		 * modified the buffer
1157 		 */
1158 		if (was_modified)
1159 			drop_reserve = 1;
1160 
1161 		/*
1162 		 * We are no longer going to journal this buffer.
1163 		 * However, the commit of this transaction is still
1164 		 * important to the buffer: the delete that we are now
1165 		 * processing might obsolete an old log entry, so by
1166 		 * committing, we can satisfy the buffer's checkpoint.
1167 		 *
1168 		 * So, if we have a checkpoint on the buffer, we should
1169 		 * now refile the buffer on our BJ_Forget list so that
1170 		 * we know to remove the checkpoint after we commit.
1171 		 */
1172 
1173 		if (jh->b_cp_transaction) {
1174 			__jbd2_journal_temp_unlink_buffer(jh);
1175 			__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1176 		} else {
1177 			__jbd2_journal_unfile_buffer(jh);
1178 			jbd2_journal_remove_journal_head(bh);
1179 			__brelse(bh);
1180 			if (!buffer_jbd(bh)) {
1181 				spin_unlock(&journal->j_list_lock);
1182 				jbd_unlock_bh_state(bh);
1183 				__bforget(bh);
1184 				goto drop;
1185 			}
1186 		}
1187 	} else if (jh->b_transaction) {
1188 		J_ASSERT_JH(jh, (jh->b_transaction ==
1189 				 journal->j_committing_transaction));
1190 		/* However, if the buffer is still owned by a prior
1191 		 * (committing) transaction, we can't drop it yet... */
1192 		JBUFFER_TRACE(jh, "belongs to older transaction");
1193 		/* ... but we CAN drop it from the new transaction if we
1194 		 * have also modified it since the original commit. */
1195 
1196 		if (jh->b_next_transaction) {
1197 			J_ASSERT(jh->b_next_transaction == transaction);
1198 			jh->b_next_transaction = NULL;
1199 
1200 			/*
1201 			 * only drop a reference if this transaction modified
1202 			 * the buffer
1203 			 */
1204 			if (was_modified)
1205 				drop_reserve = 1;
1206 		}
1207 	}
1208 
1209 not_jbd:
1210 	spin_unlock(&journal->j_list_lock);
1211 	jbd_unlock_bh_state(bh);
1212 	__brelse(bh);
1213 drop:
1214 	if (drop_reserve) {
1215 		/* no need to reserve log space for this block -bzzz */
1216 		handle->h_buffer_credits++;
1217 	}
1218 	return err;
1219 }
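
/*
 * An illustrative sketch: a filesystem freeing a journaled metadata
 * block hands its bh reference to jbd2_journal_forget() rather than
 * calling bforget() itself:
 *
 *	BUFFER_TRACE(bh, "call jbd2_journal_forget");
 *	err = jbd2_journal_forget(handle, bh);
 *	(the bh reference is consumed; do not brelse() it again)
 */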
1220 
1221 /**
1222  * int jbd2_journal_stop() - complete a transaction
1223  * @handle: transaction to complete.
1224  *
1225  * All done for a particular handle.
1226  *
1227  * There is not much action needed here.  We just return any remaining
1228  * buffer credits to the transaction and remove the handle.  The only
1229  * complication is that we need to start a commit operation if the
1230  * filesystem is marked for synchronous update.
1231  *
1232  * jbd2_journal_stop itself will not usually return an error, but it may
1233  * do so in unusual circumstances.  In particular, expect it to
1234  * return -EIO if a jbd2_journal_abort has been executed since the
1235  * transaction began.
1236  */
1237 int jbd2_journal_stop(handle_t *handle)
1238 {
1239 	transaction_t *transaction = handle->h_transaction;
1240 	journal_t *journal = transaction->t_journal;
1241 	int err;
1242 	pid_t pid;
1243 
1244 	J_ASSERT(journal_current_handle() == handle);
1245 
1246 	if (is_handle_aborted(handle))
1247 		err = -EIO;
1248 	else {
1249 		J_ASSERT(transaction->t_updates > 0);
1250 		err = 0;
1251 	}
1252 
1253 	if (--handle->h_ref > 0) {
1254 		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
1255 			  handle->h_ref);
1256 		return err;
1257 	}
1258 
1259 	jbd_debug(4, "Handle %p going down\n", handle);
1260 
1261 	/*
1262 	 * Implement synchronous transaction batching.  If the handle
1263 	 * was synchronous, don't force a commit immediately.  Let's
1264 	 * yield and let another thread piggyback onto this
1265 	 * transaction.  Keep doing that while new threads continue to
1266 	 * arrive.  It doesn't cost much - we're about to run a commit
1267 	 * and sleep on IO anyway.  Speeds up many-threaded, many-dir
1268 	 * operations by 30x or more...
1269 	 *
1270 	 * We try and optimize the sleep time against what the
1271 	 * underlying disk can do, instead of having a static sleep
1272 	 * time.  This is useful for the case where our storage is so
1273 	 * fast that it is more optimal to go ahead and force a flush
1274 	 * and wait for the transaction to be committed than it is to
1275 	 * wait for an arbitrary amount of time for new writers to
1276 	 * join the transaction.  We achieve this by measuring how
1277 	 * long it takes to commit a transaction, and compare it with
1278 	 * how long this transaction has been running, and if run time
1279 	 * < commit time then we sleep for the delta and commit.  This
1280 	 * greatly helps super fast disks that would see slowdowns as
1281 	 * more threads started doing fsyncs.
1282 	 *
1283 	 * But don't do this if this process was the most recent one
1284 	 * to perform a synchronous write.  We do this to detect the
1285 	 * case where a single process is doing a stream of sync
1286 	 * writes.  No point in waiting for joiners in that case.
1287 	 */
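	/*
	 * Worked example (illustrative, assuming the typical defaults
	 * of j_min_batch_time == 0 and j_max_batch_time == 15000 us):
	 * if the measured average commit time is 2 ms, commit_time
	 * below clamps to 2,000,000 ns.  A sync handle whose
	 * transaction has only been running for 0.5 ms then sleeps for
	 * about 2 ms, giving other threads a window to join the
	 * transaction before the commit is forced.
	 */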
1288 	pid = current->pid;
1289 	if (handle->h_sync && journal->j_last_sync_writer != pid) {
1290 		u64 commit_time, trans_time;
1291 
1292 		journal->j_last_sync_writer = pid;
1293 
1294 		spin_lock(&journal->j_state_lock);
1295 		commit_time = journal->j_average_commit_time;
1296 		spin_unlock(&journal->j_state_lock);
1297 
1298 		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
1299 						   transaction->t_start_time));
1300 
1301 		commit_time = max_t(u64, commit_time,
1302 				    1000*journal->j_min_batch_time);
1303 		commit_time = min_t(u64, commit_time,
1304 				    1000*journal->j_max_batch_time);
1305 
1306 		if (trans_time < commit_time) {
1307 			ktime_t expires = ktime_add_ns(ktime_get(),
1308 						       commit_time);
1309 			set_current_state(TASK_UNINTERRUPTIBLE);
1310 			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
1311 		}
1312 	}
1313 
1314 	if (handle->h_sync)
1315 		transaction->t_synchronous_commit = 1;
1316 	current->journal_info = NULL;
1317 	spin_lock(&transaction->t_handle_lock);
1318 	transaction->t_outstanding_credits -= handle->h_buffer_credits;
1319 	transaction->t_updates--;
1320 	if (!transaction->t_updates) {
1321 		wake_up(&journal->j_wait_updates);
1322 		if (journal->j_barrier_count)
1323 			wake_up(&journal->j_wait_transaction_locked);
1324 	}
1325 
1326 	/*
1327 	 * If the handle is marked SYNC, we need to set another commit
1328 	 * going!  We also want to force a commit if the current
1329 	 * transaction is occupying too much of the log, or if the
1330 	 * transaction is too old now.
1331 	 */
1332 	if (handle->h_sync ||
1333 			transaction->t_outstanding_credits >
1334 				journal->j_max_transaction_buffers ||
1335 			time_after_eq(jiffies, transaction->t_expires)) {
1336 		/* Do this even for aborted journals: an abort still
1337 		 * completes the commit thread, it just doesn't write
1338 		 * anything to disk. */
1339 		tid_t tid = transaction->t_tid;
1340 
1341 		spin_unlock(&transaction->t_handle_lock);
1342 		jbd_debug(2, "transaction too old, requesting commit for "
1343 					"handle %p\n", handle);
1344 		/* This is non-blocking */
1345 		jbd2_log_start_commit(journal, tid);
1346 
1347 		/*
1348 		 * Special case: JBD2_SYNC synchronous updates require us
1349 		 * to wait for the commit to complete.
1350 		 */
1351 		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
1352 			err = jbd2_log_wait_commit(journal, tid);
1353 	} else {
1354 		spin_unlock(&transaction->t_handle_lock);
1355 	}
1356 
1357 	lock_map_release(&handle->h_lockdep_map);
1358 
1359 	jbd2_free_handle(handle);
1360 	return err;
1361 }
1362 
1363 /**
1364  * int jbd2_journal_force_commit() - force any uncommitted transactions
1365  * @journal: journal to force
1366  *
1367  * For synchronous operations: force any uncommitted transactions
1368  * to disk.  May seem kludgy, but it reuses all the handle batching
1369  * code in a very simple manner.
1370  */
1371 int jbd2_journal_force_commit(journal_t *journal)
1372 {
1373 	handle_t *handle;
1374 	int ret;
1375 
1376 	handle = jbd2_journal_start(journal, 1);
1377 	if (IS_ERR(handle)) {
1378 		ret = PTR_ERR(handle);
1379 	} else {
1380 		handle->h_sync = 1;
1381 		ret = jbd2_journal_stop(handle);
1382 	}
1383 	return ret;
1384 }
1385 
1386 /*
1387  *
1388  * List management code snippets: various functions for manipulating the
1389  * transaction buffer lists.
1390  *
1391  */
1392 
1393 /*
1394  * Append a buffer to a transaction list, given the transaction's list head
1395  * pointer.
1396  *
1397  * j_list_lock is held.
1398  *
1399  * jbd_lock_bh_state(jh2bh(jh)) is held.
1400  */
1401 
1402 static inline void
1403 __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
1404 {
1405 	if (!*list) {
1406 		jh->b_tnext = jh->b_tprev = jh;
1407 		*list = jh;
1408 	} else {
1409 		/* Insert at the tail of the list to preserve order */
1410 		struct journal_head *first = *list, *last = first->b_tprev;
1411 		jh->b_tprev = last;
1412 		jh->b_tnext = first;
1413 		last->b_tnext = first->b_tprev = jh;
1414 	}
1415 }
1416 
1417 /*
1418  * Remove a buffer from a transaction list, given the transaction's list
1419  * head pointer.
1420  *
1421  * Called with j_list_lock held, and the journal may not be locked.
1422  *
1423  * jbd_lock_bh_state(jh2bh(jh)) is held.
1424  */
1425 
1426 static inline void
1427 __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
1428 {
1429 	if (*list == jh) {
1430 		*list = jh->b_tnext;
1431 		if (*list == jh)
1432 			*list = NULL;
1433 	}
1434 	jh->b_tprev->b_tnext = jh->b_tnext;
1435 	jh->b_tnext->b_tprev = jh->b_tprev;
1436 }
1437 
1438 /*
1439  * Remove a buffer from the appropriate transaction list.
1440  *
1441  * Note that this function can *change* the value of
1442  * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list,
1443  * t_log_list or t_reserved_list.  If the caller is holding onto a copy of one
1444  * of these pointers, it could go bad.  Generally the caller needs to re-read
1445  * the pointer from the transaction_t.
1446  *
1447  * Called under j_list_lock.  The journal may not be locked.
1448  */
1449 void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
1450 {
1451 	struct journal_head **list = NULL;
1452 	transaction_t *transaction;
1453 	struct buffer_head *bh = jh2bh(jh);
1454 
1455 	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
1456 	transaction = jh->b_transaction;
1457 	if (transaction)
1458 		assert_spin_locked(&transaction->t_journal->j_list_lock);
1459 
1460 	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
1461 	if (jh->b_jlist != BJ_None)
1462 		J_ASSERT_JH(jh, transaction != NULL);
1463 
1464 	switch (jh->b_jlist) {
1465 	case BJ_None:
1466 		return;
1467 	case BJ_Metadata:
1468 		transaction->t_nr_buffers--;
1469 		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
1470 		list = &transaction->t_buffers;
1471 		break;
1472 	case BJ_Forget:
1473 		list = &transaction->t_forget;
1474 		break;
1475 	case BJ_IO:
1476 		list = &transaction->t_iobuf_list;
1477 		break;
1478 	case BJ_Shadow:
1479 		list = &transaction->t_shadow_list;
1480 		break;
1481 	case BJ_LogCtl:
1482 		list = &transaction->t_log_list;
1483 		break;
1484 	case BJ_Reserved:
1485 		list = &transaction->t_reserved_list;
1486 		break;
1487 	}
1488 
1489 	__blist_del_buffer(list, jh);
1490 	jh->b_jlist = BJ_None;
1491 	if (test_clear_buffer_jbddirty(bh))
1492 		mark_buffer_dirty(bh);	/* Expose it to the VM */
1493 }
1494 
1495 void __jbd2_journal_unfile_buffer(struct journal_head *jh)
1496 {
1497 	__jbd2_journal_temp_unlink_buffer(jh);
1498 	jh->b_transaction = NULL;
1499 }
1500 
1501 void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
1502 {
1503 	jbd_lock_bh_state(jh2bh(jh));
1504 	spin_lock(&journal->j_list_lock);
1505 	__jbd2_journal_unfile_buffer(jh);
1506 	spin_unlock(&journal->j_list_lock);
1507 	jbd_unlock_bh_state(jh2bh(jh));
1508 }
1509 
1510 /*
1511  * Called from jbd2_journal_try_to_free_buffers().
1512  *
1513  * Called under jbd_lock_bh_state(bh)
1514  */
1515 static void
1516 __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
1517 {
1518 	struct journal_head *jh;
1519 
1520 	jh = bh2jh(bh);
1521 
1522 	if (buffer_locked(bh) || buffer_dirty(bh))
1523 		goto out;
1524 
1525 	if (jh->b_next_transaction != NULL)
1526 		goto out;
1527 
1528 	spin_lock(&journal->j_list_lock);
1529 	if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
1530 		/* written-back checkpointed metadata buffer */
1531 		if (jh->b_jlist == BJ_None) {
1532 			JBUFFER_TRACE(jh, "remove from checkpoint list");
1533 			__jbd2_journal_remove_checkpoint(jh);
1534 			jbd2_journal_remove_journal_head(bh);
1535 			__brelse(bh);
1536 		}
1537 	}
1538 	spin_unlock(&journal->j_list_lock);
1539 out:
1540 	return;
1541 }
1542 
1543 /**
1544  * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
1545  * @journal: journal for operation
1546  * @page: to try and free
1547  * @gfp_mask: we use the mask to detect how hard we should try to release
1548  * buffers. If __GFP_WAIT and __GFP_FS are set, we wait for commit code to
1549  * release the buffers.
1550  *
1551  *
1552  * For all the buffers on this page,
1553  * if they are fully written out ordered data, move them onto BUF_CLEAN
1554  * so try_to_free_buffers() can reap them.
1555  *
1556  * This function returns non-zero if we wish try_to_free_buffers()
1557  * to be called. We do this if the page is releasable by try_to_free_buffers().
1558  * We also do it if the page has locked or dirty buffers and the caller wants
1559  * us to perform sync or async writeout.
1560  *
1561  * This complicates JBD locking somewhat.  We aren't protected by the
1562  * BKL here.  We wish to remove the buffer from its committing or
1563  * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
1564  *
1565  * This may *change* the value of transaction_t->t_datalist, so anyone
1566  * who looks at t_datalist needs to lock against this function.
1567  *
1568  * Even worse, someone may be doing a jbd2_journal_dirty_data on this
1569  * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
1570  * will come out of the lock with the buffer dirty, which makes it
1571  * ineligible for release here.
1572  *
1573  * Who else is affected by this?  hmm...  Really the only contender
1574  * is do_get_write_access() - it could be looking at the buffer while
1575  * journal_try_to_free_buffer() is changing its state.  But that
1576  * cannot happen because we never reallocate freed data as metadata
1577  * while the data is part of a transaction.  Yes?
1578  *
1579  * Return 0 on failure, 1 on success
1580  */
1581 int jbd2_journal_try_to_free_buffers(journal_t *journal,
1582 				struct page *page, gfp_t gfp_mask)
1583 {
1584 	struct buffer_head *head;
1585 	struct buffer_head *bh;
1586 	int ret = 0;
1587 
1588 	J_ASSERT(PageLocked(page));
1589 
1590 	head = page_buffers(page);
1591 	bh = head;
1592 	do {
1593 		struct journal_head *jh;
1594 
1595 		/*
1596 		 * We take our own ref against the journal_head here to avoid
1597 		 * having to add tons of locking around each instance of
1598 		 * jbd2_journal_remove_journal_head() and
1599 		 * jbd2_journal_put_journal_head().
1600 		 */
1601 		jh = jbd2_journal_grab_journal_head(bh);
1602 		if (!jh)
1603 			continue;
1604 
1605 		jbd_lock_bh_state(bh);
1606 		__journal_try_to_free_buffer(journal, bh);
1607 		jbd2_journal_put_journal_head(jh);
1608 		jbd_unlock_bh_state(bh);
1609 		if (buffer_jbd(bh))
1610 			goto busy;
1611 	} while ((bh = bh->b_this_page) != head);
1612 
1613 	ret = try_to_free_buffers(page);
1614 
1615 busy:
1616 	return ret;
1617 }
1618 
1619 /*
1620  * This buffer is no longer needed.  If it is on an older transaction's
1621  * checkpoint list we need to record it on this transaction's forget list
1622  * to pin this buffer (and hence its checkpointing transaction) down until
1623  * this transaction commits.  If the buffer isn't on a checkpoint list, we
1624  * release it.
1625  * Returns non-zero if JBD no longer has an interest in the buffer.
1626  *
1627  * Called under j_list_lock.
1628  *
1629  * Called under jbd_lock_bh_state(bh).
1630  */
1631 static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1632 {
1633 	int may_free = 1;
1634 	struct buffer_head *bh = jh2bh(jh);
1635 
1636 	__jbd2_journal_unfile_buffer(jh);
1637 
1638 	if (jh->b_cp_transaction) {
1639 		JBUFFER_TRACE(jh, "on running+cp transaction");
1640 		/*
1641 		 * We don't want to write the buffer anymore, clear the
1642 		 * bit so that we don't confuse checks in
1643 		 * __journal_file_buffer
1644 		 */
1645 		clear_buffer_dirty(bh);
1646 		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1647 		may_free = 0;
1648 	} else {
1649 		JBUFFER_TRACE(jh, "on running transaction");
1650 		jbd2_journal_remove_journal_head(bh);
1651 		__brelse(bh);
1652 	}
1653 	return may_free;
1654 }
1655 
1656 /*
1657  * jbd2_journal_invalidatepage
1658  *
1659  * This code is tricky.  It has a number of cases to deal with.
1660  *
1661  * There are two invariants which this code relies on:
1662  *
1663  * i_size must be updated on disk before we start calling invalidatepage on the
1664  * data.
1665  *
 *  This is done in ext4 by defining an ext4_setattr method which
1667  *  updates i_size before truncate gets going.  By maintaining this
1668  *  invariant, we can be sure that it is safe to throw away any buffers
1669  *  attached to the current transaction: once the transaction commits,
1670  *  we know that the data will not be needed.
1671  *
1672  *  Note however that we can *not* throw away data belonging to the
1673  *  previous, committing transaction!
1674  *
1675  * Any disk blocks which *are* part of the previous, committing
1676  * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction.
1678  *
1679  *  The bitmap committed_data images guarantee this: any block which is
1680  *  allocated in one transaction and removed in the next will be marked
1681  *  as in-use in the committed_data bitmap, so cannot be reused until
1682  *  the next transaction to delete the block commits.  This means that
1683  *  leaving committing buffers dirty is quite safe: the disk blocks
1684  *  cannot be reallocated to a different file and so buffer aliasing is
1685  *  not possible.
 *
1688  * The above applies mainly to ordered data mode.  In writeback mode we
1689  * don't make guarantees about the order in which data hits disk --- in
1690  * particular we don't guarantee that new dirty data is flushed before
1691  * transaction commit --- so it is always safe just to discard data
1692  * immediately in that mode.  --sct
1693  */
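
/*
 * To illustrate the first invariant, a size-reducing setattr method
 * might be shaped roughly like this (a hedged sketch, not the actual
 * ext3/ext4 code; the example_* helpers are hypothetical):
 *
 *	handle = jbd2_journal_start(journal, credits);
 *	inode->i_size = attr->ia_size;
 *	example_mark_inode_dirty(handle, inode);
 *	jbd2_journal_stop(handle);
 *	example_truncate_pages(inode);
 *
 * so that by the time invalidatepage runs on the truncated pages, the
 * shrunken i_size is already part of a transaction.
 */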
1694 
1695 /*
1696  * The journal_unmap_buffer helper function returns zero if the buffer
1697  * concerned remains pinned as an anonymous buffer belonging to an older
1698  * transaction.
1699  *
1700  * We're outside-transaction here.  Either or both of j_running_transaction
1701  * and j_committing_transaction may be NULL.
1702  */
1703 static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
1704 {
1705 	transaction_t *transaction;
1706 	struct journal_head *jh;
1707 	int may_free = 1;
1708 	int ret;
1709 
1710 	BUFFER_TRACE(bh, "entry");
1711 
1712 	/*
1713 	 * It is safe to proceed here without the j_list_lock because the
1714 	 * buffers cannot be stolen by try_to_free_buffers as long as we are
1715 	 * holding the page lock. --sct
1716 	 */
1717 
1718 	if (!buffer_jbd(bh))
1719 		goto zap_buffer_unlocked;
1720 
	/* OK, we have a data buffer in journaled mode */
1722 	spin_lock(&journal->j_state_lock);
1723 	jbd_lock_bh_state(bh);
1724 	spin_lock(&journal->j_list_lock);
1725 
1726 	jh = jbd2_journal_grab_journal_head(bh);
1727 	if (!jh)
1728 		goto zap_buffer_no_jh;
1729 
1730 	/*
1731 	 * We cannot remove the buffer from checkpoint lists until the
	 * transaction adding the inode to the orphan list (call it T)
	 * commits.  Otherwise, if the transaction changing the buffer
	 * were cleaned from the journal before T commits, a crash
	 * would cause the correct contents of the buffer to be lost.
	 * On the other hand, we have to clear the buffer's dirty bit
	 * no later than the moment when the transaction marking the
	 * buffer as freed in the filesystem structures commits,
	 * because from that moment on the buffer can be reallocated
	 * and used by a different page.  Since the block hasn't been
	 * freed yet but the inode has already been added to the orphan
	 * list, it is safe for us to add the buffer to the BJ_Forget
	 * list of the newest transaction.
1744 	 */
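	/*
	 * A summary of the dispatch below: a buffer on no transaction
	 * is either zapped outright or, if it still has a checkpoint
	 * link, filed for forgetting; a buffer on the committing
	 * transaction must not be touched and is only marked freed; a
	 * buffer on the running transaction can be disposed of
	 * immediately.
	 */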
1745 	transaction = jh->b_transaction;
1746 	if (transaction == NULL) {
1747 		/* First case: not on any transaction.  If it
1748 		 * has no checkpoint link, then we can zap it:
1749 		 * it's a writeback-mode buffer so we don't care
1750 		 * if it hits disk safely. */
1751 		if (!jh->b_cp_transaction) {
1752 			JBUFFER_TRACE(jh, "not on any transaction: zap");
1753 			goto zap_buffer;
1754 		}
1755 
1756 		if (!buffer_dirty(bh)) {
			/* Writeback has written it.  We can drop it now */
1758 			goto zap_buffer;
1759 		}
1760 
1761 		/* OK, it must be in the journal but still not
1762 		 * written fully to disk: it's metadata or
1763 		 * journaled data... */
1764 
1765 		if (journal->j_running_transaction) {
1766 			/* ... and once the current transaction has
1767 			 * committed, the buffer won't be needed any
1768 			 * longer. */
1769 			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
1770 			ret = __dispose_buffer(jh,
1771 					journal->j_running_transaction);
1772 			jbd2_journal_put_journal_head(jh);
1773 			spin_unlock(&journal->j_list_lock);
1774 			jbd_unlock_bh_state(bh);
1775 			spin_unlock(&journal->j_state_lock);
1776 			return ret;
1777 		} else {
1778 			/* There is no currently-running transaction. So the
1779 			 * orphan record which we wrote for this file must have
1780 			 * passed into commit.  We must attach this buffer to
1781 			 * the committing transaction, if it exists. */
1782 			if (journal->j_committing_transaction) {
1783 				JBUFFER_TRACE(jh, "give to committing trans");
1784 				ret = __dispose_buffer(jh,
1785 					journal->j_committing_transaction);
1786 				jbd2_journal_put_journal_head(jh);
1787 				spin_unlock(&journal->j_list_lock);
1788 				jbd_unlock_bh_state(bh);
1789 				spin_unlock(&journal->j_state_lock);
1790 				return ret;
1791 			} else {
1792 				/* The orphan record's transaction has
1793 				 * committed.  We can cleanse this buffer */
1794 				clear_buffer_jbddirty(bh);
1795 				goto zap_buffer;
1796 			}
1797 		}
1798 	} else if (transaction == journal->j_committing_transaction) {
1799 		JBUFFER_TRACE(jh, "on committing transaction");
1800 		/*
		 * The buffer is committing, so we simply cannot touch
		 * it.  We just set b_next_transaction to the running
		 * transaction (if there is one) and mark the buffer as
		 * freed so that the commit code knows it should clear
		 * the dirty bits when it is done with the buffer.
1806 		 */
1807 		set_buffer_freed(bh);
1808 		if (journal->j_running_transaction && buffer_jbddirty(bh))
1809 			jh->b_next_transaction = journal->j_running_transaction;
1810 		jbd2_journal_put_journal_head(jh);
1811 		spin_unlock(&journal->j_list_lock);
1812 		jbd_unlock_bh_state(bh);
1813 		spin_unlock(&journal->j_state_lock);
1814 		return 0;
1815 	} else {
1816 		/* Good, the buffer belongs to the running transaction.
1817 		 * We are writing our own transaction's data, not any
1818 		 * previous one's, so it is safe to throw it away
1819 		 * (remember that we expect the filesystem to have set
1820 		 * i_size already for this truncate so recovery will not
1821 		 * expose the disk blocks we are discarding here.) */
1822 		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
1823 		JBUFFER_TRACE(jh, "on running transaction");
1824 		may_free = __dispose_buffer(jh, transaction);
1825 	}
1826 
1827 zap_buffer:
1828 	jbd2_journal_put_journal_head(jh);
1829 zap_buffer_no_jh:
1830 	spin_unlock(&journal->j_list_lock);
1831 	jbd_unlock_bh_state(bh);
1832 	spin_unlock(&journal->j_state_lock);
1833 zap_buffer_unlocked:
1834 	clear_buffer_dirty(bh);
1835 	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
1836 	clear_buffer_mapped(bh);
1837 	clear_buffer_req(bh);
1838 	clear_buffer_new(bh);
1839 	bh->b_bdev = NULL;
1840 	return may_free;
1841 }
1842 
1843 /**
 * jbd2_journal_invalidatepage() - invalidate a page's journaled buffers
 * @journal: journal to use for flush...
 * @page:    page to flush
 * @offset:  start of the range to invalidate
 *
 * Reap page buffers containing data after offset in page.
1851  */
1852 void jbd2_journal_invalidatepage(journal_t *journal,
1853 		      struct page *page,
1854 		      unsigned long offset)
1855 {
1856 	struct buffer_head *head, *bh, *next;
1857 	unsigned int curr_off = 0;
1858 	int may_free = 1;
1859 
	BUG_ON(!PageLocked(page));
1862 	if (!page_has_buffers(page))
1863 		return;
1864 
1865 	/* We will potentially be playing with lists other than just the
1866 	 * data lists (especially for journaled data mode), so be
1867 	 * cautious in our locking. */
1868 
1869 	head = bh = page_buffers(page);
1870 	do {
1871 		unsigned int next_off = curr_off + bh->b_size;
1872 		next = bh->b_this_page;
1873 
1874 		if (offset <= curr_off) {
			/* This block lies wholly past the truncation point */
1876 			lock_buffer(bh);
1877 			may_free &= journal_unmap_buffer(journal, bh);
1878 			unlock_buffer(bh);
1879 		}
1880 		curr_off = next_off;
1881 		bh = next;
1882 
1883 	} while (bh != head);
1884 
1885 	if (!offset) {
1886 		if (may_free && try_to_free_buffers(page))
1887 			J_ASSERT(!page_has_buffers(page));
1888 	}
1889 }
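
/*
 * Illustrative caller (hypothetical, modelled loosely on how ext4 wires
 * this up): a journaling filesystem typically forwards its
 * ->invalidatepage address_space operation straight here.
 *
 *	static void example_invalidatepage(struct page *page,
 *					   unsigned long offset)
 *	{
 *		journal_t *journal = example_journal(page->mapping->host);
 *
 *		jbd2_journal_invalidatepage(journal, page, offset);
 *	}
 */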
1890 
1891 /*
1892  * File a buffer on the given transaction list.
1893  */
1894 void __jbd2_journal_file_buffer(struct journal_head *jh,
1895 			transaction_t *transaction, int jlist)
1896 {
1897 	struct journal_head **list = NULL;
1898 	int was_dirty = 0;
1899 	struct buffer_head *bh = jh2bh(jh);
1900 
1901 	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
1902 	assert_spin_locked(&transaction->t_journal->j_list_lock);
1903 
1904 	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
1905 	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
1906 				jh->b_transaction == NULL);
1907 
1908 	if (jh->b_transaction && jh->b_jlist == jlist)
1909 		return;
1910 
1911 	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
1912 	    jlist == BJ_Shadow || jlist == BJ_Forget) {
1913 		/*
		 * For metadata buffers, we track the dirty bit in
		 * buffer_jbddirty instead of buffer_dirty.  We should not see
		 * a dirty bit set here because we clear it in
		 * do_get_write_access(), but e.g. tune2fs can modify the
		 * superblock and set the dirty bit at any time, so we try to
		 * handle that gracefully.
1919 		 */
1920 		if (buffer_dirty(bh))
1921 			warn_dirty_buffer(bh);
1922 		if (test_clear_buffer_dirty(bh) ||
1923 		    test_clear_buffer_jbddirty(bh))
1924 			was_dirty = 1;
1925 	}
1926 
1927 	if (jh->b_transaction)
1928 		__jbd2_journal_temp_unlink_buffer(jh);
1929 	jh->b_transaction = transaction;
1930 
1931 	switch (jlist) {
1932 	case BJ_None:
1933 		J_ASSERT_JH(jh, !jh->b_committed_data);
1934 		J_ASSERT_JH(jh, !jh->b_frozen_data);
1935 		return;
1936 	case BJ_Metadata:
1937 		transaction->t_nr_buffers++;
1938 		list = &transaction->t_buffers;
1939 		break;
1940 	case BJ_Forget:
1941 		list = &transaction->t_forget;
1942 		break;
1943 	case BJ_IO:
1944 		list = &transaction->t_iobuf_list;
1945 		break;
1946 	case BJ_Shadow:
1947 		list = &transaction->t_shadow_list;
1948 		break;
1949 	case BJ_LogCtl:
1950 		list = &transaction->t_log_list;
1951 		break;
1952 	case BJ_Reserved:
1953 		list = &transaction->t_reserved_list;
1954 		break;
1955 	}
1956 
1957 	__blist_add_buffer(list, jh);
1958 	jh->b_jlist = jlist;
1959 
1960 	if (was_dirty)
1961 		set_buffer_jbddirty(bh);
1962 }
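
/*
 * A rough map of who consumes each list above (descriptive note): the
 * commit code walks BJ_Metadata to write the buffers to the log, uses
 * BJ_Shadow and BJ_IO to track buffers whose log copies are in flight,
 * keeps descriptor blocks on BJ_LogCtl, drops buffers filed on
 * BJ_Forget once it is done with them, and BJ_Reserved holds buffers
 * which have write access but no modification yet.
 */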
1963 
1964 void jbd2_journal_file_buffer(struct journal_head *jh,
1965 				transaction_t *transaction, int jlist)
1966 {
1967 	jbd_lock_bh_state(jh2bh(jh));
1968 	spin_lock(&transaction->t_journal->j_list_lock);
1969 	__jbd2_journal_file_buffer(jh, transaction, jlist);
1970 	spin_unlock(&transaction->t_journal->j_list_lock);
1971 	jbd_unlock_bh_state(jh2bh(jh));
1972 }
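
/*
 * The wrapper above also documents the lock ordering this file relies
 * on for list manipulation: the buffer's state lock is always taken
 * before j_list_lock, never the other way around.
 *
 *	jbd_lock_bh_state(bh);
 *	spin_lock(&journal->j_list_lock);
 *	... manipulate the journal_head's lists ...
 *	spin_unlock(&journal->j_list_lock);
 *	jbd_unlock_bh_state(bh);
 */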
1973 
1974 /*
1975  * Remove a buffer from its current buffer list in preparation for
1976  * dropping it from its current transaction entirely.  If the buffer has
1977  * already started to be used by a subsequent transaction, refile the
1978  * buffer on that transaction's metadata list.
1979  *
1980  * Called under journal->j_list_lock
1981  *
1982  * Called under jbd_lock_bh_state(jh2bh(jh))
1983  */
1984 void __jbd2_journal_refile_buffer(struct journal_head *jh)
1985 {
1986 	int was_dirty, jlist;
1987 	struct buffer_head *bh = jh2bh(jh);
1988 
1989 	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
1990 	if (jh->b_transaction)
1991 		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
1992 
1993 	/* If the buffer is now unused, just drop it. */
1994 	if (jh->b_next_transaction == NULL) {
1995 		__jbd2_journal_unfile_buffer(jh);
1996 		return;
1997 	}
1998 
1999 	/*
2000 	 * It has been modified by a later transaction: add it to the new
2001 	 * transaction's metadata list.
2002 	 */
2003 
2004 	was_dirty = test_clear_buffer_jbddirty(bh);
2005 	__jbd2_journal_temp_unlink_buffer(jh);
2006 	jh->b_transaction = jh->b_next_transaction;
2007 	jh->b_next_transaction = NULL;
2008 	if (buffer_freed(bh))
2009 		jlist = BJ_Forget;
2010 	else if (jh->b_modified)
2011 		jlist = BJ_Metadata;
2012 	else
2013 		jlist = BJ_Reserved;
2014 	__jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
2015 	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
2016 
2017 	if (was_dirty)
2018 		set_buffer_jbddirty(bh);
2019 }
2020 
2021 /*
2022  * For the unlocked version of this call, also make sure that any
2023  * hanging journal_head is cleaned up if necessary.
2024  *
2025  * __jbd2_journal_refile_buffer is usually called as part of a single locked
2026  * operation on a buffer_head, in which the caller is probably going to
2027  * be hooking the journal_head onto other lists.  In that case it is up
2028  * to the caller to remove the journal_head if necessary.  For the
2029  * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
2030  * doing anything else to the buffer so we need to do the cleanup
2031  * ourselves to avoid a jh leak.
2032  *
2033  * *** The journal_head may be freed by this call! ***
2034  */
2035 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
2036 {
2037 	struct buffer_head *bh = jh2bh(jh);
2038 
2039 	jbd_lock_bh_state(bh);
2040 	spin_lock(&journal->j_list_lock);
2041 
2042 	__jbd2_journal_refile_buffer(jh);
2043 	jbd_unlock_bh_state(bh);
2044 	jbd2_journal_remove_journal_head(bh);
2045 
2046 	spin_unlock(&journal->j_list_lock);
2047 	__brelse(bh);
2048 }
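
/*
 * Usage note: because the call above may free the journal_head, a
 * caller must not touch jh afterwards, e.g.
 *
 *	jbd2_journal_refile_buffer(journal, jh);
 *	jh = NULL;	(defensive: jh may already be freed)
 */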
2049 
2050 /*
2051  * File inode in the inode list of the handle's transaction
2052  */
2053 int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
2054 {
2055 	transaction_t *transaction = handle->h_transaction;
2056 	journal_t *journal = transaction->t_journal;
2057 
2058 	if (is_handle_aborted(handle))
2059 		return -EIO;
2060 
2061 	jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
2062 			transaction->t_tid);
2063 
2064 	/*
	 * First, check without taking the lock whether the inode is
	 * already on the transaction's lists.  This check is safe without
	 * the lock because we cannot race with somebody removing the inode
	 * from the transaction: the inode is removed only in
	 * journal_release_jbd_inode() and when the transaction commits.
	 * We are guarded from the first case by holding a reference to the
	 * inode.  We are safe against the second case because if
	 * jinode->i_transaction == transaction, the commit code cannot
	 * touch the transaction since we hold a reference to it, and if
	 * jinode->i_next_transaction == transaction, the commit code will
	 * only file the inode where we want it.
2076 	 */
2077 	if (jinode->i_transaction == transaction ||
2078 	    jinode->i_next_transaction == transaction)
2079 		return 0;
2080 
2081 	spin_lock(&journal->j_list_lock);
2082 
2083 	if (jinode->i_transaction == transaction ||
2084 	    jinode->i_next_transaction == transaction)
2085 		goto done;
2086 
2087 	/* On some different transaction's list - should be
2088 	 * the committing one */
2089 	if (jinode->i_transaction) {
2090 		J_ASSERT(jinode->i_next_transaction == NULL);
2091 		J_ASSERT(jinode->i_transaction ==
2092 					journal->j_committing_transaction);
2093 		jinode->i_next_transaction = transaction;
2094 		goto done;
2095 	}
2096 	/* Not on any transaction list... */
2097 	J_ASSERT(!jinode->i_next_transaction);
2098 	jinode->i_transaction = transaction;
2099 	list_add(&jinode->i_list, &transaction->t_inode_list);
2100 done:
2101 	spin_unlock(&journal->j_list_lock);
2102 
2103 	return 0;
2104 }
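
/*
 * Illustrative use (a hedged sketch; example_i() and its jinode field
 * are hypothetical, modelled on how a filesystem would embed a
 * struct jbd2_inode in its per-inode info): an ordered-mode filesystem
 * calls this after allocating blocks for a buffered write, so that the
 * commit code knows to flush the inode's data first.
 *
 *	err = jbd2_journal_file_inode(handle, &example_i(inode)->jinode);
 *	if (err)
 *		return err;
 */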
2105 
2106 /*
2107  * File truncate and transaction commit interact with each other in a
2108  * non-trivial way.  If a transaction writing data block A is
 * non-trivial way.  If a transaction writing data block A is
 * committing, truncate must not discard that data until the
 * transaction has written it out.  Otherwise, if we crashed after the
 * transaction carrying the write committed but before the transaction
 * carrying the truncate committed, we could see stale data in block A.
 * This function is a helper that solves the problem: it starts writeout
 * of the truncated range if that range belongs to the committing
 * transaction.
 *
 * Filesystem code must call this function for an inode journaled in
 * ordered mode before the truncation happens, and after the inode has
 * been placed on the orphan list with the new inode size.  The second
 * condition avoids the race where someone writes new data and we start
 * committing the transaction after this function has been called but
 * before a transaction for the truncate is started (and it furthermore
 * allows us to optimize the case where the addition to the orphan list
 * happens in the same transaction as the write --- we don't have to
 * write any data in that case).
2125  */
2126 int jbd2_journal_begin_ordered_truncate(journal_t *journal,
2127 					struct jbd2_inode *jinode,
2128 					loff_t new_size)
2129 {
2130 	transaction_t *inode_trans, *commit_trans;
2131 	int ret = 0;
2132 
2133 	/* This is a quick check to avoid locking if not necessary */
2134 	if (!jinode->i_transaction)
2135 		goto out;
	/* The locks here are only to force reading of recent values; it is
	 * enough that the transaction was not committing before we started
	 * the transaction adding the inode to the orphan list. */
2139 	spin_lock(&journal->j_state_lock);
2140 	commit_trans = journal->j_committing_transaction;
2141 	spin_unlock(&journal->j_state_lock);
2142 	spin_lock(&journal->j_list_lock);
2143 	inode_trans = jinode->i_transaction;
2144 	spin_unlock(&journal->j_list_lock);
2145 	if (inode_trans == commit_trans) {
2146 		ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
2147 			new_size, LLONG_MAX);
2148 		if (ret)
2149 			jbd2_journal_abort(journal, ret);
2150 	}
2151 out:
2152 	return ret;
2153 }
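
/*
 * Illustrative caller (hypothetical, modelled on a setattr path): after
 * the inode is on the orphan list with its new size, and before the
 * truncate itself starts, the filesystem would do
 *
 *	error = jbd2_journal_begin_ordered_truncate(journal,
 *			&example_i(inode)->jinode, attr->ia_size);
 *	if (error)
 *		goto err_out;
 *
 * so that data beyond the new size which belongs to the committing
 * transaction reaches disk before the truncate can commit.
 */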
2154