xref: /linux/fs/jbd2/commit.c (revision 5c35a02c545a7bbe77f3a1ae337d9e29beed079b)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * linux/fs/jbd2/commit.c
4  *
5  * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
6  *
7  * Copyright 1998 Red Hat corp --- All Rights Reserved
8  *
9  * Journal commit routines for the generic filesystem journaling code;
10  * part of the ext2fs journaling system.
11  */
12 
13 #include <linux/time.h>
14 #include <linux/fs.h>
15 #include <linux/jbd2.h>
16 #include <linux/errno.h>
17 #include <linux/slab.h>
18 #include <linux/mm.h>
19 #include <linux/pagemap.h>
20 #include <linux/jiffies.h>
21 #include <linux/crc32.h>
22 #include <linux/writeback.h>
23 #include <linux/backing-dev.h>
24 #include <linux/bio.h>
25 #include <linux/blkdev.h>
26 #include <linux/bitops.h>
27 #include <trace/events/jbd2.h>
28 
29 /*
30  * IO end handler for temporary buffer_heads handling writes to the journal.
31  */
32 static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
33 {
34 	struct buffer_head *orig_bh = bh->b_private;
35 
36 	BUFFER_TRACE(bh, "");
37 	if (uptodate)
38 		set_buffer_uptodate(bh);
39 	else
40 		clear_buffer_uptodate(bh);
41 	if (orig_bh) {
42 		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
43 		smp_mb__after_atomic();
44 		wake_up_bit(&orig_bh->b_state, BH_Shadow);
45 	}
46 	unlock_buffer(bh);
47 }
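
/*
 * The BH_Shadow handshake above pairs with a waiter sleeping on the same
 * bit.  A minimal sketch of the waiting side (jbd2's do_get_write_access()
 * uses this pattern on the original buffer):
 *
 *	if (buffer_shadow(bh))
 *		wait_on_bit_io(&bh->b_state, BH_Shadow,
 *			       TASK_UNINTERRUPTIBLE);
 *
 * clear_bit_unlock() plus smp_mb__after_atomic() guarantee the bit is seen
 * clear before any task woken by wake_up_bit() re-checks it.
 */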
48 
49 /*
50  * When an ext4 file is truncated, it is possible that some pages are not
51  * successfully freed, because they are attached to a committing transaction.
52  * After the transaction commits, these pages are left on the LRU, with no
53  * ->mapping, and with attached buffers.  These pages are trivially reclaimable
54  * by the VM, but their apparent absence upsets the VM accounting, and it makes
55  * the numbers in /proc/meminfo look odd.
56  *
57  * So here, we have a buffer which has just come off the forget list.  Look to
58  * see if we can strip all buffers from the backing page.
59  *
60  * Called under lock_journal(), and possibly under journal_datalist_lock.  The
61  * caller provided us with a ref against the buffer, and we drop that here.
62  */
63 static void release_buffer_page(struct buffer_head *bh)
64 {
65 	struct page *page;
66 
67 	if (buffer_dirty(bh))
68 		goto nope;
69 	if (atomic_read(&bh->b_count) != 1)
70 		goto nope;
71 	page = bh->b_page;
72 	if (!page)
73 		goto nope;
74 	if (page->mapping)
75 		goto nope;
76 
77 	/* OK, it's a truncated page */
78 	if (!trylock_page(page))
79 		goto nope;
80 
81 	get_page(page);
82 	__brelse(bh);
83 	try_to_free_buffers(page);
84 	unlock_page(page);
85 	put_page(page);
86 	return;
87 
88 nope:
89 	__brelse(bh);
90 }
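
/*
 * Note the ordering above: the extra page reference is taken before
 * __brelse() drops the last buffer reference, so the page cannot go away
 * while try_to_free_buffers() runs; the buffer reference has to be dropped
 * first because try_to_free_buffers() refuses to touch busy buffers
 * (nonzero b_count).
 */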
91 
92 static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
93 {
94 	struct commit_header *h;
95 	__u32 csum;
96 
97 	if (!jbd2_journal_has_csum_v2or3(j))
98 		return;
99 
100 	h = (struct commit_header *)(bh->b_data);
101 	h->h_chksum_type = 0;
102 	h->h_chksum_size = 0;
103 	h->h_chksum[0] = 0;
104 	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
105 	h->h_chksum[0] = cpu_to_be32(csum);
106 }
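
/*
 * h_chksum[0] is zeroed first so the stored checksum covers a block whose
 * checksum slot is zero.  The verifying side in recovery mirrors this; a
 * hedged sketch (helper name made up for illustration):
 *
 *	static int commit_block_csum_ok(journal_t *j, struct buffer_head *bh)
 *	{
 *		struct commit_header *h = (struct commit_header *)bh->b_data;
 *		__be32 provided = h->h_chksum[0];
 *		__u32 calculated;
 *
 *		h->h_chksum[0] = 0;
 *		calculated = jbd2_chksum(j, j->j_csum_seed, bh->b_data,
 *					 j->j_blocksize);
 *		h->h_chksum[0] = provided;
 *		return provided == cpu_to_be32(calculated);
 *	}
 */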
107 
108 /*
109  * Done it all: now submit the commit record.  We should have
110  * cleaned up our previous buffers by now, so if we are in abort
111  * mode we can now just skip the rest of the journal write
112  * entirely.
113  *
114  * Returns 1 if the journal needs to be aborted or 0 on success
115  */
116 static int journal_submit_commit_record(journal_t *journal,
117 					transaction_t *commit_transaction,
118 					struct buffer_head **cbh,
119 					__u32 crc32_sum)
120 {
121 	struct commit_header *tmp;
122 	struct buffer_head *bh;
123 	int ret;
124 	struct timespec64 now = current_kernel_time64();
125 
126 	*cbh = NULL;
127 
128 	if (is_journal_aborted(journal))
129 		return 0;
130 
131 	bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
132 						JBD2_COMMIT_BLOCK);
133 	if (!bh)
134 		return 1;
135 
136 	tmp = (struct commit_header *)bh->b_data;
137 	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
138 	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
139 
140 	if (jbd2_has_feature_checksum(journal)) {
141 		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
142 		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
143 		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
144 	}
145 	jbd2_commit_block_csum_set(journal, bh);
146 
147 	BUFFER_TRACE(bh, "submit commit block");
148 	lock_buffer(bh);
149 	clear_buffer_dirty(bh);
150 	set_buffer_uptodate(bh);
151 	bh->b_end_io = journal_end_buffer_io_sync;
152 
153 	if (journal->j_flags & JBD2_BARRIER &&
154 	    !jbd2_has_feature_async_commit(journal))
155 		ret = submit_bh(REQ_OP_WRITE,
156 			REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
157 	else
158 		ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
159 
160 	*cbh = bh;
161 	return ret;
162 }
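
/*
 * On the request flags above: with JBD2_BARRIER and no async_commit, the
 * commit block goes out with REQ_PREFLUSH (flush the device cache first,
 * so every previously submitted journal block is durable) and REQ_FUA
 * (the commit block itself reaches stable media before completion).  With
 * async_commit the commit block carries a checksum instead, so a torn
 * transaction is caught at recovery time, a plain REQ_SYNC write suffices
 * here, and the cache flush is issued later in
 * jbd2_journal_commit_transaction().
 */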
163 
164 /*
165  * This function along with journal_submit_commit_record
166  * allows writing the commit record asynchronously.
167  */
168 static int journal_wait_on_commit_record(journal_t *journal,
169 					 struct buffer_head *bh)
170 {
171 	int ret = 0;
172 
173 	clear_buffer_dirty(bh);
174 	wait_on_buffer(bh);
175 
176 	if (unlikely(!buffer_uptodate(bh)))
177 		ret = -EIO;
178 	put_bh(bh);            /* One for getblk() */
179 
180 	return ret;
181 }
182 
183 /*
184  * Write the filemap data using the writepage() address_space operation.
185  * We don't do block allocation here even for delalloc. We don't
186  * use writepages() because with delayed allocation we may be doing
187  * block allocation in writepages().
188  */
189 static int journal_submit_inode_data_buffers(struct address_space *mapping)
190 {
191 	int ret;
192 	struct writeback_control wbc = {
193 		.sync_mode =  WB_SYNC_ALL,
194 		.nr_to_write = mapping->nrpages * 2,
195 		.range_start = 0,
196 		.range_end = i_size_read(mapping->host),
197 	};
198 
199 	ret = generic_writepages(mapping, &wbc);
200 	return ret;
201 }
202 
203 /*
204  * Submit all the data buffers of inodes associated with the transaction to
205  * disk.
206  *
207  * We are in a committing transaction. Therefore no new inode can be added to
208  * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
209  * currently operate on from being released while we write out pages.
210  */
211 static int journal_submit_data_buffers(journal_t *journal,
212 		transaction_t *commit_transaction)
213 {
214 	struct jbd2_inode *jinode;
215 	int err, ret = 0;
216 	struct address_space *mapping;
217 
218 	spin_lock(&journal->j_list_lock);
219 	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
220 		if (!(jinode->i_flags & JI_WRITE_DATA))
221 			continue;
222 		mapping = jinode->i_vfs_inode->i_mapping;
223 		jinode->i_flags |= JI_COMMIT_RUNNING;
224 		spin_unlock(&journal->j_list_lock);
225 		/*
226 		 * Submit the inode data buffers. We use writepage
227 		 * instead of writepages, because writepages can do
228 		 * block allocation with delalloc. We need to write
229 		 * only allocated blocks here.
230 		 */
231 		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
232 		err = journal_submit_inode_data_buffers(mapping);
233 		if (!ret)
234 			ret = err;
235 		spin_lock(&journal->j_list_lock);
236 		J_ASSERT(jinode->i_transaction == commit_transaction);
237 		jinode->i_flags &= ~JI_COMMIT_RUNNING;
238 		smp_mb();
239 		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
240 	}
241 	spin_unlock(&journal->j_list_lock);
242 	return ret;
243 }
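
/*
 * JI_COMMIT_RUNNING is what keeps the jbd2_inode alive once j_list_lock is
 * dropped.  A simplified sketch of the waiting side
 * (jbd2_journal_release_jbd_inode() open-codes this with j_list_lock held
 * around the flag test):
 *
 *	wait_on_bit(&jinode->i_flags, __JI_COMMIT_RUNNING,
 *		    TASK_UNINTERRUPTIBLE);
 *
 * The smp_mb() before wake_up_bit() above orders the flag clear against
 * the waiter's re-check of i_flags.
 */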
244 
245 /*
246  * Wait for data submitted for writeout, refile inodes to proper
247  * transaction if needed.
248  *
249  */
250 static int journal_finish_inode_data_buffers(journal_t *journal,
251 		transaction_t *commit_transaction)
252 {
253 	struct jbd2_inode *jinode, *next_i;
254 	int err, ret = 0;
255 
256 	/* For locking, see the comment in journal_submit_data_buffers() */
257 	spin_lock(&journal->j_list_lock);
258 	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
259 		if (!(jinode->i_flags & JI_WAIT_DATA))
260 			continue;
261 		jinode->i_flags |= JI_COMMIT_RUNNING;
262 		spin_unlock(&journal->j_list_lock);
263 		err = filemap_fdatawait_keep_errors(
264 				jinode->i_vfs_inode->i_mapping);
265 		if (!ret)
266 			ret = err;
267 		spin_lock(&journal->j_list_lock);
268 		jinode->i_flags &= ~JI_COMMIT_RUNNING;
269 		smp_mb();
270 		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
271 	}
272 
273 	/* Now refile inode to proper lists */
274 	list_for_each_entry_safe(jinode, next_i,
275 				 &commit_transaction->t_inode_list, i_list) {
276 		list_del(&jinode->i_list);
277 		if (jinode->i_next_transaction) {
278 			jinode->i_transaction = jinode->i_next_transaction;
279 			jinode->i_next_transaction = NULL;
280 			list_add(&jinode->i_list,
281 				&jinode->i_transaction->t_inode_list);
282 		} else {
283 			jinode->i_transaction = NULL;
284 		}
285 	}
286 	spin_unlock(&journal->j_list_lock);
287 
288 	return ret;
289 }
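
/*
 * An inode whose data was also modified under the next running transaction
 * has i_next_transaction set; the refile loop above moves it onto that
 * transaction's t_inode_list so its pages are written out again on the
 * following commit.  Inodes with no further changes are detached entirely
 * (i_transaction = NULL).
 */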
290 
291 static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
292 {
293 	struct page *page = bh->b_page;
294 	char *addr;
295 	__u32 checksum;
296 
297 	addr = kmap_atomic(page);
298 	checksum = crc32_be(crc32_sum,
299 		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
300 	kunmap_atomic(addr);
301 
302 	return checksum;
303 }
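
/*
 * With the compat checksum feature this is chained over every block
 * written to the log, in submission order, starting from ~0; roughly:
 *
 *	__u32 crc32_sum = ~0;
 *	for (each journal buffer bh)
 *		crc32_sum = jbd2_checksum_data(crc32_sum, bh);
 *
 * The final value is passed to journal_submit_commit_record() and stored
 * in the commit header.
 */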
304 
305 static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
306 				   unsigned long long block)
307 {
308 	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
309 	if (jbd2_has_feature_64bit(j))
310 		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
311 }
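
/*
 * A worked example of the split above: for block = 0x123456789ULL the tag
 * holds t_blocknr = 0x23456789 and, with the 64bit feature, t_blocknr_high
 * = 0x1.  The shift is written as (block >> 31) >> 1 rather than
 * block >> 32 as a defensive idiom: shifting by the full width of a type
 * is undefined in C, so the expression stays safe even if the argument
 * were ever only 32 bits wide.
 */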
312 
313 static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
314 				    struct buffer_head *bh, __u32 sequence)
315 {
316 	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
317 	struct page *page = bh->b_page;
318 	__u8 *addr;
319 	__u32 csum32;
320 	__be32 seq;
321 
322 	if (!jbd2_journal_has_csum_v2or3(j))
323 		return;
324 
325 	seq = cpu_to_be32(sequence);
326 	addr = kmap_atomic(page);
327 	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
328 	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
329 			     bh->b_size);
330 	kunmap_atomic(addr);
331 
332 	if (jbd2_has_feature_csum3(j))
333 		tag3->t_checksum = cpu_to_be32(csum32);
334 	else
335 		tag->t_checksum = cpu_to_be16(csum32);
336 }
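
/*
 * A csum3 tag has room for the full 32-bit checksum, while the older csum2
 * on-disk tag only has a 16-bit t_checksum field, so cpu_to_be16(csum32)
 * above deliberately keeps just the low 16 bits (e.g. csum32 = 0xdeadbeef
 * is stored as 0xbeef).
 */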
337 /*
338  * jbd2_journal_commit_transaction
339  *
340  * The primary function for committing a transaction to the log.  This
341  * function is called by the journal thread to begin a complete commit.
342  */
343 void jbd2_journal_commit_transaction(journal_t *journal)
344 {
345 	struct transaction_stats_s stats;
346 	transaction_t *commit_transaction;
347 	struct journal_head *jh;
348 	struct buffer_head *descriptor;
349 	struct buffer_head **wbuf = journal->j_wbuf;
350 	int bufs;
351 	int flags;
352 	int err;
353 	unsigned long long blocknr;
354 	ktime_t start_time;
355 	u64 commit_time;
356 	char *tagp = NULL;
357 	journal_block_tag_t *tag = NULL;
358 	int space_left = 0;
359 	int first_tag = 0;
360 	int tag_flag;
361 	int i;
362 	int tag_bytes = journal_tag_bytes(journal);
363 	struct buffer_head *cbh = NULL; /* For transactional checksums */
364 	__u32 crc32_sum = ~0;
365 	struct blk_plug plug;
366 	/* Tail of the journal */
367 	unsigned long first_block;
368 	tid_t first_tid;
369 	int update_tail;
370 	int csum_size = 0;
371 	LIST_HEAD(io_bufs);
372 	LIST_HEAD(log_bufs);
373 
374 	if (jbd2_journal_has_csum_v2or3(journal))
375 		csum_size = sizeof(struct jbd2_journal_block_tail);
376 
377 	/*
378 	 * First job: lock down the current transaction and wait for
379 	 * all outstanding updates to complete.
380 	 */
381 
382 	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
383 	if (journal->j_flags & JBD2_FLUSHED) {
384 		jbd_debug(3, "super block updated\n");
385 		mutex_lock_io(&journal->j_checkpoint_mutex);
386 		/*
387 		 * We hold j_checkpoint_mutex so tail cannot change under us.
388 		 * We don't need any special data guarantees for writing sb
389 		 * since journal is empty and it is ok for write to be
390 		 * flushed only with transaction commit.
391 		 */
392 		jbd2_journal_update_sb_log_tail(journal,
393 						journal->j_tail_sequence,
394 						journal->j_tail,
395 						REQ_SYNC);
396 		mutex_unlock(&journal->j_checkpoint_mutex);
397 	} else {
398 		jbd_debug(3, "superblock not updated\n");
399 	}
400 
401 	J_ASSERT(journal->j_running_transaction != NULL);
402 	J_ASSERT(journal->j_committing_transaction == NULL);
403 
404 	commit_transaction = journal->j_running_transaction;
405 
406 	trace_jbd2_start_commit(journal, commit_transaction);
407 	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
408 			commit_transaction->t_tid);
409 
410 	write_lock(&journal->j_state_lock);
411 	J_ASSERT(commit_transaction->t_state == T_RUNNING);
412 	commit_transaction->t_state = T_LOCKED;
413 
414 	trace_jbd2_commit_locking(journal, commit_transaction);
415 	stats.run.rs_wait = commit_transaction->t_max_wait;
416 	stats.run.rs_request_delay = 0;
417 	stats.run.rs_locked = jiffies;
418 	if (commit_transaction->t_requested)
419 		stats.run.rs_request_delay =
420 			jbd2_time_diff(commit_transaction->t_requested,
421 				       stats.run.rs_locked);
422 	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
423 					      stats.run.rs_locked);
424 
425 	spin_lock(&commit_transaction->t_handle_lock);
426 	while (atomic_read(&commit_transaction->t_updates)) {
427 		DEFINE_WAIT(wait);
428 
429 		prepare_to_wait(&journal->j_wait_updates, &wait,
430 					TASK_UNINTERRUPTIBLE);
431 		if (atomic_read(&commit_transaction->t_updates)) {
432 			spin_unlock(&commit_transaction->t_handle_lock);
433 			write_unlock(&journal->j_state_lock);
434 			schedule();
435 			write_lock(&journal->j_state_lock);
436 			spin_lock(&commit_transaction->t_handle_lock);
437 		}
438 		finish_wait(&journal->j_wait_updates, &wait);
439 	}
440 	spin_unlock(&commit_transaction->t_handle_lock);
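
	/*
	 * The loop above is the classic open-coded wait_event() idiom:
	 * prepare_to_wait() puts this thread on j_wait_updates *before*
	 * t_updates is re-checked, so a handle dropping the last update
	 * either sees t_updates already at zero or finds the waiter and
	 * wakes it; no wakeup can be lost between the test and schedule().
	 */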
441 
442 	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
443 			journal->j_max_transaction_buffers);
444 
445 	/*
446 	 * First thing we are allowed to do is to discard any remaining
447 	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
448 	 * that there are no such buffers: if a large filesystem
449 	 * operation like a truncate needs to split itself over multiple
450 	 * transactions, then it may try to do a jbd2_journal_restart() while
451 	 * there are still BJ_Reserved buffers outstanding.  These must
452 	 * be released cleanly from the current transaction.
453 	 *
454 	 * In this case, the filesystem must still reserve write access
455 	 * again before modifying the buffer in the new transaction, but
456 	 * we do not require it to remember exactly which old buffers it
457 	 * has reserved.  This is consistent with the existing behaviour
458 	 * that multiple jbd2_journal_get_write_access() calls to the same
459 	 * buffer are perfectly permissible.
460 	 */
461 	while (commit_transaction->t_reserved_list) {
462 		jh = commit_transaction->t_reserved_list;
463 		JBUFFER_TRACE(jh, "reserved, unused: refile");
464 		/*
465 		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
466 		 * leave undo-committed data.
467 		 */
468 		if (jh->b_committed_data) {
469 			struct buffer_head *bh = jh2bh(jh);
470 
471 			jbd_lock_bh_state(bh);
472 			jbd2_free(jh->b_committed_data, bh->b_size);
473 			jh->b_committed_data = NULL;
474 			jbd_unlock_bh_state(bh);
475 		}
476 		jbd2_journal_refile_buffer(journal, jh);
477 	}
478 
479 	/*
480 	 * Now try to drop any written-back buffers from the journal's
481 	 * checkpoint lists.  We do this *before* commit because it potentially
482 	 * frees some memory
483 	 */
484 	spin_lock(&journal->j_list_lock);
485 	__jbd2_journal_clean_checkpoint_list(journal, false);
486 	spin_unlock(&journal->j_list_lock);
487 
488 	jbd_debug(3, "JBD2: commit phase 1\n");
489 
490 	/*
491 	 * Clear revoked flag to reflect there is no revoked buffers
492 	 * in the next transaction which is going to be started.
493 	 */
494 	jbd2_clear_buffer_revoked_flags(journal);
495 
496 	/*
497 	 * Switch to a new revoke table.
498 	 */
499 	jbd2_journal_switch_revoke_table(journal);
500 
501 	/*
502 	 * Reserved credits cannot be claimed anymore, free them
503 	 */
504 	atomic_sub(atomic_read(&journal->j_reserved_credits),
505 		   &commit_transaction->t_outstanding_credits);
506 
507 	trace_jbd2_commit_flushing(journal, commit_transaction);
508 	stats.run.rs_flushing = jiffies;
509 	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
510 					     stats.run.rs_flushing);
511 
512 	commit_transaction->t_state = T_FLUSH;
513 	journal->j_committing_transaction = commit_transaction;
514 	journal->j_running_transaction = NULL;
515 	start_time = ktime_get();
516 	commit_transaction->t_log_start = journal->j_head;
517 	wake_up(&journal->j_wait_transaction_locked);
518 	write_unlock(&journal->j_state_lock);
519 
520 	jbd_debug(3, "JBD2: commit phase 2a\n");
521 
522 	/*
523 	 * Now start flushing things to disk, in the order they appear
524 	 * on the transaction lists.  Data blocks go first.
525 	 */
526 	err = journal_submit_data_buffers(journal, commit_transaction);
527 	if (err)
528 		jbd2_journal_abort(journal, err);
529 
530 	blk_start_plug(&plug);
531 	jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);
532 
533 	jbd_debug(3, "JBD2: commit phase 2b\n");
534 
535 	/*
536 	 * Way to go: we have now written out all of the data for a
537 	 * transaction!  Now comes the tricky part: we need to write out
538 	 * metadata.  Loop over the transaction's entire buffer list:
539 	 */
540 	write_lock(&journal->j_state_lock);
541 	commit_transaction->t_state = T_COMMIT;
542 	write_unlock(&journal->j_state_lock);
543 
544 	trace_jbd2_commit_logging(journal, commit_transaction);
545 	stats.run.rs_logging = jiffies;
546 	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
547 					       stats.run.rs_logging);
548 	stats.run.rs_blocks =
549 		atomic_read(&commit_transaction->t_outstanding_credits);
550 	stats.run.rs_blocks_logged = 0;
551 
552 	J_ASSERT(commit_transaction->t_nr_buffers <=
553 		 atomic_read(&commit_transaction->t_outstanding_credits));
554 
555 	err = 0;
556 	bufs = 0;
557 	descriptor = NULL;
558 	while (commit_transaction->t_buffers) {
559 
560 		/* Find the next buffer to be journaled... */
561 
562 		jh = commit_transaction->t_buffers;
563 
564 		/* If we're in abort mode, we just un-journal the buffer and
565 		   release it. */
566 
567 		if (is_journal_aborted(journal)) {
568 			clear_buffer_jbddirty(jh2bh(jh));
569 			JBUFFER_TRACE(jh, "journal is aborting: refile");
570 			jbd2_buffer_abort_trigger(jh,
571 						  jh->b_frozen_data ?
572 						  jh->b_frozen_triggers :
573 						  jh->b_triggers);
574 			jbd2_journal_refile_buffer(journal, jh);
575 			/* If that was the last one, we need to clean up
576 			 * any descriptor buffers which may have been
577 			 * already allocated, even if we are now
578 			 * aborting. */
579 			if (!commit_transaction->t_buffers)
580 				goto start_journal_io;
581 			continue;
582 		}
583 
584 		/* Make sure we have a descriptor block in which to
585 		   record the metadata buffer. */
586 
587 		if (!descriptor) {
588 			J_ASSERT (bufs == 0);
589 
590 			jbd_debug(4, "JBD2: get descriptor\n");
591 
592 			descriptor = jbd2_journal_get_descriptor_buffer(
593 							commit_transaction,
594 							JBD2_DESCRIPTOR_BLOCK);
595 			if (!descriptor) {
596 				jbd2_journal_abort(journal, -EIO);
597 				continue;
598 			}
599 
600 			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
601 				(unsigned long long)descriptor->b_blocknr,
602 				descriptor->b_data);
603 			tagp = &descriptor->b_data[sizeof(journal_header_t)];
604 			space_left = descriptor->b_size -
605 						sizeof(journal_header_t);
606 			first_tag = 1;
607 			set_buffer_jwrite(descriptor);
608 			set_buffer_dirty(descriptor);
609 			wbuf[bufs++] = descriptor;
610 
611 			/* Record it so that we can wait for IO
612                            completion later */
613 			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
614 			jbd2_file_log_bh(&log_bufs, descriptor);
615 		}
616 
617 		/* Where is the buffer to be written? */
618 
619 		err = jbd2_journal_next_log_block(journal, &blocknr);
620 		/* If the block mapping failed, just abandon the buffer
621 		   and repeat this loop: we'll fall into the
622 		   refile-on-abort condition above. */
623 		if (err) {
624 			jbd2_journal_abort(journal, err);
625 			continue;
626 		}
627 
628 		/*
629 		 * start_this_handle() uses t_outstanding_credits to determine
630 		 * the free space in the log, but this counter is also
631 		 * changed by jbd2_journal_next_log_block().
632 		 */
633 		atomic_dec(&commit_transaction->t_outstanding_credits);
634 
635 		/* Bump b_count to prevent truncate from stumbling over
636                    the shadowed buffer!  @@@ This can go if we ever get
637                    rid of the shadow pairing of buffers. */
638 		atomic_inc(&jh2bh(jh)->b_count);
639 
640 		/*
641 		 * Make a temporary IO buffer with which to write it out
642 		 * (this will requeue the metadata buffer to BJ_Shadow).
643 		 */
644 		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
645 		JBUFFER_TRACE(jh, "ph3: write metadata");
646 		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
647 						jh, &wbuf[bufs], blocknr);
648 		if (flags < 0) {
649 			jbd2_journal_abort(journal, flags);
650 			continue;
651 		}
652 		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
653 
654 		/* Record the new block's tag in the current descriptor
655                    buffer */
656 
657 		tag_flag = 0;
658 		if (flags & 1)
659 			tag_flag |= JBD2_FLAG_ESCAPE;
660 		if (!first_tag)
661 			tag_flag |= JBD2_FLAG_SAME_UUID;
662 
663 		tag = (journal_block_tag_t *) tagp;
664 		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
665 		tag->t_flags = cpu_to_be16(tag_flag);
666 		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
667 					commit_transaction->t_tid);
668 		tagp += tag_bytes;
669 		space_left -= tag_bytes;
670 		bufs++;
671 
672 		if (first_tag) {
673 			memcpy (tagp, journal->j_uuid, 16);
674 			tagp += 16;
675 			space_left -= 16;
676 			first_tag = 0;
677 		}
678 
679 		/* If there's no more to do, or if the descriptor is full,
680 		   let the IO rip! */
681 
682 		if (bufs == journal->j_wbufsize ||
683 		    commit_transaction->t_buffers == NULL ||
684 		    space_left < tag_bytes + 16 + csum_size) {
685 
686 			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);
687 
688 			/* Write an end-of-descriptor marker before
689                            submitting the IOs.  "tag" still points to
690                            the last tag we set up. */
691 
692 			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
693 
694 			jbd2_descriptor_block_csum_set(journal, descriptor);
695 start_journal_io:
696 			for (i = 0; i < bufs; i++) {
697 				struct buffer_head *bh = wbuf[i];
698 				/*
699 				 * Compute checksum.
700 				 */
701 				if (jbd2_has_feature_checksum(journal)) {
702 					crc32_sum =
703 					    jbd2_checksum_data(crc32_sum, bh);
704 				}
705 
706 				lock_buffer(bh);
707 				clear_buffer_dirty(bh);
708 				set_buffer_uptodate(bh);
709 				bh->b_end_io = journal_end_buffer_io_sync;
710 				submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
711 			}
712 			cond_resched();
713 			stats.run.rs_blocks_logged += bufs;
714 
715 			/* Force a new descriptor to be generated next
716                            time round the loop. */
717 			descriptor = NULL;
718 			bufs = 0;
719 		}
720 	}
721 
722 	err = journal_finish_inode_data_buffers(journal, commit_transaction);
723 	if (err) {
724 		printk(KERN_WARNING
725 			"JBD2: Detected IO errors while flushing file data "
726 		       "on %s\n", journal->j_devname);
727 		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
728 			jbd2_journal_abort(journal, err);
729 		err = 0;
730 	}
731 
732 	/*
733 	 * Get current oldest transaction in the log before we issue flush
734 	 * to the filesystem device. After the flush we can be sure that
735 	 * blocks of all older transactions are checkpointed to persistent
736 	 * storage and we will be safe to update journal start in the
737 	 * superblock with the numbers we get here.
738 	 */
739 	update_tail =
740 		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);
741 
742 	write_lock(&journal->j_state_lock);
743 	if (update_tail) {
744 		long freed = first_block - journal->j_tail;
745 
746 		if (first_block < journal->j_tail)
747 			freed += journal->j_last - journal->j_first;
748 		/* Update tail only if we free significant amount of space */
749 		if (freed < journal->j_maxlen / 4)
750 			update_tail = 0;
751 	}
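
	/*
	 * The log is circular over blocks j_first..j_last, so when
	 * first_block has wrapped past j_tail the plain difference goes
	 * negative and one full log length is added back.  For example,
	 * j_tail = 900, first_block = 100, j_first = 1, j_last = 1001
	 * gives freed = (100 - 900) + (1001 - 1) = 200 blocks.
	 */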
752 	J_ASSERT(commit_transaction->t_state == T_COMMIT);
753 	commit_transaction->t_state = T_COMMIT_DFLUSH;
754 	write_unlock(&journal->j_state_lock);
755 
756 	/*
757 	 * If the journal is not located on the file system device,
758 	 * then we must flush the file system device before we issue
759 	 * the commit record
760 	 */
761 	if (commit_transaction->t_need_data_flush &&
762 	    (journal->j_fs_dev != journal->j_dev) &&
763 	    (journal->j_flags & JBD2_BARRIER))
764 		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
765 
766 	/* Done it all: now write the commit record asynchronously. */
767 	if (jbd2_has_feature_async_commit(journal)) {
768 		err = journal_submit_commit_record(journal, commit_transaction,
769 						 &cbh, crc32_sum);
770 		if (err)
771 			__jbd2_journal_abort_hard(journal);
772 	}
773 
774 	blk_finish_plug(&plug);
775 
776 	/* Lo and behold: we have just managed to send a transaction to
777            the log.  Before we can commit it, wait for the IO so far to
778            complete.  Control buffers being written are on the
779            transaction's t_log_list queue, and metadata buffers are on
780            the io_bufs list.
781 
782 	   Wait for the buffers in reverse order.  That way we are
783 	   less likely to be woken up until all IOs have completed, and
784 	   so we incur less scheduling load.
785 	*/
786 
787 	jbd_debug(3, "JBD2: commit phase 3\n");
788 
789 	while (!list_empty(&io_bufs)) {
790 		struct buffer_head *bh = list_entry(io_bufs.prev,
791 						    struct buffer_head,
792 						    b_assoc_buffers);
793 
794 		wait_on_buffer(bh);
795 		cond_resched();
796 
797 		if (unlikely(!buffer_uptodate(bh)))
798 			err = -EIO;
799 		jbd2_unfile_log_bh(bh);
800 
801 		/*
802 		 * The list contains temporary buffer heads created by
803 		 * jbd2_journal_write_metadata_buffer().
804 		 */
805 		BUFFER_TRACE(bh, "dumping temporary bh");
806 		__brelse(bh);
807 		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
808 		free_buffer_head(bh);
809 
810 		/* We also have to refile the corresponding shadowed buffer */
811 		jh = commit_transaction->t_shadow_list->b_tprev;
812 		bh = jh2bh(jh);
813 		clear_buffer_jwrite(bh);
814 		J_ASSERT_BH(bh, buffer_jbddirty(bh));
815 		J_ASSERT_BH(bh, !buffer_shadow(bh));
816 
817 		/* The metadata is now released for reuse, but we need
818                    to remember it against this transaction so that when
819                    we finally commit, we can do any checkpointing
820                    required. */
821 		JBUFFER_TRACE(jh, "file as BJ_Forget");
822 		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
823 		JBUFFER_TRACE(jh, "brelse shadowed buffer");
824 		__brelse(bh);
825 	}
826 
827 	J_ASSERT (commit_transaction->t_shadow_list == NULL);
828 
829 	jbd_debug(3, "JBD2: commit phase 4\n");
830 
831 	/* Here we wait for the revoke record and descriptor record buffers */
832 	while (!list_empty(&log_bufs)) {
833 		struct buffer_head *bh;
834 
835 		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
836 		wait_on_buffer(bh);
837 		cond_resched();
838 
839 		if (unlikely(!buffer_uptodate(bh)))
840 			err = -EIO;
841 
842 		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
843 		clear_buffer_jwrite(bh);
844 		jbd2_unfile_log_bh(bh);
845 		__brelse(bh);		/* One for getblk */
846 		/* AKPM: bforget here */
847 	}
848 
849 	if (err)
850 		jbd2_journal_abort(journal, err);
851 
852 	jbd_debug(3, "JBD2: commit phase 5\n");
853 	write_lock(&journal->j_state_lock);
854 	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
855 	commit_transaction->t_state = T_COMMIT_JFLUSH;
856 	write_unlock(&journal->j_state_lock);
857 
858 	if (!jbd2_has_feature_async_commit(journal)) {
859 		err = journal_submit_commit_record(journal, commit_transaction,
860 						&cbh, crc32_sum);
861 		if (err)
862 			__jbd2_journal_abort_hard(journal);
863 	}
864 	if (cbh)
865 		err = journal_wait_on_commit_record(journal, cbh);
866 	if (jbd2_has_feature_async_commit(journal) &&
867 	    journal->j_flags & JBD2_BARRIER) {
868 		blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
869 	}
870 
871 	if (err)
872 		jbd2_journal_abort(journal, err);
873 
874 	/*
875 	 * Now disk caches for filesystem device are flushed so we are safe to
876 	 * erase checkpointed transactions from the log by updating journal
877 	 * superblock.
878 	 */
879 	if (update_tail)
880 		jbd2_update_log_tail(journal, first_tid, first_block);
881 
882 	/* End of a transaction!  Finally, we can do checkpoint
883            processing: any buffers committed as a result of this
884            transaction can be removed from any checkpoint list it was on
885            before. */
886 
887 	jbd_debug(3, "JBD2: commit phase 6\n");
888 
889 	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
890 	J_ASSERT(commit_transaction->t_buffers == NULL);
891 	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
892 	J_ASSERT(commit_transaction->t_shadow_list == NULL);
893 
894 restart_loop:
895 	/*
896 	 * As there are other places (journal_unmap_buffer()) adding buffers
897 	 * to this list we have to be careful and hold the j_list_lock.
898 	 */
899 	spin_lock(&journal->j_list_lock);
900 	while (commit_transaction->t_forget) {
901 		transaction_t *cp_transaction;
902 		struct buffer_head *bh;
903 		int try_to_free = 0;
904 
905 		jh = commit_transaction->t_forget;
906 		spin_unlock(&journal->j_list_lock);
907 		bh = jh2bh(jh);
908 		/*
909 		 * Get a reference so that bh cannot be freed before we are
910 		 * done with it.
911 		 */
912 		get_bh(bh);
913 		jbd_lock_bh_state(bh);
914 		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);
915 
916 		/*
917 		 * If there is undo-protected committed data against
918 		 * this buffer, then we can remove it now.  If it is a
919 		 * buffer needing such protection, the old frozen_data
920 		 * field now points to a committed version of the
921 		 * buffer, so rotate that field to the new committed
922 		 * data.
923 		 *
924 		 * Otherwise, we can just throw away the frozen data now.
925 		 *
926 		 * We also know that the frozen data has already fired
927 		 * its triggers if they exist, so we can clear that too.
928 		 */
929 		if (jh->b_committed_data) {
930 			jbd2_free(jh->b_committed_data, bh->b_size);
931 			jh->b_committed_data = NULL;
932 			if (jh->b_frozen_data) {
933 				jh->b_committed_data = jh->b_frozen_data;
934 				jh->b_frozen_data = NULL;
935 				jh->b_frozen_triggers = NULL;
936 			}
937 		} else if (jh->b_frozen_data) {
938 			jbd2_free(jh->b_frozen_data, bh->b_size);
939 			jh->b_frozen_data = NULL;
940 			jh->b_frozen_triggers = NULL;
941 		}
942 
943 		spin_lock(&journal->j_list_lock);
944 		cp_transaction = jh->b_cp_transaction;
945 		if (cp_transaction) {
946 			JBUFFER_TRACE(jh, "remove from old cp transaction");
947 			cp_transaction->t_chp_stats.cs_dropped++;
948 			__jbd2_journal_remove_checkpoint(jh);
949 		}
950 
951 		/* Only re-checkpoint the buffer_head if it is marked
952 		 * dirty.  If the buffer was added to the BJ_Forget list
953 		 * by jbd2_journal_forget, it may no longer be dirty and
954 		 * there's no point in keeping a checkpoint record for
955 		 * it. */
956 
957 		/*
958 		* A buffer which has been freed while still being journaled by
959 		* a previous transaction.
960 		*/
961 		if (buffer_freed(bh)) {
962 			/*
963 			 * If the running transaction is the one containing
964 			 * "add to orphan" operation (b_next_transaction !=
965 			 * NULL), we have to wait for that transaction to
966 			 * commit before we can really get rid of the buffer.
967 			 * So just clear b_modified to not confuse transaction
968 			 * credit accounting and refile the buffer to
969 			 * BJ_Forget of the running transaction. If the just
970 			 * committed transaction contains "add to orphan"
971 			 * operation, we can completely invalidate the buffer
972 			 * now. We are rather through in that since the
973 			 * now. We are rather thorough in that, since the
974 			 * buffer may still be accessible when blocksize <
975 			 * page.
976 			 */
977 			jh->b_modified = 0;
978 			if (!jh->b_next_transaction) {
979 				clear_buffer_freed(bh);
980 				clear_buffer_jbddirty(bh);
981 				clear_buffer_mapped(bh);
982 				clear_buffer_new(bh);
983 				clear_buffer_req(bh);
984 				bh->b_bdev = NULL;
985 			}
986 		}
987 
988 		if (buffer_jbddirty(bh)) {
989 			JBUFFER_TRACE(jh, "add to new checkpointing trans");
990 			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
991 			if (is_journal_aborted(journal))
992 				clear_buffer_jbddirty(bh);
993 		} else {
994 			J_ASSERT_BH(bh, !buffer_dirty(bh));
995 			/*
996 			 * A buffer on the BJ_Forget list that is not jbddirty means
997 			 * it has been freed by this transaction and hence it
998 			 * could not have been reallocated until this
999 			 * transaction has committed. *BUT* it could be
1000 			 * reallocated once we have written all the data to
1001 			 * disk and before we process the buffer on BJ_Forget
1002 			 * list.
1003 			 */
1004 			if (!jh->b_next_transaction)
1005 				try_to_free = 1;
1006 		}
1007 		JBUFFER_TRACE(jh, "refile or unfile buffer");
1008 		__jbd2_journal_refile_buffer(jh);
1009 		jbd_unlock_bh_state(bh);
1010 		if (try_to_free)
1011 			release_buffer_page(bh);	/* Drops bh reference */
1012 		else
1013 			__brelse(bh);
1014 		cond_resched_lock(&journal->j_list_lock);
1015 	}
1016 	spin_unlock(&journal->j_list_lock);
1017 	/*
1018 	 * This is a bit sleazy.  We use j_list_lock to protect transition
1019 	 * of a transaction into T_FINISHED state and calling
1020 	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
1021 	 * other checkpointing code processing the transaction...
1022 	 */
1023 	write_lock(&journal->j_state_lock);
1024 	spin_lock(&journal->j_list_lock);
1025 	/*
1026 	 * Now recheck if some buffers did not get attached to the transaction
1027 	 * while the lock was dropped...
1028 	 */
1029 	if (commit_transaction->t_forget) {
1030 		spin_unlock(&journal->j_list_lock);
1031 		write_unlock(&journal->j_state_lock);
1032 		goto restart_loop;
1033 	}
1034 
1035 	/* Add the transaction to the checkpoint list
1036 	 * __journal_remove_checkpoint() cannot destroy the transaction
1037 	 * under us because it is not marked as T_FINISHED yet */
1038 	if (journal->j_checkpoint_transactions == NULL) {
1039 		journal->j_checkpoint_transactions = commit_transaction;
1040 		commit_transaction->t_cpnext = commit_transaction;
1041 		commit_transaction->t_cpprev = commit_transaction;
1042 	} else {
1043 		commit_transaction->t_cpnext =
1044 			journal->j_checkpoint_transactions;
1045 		commit_transaction->t_cpprev =
1046 			commit_transaction->t_cpnext->t_cpprev;
1047 		commit_transaction->t_cpnext->t_cpprev =
1048 			commit_transaction;
1049 		commit_transaction->t_cpprev->t_cpnext =
1050 				commit_transaction;
1051 	}
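
	/*
	 * t_cpnext/t_cpprev form a circular doubly linked list headed by
	 * j_checkpoint_transactions.  The four assignments above splice the
	 * committed transaction in just before the head, i.e. at the tail
	 * of the list, so checkpointing processes transactions in commit
	 * order.
	 */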
1052 	spin_unlock(&journal->j_list_lock);
1053 
1054 	/* Done with this transaction! */
1055 
1056 	jbd_debug(3, "JBD2: commit phase 7\n");
1057 
1058 	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
1059 
1060 	commit_transaction->t_start = jiffies;
1061 	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
1062 					      commit_transaction->t_start);
1063 
1064 	/*
1065 	 * File the transaction statistics
1066 	 */
1067 	stats.ts_tid = commit_transaction->t_tid;
1068 	stats.run.rs_handle_count =
1069 		atomic_read(&commit_transaction->t_handle_count);
1070 	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
1071 			     commit_transaction->t_tid, &stats.run);
1072 	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
1073 
1074 	commit_transaction->t_state = T_COMMIT_CALLBACK;
1075 	J_ASSERT(commit_transaction == journal->j_committing_transaction);
1076 	journal->j_commit_sequence = commit_transaction->t_tid;
1077 	journal->j_committing_transaction = NULL;
1078 	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
1079 
1080 	/*
1081 	 * weight the commit time higher than the average time so we don't
1082 	 * react too strongly to vast changes in the commit time
1083 	 */
1084 	if (likely(journal->j_average_commit_time))
1085 		journal->j_average_commit_time = (commit_time +
1086 				journal->j_average_commit_time*3) / 4;
1087 	else
1088 		journal->j_average_commit_time = commit_time;
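
	/*
	 * That is avg' = (commit_time + 3 * avg) / 4, a 3/4-weighted moving
	 * average: with avg = 8 ms and a 20 ms commit, the new average is
	 * (20 + 24) / 4 = 11 ms, so one slow commit moves the estimate only
	 * a quarter of the way.
	 */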
1089 
1090 	write_unlock(&journal->j_state_lock);
1091 
1092 	if (journal->j_commit_callback)
1093 		journal->j_commit_callback(journal, commit_transaction);
1094 
1095 	trace_jbd2_end_commit(journal, commit_transaction);
1096 	jbd_debug(1, "JBD2: commit %d complete, head %d\n",
1097 		  journal->j_commit_sequence, journal->j_tail_sequence);
1098 
1099 	write_lock(&journal->j_state_lock);
1100 	spin_lock(&journal->j_list_lock);
1101 	commit_transaction->t_state = T_FINISHED;
1102 	/* Check if the transaction can be dropped now that we are finished */
1103 	if (commit_transaction->t_checkpoint_list == NULL &&
1104 	    commit_transaction->t_checkpoint_io_list == NULL) {
1105 		__jbd2_journal_drop_transaction(journal, commit_transaction);
1106 		jbd2_journal_free_transaction(commit_transaction);
1107 	}
1108 	spin_unlock(&journal->j_list_lock);
1109 	write_unlock(&journal->j_state_lock);
1110 	wake_up(&journal->j_wait_done_commit);
1111 
1112 	/*
1113 	 * Calculate overall stats
1114 	 */
1115 	spin_lock(&journal->j_history_lock);
1116 	journal->j_stats.ts_tid++;
1117 	journal->j_stats.ts_requested += stats.ts_requested;
1118 	journal->j_stats.run.rs_wait += stats.run.rs_wait;
1119 	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
1120 	journal->j_stats.run.rs_running += stats.run.rs_running;
1121 	journal->j_stats.run.rs_locked += stats.run.rs_locked;
1122 	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
1123 	journal->j_stats.run.rs_logging += stats.run.rs_logging;
1124 	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
1125 	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
1126 	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
1127 	spin_unlock(&journal->j_history_lock);
1128 }
1129