// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"
#include "trans.h"

static void gfs2_log_shutdown(struct gfs2_sbd *sdp);

/**
 * gfs2_struct2blk - compute the number of log blocks needed for structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures (in practice, revoke entries).
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */
47 
48 unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
49 {
50 	unsigned int blks;
51 	unsigned int first, second;
52 
53 	blks = 1;
54 	first = sdp->sd_ldptrs;
55 
56 	if (nstruct > first) {
57 		second = sdp->sd_inptrs;
58 		blks += DIV_ROUND_UP(nstruct - first, second);
59 	}
60 
61 	return blks;
62 }
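
/*
 * Illustrative sketch (not part of the original file): the same arithmetic
 * as gfs2_struct2blk() above, as a standalone function with assumed
 * capacities.  One descriptor block holds "first" entries (sd_ldptrs);
 * each continuation block holds "second" entries (sd_inptrs).
 */
#if 0	/* example only, never compiled */
static unsigned int example_struct2blk(unsigned int nstruct,
				       unsigned int first,  /* assumed, e.g. 503 */
				       unsigned int second) /* assumed, e.g. 509 */
{
	unsigned int blks = 1;	/* the descriptor block itself */

	if (nstruct > first)
		blks += DIV_ROUND_UP(nstruct - first, second);
	return blks;	/* e.g. example_struct2blk(600, 503, 509) == 2 */
}
#endif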

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction to start I/O on
 *
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int ret = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			if (buffer_uptodate(bh)) {
				list_move(&bd->bd_ail_st_list,
					  &tr->tr_ail2_list);
				continue;
			}
			if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
				gfs2_io_error_bh(sdp, bh);
				gfs2_withdraw_delayed(sdp);
			}
		}

		if (gfs2_withdrawn(sdp)) {
			gfs2_remove_from_ail(bd);
			continue;
		}
		if (!buffer_dirty(bh))
			continue;
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		spin_unlock(&sdp->sd_ail_lock);
		ret = generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (ret == -ENODATA) /* if a jdata write into a new hole */
			ret = 0; /* ignore it */
		if (ret || wbc->nr_to_write <= 0)
			break;
		return -EBUSY;
	}

	return ret;
}

static void dump_ail_list(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_reverse(bd, &tr->tr_ail1_list,
					    bd_ail_st_list) {
			bh = bd->bd_bh;
			fs_err(sdp, "bd %p: blk:0x%llx bh=%p ", bd,
			       (unsigned long long)bd->bd_blkno, bh);
			if (!bh) {
				fs_err(sdp, "\n");
				continue;
			}
			fs_err(sdp, "0x%llx up2:%d dirt:%d lkd:%d req:%d "
			       "map:%d new:%d ar:%d aw:%d delay:%d "
			       "io err:%d unwritten:%d dfr:%d pin:%d esc:%d\n",
			       (unsigned long long)bh->b_blocknr,
			       buffer_uptodate(bh), buffer_dirty(bh),
			       buffer_locked(bh), buffer_req(bh),
			       buffer_mapped(bh), buffer_new(bh),
			       buffer_async_read(bh), buffer_async_write(bh),
			       buffer_delay(bh), buffer_write_io_error(bh),
			       buffer_unwritten(bh),
			       buffer_defer_completion(bh),
			       buffer_pinned(bh), buffer_escaped(bh));
		}
	}
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;
	int ret;
	unsigned long flush_start = jiffies;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	ret = 0;
	if (time_after(jiffies, flush_start + (HZ * 600))) {
		fs_err(sdp, "Error: In %s for ten minutes! t=%d\n",
		       __func__, current->journal_info ? 1 : 0);
		dump_ail_list(sdp);
		goto out;
	}
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		ret = gfs2_ail1_start_one(sdp, wbc, tr);
		if (ret) {
			if (ret == -EBUSY)
				goto restart;
			break;
		}
	}
out:
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	if (ret) {
		gfs2_lm(sdp, "gfs2_ail1_start_one (generic_writepages) "
			"returned: %d\n", ret);
		gfs2_withdraw(sdp);
	}
	trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction
 * @max_revokes: If nonzero, issue revokes for the bd items for written buffers
 *
 * Returns: the transaction's count of remaining active items
 */

static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
				int *max_revokes)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int active_count = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		/*
		 * If another process flagged an io error, e.g. writing to the
		 * journal, error all other bhs and move them off the ail1 to
		 * prevent a tight loop when unmount tries to flush ail1,
		 * regardless of whether they're still busy. If no outside
		 * errors were found and the buffer is busy, move to the next.
		 * If the ail buffer is not busy and caught an error, flag it
		 * for others.
		 */
		if (!sdp->sd_log_error && buffer_busy(bh)) {
			active_count++;
			continue;
		}
		if (!buffer_uptodate(bh) &&
		    !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
			gfs2_io_error_bh(sdp, bh);
			gfs2_withdraw_delayed(sdp);
		}
		/*
		 * If we have space for revokes and the bd is no longer on any
		 * buf list, we can just add a revoke for it immediately and
		 * avoid having to put it on the ail2 list, where it would need
		 * to be revoked later.
		 */
		if (*max_revokes && list_empty(&bd->bd_list)) {
			gfs2_add_revoke(sdp, bd);
			(*max_revokes)--;
			continue;
		}
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
	return active_count;
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 * @max_revokes: If non-zero, add revokes where appropriate
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		if (!gfs2_ail1_empty_one(sdp, tr, &max_revokes) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	if (test_bit(SDF_WITHDRAWING, &sdp->sd_flags)) {
		gfs2_lm(sdp, "fatal: I/O error(s)\n");
		gfs2_withdraw(sdp);
	}

	return ret;
}

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_ail_empty_tr - empty one of the ail lists for a transaction
 * @sdp: the filesystem
 * @tr: the transaction
 * @head: the ail list to empty (tr_ail1_list or tr_ail2_list)
 */

static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			      struct list_head *head)
{
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata,
				      bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_trans *tr, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
		a = (old_tail <= tr->tr_first);
		b = (tr->tr_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
		gfs2_trans_free(sdp, tr);
	}

	spin_unlock(&sdp->sd_ail_lock);
}
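
/*
 * Worked example of the wrap test above (assumed figures): in a 1000-block
 * journal, old_tail = 900 advancing to new_tail = 100 wraps, so a
 * transaction is freed when tr_first >= 900 (a) OR tr_first < 100 (b).
 * Without a wrap, say old_tail = 100 and new_tail = 300, both conditions
 * must hold: 100 <= tr_first < 300.
 */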

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	up_read(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * because a small number of header blocks are associated with each log
 * flush. The exact number can't be known until flush time, so we ensure
 * that we have just enough free blocks at all times to avoid running out
 * during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	int ret = 0;
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
	atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
					TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	atomic_inc(&sdp->sd_reserving_log);
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
				free_blocks - blks) != free_blocks) {
		if (atomic_dec_and_test(&sdp->sd_reserving_log))
			wake_up(&sdp->sd_reserving_log_wait);
		goto retry;
	}
	atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		gfs2_log_release(sdp, blks);
		ret = -EROFS;
	}
	if (atomic_dec_and_test(&sdp->sd_reserving_log))
		wake_up(&sdp->sd_reserving_log_wait);
	return ret;
}
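
/*
 * Minimal usage sketch (illustrative, mirroring what the transaction code
 * does): on success, gfs2_log_reserve() returns with sd_log_flush_lock held
 * for reading, so it must eventually be paired with gfs2_log_release(),
 * which returns the blocks and drops that lock.
 */
#if 0	/* example only, never compiled */
static int example_reserve_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	int error = gfs2_log_reserve(sdp, blks);

	if (error)
		return error;	/* -EINVAL or -EROFS */
	/* ... build up and commit a transaction here ... */
	gfs2_log_release(sdp, blks);
	return 0;
}
#endif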

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 *   Compute the distance (in the journal direction) between two
 *   blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}
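
/*
 * Worked example (assumed figures): in a 1000-block journal,
 * log_distance(sdp, 10, 990) == (10 - 990) + 1000 == 20; the newer block
 * has wrapped past the end of the journal and sits 20 blocks beyond the
 * older one.
 */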

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file timestamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal.  So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit in a descriptor block.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf;
	unsigned int dbuf;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for header blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}

	if (sdp->sd_log_committed_revoke > 0)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_committed_revoke);
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}
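
/*
 * Worked example (using the 4KiB-block limits quoted above): a transaction
 * with mbuf = 600 metadata buffers and dbuf = 100 journaled data buffers
 * reserves 600 + 100 + DIV_ROUND_UP(600, 502) + DIV_ROUND_UP(100, 251)
 * = 703 blocks, plus gfs2_struct2blk() blocks for any committed revokes,
 * plus one block for the overall header.
 */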

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		tr = list_last_entry(&sdp->sd_ail1_list, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}

void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while(atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_inode *ipa, *ipb;

	ipa = list_entry(a, struct gfs2_inode, i_ordered);
	ipb = list_entry(b, struct gfs2_inode, i_ordered);

	if (ipa->i_no_addr < ipb->i_no_addr)
		return -1;
	if (ipa->i_no_addr > ipb->i_no_addr)
		return 1;
	return 0;
}
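
/*
 * Note: list_sort() in gfs2_ordered_write() below uses ip_cmp() to order
 * inodes by disk address, presumably so that writeback is issued in
 * (mostly) ascending block order and the resulting I/O stays closer to
 * sequential.
 */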

static void __ordered_del_inode(struct gfs2_inode *ip)
{
	if (!list_empty(&ip->i_ordered))
		list_del_init(&ip->i_ordered);
}

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		if (ip->i_inode.i_mapping->nrpages == 0) {
			__ordered_del_inode(ip);
			continue;
		}
		list_move(&ip->i_ordered, &written);
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		__ordered_del_inode(ip);
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	__ordered_del_inode(ip);
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	sdp->sd_log_num_revoke++;
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);
	bh->b_private = NULL;
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_revokes);
}

void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
	if (atomic_dec_return(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		gfs2_glock_queue_put(gl);
	}
}

/**
 * gfs2_write_revokes - Add as many revokes to the system transaction as we can
 * @sdp: The GFS2 superblock
 *
 * Our usual strategy is to defer writing revokes as much as we can in the hope
 * that we'll eventually overwrite the journal, which will make those revokes
 * go away.  This changes when we flush the log: at that point, there will
 * likely be some left-over space in the last revoke block of that transaction.
 * We can fill that space with additional revokes for blocks that have already
 * been written back.  This will basically come at no cost now, and will save
 * us from having to keep track of those blocks on the AIL2 list later.
 */
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	/* number of revokes we still have room for */
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_log_lock(sdp);
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved) {
			atomic_dec(&sdp->sd_log_blks_free);
			trace_gfs2_log_blocks(sdp, -2);
		} else {
			trace_gfs2_log_blocks(sdp, -1);
		}
	}
	gfs2_ail1_empty(sdp, max_revokes);
	gfs2_log_unlock(sdp);

	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved) {
			atomic_inc(&sdp->sd_log_blks_free);
			trace_gfs2_log_blocks(sdp, 2);
		} else {
			trace_gfs2_log_blocks(sdp, 1);
		}
	}
}
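
/*
 * Worked example of the capacity arithmetic above (assuming 4KiB blocks;
 * the exact counts depend on the on-disk struct sizes): the first revoke
 * block is a log descriptor and holds (sb_bsize -
 * sizeof(struct gfs2_log_descriptor)) / sizeof(u64) revokes, roughly 503;
 * every continuation block holds (sb_bsize -
 * sizeof(struct gfs2_meta_header)) / sizeof(u64) more, roughly 509.  The
 * while loop grows max_revokes one continuation block at a time until it
 * covers sd_log_num_revoke.
 */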

/**
 * gfs2_write_log_header - Write a journal log header buffer at lblock
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @lblock: value for lh_blkno (block number relative to start of journal)
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 */

void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
			   u64 seq, u32 tail, u32 lblock, u32 flags,
			   int op_flags)
{
	struct gfs2_log_header *lh;
	u32 hash, crc;
	struct page *page;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct timespec64 tv;
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	if (gfs2_withdrawn(sdp))
		goto out;

	page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	lh = page_address(page);
	clear_page(lh);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(seq);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(lblock);
	hash = ~crc32(~0, lh, LH_V1_SIZE);
	lh->lh_hash = cpu_to_be32(hash);

	ktime_get_coarse_real_ts64(&tv);
	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
	lh->lh_sec = cpu_to_be64(tv.tv_sec);
	if (!list_empty(&jd->extent_list))
		dblock = gfs2_log_bmap(jd, lblock);
	else {
		int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
		if (gfs2_assert_withdraw(sdp, ret == 0))
			return;
	}
	lh->lh_addr = cpu_to_be64(dblock);
	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

	/* We may only write local statfs, quota, etc., when writing to our
	   own journal. The values are left 0 when recovering a journal
	   different from our own. */
	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
		lh->lh_statfs_addr =
			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
		lh->lh_quota_addr =
			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

		spin_lock(&sdp->sd_statfs_spin);
		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
		spin_unlock(&sdp->sd_statfs_spin);
	}

	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
		     sb->s_blocksize - LH_V1_SIZE - 4);
	lh->lh_crc = cpu_to_be32(crc);

	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
out:
	log_flush_wait(sdp);
}
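
/*
 * Checksum layout note (derived from the writer above): lh_hash is a crc32
 * over the first LH_V1_SIZE bytes, computed while the lh_hash field itself
 * is still zero from clear_page(); lh_crc is a crc32c over everything after
 * the 4-byte lh_crc field through the end of the block.  A reader has to
 * reproduce both computations, zeroed fields included, to validate a header.
 */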

/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	unsigned int tail;
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
	tail = current_tail(sdp);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
			      sdp->sd_log_flush_head, flags, op_flags);
	gfs2_log_incr_head(sdp);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}

/**
 * ail_drain - drain the ail lists after a withdraw
 * @sdp: Pointer to GFS2 superblock
 */
static void ail_drain(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;

	spin_lock(&sdp->sd_ail_lock);
	/*
	 * For transactions on the sd_ail1_list we need to drain both the
	 * ail1 and ail2 lists. That's because function gfs2_ail1_start_one
	 * (temporarily) moves items from its tr_ail1 list to tr_ail2 list
	 * before revokes are sent for that block. Items on the sd_ail2_list
	 * should have already gotten beyond that point, so no need.
	 */
	while (!list_empty(&sdp->sd_ail1_list)) {
		tr = list_first_entry(&sdp->sd_ail1_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_trans_free(sdp, tr);
	}
	while (!list_empty(&sdp->sd_ail2_list)) {
		tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_trans_free(sdp, tr);
	}
	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * empty_ail1_list - try to start IO and empty the ail1 list
 * @sdp: Pointer to GFS2 superblock
 */
static void empty_ail1_list(struct gfs2_sbd *sdp)
{
	unsigned long start = jiffies;

	for (;;) {
		if (time_after(jiffies, start + (HZ * 600))) {
			fs_err(sdp, "Error: In %s for 10 minutes! t=%d\n",
			       __func__, current->journal_info ? 1 : 0);
			dump_ail_list(sdp);
			return;
		}
		gfs2_ail1_start(sdp);
		gfs2_ail1_wait(sdp);
		if (gfs2_ail1_empty(sdp, 0))
			return;
	}
}

/**
 * trans_drain - drain the buf and databuf queue for a failed transaction
 * @tr: the transaction to drain
 *
 * When this is called, we're taking an error exit for a log write that
 * failed, but since we bypassed the after_commit functions, we need to
 * remove the items from the buf and databuf queue.
 */
static void trans_drain(struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd;
	struct list_head *head;

	if (!tr)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
	struct gfs2_trans *tr = NULL;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	down_write(&sdp->sd_log_flush_lock);

	/*
	 * Do this check while holding the log_flush_lock to prevent new
	 * buffers from being added to the ail via gfs2_pin()
	 */
	if (gfs2_withdrawn(sdp))
		goto out;

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags))
		goto out;
	trace_gfs2_log_flush(sdp, 1, flags);

	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	tr = sdp->sd_log_tr;
	if (tr) {
		sdp->sd_log_tr = NULL;
		tr->tr_first = sdp->sd_log_flush_head;
		if (unlikely(state == SFS_FROZEN))
			if (gfs2_assert_withdraw_delayed(sdp,
			       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
				goto out_withdraw;
	}

	if (unlikely(state == SFS_FROZEN))
		if (gfs2_assert_withdraw_delayed(sdp, !sdp->sd_log_num_revoke))
			goto out_withdraw;
	if (gfs2_assert_withdraw_delayed(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke))
		goto out_withdraw;

	gfs2_ordered_write(sdp);
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;
	lops_before_commit(sdp, tr);
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_flush_wait(sdp);
		log_write_header(sdp, flags);
	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		log_write_header(sdp, flags);
	}
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_committed_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
		if (!sdp->sd_log_idle) {
			empty_ail1_list(sdp);
			if (gfs2_withdrawn(sdp))
				goto out_withdraw;
			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
			trace_gfs2_log_blocks(sdp, -1);
			log_write_header(sdp, flags);
			sdp->sd_log_head = sdp->sd_log_flush_head;
		}
		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			     GFS2_LOG_HEAD_FLUSH_FREEZE))
			gfs2_log_shutdown(sdp);
		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

out_end:
	trace_gfs2_log_flush(sdp, 0, flags);
out:
	up_write(&sdp->sd_log_flush_lock);
	gfs2_trans_free(sdp, tr);
	if (gfs2_withdrawing(sdp))
		gfs2_withdraw(sdp);
	return;

out_withdraw:
	trans_drain(tr);
	/*
	 * If the tr_list is empty, we're withdrawing during a log
	 * flush that targets a transaction, but the transaction was
	 * never queued onto any of the ail lists. Here we add it to
	 * ail1 just so that ail_drain() will find and free it.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (tr && list_empty(&tr->tr_list))
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);
	ail_drain(sdp); /* frees all transactions */
	tr = NULL;
	goto out_end;
}

/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @sdp: the filesystem
 * @new: New transaction to be merged into the cached sd_log_tr transaction
 */

static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
{
	struct gfs2_trans *old = sdp->sd_log_tr;

	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

	old->tr_num_buf_new	+= new->tr_num_buf_new;
	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
	old->tr_num_revoke	+= new->tr_num_revoke;
	old->tr_num_revoke_rm	+= new->tr_num_revoke_rm;

	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);

	spin_lock(&sdp->sd_ail_lock);
	list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list);
	list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		gfs2_merge_trans(sdp, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
		sdp->sd_log_tr = tr;
		set_bit(TR_ATTACHED, &tr->tr_flags);
	}

	sdp->sd_log_committed_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 2/5ths of journal size, thresh2 is 4/5ths of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}
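
/*
 * Worked example (assumed journal size): with a 10000-block journal,
 * thresh1 = 4000 (2/5ths) and thresh2 = 8000 (4/5ths), so logd is woken
 * once more than 4000 blocks are pinned, or once more than 8000 journal
 * blocks are in use.
 */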

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) +
		atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);

	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
		return 1;

	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	bool did_flush;

	while (!kthread_should_stop()) {

		if (gfs2_withdrawn(sdp)) {
			msleep_interruptible(HZ);
			continue;
		}
		/* Check for errors writing to the journal */
		if (sdp->sd_log_error) {
			gfs2_lm(sdp,
				"GFS2: fsid=%s: error %d: "
				"withdrawing the file system to "
				"prevent further damage.\n",
				sdp->sd_fsname, sdp->sd_log_error);
			gfs2_withdraw(sdp);
			continue;
		}

		did_flush = false;
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_JFLUSH_REQD);
			did_flush = true;
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
			did_flush = true;
		}

		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}