xref: /linux/fs/ext4/fast_commit.c (revision a5210135489ae7bc1ef1cb4a8157361dd7b468cd)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * fs/ext4/fast_commit.c
5  *
6  * Written by Harshad Shirwadkar <harshadshirwadkar@gmail.com>
7  *
8  * Ext4 fast commits routines.
9  */
10 #include "ext4.h"
11 #include "ext4_jbd2.h"
12 #include "ext4_extents.h"
13 #include "mballoc.h"
14 
15 #include <linux/lockdep.h>
16 #include <linux/wait_bit.h>
17 /*
18  * Ext4 Fast Commits
19  * -----------------
20  *
21  * Ext4 fast commits implement fine grained journalling for Ext4.
22  *
23  * Fast commits are organized as a log of tag-length-value (TLV) structs. (See
24  * struct ext4_fc_tl). Each TLV contains some delta that is replayed TLV by
25  * TLV during the recovery phase. For the scenarios for which we currently
26  * don't have replay code, fast commit falls back to a full commit.
27  * Fast commits record deltas in one of the following three categories.
28  *
29  * (A) Directory entry updates:
30  *
31  * - EXT4_FC_TAG_UNLINK		- records directory entry unlink
32  * - EXT4_FC_TAG_LINK		- records directory entry link
33  * - EXT4_FC_TAG_CREAT		- records inode and directory entry creation
34  *
35  * (B) File specific data range updates:
36  *
37  * - EXT4_FC_TAG_ADD_RANGE	- records addition of new blocks to an inode
38  * - EXT4_FC_TAG_DEL_RANGE	- records deletion of blocks from an inode
39  *
40  * (C) Inode metadata (mtime / ctime etc):
41  *
42  * - EXT4_FC_TAG_INODE		- record the inode that should be replayed
43  *				  during recovery. Note that iblocks field is
44  *				  not replayed and instead derived during
45  *				  replay.
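 *
 * For reference, each TLV is laid out on disk as a small header followed
 * by fc_len bytes of value (a sketch based on struct ext4_fc_tl from
 * fast_commit.h; fc_len does not count the header itself):
 *
 *	struct ext4_fc_tl {
 *		__le16 fc_tag;	- one of the EXT4_FC_TAG_* values above
 *		__le16 fc_len;	- length of the value that follows
 *	};
 *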
46  * Commit Operation
47  * ----------------
48  * With fast commits, we maintain all the directory entry operations in the
49  * order in which they are issued in an in-memory queue. This queue is flushed
50  * to disk during the commit operation. We also maintain a list of inodes
51  * that need to be committed during a fast commit in another in memory queue of
52  * inodes. During the commit operation, we commit in the following order:
53  *
54  * [1] Prepare all the inodes to write out their data by setting
55  *     "EXT4_STATE_FC_FLUSHING_DATA". This ensures that the inode cannot be
56  *     deleted while it is being flushed.
57  * [2] Flush data buffers to disk and clear "EXT4_STATE_FC_FLUSHING_DATA"
58  *     state.
59  * [3] Lock the journal by calling jbd2_journal_lock_updates. This ensures that
60  *     all the existing handles finish and no new handles can start.
61  * [4] Mark all the fast commit eligible inodes as undergoing fast commit
62  *     by setting "EXT4_STATE_FC_COMMITTING" state.
63  * [5] Unlock the journal by calling jbd2_journal_unlock_updates. This allows
64  *     starting of new handles. If new handles try to start an update on
65  *     any of the inodes that are being committed, ext4_fc_track_inode()
66  *     will block until those inodes have finished the fast commit.
67  * [6] Commit all the directory entry updates in the fast commit space.
68  * [7] Commit all the changed inodes in the fast commit space and clear
69  *     "EXT4_STATE_FC_COMMITTING" for these inodes.
70  * [8] Write tail tag (this tag ensures the atomicity, please read the following
71  *     section for more details).
72  *
73  * All the inode updates must be enclosed within jbd2_journal_start()
74  * and jbd2_journal_stop() similar to JBD2 journaling.
75  *
76  * Fast Commit Ineligibility
77  * -------------------------
78  *
79  * Not all operations are supported by fast commits today (e.g. extended
80  * attributes). Fast commit ineligibility is marked by calling
81  * ext4_fc_mark_ineligible(): this makes the next fast commit operation
82  * fall back to a full commit.
83  *
84  * Atomicity of commits
85  * --------------------
86  * In order to guarantee atomicity during the commit operation, fast commit
87  * uses "EXT4_FC_TAG_TAIL" tag that marks a fast commit as complete. Tail
88  * tag contains CRC of the contents and TID of the transaction after which
89  * this fast commit should be applied. Recovery code replays fast commit
90  * logs only if there's at least 1 valid tail present. For every fast commit
91  * operation, there is 1 tail. This means we may end up with multiple tails
92  * in the fast commit space. Here's an example:
93  *
94  * - Create a new file A and remove existing file B
95  * - fsync()
96  * - Append contents to file A
97  * - Truncate file A
98  * - fsync()
99  *
100  * The fast commit space at the end of above operations would look like this:
101  *      [HEAD] [CREAT A] [UNLINK B] [TAIL] [ADD_RANGE A] [DEL_RANGE A] [TAIL]
102  *             |<---  Fast Commit 1   --->|<---      Fast Commit 2     ---->|
103  *
104  * Replay code should thus check for all the valid tails in the FC area.
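 *
 * For reference, the tail value itself is small (a sketch of struct
 * ext4_fc_tail as written by ext4_fc_write_tail() below):
 *
 *	struct ext4_fc_tail {
 *		__le32 fc_tid;	- TID of the transaction this fast commit
 *				  belongs to
 *		__le32 fc_crc;	- CRC of the fast commit contents up to and
 *				  including fc_tid
 *	};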
105  *
106  * Fast Commit Replay Idempotence
107  * ------------------------------
108  *
109  * Fast commit tags are idempotent in nature provided the recovery code follows
110  * certain rules. The guiding principle that the commit path follows while
111  * committing is that it stores the result of a particular operation instead of
112  * storing the procedure.
113  *
114  * Let's consider this rename operation: 'mv /a /b'. Let's assume dirent '/a'
115  * was associated with inode 10. During fast commit, instead of storing this
116  * operation as a procedure "rename a to b", we store the resulting file system
117  * state as a "series" of outcomes:
118  *
119  * - Link dirent b to inode 10
120  * - Unlink dirent a
121  * - Inode <10> with valid refcount
122  *
123  * Now when the recovery code runs, it needs to "enforce" this state on the file
124  * system. This is what guarantees idempotence of fast commit replay.
125  *
126  * Let's take an example of a procedure that is not idempotent and see how fast
127  * commits make it idempotent. Consider the following sequence of operations:
128  *
129  *     rm A;    mv B A;    read A
130  *  (x)     (y)        (z)
131  *
132  * (x), (y) and (z) are the points at which we can crash. If we store this
133  * sequence of operations as is then the replay is not idempotent. Let's say
134  * while in replay, we crash at (z). During the second replay, file A (which was
135  * actually created as a result of "mv B A" operation) would get deleted. Thus,
136  * file named A would be absent when we try to read A. So, this sequence of
137  * operations is not idempotent. However, as mentioned above, instead of storing
138  * the procedure fast commits store the outcome of each procedure. Thus the fast
139  * commit log for above procedure would be as follows:
140  *
141  * (Let's assume dirent A was linked to inode 10 and dirent B was linked to
142  * inode 11 before the replay)
143  *
144  *    [Unlink A]   [Link A to inode 11]   [Unlink B]   [Inode 11]
145  * (w)          (x)                    (y)          (z)
146  *
147  * If we crash at (z), we will have file A linked to inode 11. During the second
148  * replay, we will remove file A (inode 11). But we will create it back and make
149  * it point to inode 11. We won't find B, so we'll just skip that step. At this
150  * point, the refcount for inode 11 is not reliable, but that gets fixed by the
151  * replay of last inode 11 tag. Crashes at points (w), (x) and (y) get handled
152  * similarly. Thus, by converting a non-idempotent procedure into a series of
153  * idempotent outcomes, fast commits ensure idempotence during replay.
154  *
155  * Locking
156  * -------
157  * sbi->s_fc_lock protects the fast commit inodes queue and the fast commit
158  * dentry queue. ei->i_fc_lock protects the fast commit related info in a given
159  * inode. Most of the code avoids acquiring both the locks, but if one must do
160  * that then sbi->s_fc_lock must be acquired before ei->i_fc_lock.
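 *
 * For example, a path that needs both locks nests them like this (a
 * sketch; ext4_fc_lock()/ext4_fc_unlock() wrap sbi->s_fc_lock):
 *
 *	alloc_ctx = ext4_fc_lock(sb);		- sbi->s_fc_lock first
 *	spin_lock(&ei->i_fc_lock);		- then the per-inode lock
 *	... update per-inode fast commit state ...
 *	spin_unlock(&ei->i_fc_lock);
 *	ext4_fc_unlock(sb, alloc_ctx);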
161  *
162  * TODOs
163  * -----
164  *
165  * 0) Fast commit replay path hardening: Fast commit replay code should use
166  *    journal handles to make sure all the updates it does during the replay
167  *    path are atomic. With that, if we crash during fast commit replay, a
168  *    second recovery attempt will find a file system where the fast commit
169  *    area is invalid (because a new full commit would be found). In order to deal
170  *    with that, fast commit replay code should ensure that the "FC_REPLAY"
171  *    superblock state is persisted before starting the replay, so that after
172  *    the crash, fast commit recovery code can look at that flag and perform
173  *    fast commit recovery even if that area is invalidated by later full
174  *    commits.
175  *
176  * 1) Handle more ineligible cases.
177  *
178  * 2) Change ext4_fc_commit() to lookup logical to physical mapping using extent
179  *    status tree. This would get rid of the need to call ext4_fc_track_inode()
180  *    before acquiring i_data_sem. To do that we would need to ensure that
181  *    modified extents from the extent status tree are not evicted from memory.
182  */
183 
184 #include <trace/events/ext4.h>
185 static struct kmem_cache *ext4_fc_dentry_cachep;
186 
187 static void ext4_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
188 {
189 	BUFFER_TRACE(bh, "");
190 	if (uptodate) {
191 		ext4_debug("%s: Block %lld up-to-date",
192 			   __func__, bh->b_blocknr);
193 		set_buffer_uptodate(bh);
194 	} else {
195 		ext4_debug("%s: Block %lld not up-to-date",
196 			   __func__, bh->b_blocknr);
197 		clear_buffer_uptodate(bh);
198 	}
199 
200 	unlock_buffer(bh);
201 }
202 
203 static inline void ext4_fc_reset_inode(struct inode *inode)
204 {
205 	struct ext4_inode_info *ei = EXT4_I(inode);
206 
207 	ei->i_fc_lblk_start = 0;
208 	ei->i_fc_lblk_len = 0;
209 }
210 
211 void ext4_fc_init_inode(struct inode *inode)
212 {
213 	struct ext4_inode_info *ei = EXT4_I(inode);
214 
215 	ext4_fc_reset_inode(inode);
216 	ext4_clear_inode_state(inode, EXT4_STATE_FC_COMMITTING);
217 	INIT_LIST_HEAD(&ei->i_fc_list);
218 	INIT_LIST_HEAD(&ei->i_fc_dilist);
219 }
220 
221 static bool ext4_fc_disabled(struct super_block *sb)
222 {
223 	return (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
224 		(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY));
225 }
226 
227 static bool ext4_fc_eligible(struct super_block *sb)
228 {
229 	return !ext4_fc_disabled(sb) &&
230 		!(ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE));
231 }
232 
233 /*
234  * Remove the inode from the fast commit list. If the inode is being
235  * committed, we wait until the inode commit is done.
236  */
237 void ext4_fc_del(struct inode *inode)
238 {
239 	struct ext4_inode_info *ei = EXT4_I(inode);
240 	struct ext4_fc_dentry_update *fc_dentry;
241 	wait_queue_head_t *wq;
242 	int alloc_ctx;
243 
244 	if (ext4_fc_disabled(inode->i_sb))
245 		return;
246 
247 	alloc_ctx = ext4_fc_lock(inode->i_sb);
248 	if (list_empty(&ei->i_fc_list) && list_empty(&ei->i_fc_dilist)) {
249 		ext4_fc_unlock(inode->i_sb, alloc_ctx);
250 		return;
251 	}
252 
253 	/*
254 	 * Since ext4_fc_del is called from ext4_evict_inode while having a
255 	 * handle open, there is no need for us to wait here even if a fast
256 	 * commit is going on. That is because, if this inode is being
257 	 * committed, ext4_mark_inode_dirty would have waited for inode commit
258 	 * operation to finish before we come here. So, by the time we come
259 	 * here, inode's EXT4_STATE_FC_COMMITTING would have been cleared. So,
260 	 * we shouldn't see EXT4_STATE_FC_COMMITTING to be set on this inode
261 	 * here.
262 	 *
263 	 * We may come here without any handles open in the "no_delete" case of
264 	 * ext4_evict_inode as well. However, if that happens, we first mark the
265 	 * file system as fast commit ineligible anyway. So, even in that case,
266 	 * it is okay to remove the inode from the fc list.
267 	 */
268 	WARN_ON(ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)
269 		&& !ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE));
270 	while (ext4_test_inode_state(inode, EXT4_STATE_FC_FLUSHING_DATA)) {
271 #if (BITS_PER_LONG < 64)
272 		DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
273 				EXT4_STATE_FC_FLUSHING_DATA);
274 		wq = bit_waitqueue(&ei->i_state_flags,
275 				   EXT4_STATE_FC_FLUSHING_DATA);
276 #else
277 		DEFINE_WAIT_BIT(wait, &ei->i_flags,
278 				EXT4_STATE_FC_FLUSHING_DATA);
279 		wq = bit_waitqueue(&ei->i_flags,
280 				   EXT4_STATE_FC_FLUSHING_DATA);
281 #endif
282 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
283 		if (ext4_test_inode_state(inode, EXT4_STATE_FC_FLUSHING_DATA)) {
284 			ext4_fc_unlock(inode->i_sb, alloc_ctx);
285 			schedule();
286 			alloc_ctx = ext4_fc_lock(inode->i_sb);
287 		}
288 		finish_wait(wq, &wait.wq_entry);
289 	}
290 	list_del_init(&ei->i_fc_list);
291 
292 	/*
293 	 * Since this inode is getting removed, let's also remove all FC
294 	 * dentry create references, since there is no need to log them anyway.
295 	 */
296 	if (list_empty(&ei->i_fc_dilist)) {
297 		ext4_fc_unlock(inode->i_sb, alloc_ctx);
298 		return;
299 	}
300 
301 	fc_dentry = list_first_entry(&ei->i_fc_dilist, struct ext4_fc_dentry_update, fcd_dilist);
302 	WARN_ON(fc_dentry->fcd_op != EXT4_FC_TAG_CREAT);
303 	list_del_init(&fc_dentry->fcd_list);
304 	list_del_init(&fc_dentry->fcd_dilist);
305 
306 	WARN_ON(!list_empty(&ei->i_fc_dilist));
307 	ext4_fc_unlock(inode->i_sb, alloc_ctx);
308 
309 	release_dentry_name_snapshot(&fc_dentry->fcd_name);
310 	kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
311 }
312 
313 /*
314  * Mark file system as fast commit ineligible, and record latest
315  * ineligible transaction tid. This means that until the recorded
316  * transaction, a commit operation will result in a full jbd2 commit.
317  */
318 void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle)
319 {
320 	struct ext4_sb_info *sbi = EXT4_SB(sb);
321 	tid_t tid;
322 	bool has_transaction = true;
323 	bool is_ineligible;
324 	int alloc_ctx;
325 
326 	if (ext4_fc_disabled(sb))
327 		return;
328 
329 	if (!IS_ERR_OR_NULL(handle))
330 		tid = handle->h_transaction->t_tid;
331 	else {
332 		read_lock(&sbi->s_journal->j_state_lock);
333 		if (sbi->s_journal->j_running_transaction)
334 			tid = sbi->s_journal->j_running_transaction->t_tid;
335 		else
336 			has_transaction = false;
337 		read_unlock(&sbi->s_journal->j_state_lock);
338 	}
339 	alloc_ctx = ext4_fc_lock(sb);
340 	is_ineligible = ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
341 	if (has_transaction && (!is_ineligible || tid_gt(tid, sbi->s_fc_ineligible_tid)))
342 		sbi->s_fc_ineligible_tid = tid;
343 	ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
344 	ext4_fc_unlock(sb, alloc_ctx);
345 	WARN_ON(reason >= EXT4_FC_REASON_MAX);
346 	sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
347 }
348 
349 /*
350  * Generic fast commit tracking function. If this is the first time we are
351  * called after a full commit, we initialize fast commit fields and then call
352  * __fc_track_fn() with update = 0. If we have already been called after a full
353  * commit, we pass update = 1. Based on that, the track function can determine
354  * if it needs to track a field for the first time or if it needs to just
355  * update the previously tracked value.
356  *
357  * If enqueue is set, this function enqueues the inode in fast commit list.
358  */
359 static int ext4_fc_track_template(
360 	handle_t *handle, struct inode *inode,
361 	int (*__fc_track_fn)(handle_t *handle, struct inode *, void *, bool),
362 	void *args, int enqueue)
363 {
364 	bool update = false;
365 	struct ext4_inode_info *ei = EXT4_I(inode);
366 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
367 	tid_t tid = 0;
368 	int alloc_ctx;
369 	int ret;
370 
371 	tid = handle->h_transaction->t_tid;
372 	spin_lock(&ei->i_fc_lock);
373 	if (tid == ei->i_sync_tid) {
374 		update = true;
375 	} else {
376 		ext4_fc_reset_inode(inode);
377 		ei->i_sync_tid = tid;
378 	}
379 	ret = __fc_track_fn(handle, inode, args, update);
380 	spin_unlock(&ei->i_fc_lock);
381 	if (!enqueue)
382 		return ret;
383 
384 	alloc_ctx = ext4_fc_lock(inode->i_sb);
385 	if (list_empty(&EXT4_I(inode)->i_fc_list))
386 		list_add_tail(&EXT4_I(inode)->i_fc_list,
387 				(sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
388 				 sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING) ?
389 				&sbi->s_fc_q[FC_Q_STAGING] :
390 				&sbi->s_fc_q[FC_Q_MAIN]);
391 	ext4_fc_unlock(inode->i_sb, alloc_ctx);
392 
393 	return ret;
394 }
395 
396 struct __track_dentry_update_args {
397 	struct dentry *dentry;
398 	int op;
399 };
400 
401 /* __track_fn for directory entry updates. Called with ei->i_fc_lock. */
402 static int __track_dentry_update(handle_t *handle, struct inode *inode,
403 				 void *arg, bool update)
404 {
405 	struct ext4_fc_dentry_update *node;
406 	struct ext4_inode_info *ei = EXT4_I(inode);
407 	struct __track_dentry_update_args *dentry_update =
408 		(struct __track_dentry_update_args *)arg;
409 	struct dentry *dentry = dentry_update->dentry;
410 	struct inode *dir = dentry->d_parent->d_inode;
411 	struct super_block *sb = inode->i_sb;
412 	struct ext4_sb_info *sbi = EXT4_SB(sb);
413 	int alloc_ctx;
414 
415 	spin_unlock(&ei->i_fc_lock);
416 
417 	if (IS_ENCRYPTED(dir)) {
418 		ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_ENCRYPTED_FILENAME,
419 					handle);
420 		spin_lock(&ei->i_fc_lock);
421 		return -EOPNOTSUPP;
422 	}
423 
424 	node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS);
425 	if (!node) {
426 		ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, handle);
427 		spin_lock(&ei->i_fc_lock);
428 		return -ENOMEM;
429 	}
430 
431 	node->fcd_op = dentry_update->op;
432 	node->fcd_parent = dir->i_ino;
433 	node->fcd_ino = inode->i_ino;
434 	take_dentry_name_snapshot(&node->fcd_name, dentry);
435 	INIT_LIST_HEAD(&node->fcd_dilist);
436 	INIT_LIST_HEAD(&node->fcd_list);
437 	alloc_ctx = ext4_fc_lock(sb);
438 	if (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
439 		sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING)
440 		list_add_tail(&node->fcd_list,
441 				&sbi->s_fc_dentry_q[FC_Q_STAGING]);
442 	else
443 		list_add_tail(&node->fcd_list, &sbi->s_fc_dentry_q[FC_Q_MAIN]);
444 
445 	/*
446 	 * This helps us keep track of all the fc_dentry updates that are part
447 	 * of this ext4 inode. So if the inode is unlinked before we even get a
448 	 * chance to fsync, we can remove all fc_dentry references while
449 	 * evicting the inode in ext4_fc_del().
450 	 * This also spares us from looping over all the inodes in
451 	 * sbi->s_fc_q to find the corresponding inode in
452 	 * ext4_fc_commit_dentry_updates().
453 	 */
454 	if (dentry_update->op == EXT4_FC_TAG_CREAT) {
455 		WARN_ON(!list_empty(&ei->i_fc_dilist));
456 		list_add_tail(&node->fcd_dilist, &ei->i_fc_dilist);
457 	}
458 	ext4_fc_unlock(sb, alloc_ctx);
459 	spin_lock(&ei->i_fc_lock);
460 
461 	return 0;
462 }
463 
464 void __ext4_fc_track_unlink(handle_t *handle,
465 		struct inode *inode, struct dentry *dentry)
466 {
467 	struct __track_dentry_update_args args;
468 	int ret;
469 
470 	args.dentry = dentry;
471 	args.op = EXT4_FC_TAG_UNLINK;
472 
473 	ret = ext4_fc_track_template(handle, inode, __track_dentry_update,
474 					(void *)&args, 0);
475 	trace_ext4_fc_track_unlink(handle, inode, dentry, ret);
476 }
477 
478 void ext4_fc_track_unlink(handle_t *handle, struct dentry *dentry)
479 {
480 	struct inode *inode = d_inode(dentry);
481 
482 	if (ext4_fc_eligible(inode->i_sb))
483 		__ext4_fc_track_unlink(handle, inode, dentry);
484 }
485 
486 void __ext4_fc_track_link(handle_t *handle,
487 	struct inode *inode, struct dentry *dentry)
488 {
489 	struct __track_dentry_update_args args;
490 	int ret;
491 
492 	args.dentry = dentry;
493 	args.op = EXT4_FC_TAG_LINK;
494 
495 	ret = ext4_fc_track_template(handle, inode, __track_dentry_update,
496 					(void *)&args, 0);
497 	trace_ext4_fc_track_link(handle, inode, dentry, ret);
498 }
499 
500 void ext4_fc_track_link(handle_t *handle, struct inode *inode,
501 			struct dentry *dentry)
502 {
503 	if (ext4_fc_eligible(inode->i_sb))
504 		__ext4_fc_track_link(handle, inode, dentry);
505 }
506 
507 void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
508 			  struct dentry *dentry)
509 {
510 	struct __track_dentry_update_args args;
511 	int ret;
512 
513 	args.dentry = dentry;
514 	args.op = EXT4_FC_TAG_CREAT;
515 
516 	ret = ext4_fc_track_template(handle, inode, __track_dentry_update,
517 					(void *)&args, 0);
518 	trace_ext4_fc_track_create(handle, inode, dentry, ret);
519 }
520 
521 void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
522 {
523 	struct inode *inode = d_inode(dentry);
524 
525 	if (ext4_fc_eligible(inode->i_sb))
526 		__ext4_fc_track_create(handle, inode, dentry);
527 }
528 
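/*
 * Typical call sites (a sketch; the actual hooks live in namei.c and
 * friends): ext4_add_nondir() invokes ext4_fc_track_create() after
 * wiring up a new dentry, while ext4_unlink() and ext4_link() invoke
 * the unlink/link trackers under an open handle.
 */
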
529 /* __track_fn for inode tracking */
530 static int __track_inode(handle_t *handle, struct inode *inode, void *arg,
531 			 bool update)
532 {
533 	if (update)
534 		return -EEXIST;
535 
536 	EXT4_I(inode)->i_fc_lblk_len = 0;
537 
538 	return 0;
539 }
540 
541 void ext4_fc_track_inode(handle_t *handle, struct inode *inode)
542 {
543 	struct ext4_inode_info *ei = EXT4_I(inode);
544 	wait_queue_head_t *wq;
545 	int ret;
546 
547 	if (S_ISDIR(inode->i_mode))
548 		return;
549 
550 	if (ext4_should_journal_data(inode)) {
551 		ext4_fc_mark_ineligible(inode->i_sb,
552 					EXT4_FC_REASON_INODE_JOURNAL_DATA, handle);
553 		return;
554 	}
555 
556 	if (!ext4_fc_eligible(inode->i_sb))
557 		return;
558 
559 	/*
560 	 * If we come here, we may sleep while waiting for the inode to
561 	 * commit. We shouldn't be holding i_data_sem when we go to sleep since
562 	 * the commit path needs to grab the lock while committing the inode.
563 	 */
564 	lockdep_assert_not_held(&ei->i_data_sem);
565 
566 	while (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
567 #if (BITS_PER_LONG < 64)
568 		DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
569 				EXT4_STATE_FC_COMMITTING);
570 		wq = bit_waitqueue(&ei->i_state_flags,
571 				   EXT4_STATE_FC_COMMITTING);
572 #else
573 		DEFINE_WAIT_BIT(wait, &ei->i_flags,
574 				EXT4_STATE_FC_COMMITTING);
575 		wq = bit_waitqueue(&ei->i_flags,
576 				   EXT4_STATE_FC_COMMITTING);
577 #endif
578 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
579 		if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING))
580 			schedule();
581 		finish_wait(wq, &wait.wq_entry);
582 	}
583 
584 	/*
585 	 * From this point on, this inode will not be committed either
586 	 * by fast or full commit as long as the handle is open.
587 	 */
588 	ret = ext4_fc_track_template(handle, inode, __track_inode, NULL, 1);
589 	trace_ext4_fc_track_inode(handle, inode, ret);
590 }
591 
592 struct __track_range_args {
593 	ext4_lblk_t start, end;
594 };
595 
596 /* __track_fn for tracking data updates */
597 static int __track_range(handle_t *handle, struct inode *inode, void *arg,
598 			 bool update)
599 {
600 	struct ext4_inode_info *ei = EXT4_I(inode);
601 	ext4_lblk_t oldstart;
602 	struct __track_range_args *__arg =
603 		(struct __track_range_args *)arg;
604 
605 	if (inode->i_ino < EXT4_FIRST_INO(inode->i_sb)) {
606 		ext4_debug("Special inode %llu being modified\n", inode->i_ino);
607 		return -ECANCELED;
608 	}
609 
610 	oldstart = ei->i_fc_lblk_start;
611 
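	/*
	 * On update, grow the tracked range to the union of the old range
	 * [oldstart, oldstart + i_fc_lblk_len - 1] and [start, end];
	 * otherwise start tracking [start, end] fresh. i_fc_lblk_len is an
	 * inclusive block count.
	 */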
612 	if (update && ei->i_fc_lblk_len > 0) {
613 		ei->i_fc_lblk_start = min(ei->i_fc_lblk_start, __arg->start);
614 		ei->i_fc_lblk_len =
615 			max(oldstart + ei->i_fc_lblk_len - 1, __arg->end) -
616 				ei->i_fc_lblk_start + 1;
617 	} else {
618 		ei->i_fc_lblk_start = __arg->start;
619 		ei->i_fc_lblk_len = __arg->end - __arg->start + 1;
620 	}
621 
622 	return 0;
623 }
624 
625 void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t start,
626 			 ext4_lblk_t end)
627 {
628 	struct __track_range_args args;
629 	int ret;
630 
631 	if (S_ISDIR(inode->i_mode))
632 		return;
633 
634 	if (!ext4_fc_eligible(inode->i_sb))
635 		return;
636 
637 	if (ext4_has_inline_data(inode)) {
638 		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR,
639 					handle);
640 		return;
641 	}
642 
643 	args.start = start;
644 	args.end = end;
645 
646 	ret = ext4_fc_track_template(handle, inode,  __track_range, &args, 1);
647 
648 	trace_ext4_fc_track_range(handle, inode, start, end, ret);
649 }
650 
651 static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail)
652 {
653 	blk_opf_t write_flags = JBD2_JOURNAL_REQ_FLAGS;
654 	struct buffer_head *bh = EXT4_SB(sb)->s_fc_bh;
655 
656 	/* Add REQ_FUA | REQ_PREFLUSH only if this is the tail block */
657 	if (test_opt(sb, BARRIER) && is_tail)
658 		write_flags |= REQ_FUA | REQ_PREFLUSH;
659 	lock_buffer(bh);
660 	set_buffer_dirty(bh);
661 	set_buffer_uptodate(bh);
662 	bh->b_end_io = ext4_end_buffer_io_sync;
663 	submit_bh(REQ_OP_WRITE | write_flags, bh);
664 	EXT4_SB(sb)->s_fc_bh = NULL;
665 }
666 
667 /* Ext4 commit path routines */
668 
669 /*
670  * Allocate len bytes on a fast commit buffer.
671  *
672  * During the commit time this function is used to manage fast commit
673  * block space. We don't split a fast commit log onto different
674  * blocks. So this function makes sure that if there's not enough space
675  * on the current block, the remaining space in the current block is
676  * marked as unused by adding an EXT4_FC_TAG_PAD tag. In that case, a
677  * new block is requested from jbd2 and the CRC is updated to reflect
678  * the padding we added.
679  */
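/*
 * For example (a sketch, assuming a 4096-byte journal block and the
 * 4-byte tag header of struct ext4_fc_tl): with off = 4080, at most 12
 * value bytes fit behind a header, so a request for 100 bytes writes a
 * PAD tlv with fc_len = 12 that zero-fills the rest of this block, and
 * the 100 bytes are then served from the start of a fresh jbd2 block.
 */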
680 static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc)
681 {
682 	struct ext4_fc_tl tl;
683 	struct ext4_sb_info *sbi = EXT4_SB(sb);
684 	struct buffer_head *bh;
685 	int bsize = sbi->s_journal->j_blocksize;
686 	int ret, off = sbi->s_fc_bytes % bsize;
687 	int remaining;
688 	u8 *dst;
689 
690 	/*
691 	 * If 'len' is too long to fit in any block alongside a PAD tlv, then we
692 	 * cannot fulfill the request.
693 	 */
694 	if (len > bsize - EXT4_FC_TAG_BASE_LEN)
695 		return NULL;
696 
697 	if (!sbi->s_fc_bh) {
698 		ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
699 		if (ret)
700 			return NULL;
701 		sbi->s_fc_bh = bh;
702 	}
703 	dst = sbi->s_fc_bh->b_data + off;
704 
705 	/*
706 	 * Allocate the bytes in the current block if we can do so while still
707 	 * leaving enough space for a PAD tlv.
708 	 */
709 	remaining = bsize - EXT4_FC_TAG_BASE_LEN - off;
710 	if (len <= remaining) {
711 		sbi->s_fc_bytes += len;
712 		return dst;
713 	}
714 
715 	/*
716 	 * Else, terminate the current block with a PAD tlv, then allocate a new
717 	 * block and allocate the bytes at the start of that new block.
718 	 */
719 
720 	tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD);
721 	tl.fc_len = cpu_to_le16(remaining);
722 	memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);
723 	memset(dst + EXT4_FC_TAG_BASE_LEN, 0, remaining);
724 	*crc = ext4_chksum(*crc, sbi->s_fc_bh->b_data, bsize);
725 
726 	ext4_fc_submit_bh(sb, false);
727 
728 	ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
729 	if (ret)
730 		return NULL;
731 	sbi->s_fc_bh = bh;
732 	sbi->s_fc_bytes += bsize - off + len;
733 	return sbi->s_fc_bh->b_data;
734 }
735 
736 /*
737  * Complete a fast commit by writing tail tag.
738  *
739  * Writing tail tag marks the end of a fast commit. In order to guarantee
740  * atomicity, after writing tail tag, even if there's space remaining
741  * in the block, the next commit shouldn't use it. That's why the tail
742  * tag's length is set to cover the remaining space on the block.
743  */
744 static int ext4_fc_write_tail(struct super_block *sb, u32 crc)
745 {
746 	struct ext4_sb_info *sbi = EXT4_SB(sb);
747 	struct ext4_fc_tl tl;
748 	struct ext4_fc_tail tail;
749 	int off, bsize = sbi->s_journal->j_blocksize;
750 	u8 *dst;
751 
752 	/*
753 	 * ext4_fc_reserve_space takes care of allocating an extra block if
754 	 * there's not enough space on this block to accommodate this tail.
755 	 */
756 	dst = ext4_fc_reserve_space(sb, EXT4_FC_TAG_BASE_LEN + sizeof(tail), &crc);
757 	if (!dst)
758 		return -ENOSPC;
759 
760 	off = sbi->s_fc_bytes % bsize;
761 
762 	tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_TAIL);
763 	tl.fc_len = cpu_to_le16(bsize - off + sizeof(struct ext4_fc_tail));
764 	sbi->s_fc_bytes = round_up(sbi->s_fc_bytes, bsize);
765 
766 	memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);
767 	dst += EXT4_FC_TAG_BASE_LEN;
768 	tail.fc_tid = cpu_to_le32(sbi->s_journal->j_running_transaction->t_tid);
769 	memcpy(dst, &tail.fc_tid, sizeof(tail.fc_tid));
770 	dst += sizeof(tail.fc_tid);
771 	crc = ext4_chksum(crc, sbi->s_fc_bh->b_data,
772 			  dst - (u8 *)sbi->s_fc_bh->b_data);
773 	tail.fc_crc = cpu_to_le32(crc);
774 	memcpy(dst, &tail.fc_crc, sizeof(tail.fc_crc));
775 	dst += sizeof(tail.fc_crc);
776 	memset(dst, 0, bsize - off); /* Don't leak uninitialized memory. */
777 
778 	ext4_fc_submit_bh(sb, true);
779 
780 	return 0;
781 }
782 
783 /*
784  * Adds tag, length, value and updates CRC. Returns true if tlv was added.
785  * Returns false if there's not enough space.
786  */
787 static bool ext4_fc_add_tlv(struct super_block *sb, u16 tag, u16 len, u8 *val,
788 			   u32 *crc)
789 {
790 	struct ext4_fc_tl tl;
791 	u8 *dst;
792 
793 	dst = ext4_fc_reserve_space(sb, EXT4_FC_TAG_BASE_LEN + len, crc);
794 	if (!dst)
795 		return false;
796 
797 	tl.fc_tag = cpu_to_le16(tag);
798 	tl.fc_len = cpu_to_le16(len);
799 
800 	memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);
801 	memcpy(dst + EXT4_FC_TAG_BASE_LEN, val, len);
802 
803 	return true;
804 }
805 
806 /* Same as above, but adds dentry tlv. */
807 static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u32 *crc,
808 				   struct ext4_fc_dentry_update *fc_dentry)
809 {
810 	struct ext4_fc_dentry_info fcd;
811 	struct ext4_fc_tl tl;
812 	int dlen = fc_dentry->fcd_name.name.len;
813 	u8 *dst = ext4_fc_reserve_space(sb,
814 			EXT4_FC_TAG_BASE_LEN + sizeof(fcd) + dlen, crc);
815 
816 	if (!dst)
817 		return false;
818 
819 	fcd.fc_parent_ino = cpu_to_le32(fc_dentry->fcd_parent);
820 	fcd.fc_ino = cpu_to_le32(fc_dentry->fcd_ino);
821 	tl.fc_tag = cpu_to_le16(fc_dentry->fcd_op);
822 	tl.fc_len = cpu_to_le16(sizeof(fcd) + dlen);
823 	memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);
824 	dst += EXT4_FC_TAG_BASE_LEN;
825 	memcpy(dst, &fcd, sizeof(fcd));
826 	dst += sizeof(fcd);
827 	memcpy(dst, fc_dentry->fcd_name.name.name, dlen);
828 
829 	return true;
830 }
831 
832 /*
833  * Writes the inode in the fast commit space as an EXT4_FC_TAG_INODE TLV.
834  * Returns 0 on success, error on failure.
835  */
836 static int ext4_fc_write_inode(struct inode *inode, u32 *crc)
837 {
838 	struct ext4_inode_info *ei = EXT4_I(inode);
839 	int inode_len = EXT4_GOOD_OLD_INODE_SIZE;
840 	int ret;
841 	struct ext4_iloc iloc;
842 	struct ext4_fc_inode fc_inode;
843 	struct ext4_fc_tl tl;
844 	u8 *dst;
845 
846 	ret = ext4_get_inode_loc(inode, &iloc);
847 	if (ret)
848 		return ret;
849 
850 	if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
851 		inode_len = EXT4_INODE_SIZE(inode->i_sb);
852 	else if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE)
853 		inode_len += ei->i_extra_isize;
854 
855 	fc_inode.fc_ino = cpu_to_le32(inode->i_ino);
856 	tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_INODE);
857 	tl.fc_len = cpu_to_le16(inode_len + sizeof(fc_inode.fc_ino));
858 
859 	ret = -ECANCELED;
860 	dst = ext4_fc_reserve_space(inode->i_sb,
861 		EXT4_FC_TAG_BASE_LEN + inode_len + sizeof(fc_inode.fc_ino), crc);
862 	if (!dst)
863 		goto err;
864 
865 	memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);
866 	dst += EXT4_FC_TAG_BASE_LEN;
867 	memcpy(dst, &fc_inode, sizeof(fc_inode));
868 	dst += sizeof(fc_inode);
869 	memcpy(dst, (u8 *)ext4_raw_inode(&iloc), inode_len);
870 	ret = 0;
871 err:
872 	brelse(iloc.bh);
873 	return ret;
874 }
875 
876 /*
877  * Writes updated data ranges for the inode in question. Updates CRC.
878  * Returns 0 on success, error otherwise.
879  */
880 static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
881 {
882 	ext4_lblk_t old_blk_size, cur_lblk_off, new_blk_size;
883 	struct ext4_inode_info *ei = EXT4_I(inode);
884 	struct ext4_map_blocks map;
885 	struct ext4_fc_add_range fc_ext;
886 	struct ext4_fc_del_range lrange;
887 	struct ext4_extent *ex;
888 	int ret;
889 
890 	spin_lock(&ei->i_fc_lock);
891 	if (ei->i_fc_lblk_len == 0) {
892 		spin_unlock(&ei->i_fc_lock);
893 		return 0;
894 	}
895 	old_blk_size = ei->i_fc_lblk_start;
896 	new_blk_size = ei->i_fc_lblk_start + ei->i_fc_lblk_len - 1;
897 	ei->i_fc_lblk_len = 0;
898 	spin_unlock(&ei->i_fc_lock);
899 
900 	cur_lblk_off = old_blk_size;
901 	ext4_debug("will try writing %d to %d for inode %llu\n",
902 		   cur_lblk_off, new_blk_size, inode->i_ino);
903 
904 	while (cur_lblk_off <= new_blk_size) {
905 		map.m_lblk = cur_lblk_off;
906 		map.m_len = new_blk_size - cur_lblk_off + 1;
907 		ret = ext4_map_blocks(NULL, inode, &map,
908 				      EXT4_GET_BLOCKS_IO_SUBMIT |
909 				      EXT4_EX_NOCACHE);
910 		if (ret < 0)
911 			return -ECANCELED;
912 
913 		if (map.m_len == 0) {
914 			cur_lblk_off++;
915 			continue;
916 		}
917 
918 		if (ret == 0) {
919 			lrange.fc_ino = cpu_to_le32(inode->i_ino);
920 			lrange.fc_lblk = cpu_to_le32(map.m_lblk);
921 			lrange.fc_len = cpu_to_le32(map.m_len);
922 			if (!ext4_fc_add_tlv(inode->i_sb, EXT4_FC_TAG_DEL_RANGE,
923 					    sizeof(lrange), (u8 *)&lrange, crc))
924 				return -ENOSPC;
925 		} else {
926 			unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ?
927 				EXT_UNWRITTEN_MAX_LEN : EXT_INIT_MAX_LEN;
928 
929 			/* Limit the number of blocks in one extent */
930 			map.m_len = min(max, map.m_len);
931 
932 			fc_ext.fc_ino = cpu_to_le32(inode->i_ino);
933 			ex = (struct ext4_extent *)&fc_ext.fc_ex;
934 			ex->ee_block = cpu_to_le32(map.m_lblk);
935 			ex->ee_len = cpu_to_le16(map.m_len);
936 			ext4_ext_store_pblock(ex, map.m_pblk);
937 			if (map.m_flags & EXT4_MAP_UNWRITTEN)
938 				ext4_ext_mark_unwritten(ex);
939 			else
940 				ext4_ext_mark_initialized(ex);
941 			if (!ext4_fc_add_tlv(inode->i_sb, EXT4_FC_TAG_ADD_RANGE,
942 					    sizeof(fc_ext), (u8 *)&fc_ext, crc))
943 				return -ENOSPC;
944 		}
945 
946 		cur_lblk_off += map.m_len;
947 	}
948 
949 	return 0;
950 }
951 
952 
953 /* Flushes data of all the inodes in the commit queue. */
954 static int ext4_fc_flush_data(journal_t *journal)
955 {
956 	struct super_block *sb = journal->j_private;
957 	struct ext4_sb_info *sbi = EXT4_SB(sb);
958 	struct ext4_inode_info *ei;
959 	int ret = 0;
960 
961 	list_for_each_entry(ei, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
962 		ret = jbd2_submit_inode_data(journal, READ_ONCE(ei->jinode));
963 		if (ret)
964 			return ret;
965 	}
966 
967 	list_for_each_entry(ei, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
968 		ret = jbd2_wait_inode_data(journal, READ_ONCE(ei->jinode));
969 		if (ret)
970 			return ret;
971 	}
972 
973 	return 0;
974 }
975 
976 /* Commit all the directory entry updates */
977 static int ext4_fc_commit_dentry_updates(journal_t *journal, u32 *crc)
978 {
979 	struct super_block *sb = journal->j_private;
980 	struct ext4_sb_info *sbi = EXT4_SB(sb);
981 	struct ext4_fc_dentry_update *fc_dentry, *fc_dentry_n;
982 	struct inode *inode;
983 	struct ext4_inode_info *ei;
984 	int ret;
985 
986 	if (list_empty(&sbi->s_fc_dentry_q[FC_Q_MAIN]))
987 		return 0;
988 	list_for_each_entry_safe(fc_dentry, fc_dentry_n,
989 				 &sbi->s_fc_dentry_q[FC_Q_MAIN], fcd_list) {
990 		if (fc_dentry->fcd_op != EXT4_FC_TAG_CREAT) {
991 			if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry))
992 				return -ENOSPC;
993 			continue;
994 		}
995 		/*
996 		 * With fcd_dilist we need not loop in sbi->s_fc_q to get the
997 		 * corresponding inode. Also, the corresponding inode could have been
998 		 * deleted, in which case, we don't need to do anything.
999 		 */
1000 		if (list_empty(&fc_dentry->fcd_dilist))
1001 			continue;
1002 		ei = list_first_entry(&fc_dentry->fcd_dilist,
1003 				struct ext4_inode_info, i_fc_dilist);
1004 		inode = &ei->vfs_inode;
1005 		WARN_ON(inode->i_ino != fc_dentry->fcd_ino);
1006 
1007 		/*
1008 		 * We first write the inode and then the create dirent. This
1009 		 * allows the recovery code to create an unnamed inode first
1010 		 * and then link it to a directory entry. This allows us
1011 		 * to use namei.c routines almost as is and simplifies
1012 		 * the recovery code.
1013 		 */
1014 		ret = ext4_fc_write_inode(inode, crc);
1015 		if (ret)
1016 			return ret;
1017 		ret = ext4_fc_write_inode_data(inode, crc);
1018 		if (ret)
1019 			return ret;
1020 		if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry))
1021 			return -ENOSPC;
1022 	}
1023 	return 0;
1024 }
1025 
1026 static int ext4_fc_perform_commit(journal_t *journal)
1027 {
1028 	struct super_block *sb = journal->j_private;
1029 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1030 	struct ext4_inode_info *iter;
1031 	struct ext4_fc_head head;
1032 	struct inode *inode;
1033 	struct blk_plug plug;
1034 	int ret = 0;
1035 	u32 crc = 0;
1036 	int alloc_ctx;
1037 
1038 	/*
1039 	 * Step 1: Mark all inodes on s_fc_q[MAIN] with
1040 	 * EXT4_STATE_FC_FLUSHING_DATA. This prevents these inodes from being
1041 	 * freed until the data flush is over.
1042 	 */
1043 	alloc_ctx = ext4_fc_lock(sb);
1044 	list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
1045 		ext4_set_inode_state(&iter->vfs_inode,
1046 				     EXT4_STATE_FC_FLUSHING_DATA);
1047 	}
1048 	ext4_fc_unlock(sb, alloc_ctx);
1049 
1050 	/* Step 2: Flush data for all the eligible inodes. */
1051 	ret = ext4_fc_flush_data(journal);
1052 
1053 	/*
1054 	 * Step 3: Clear EXT4_STATE_FC_FLUSHING_DATA flag, before returning
1055 	 * any error from step 2. This ensures that waiters waiting on
1056 	 * EXT4_STATE_FC_FLUSHING_DATA can resume.
1057 	 */
1058 	alloc_ctx = ext4_fc_lock(sb);
1059 	list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
1060 		ext4_clear_inode_state(&iter->vfs_inode,
1061 				       EXT4_STATE_FC_FLUSHING_DATA);
1062 #if (BITS_PER_LONG < 64)
1063 		wake_up_bit(&iter->i_state_flags, EXT4_STATE_FC_FLUSHING_DATA);
1064 #else
1065 		wake_up_bit(&iter->i_flags, EXT4_STATE_FC_FLUSHING_DATA);
1066 #endif
1067 	}
1068 
1069 	/*
1070 	 * Make sure clearing of EXT4_STATE_FC_FLUSHING_DATA is visible before
1071 	 * the waiter checks the bit. Pairs with implicit barrier in
1072 	 * prepare_to_wait() in ext4_fc_del().
1073 	 */
1074 	smp_mb();
1075 	ext4_fc_unlock(sb, alloc_ctx);
1076 
1077 	/*
1078 	 * If we encountered error in Step 2, return it now after clearing
1079 	 * EXT4_STATE_FC_FLUSHING_DATA bit.
1080 	 */
1081 	if (ret)
1082 		return ret;
1083 
1084 
1085 	/* Step 4: Mark all inodes as being committed. */
1086 	jbd2_journal_lock_updates(journal);
1087 	/*
1088 	 * The journal is now locked. No more handles can start and all the
1089 	 * previous handles are now drained. We now mark the inodes on the
1090 	 * commit queue as being committed.
1091 	 */
1092 	alloc_ctx = ext4_fc_lock(sb);
1093 	list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
1094 		ext4_set_inode_state(&iter->vfs_inode,
1095 				     EXT4_STATE_FC_COMMITTING);
1096 	}
1097 	ext4_fc_unlock(sb, alloc_ctx);
1098 	jbd2_journal_unlock_updates(journal);
1099 
1100 	/*
1101 	 * Step 5: If file system device is different from journal device,
1102 	 * issue a cache flush before we start writing fast commit blocks.
1103 	 */
1104 	if (journal->j_fs_dev != journal->j_dev)
1105 		blkdev_issue_flush(journal->j_fs_dev);
1106 
1107 	blk_start_plug(&plug);
1108 	alloc_ctx = ext4_fc_lock(sb);
1109 	/* Step 6: Write fast commit blocks to disk. */
1110 	if (sbi->s_fc_bytes == 0) {
1111 		/*
1112 		 * Step 6.1: Add a head tag only if this is the first fast
1113 		 * commit in this TID.
1114 		 */
1115 		head.fc_features = cpu_to_le32(EXT4_FC_SUPPORTED_FEATURES);
1116 		head.fc_tid = cpu_to_le32(
1117 			sbi->s_journal->j_running_transaction->t_tid);
1118 		if (!ext4_fc_add_tlv(sb, EXT4_FC_TAG_HEAD, sizeof(head),
1119 			(u8 *)&head, &crc)) {
1120 			ret = -ENOSPC;
1121 			goto out;
1122 		}
1123 	}
1124 
1125 	/* Step 6.2: Now write all the dentry updates. */
1126 	ret = ext4_fc_commit_dentry_updates(journal, &crc);
1127 	if (ret)
1128 		goto out;
1129 
1130 	/* Step 6.3: Now write all the changed inodes to disk. */
1131 	list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
1132 		inode = &iter->vfs_inode;
1133 		if (!ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING))
1134 			continue;
1135 
1136 		ret = ext4_fc_write_inode_data(inode, &crc);
1137 		if (ret)
1138 			goto out;
1139 		ret = ext4_fc_write_inode(inode, &crc);
1140 		if (ret)
1141 			goto out;
1142 	}
1143 	/* Step 6.4: Finally write tail tag to conclude this fast commit. */
1144 	ret = ext4_fc_write_tail(sb, crc);
1145 
1146 out:
1147 	ext4_fc_unlock(sb, alloc_ctx);
1148 	blk_finish_plug(&plug);
1149 	return ret;
1150 }
1151 
1152 static void ext4_fc_update_stats(struct super_block *sb, int status,
1153 				 u64 commit_time, int nblks, tid_t commit_tid)
1154 {
1155 	struct ext4_fc_stats *stats = &EXT4_SB(sb)->s_fc_stats;
1156 
1157 	ext4_debug("Fast commit ended with status = %d for tid %u",
1158 			status, commit_tid);
1159 	if (status == EXT4_FC_STATUS_OK) {
1160 		stats->fc_num_commits++;
1161 		stats->fc_numblks += nblks;
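		/*
		 * Exponentially weighted moving average, weighting prior
		 * history 3:1 against the new sample:
		 * avg = (commit_time + 3 * avg) / 4.
		 */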
1162 		if (likely(stats->s_fc_avg_commit_time))
1163 			stats->s_fc_avg_commit_time =
1164 				(commit_time +
1165 				 stats->s_fc_avg_commit_time * 3) / 4;
1166 		else
1167 			stats->s_fc_avg_commit_time = commit_time;
1168 	} else if (status == EXT4_FC_STATUS_FAILED ||
1169 		   status == EXT4_FC_STATUS_INELIGIBLE) {
1170 		if (status == EXT4_FC_STATUS_FAILED)
1171 			stats->fc_failed_commits++;
1172 		stats->fc_ineligible_commits++;
1173 	} else {
1174 		stats->fc_skipped_commits++;
1175 	}
1176 	trace_ext4_fc_commit_stop(sb, nblks, status, commit_tid);
1177 }
1178 
1179 /*
1180  * The main commit entry point. Performs a fast commit for transaction
1181  * commit_tid if needed. If it's not possible to perform a fast commit
1182  * due to various reasons, we fall back to full commit. Returns 0
1183  * on success, error otherwise.
1184  */
1185 int ext4_fc_commit(journal_t *journal, tid_t commit_tid)
1186 {
1187 	struct super_block *sb = journal->j_private;
1188 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1189 	int nblks = 0, ret, bsize = journal->j_blocksize;
1190 	int subtid = atomic_read(&sbi->s_fc_subtid);
1191 	int status = EXT4_FC_STATUS_OK, fc_bufs_before = 0;
1192 	ktime_t start_time, commit_time;
1193 	int old_ioprio, journal_ioprio;
1194 
1195 	if (!test_opt2(sb, JOURNAL_FAST_COMMIT))
1196 		return jbd2_complete_transaction(journal, commit_tid);
1197 
1198 	trace_ext4_fc_commit_start(sb, commit_tid);
1199 
1200 	start_time = ktime_get();
1201 	old_ioprio = get_current_ioprio();
1202 
1203 restart_fc:
1204 	ret = jbd2_fc_begin_commit(journal, commit_tid);
1205 	if (ret == -EALREADY) {
1206 		/* There was an ongoing commit, check if we need to restart */
1207 		if (atomic_read(&sbi->s_fc_subtid) <= subtid &&
1208 		    tid_gt(commit_tid, journal->j_commit_sequence))
1209 			goto restart_fc;
1210 		ext4_fc_update_stats(sb, EXT4_FC_STATUS_SKIPPED, 0, 0,
1211 				commit_tid);
1212 		return 0;
1213 	} else if (ret) {
1214 		/*
1215 		 * Commit couldn't start. Just update stats and perform a
1216 		 * full commit.
1217 		 */
1218 		ext4_fc_update_stats(sb, EXT4_FC_STATUS_FAILED, 0, 0,
1219 				commit_tid);
1220 		return jbd2_complete_transaction(journal, commit_tid);
1221 	}
1222 
1223 	/*
1224 	 * After establishing journal barrier via jbd2_fc_begin_commit(), check
1225 	 * if we are fast commit ineligible.
1226 	 */
1227 	if (ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE)) {
1228 		status = EXT4_FC_STATUS_INELIGIBLE;
1229 		goto fallback;
1230 	}
1231 
1232 	/*
1233 	 * Now that we know that this thread is going to do a fast commit,
1234 	 * elevate the priority to match that of the journal thread.
1235 	 */
1236 	if (journal->j_task->io_context)
1237 		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
1238 	else
1239 		journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
1240 	set_task_ioprio(current, journal_ioprio);
1241 	fc_bufs_before = (sbi->s_fc_bytes + bsize - 1) / bsize;
1242 	ret = ext4_fc_perform_commit(journal);
1243 	if (ret < 0) {
1244 		status = EXT4_FC_STATUS_FAILED;
1245 		goto fallback;
1246 	}
1247 	nblks = (sbi->s_fc_bytes + bsize - 1) / bsize - fc_bufs_before;
1248 	ret = jbd2_fc_wait_bufs(journal, nblks);
1249 	if (ret < 0) {
1250 		status = EXT4_FC_STATUS_FAILED;
1251 		goto fallback;
1252 	}
1253 	atomic_inc(&sbi->s_fc_subtid);
1254 	ret = jbd2_fc_end_commit(journal);
1255 	set_task_ioprio(current, old_ioprio);
1256 	/*
1257 	 * weight the average time higher than this commit time so we
1258 	 * don't react too strongly to vast changes in the commit time
1259 	 */
1260 	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
1261 	ext4_fc_update_stats(sb, status, commit_time, nblks, commit_tid);
1262 	return ret;
1263 
1264 fallback:
1265 	set_task_ioprio(current, old_ioprio);
1266 	ret = jbd2_fc_end_commit_fallback(journal);
1267 	ext4_fc_update_stats(sb, status, 0, 0, commit_tid);
1268 	return ret;
1269 }
1270 
1271 /*
1272  * Fast commit cleanup routine. This is called after every fast commit and
1273  * full commit. full is true if we are called after a full commit.
1274  */
1275 static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
1276 {
1277 	struct super_block *sb = journal->j_private;
1278 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1279 	struct ext4_inode_info *ei;
1280 	struct ext4_fc_dentry_update *fc_dentry;
1281 	int alloc_ctx;
1282 
1283 	if (full && sbi->s_fc_bh)
1284 		sbi->s_fc_bh = NULL;
1285 
1286 	trace_ext4_fc_cleanup(journal, full, tid);
1287 	jbd2_fc_release_bufs(journal);
1288 
1289 	alloc_ctx = ext4_fc_lock(sb);
1290 	while (!list_empty(&sbi->s_fc_q[FC_Q_MAIN])) {
1291 		ei = list_first_entry(&sbi->s_fc_q[FC_Q_MAIN],
1292 					struct ext4_inode_info,
1293 					i_fc_list);
1294 		list_del_init(&ei->i_fc_list);
1295 		ext4_clear_inode_state(&ei->vfs_inode,
1296 				       EXT4_STATE_FC_COMMITTING);
1297 		if (tid_geq(tid, ei->i_sync_tid)) {
1298 			ext4_fc_reset_inode(&ei->vfs_inode);
1299 		} else if (full) {
1300 			/*
1301 			 * We are called after a full commit, inode has been
1302 			 * modified while the commit was running. Re-enqueue
1303 			 * the inode into STAGING, which will then be spliced
1304 			 * back into MAIN. This cannot happen during
1305 			 * fastcommit because the journal is locked all the
1306 			 * time in that case (and tid doesn't increase so
1307 			 * tid check above isn't reliable).
1308 			 */
1309 			list_add_tail(&ei->i_fc_list,
1310 				      &sbi->s_fc_q[FC_Q_STAGING]);
1311 		}
1312 		/*
1313 		 * Make sure clearing of EXT4_STATE_FC_COMMITTING is
1314 		 * visible before we send the wakeup. Pairs with implicit
1315 		 * barrier in prepare_to_wait() in ext4_fc_track_inode().
1316 		 */
1317 		smp_mb();
1318 #if (BITS_PER_LONG < 64)
1319 		wake_up_bit(&ei->i_state_flags, EXT4_STATE_FC_COMMITTING);
1320 #else
1321 		wake_up_bit(&ei->i_flags, EXT4_STATE_FC_COMMITTING);
1322 #endif
1323 	}
1324 
1325 	while (!list_empty(&sbi->s_fc_dentry_q[FC_Q_MAIN])) {
1326 		fc_dentry = list_first_entry(&sbi->s_fc_dentry_q[FC_Q_MAIN],
1327 					     struct ext4_fc_dentry_update,
1328 					     fcd_list);
1329 		list_del_init(&fc_dentry->fcd_list);
1330 		list_del_init(&fc_dentry->fcd_dilist);
1331 
1332 		release_dentry_name_snapshot(&fc_dentry->fcd_name);
1333 		kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
1334 	}
1335 
1336 	list_splice_init(&sbi->s_fc_dentry_q[FC_Q_STAGING],
1337 				&sbi->s_fc_dentry_q[FC_Q_MAIN]);
1338 	list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
1339 				&sbi->s_fc_q[FC_Q_MAIN]);
1340 
1341 	if (tid_geq(tid, sbi->s_fc_ineligible_tid)) {
1342 		sbi->s_fc_ineligible_tid = 0;
1343 		ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
1344 	}
1345 
1346 	if (full)
1347 		sbi->s_fc_bytes = 0;
1348 	ext4_fc_unlock(sb, alloc_ctx);
1349 	trace_ext4_fc_stats(sb);
1350 }
1351 
1352 /* Ext4 Replay Path Routines */
1353 
1354 /* Helper struct for dentry replay routines */
1355 struct dentry_info_args {
1356 	int parent_ino, dname_len, ino, inode_len;
1357 	char *dname;
1358 };
1359 
1360 /* Same as struct ext4_fc_tl, but uses native endianness fields */
1361 struct ext4_fc_tl_mem {
1362 	u16 fc_tag;
1363 	u16 fc_len;
1364 };
1365 
1366 static inline void tl_to_darg(struct dentry_info_args *darg,
1367 			      struct ext4_fc_tl_mem *tl, u8 *val)
1368 {
1369 	struct ext4_fc_dentry_info fcd;
1370 
1371 	memcpy(&fcd, val, sizeof(fcd));
1372 
1373 	darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino);
1374 	darg->ino = le32_to_cpu(fcd.fc_ino);
1375 	darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname);
1376 	darg->dname_len = tl->fc_len - sizeof(struct ext4_fc_dentry_info);
1377 }
1378 
1379 static inline void ext4_fc_get_tl(struct ext4_fc_tl_mem *tl, u8 *val)
1380 {
1381 	struct ext4_fc_tl tl_disk;
1382 
1383 	memcpy(&tl_disk, val, EXT4_FC_TAG_BASE_LEN);
1384 	tl->fc_len = le16_to_cpu(tl_disk.fc_len);
1385 	tl->fc_tag = le16_to_cpu(tl_disk.fc_tag);
1386 }
1387 
1388 /* Unlink replay function */
1389 static int ext4_fc_replay_unlink(struct super_block *sb,
1390 				 struct ext4_fc_tl_mem *tl, u8 *val)
1391 {
1392 	struct inode *inode, *old_parent;
1393 	struct qstr entry;
1394 	struct dentry_info_args darg;
1395 	int ret = 0;
1396 
1397 	tl_to_darg(&darg, tl, val);
1398 
1399 	trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino,
1400 			darg.parent_ino, darg.dname_len);
1401 
1402 	entry.name = darg.dname;
1403 	entry.len = darg.dname_len;
1404 	inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
1405 
1406 	if (IS_ERR(inode)) {
1407 		ext4_debug("Inode %d not found", darg.ino);
1408 		return 0;
1409 	}
1410 
1411 	old_parent = ext4_iget(sb, darg.parent_ino,
1412 				EXT4_IGET_NORMAL);
1413 	if (IS_ERR(old_parent)) {
1414 		ext4_debug("Dir with inode %d not found", darg.parent_ino);
1415 		iput(inode);
1416 		return 0;
1417 	}
1418 
1419 	ret = __ext4_unlink(old_parent, &entry, inode, NULL);
1420 	/* -ENOENT is ok because the entry might not exist anymore. */
1421 	if (ret == -ENOENT)
1422 		ret = 0;
1423 	iput(old_parent);
1424 	iput(inode);
1425 	return ret;
1426 }
1427 
1428 static int ext4_fc_replay_link_internal(struct super_block *sb,
1429 				struct dentry_info_args *darg,
1430 				struct inode *inode)
1431 {
1432 	struct inode *dir = NULL;
1433 	struct qstr qstr_dname = QSTR_INIT(darg->dname, darg->dname_len);
1434 	int ret = 0;
1435 
1436 	dir = ext4_iget(sb, darg->parent_ino, EXT4_IGET_NORMAL);
1437 	if (IS_ERR(dir)) {
1438 		ext4_debug("Dir with inode %d not found.", darg->parent_ino);
1439 		dir = NULL;
1440 		goto out;
1441 	}
1442 
1443 	ret = __ext4_link(dir, inode, &qstr_dname, NULL);
1444 	/*
1445 	 * It's possible that the link already existed since data blocks
1446 	 * for the dir in question got persisted before we crashed OR
1447 	 * we replayed this tag and crashed before the entire replay
1448 	 * could complete.
1449 	 */
1450 	if (ret && ret != -EEXIST) {
1451 		ext4_debug("Failed to link\n");
1452 		goto out;
1453 	}
1454 
1455 	ret = 0;
1456 out:
1457 	if (dir)
1458 		iput(dir);
1459 
1460 	return ret;
1461 }
1462 
1463 /* Link replay function */
1464 static int ext4_fc_replay_link(struct super_block *sb,
1465 			       struct ext4_fc_tl_mem *tl, u8 *val)
1466 {
1467 	struct inode *inode;
1468 	struct dentry_info_args darg;
1469 	int ret = 0;
1470 
1471 	tl_to_darg(&darg, tl, val);
1472 	trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino,
1473 			darg.parent_ino, darg.dname_len);
1474 
1475 	inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
1476 	if (IS_ERR(inode)) {
1477 		ext4_debug("Inode not found.");
1478 		return 0;
1479 	}
1480 
1481 	ret = ext4_fc_replay_link_internal(sb, &darg, inode);
1482 	iput(inode);
1483 	return ret;
1484 }
1485 
1486 /*
1487  * Record all the modified inodes during replay. We use this later to setup
1488  * block bitmaps correctly.
1489  */
1490 static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
1491 {
1492 	struct ext4_fc_replay_state *state;
1493 	int i;
1494 
1495 	state = &EXT4_SB(sb)->s_fc_replay_state;
1496 	for (i = 0; i < state->fc_modified_inodes_used; i++)
1497 		if (state->fc_modified_inodes[i] == ino)
1498 			return 0;
1499 	if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) {
1500 		int *fc_modified_inodes;
1501 
1502 		fc_modified_inodes = krealloc(state->fc_modified_inodes,
1503 				sizeof(int) * (state->fc_modified_inodes_size +
1504 				EXT4_FC_REPLAY_REALLOC_INCREMENT),
1505 				GFP_KERNEL);
1506 		if (!fc_modified_inodes)
1507 			return -ENOMEM;
1508 		state->fc_modified_inodes = fc_modified_inodes;
1509 		state->fc_modified_inodes_size +=
1510 			EXT4_FC_REPLAY_REALLOC_INCREMENT;
1511 	}
1512 	state->fc_modified_inodes[state->fc_modified_inodes_used++] = ino;
1513 	return 0;
1514 }
1515 
1516 /*
1517  * Inode replay function
1518  */
1519 static int ext4_fc_replay_inode(struct super_block *sb,
1520 				struct ext4_fc_tl_mem *tl, u8 *val)
1521 {
1522 	struct ext4_fc_inode fc_inode;
1523 	struct ext4_inode *raw_inode;
1524 	struct ext4_inode *raw_fc_inode;
1525 	struct inode *inode = NULL;
1526 	struct ext4_iloc iloc;
1527 	int inode_len, ino, ret, tag = tl->fc_tag;
1528 	struct ext4_extent_header *eh;
1529 	size_t off_gen = offsetof(struct ext4_inode, i_generation);
1530 
1531 	memcpy(&fc_inode, val, sizeof(fc_inode));
1532 
1533 	ino = le32_to_cpu(fc_inode.fc_ino);
1534 	trace_ext4_fc_replay(sb, tag, ino, 0, 0);
1535 
1536 	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
1537 	if (!IS_ERR(inode)) {
1538 		ext4_ext_clear_bb(inode);
1539 		iput(inode);
1540 	}
1541 	inode = NULL;
1542 
1543 	ret = ext4_fc_record_modified_inode(sb, ino);
1544 	if (ret)
1545 		goto out;
1546 
1547 	raw_fc_inode = (struct ext4_inode *)
1548 		(val + offsetof(struct ext4_fc_inode, fc_raw_inode));
1549 	ret = ext4_get_fc_inode_loc(sb, ino, &iloc);
1550 	if (ret)
1551 		goto out;
1552 
1553 	inode_len = tl->fc_len - sizeof(struct ext4_fc_inode);
1554 	raw_inode = ext4_raw_inode(&iloc);
1555 
1556 	memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block));
1557 	memcpy((u8 *)raw_inode + off_gen, (u8 *)raw_fc_inode + off_gen,
1558 	       inode_len - off_gen);
1559 	if (le32_to_cpu(raw_inode->i_flags) & EXT4_EXTENTS_FL) {
1560 		eh = (struct ext4_extent_header *)(&raw_inode->i_block[0]);
1561 		if (eh->eh_magic != EXT4_EXT_MAGIC) {
1562 			memset(eh, 0, sizeof(*eh));
1563 			eh->eh_magic = EXT4_EXT_MAGIC;
1564 			eh->eh_max = cpu_to_le16(
1565 				(sizeof(raw_inode->i_block) -
1566 				 sizeof(struct ext4_extent_header))
1567 				 / sizeof(struct ext4_extent));
1568 		}
1569 	} else if (le32_to_cpu(raw_inode->i_flags) & EXT4_INLINE_DATA_FL) {
1570 		memcpy(raw_inode->i_block, raw_fc_inode->i_block,
1571 			sizeof(raw_inode->i_block));
1572 	}
1573 
1574 	/* Immediately update the inode on disk. */
1575 	ret = ext4_handle_dirty_metadata(NULL, NULL, iloc.bh);
1576 	if (ret)
1577 		goto out_brelse;
1578 	ret = sync_dirty_buffer(iloc.bh);
1579 	if (ret)
1580 		goto out_brelse;
1581 	ret = ext4_mark_inode_used(sb, ino);
1582 	if (ret)
1583 		goto out_brelse;
1584 
1585 	/* Given that we just wrote the inode on disk, this SHOULD succeed. */
1586 	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
1587 	if (IS_ERR(inode)) {
1588 		ext4_debug("Inode not found.");
1589 		inode = NULL;
1590 		ret = -EFSCORRUPTED;
1591 		goto out_brelse;
1592 	}
1593 
1594 	/*
1595 	 * Our allocator could have made different decisions than before
1596 	 * crashing. This should be fixed, but until then, we recalculate
1597 	 * the number of blocks the inode occupies.
1598 	 */
1599 	if (!ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
1600 		ext4_ext_replay_set_iblocks(inode);
1601 
1602 	inode->i_generation = le32_to_cpu(ext4_raw_inode(&iloc)->i_generation);
1603 	ext4_reset_inode_seed(inode);
1604 
1605 	ext4_inode_csum_set(inode, ext4_raw_inode(&iloc), EXT4_I(inode));
1606 	ret = ext4_handle_dirty_metadata(NULL, NULL, iloc.bh);
1607 	sync_dirty_buffer(iloc.bh);
1608 out_brelse:
1609 	brelse(iloc.bh);
1610 out:
1611 	iput(inode);
1612 	if (!ret)
1613 		blkdev_issue_flush(sb->s_bdev);
1614 
1615 	return ret;
1616 }
1617 
1618 /*
1619  * Dentry create replay function.
1620  *
1621  * EXT4_FC_TAG_CREAT is preceded by EXT4_FC_TAG_INODE, which means the inode
1622  * for which we are trying to create a dentry here should already have been
1623  * replayed before we start.
1624  */
1625 static int ext4_fc_replay_create(struct super_block *sb,
1626 				 struct ext4_fc_tl_mem *tl, u8 *val)
1627 {
1628 	int ret = 0;
1629 	struct inode *inode = NULL;
1630 	struct inode *dir = NULL;
1631 	struct dentry_info_args darg;
1632 
1633 	tl_to_darg(&darg, tl, val);
1634 
1635 	trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino,
1636 			darg.parent_ino, darg.dname_len);
1637 
1638 	/* This takes care of updating the group descriptor and other metadata */
1639 	ret = ext4_mark_inode_used(sb, darg.ino);
1640 	if (ret)
1641 		goto out;
1642 
1643 	inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
1644 	if (IS_ERR(inode)) {
1645 		ext4_debug("inode %d not found.", darg.ino);
1646 		inode = NULL;
1647 		ret = -EINVAL;
1648 		goto out;
1649 	}
1650 
1651 	if (S_ISDIR(inode->i_mode)) {
1652 		/*
1653 		 * If we are creating a directory, we need to make sure that the
1654 		 * dot and dot dot dirents are set up properly.
1655 		 */
1656 		dir = ext4_iget(sb, darg.parent_ino, EXT4_IGET_NORMAL);
1657 		if (IS_ERR(dir)) {
1658 			ext4_debug("Dir %d not found.", darg.parent_ino);
1659 			goto out;
1660 		}
1661 		ret = ext4_init_new_dir(NULL, dir, inode);
1662 		iput(dir);
1663 		if (ret) {
1664 			ret = 0;
1665 			goto out;
1666 		}
1667 	}
1668 	ret = ext4_fc_replay_link_internal(sb, &darg, inode);
1669 	if (ret)
1670 		goto out;
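	/*
	 * The dentry replayed above is the only link we know of for this
	 * inode at this point.
	 */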
1671 	set_nlink(inode, 1);
1672 	ext4_mark_inode_dirty(NULL, inode);
1673 out:
1674 	iput(inode);
1675 	return ret;
1676 }
1677 
1678 /*
1679  * Record physical disk regions which are in use according to the fast commit
1680  * area and used by inodes during the replay phase. Our simple replay-phase
1681  * allocator excludes these regions from allocation.
1682  */
1683 int ext4_fc_record_regions(struct super_block *sb, int ino,
1684 		ext4_lblk_t lblk, ext4_fsblk_t pblk, int len, int replay)
1685 {
1686 	struct ext4_fc_replay_state *state;
1687 	struct ext4_fc_alloc_region *region;
1688 
1689 	state = &EXT4_SB(sb)->s_fc_replay_state;
1690 	/*
1691 	 * During the replay phase, fc_regions_valid may not be the same as
1692 	 * fc_regions_used; update it when making new additions.
1693 	 */
1694 	if (replay && state->fc_regions_used != state->fc_regions_valid)
1695 		state->fc_regions_used = state->fc_regions_valid;
1696 	if (state->fc_regions_used == state->fc_regions_size) {
1697 		struct ext4_fc_alloc_region *fc_regions;
1698 
1699 		fc_regions = krealloc(state->fc_regions,
1700 				      sizeof(struct ext4_fc_alloc_region) *
1701 				      (state->fc_regions_size +
1702 				       EXT4_FC_REPLAY_REALLOC_INCREMENT),
1703 				      GFP_KERNEL);
1704 		if (!fc_regions)
1705 			return -ENOMEM;
1706 		state->fc_regions_size +=
1707 			EXT4_FC_REPLAY_REALLOC_INCREMENT;
1708 		state->fc_regions = fc_regions;
1709 	}
1710 	region = &state->fc_regions[state->fc_regions_used++];
1711 	region->ino = ino;
1712 	region->lblk = lblk;
1713 	region->pblk = pblk;
1714 	region->len = len;
1715 
1716 	if (replay)
1717 		state->fc_regions_valid++;
1718 
1719 	return 0;
1720 }
1721 
1722 /* Replay add range tag */
1723 static int ext4_fc_replay_add_range(struct super_block *sb, u8 *val)
1724 {
1725 	struct ext4_fc_add_range fc_add_ex;
1726 	struct ext4_extent newex, *ex;
1727 	struct inode *inode;
1728 	ext4_lblk_t start, cur;
1729 	int remaining, len;
1730 	ext4_fsblk_t start_pblk;
1731 	struct ext4_map_blocks map;
1732 	struct ext4_ext_path *path = NULL;
1733 	int ret;
1734 
1735 	memcpy(&fc_add_ex, val, sizeof(fc_add_ex));
1736 	ex = (struct ext4_extent *)&fc_add_ex.fc_ex;
1737 
1738 	trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE,
1739 		le32_to_cpu(fc_add_ex.fc_ino), le32_to_cpu(ex->ee_block),
1740 		ext4_ext_get_actual_len(ex));
1741 
1742 	inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL);
1743 	if (IS_ERR(inode)) {
1744 		ext4_debug("Inode not found.");
1745 		return 0;
1746 	}
1747 
1748 	ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
1749 	if (ret)
1750 		goto out;
1751 
1752 	start = le32_to_cpu(ex->ee_block);
1753 	start_pblk = ext4_ext_pblock(ex);
1754 	len = ext4_ext_get_actual_len(ex);
1755 
1756 	cur = start;
1757 	remaining = len;
1758 	ext4_debug("ADD_RANGE, lblk %d, pblk %lld, len %d, unwritten %d, inode %llu\n",
1759 		  start, start_pblk, len, ext4_ext_is_unwritten(ex),
1760 		  inode->i_ino);
1761 
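	/*
	 * Walk the logical range from the fast commit extent and reconcile it
	 * with the current extent tree: insert missing mappings, re-point
	 * mappings whose physical blocks have changed, and fix up entries
	 * where only the unwritten state differs.
	 */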
1762 	while (remaining > 0) {
1763 		map.m_lblk = cur;
1764 		map.m_len = remaining;
1765 		map.m_pblk = 0;
1766 		ret = ext4_map_blocks(NULL, inode, &map, 0);
1767 
1768 		if (ret < 0)
1769 			goto out;
1770 
1771 		if (ret == 0) {
1772 			/* Range is not mapped */
1773 			path = ext4_find_extent(inode, cur, path, 0);
1774 			if (IS_ERR(path))
1775 				goto out;
1776 			memset(&newex, 0, sizeof(newex));
1777 			newex.ee_block = cpu_to_le32(cur);
1778 			ext4_ext_store_pblock(
1779 				&newex, start_pblk + cur - start);
1780 			newex.ee_len = cpu_to_le16(map.m_len);
1781 			if (ext4_ext_is_unwritten(ex))
1782 				ext4_ext_mark_unwritten(&newex);
1783 			down_write(&EXT4_I(inode)->i_data_sem);
1784 			path = ext4_ext_insert_extent(NULL, inode,
1785 						      path, &newex, 0);
1786 			up_write((&EXT4_I(inode)->i_data_sem));
1787 			if (IS_ERR(path))
1788 				goto out;
1789 			goto next;
1790 		}
1791 
1792 		if (start_pblk + cur - start != map.m_pblk) {
1793 			/*
1794 			 * Logical to physical mapping changed. This can happen
1795 			 * if this range was removed and then reallocated to
1796 			 * map to new physical blocks during a fast commit.
1797 			 */
1798 			ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
1799 					ext4_ext_is_unwritten(ex),
1800 					start_pblk + cur - start);
1801 			if (ret)
1802 				goto out;
1803 			/*
1804 			 * Mark the old blocks as free since they aren't used
1805 			 * anymore. We maintain an array of all the modified
1806 			 * inodes. In case these blocks are still used at either
1807 			 * a different logical range in the same inode or in
1808 			 * some different inode, we will mark them as allocated
1809 			 * at the end of the FC replay using our array of
1810 			 * modified inodes.
1811 			 */
1812 			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
1813 			goto next;
1814 		}
1815 
1816 		/* Range is mapped and needs a state change */
1817 		ext4_debug("Converting from %ld to %d %lld",
1818 				map.m_flags & EXT4_MAP_UNWRITTEN,
1819 			ext4_ext_is_unwritten(ex), map.m_pblk);
1820 		ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
1821 					ext4_ext_is_unwritten(ex), map.m_pblk);
1822 		if (ret)
1823 			goto out;
1824 		/*
1825 		 * We may have split the extent tree while toggling the state.
1826 		 * Try to shrink the extent tree now.
1827 		 */
1828 		ext4_ext_replay_shrink_inode(inode, start + len);
1829 next:
1830 		cur += map.m_len;
1831 		remaining -= map.m_len;
1832 	}
1833 	ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >>
1834 					sb->s_blocksize_bits);
1835 out:
1836 	ext4_free_ext_path(path);
1837 	iput(inode);
1838 	return 0;
1839 }
1840 
1841 /* Replay DEL_RANGE tag */
1842 static int
1843 ext4_fc_replay_del_range(struct super_block *sb, u8 *val)
1844 {
1845 	struct inode *inode;
1846 	struct ext4_fc_del_range lrange;
1847 	struct ext4_map_blocks map;
1848 	ext4_lblk_t cur, remaining;
1849 	int ret;
1850 
1851 	memcpy(&lrange, val, sizeof(lrange));
1852 	cur = le32_to_cpu(lrange.fc_lblk);
1853 	remaining = le32_to_cpu(lrange.fc_len);
1854 
1855 	trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE,
1856 		le32_to_cpu(lrange.fc_ino), cur, remaining);
1857 
1858 	inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL);
1859 	if (IS_ERR(inode)) {
1860 		ext4_debug("Inode %d not found", le32_to_cpu(lrange.fc_ino));
1861 		return 0;
1862 	}
1863 
1864 	ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
1865 	if (ret)
1866 		goto out;
1867 
1868 	ext4_debug("DEL_RANGE, inode %llu, lblk %d, len %d\n",
1869 			inode->i_ino, le32_to_cpu(lrange.fc_lblk),
1870 			le32_to_cpu(lrange.fc_len));
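	/*
	 * First release the mapped blocks in this range back to the replay
	 * allocator (mark them free in the in-memory bitmaps), then remove the
	 * corresponding extents from the inode's extent tree below.
	 */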
1871 	while (remaining > 0) {
1872 		map.m_lblk = cur;
1873 		map.m_len = remaining;
1874 
1875 		ret = ext4_map_blocks(NULL, inode, &map, 0);
1876 		if (ret < 0)
1877 			goto out;
1878 		if (ret > 0) {
1879 			remaining -= ret;
1880 			cur += ret;
1881 			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
1882 		} else {
1883 			remaining -= map.m_len;
1884 			cur += map.m_len;
1885 		}
1886 	}
1887 
1888 	down_write(&EXT4_I(inode)->i_data_sem);
1889 	ret = ext4_ext_remove_space(inode, le32_to_cpu(lrange.fc_lblk),
1890 				le32_to_cpu(lrange.fc_lblk) +
1891 				le32_to_cpu(lrange.fc_len) - 1);
1892 	up_write(&EXT4_I(inode)->i_data_sem);
1893 	if (ret)
1894 		goto out;
1895 	ext4_ext_replay_shrink_inode(inode,
1896 		i_size_read(inode) >> sb->s_blocksize_bits);
1897 	ext4_mark_inode_dirty(NULL, inode);
1898 out:
1899 	iput(inode);
1900 	return 0;
1901 }
1902 
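/*
 * After replay, walk every inode recorded as modified and mark the physical
 * blocks referenced by its extent tree (including the extent tree's own index
 * blocks) as in use in the block bitmaps and counters.
 */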
1903 static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
1904 {
1905 	struct ext4_fc_replay_state *state;
1906 	struct inode *inode;
1907 	struct ext4_ext_path *path = NULL;
1908 	struct ext4_map_blocks map;
1909 	int i, ret, j;
1910 	ext4_lblk_t cur, end;
1911 
1912 	state = &EXT4_SB(sb)->s_fc_replay_state;
1913 	for (i = 0; i < state->fc_modified_inodes_used; i++) {
1914 		inode = ext4_iget(sb, state->fc_modified_inodes[i],
1915 			EXT4_IGET_NORMAL);
1916 		if (IS_ERR(inode)) {
1917 			ext4_debug("Inode %d not found.",
1918 				state->fc_modified_inodes[i]);
1919 			continue;
1920 		}
1921 		cur = 0;
1922 		end = EXT_MAX_BLOCKS;
1923 		if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA)) {
1924 			iput(inode);
1925 			continue;
1926 		}
1927 		while (cur < end) {
1928 			map.m_lblk = cur;
1929 			map.m_len = end - cur;
1930 
1931 			ret = ext4_map_blocks(NULL, inode, &map, 0);
1932 			if (ret < 0)
1933 				break;
1934 
1935 			if (ret > 0) {
1936 				path = ext4_find_extent(inode, map.m_lblk, path, 0);
1937 				if (!IS_ERR(path)) {
1938 					for (j = 0; j < path->p_depth; j++)
1939 						ext4_mb_mark_bb(inode->i_sb,
1940 							path[j].p_block, 1, true);
1941 				} else {
1942 					path = NULL;
1943 				}
1944 				cur += ret;
1945 				ext4_mb_mark_bb(inode->i_sb, map.m_pblk,
1946 							map.m_len, true);
1947 			} else {
1948 				cur = cur + (map.m_len ? map.m_len : 1);
1949 			}
1950 		}
1951 		iput(inode);
1952 	}
1953 
1954 	ext4_free_ext_path(path);
1955 }
1956 
1957 /*
1958  * Check if a block is in the excluded regions for block allocation. The simple
1959  * allocator that runs during the replay phase calls this function to see
1960  * if it is okay to use a block.
1961  */
1962 bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t blk)
1963 {
1964 	int i;
1965 	struct ext4_fc_replay_state *state;
1966 
1967 	state = &EXT4_SB(sb)->s_fc_replay_state;
1968 	for (i = 0; i < state->fc_regions_valid; i++) {
1969 		if (state->fc_regions[i].ino == 0 ||
1970 			state->fc_regions[i].len == 0)
1971 			continue;
1972 		if (in_range(blk, state->fc_regions[i].pblk,
1973 					state->fc_regions[i].len))
1974 			return true;
1975 	}
1976 	return false;
1977 }
1978 
1979 /* Cleanup function called after replay */
1980 void ext4_fc_replay_cleanup(struct super_block *sb)
1981 {
1982 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1983 
1984 	sbi->s_mount_state &= ~EXT4_FC_REPLAY;
1985 	kfree(sbi->s_fc_replay_state.fc_regions);
1986 	kfree(sbi->s_fc_replay_state.fc_modified_inodes);
1987 }
1988 
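/*
 * Sanity check the value length seen for a tag during the scan phase against
 * the size expected for that tag type.
 */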
1989 static bool ext4_fc_value_len_isvalid(struct ext4_sb_info *sbi,
1990 				      int tag, int len)
1991 {
1992 	switch (tag) {
1993 	case EXT4_FC_TAG_ADD_RANGE:
1994 		return len == sizeof(struct ext4_fc_add_range);
1995 	case EXT4_FC_TAG_DEL_RANGE:
1996 		return len == sizeof(struct ext4_fc_del_range);
1997 	case EXT4_FC_TAG_CREAT:
1998 	case EXT4_FC_TAG_LINK:
1999 	case EXT4_FC_TAG_UNLINK:
2000 		len -= sizeof(struct ext4_fc_dentry_info);
2001 		return len >= 1 && len <= EXT4_NAME_LEN;
2002 	case EXT4_FC_TAG_INODE:
2003 		len -= sizeof(struct ext4_fc_inode);
2004 		return len >= EXT4_GOOD_OLD_INODE_SIZE &&
2005 			len <= sbi->s_inode_size;
2006 	case EXT4_FC_TAG_PAD:
2007 		return true; /* padding can have any length */
2008 	case EXT4_FC_TAG_TAIL:
2009 		return len >= sizeof(struct ext4_fc_tail);
2010 	case EXT4_FC_TAG_HEAD:
2011 		return len == sizeof(struct ext4_fc_head);
2012 	}
2013 	return false;
2014 }
2015 
2016 /*
2017  * Recovery Scan phase handler
2018  *
2019  * This function is called during the scan phase and is responsible
2020  * for doing following things:
2021  * - Make sure the fast commit area has valid tags for replay
2022  * - Count number of tags that need to be replayed by the replay handler
2023  * - Verify CRC
2024  * - Create a list of excluded blocks for allocation during replay phase
2025  *
2026  * This function returns JBD2_FC_REPLAY_CONTINUE to indicate that SCAN is
2027  * incomplete and JBD2 should send more blocks. It returns JBD2_FC_REPLAY_STOP
2028  * to indicate that the scan has finished and JBD2 can now start the replay
2029  * phase. It returns a negative error code if there was an error. At the end
2030  * of a successful scan phase, sbi->s_fc_replay_state.fc_replay_num_tags is set
2031  * to indicate the number of tags that need to be replayed during the replay phase.
2032  */
2033 static int ext4_fc_replay_scan(journal_t *journal,
2034 				struct buffer_head *bh, int off,
2035 				tid_t expected_tid)
2036 {
2037 	struct super_block *sb = journal->j_private;
2038 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2039 	struct ext4_fc_replay_state *state;
2040 	int ret = JBD2_FC_REPLAY_CONTINUE;
2041 	struct ext4_fc_add_range ext;
2042 	struct ext4_fc_tl_mem tl;
2043 	struct ext4_fc_tail tail;
2044 	__u8 *start, *end, *cur, *val;
2045 	struct ext4_fc_head head;
2046 	struct ext4_extent *ex;
2047 
2048 	state = &sbi->s_fc_replay_state;
2049 
2050 	start = (u8 *)bh->b_data;
2051 	end = start + journal->j_blocksize;
2052 
2053 	if (state->fc_replay_expected_off == 0) {
2054 		state->fc_cur_tag = 0;
2055 		state->fc_replay_num_tags = 0;
2056 		state->fc_crc = 0;
2057 		state->fc_regions = NULL;
2058 		state->fc_regions_valid = state->fc_regions_used =
2059 			state->fc_regions_size = 0;
2060 		/* Check if we can stop early */
2061 		if (le16_to_cpu(((struct ext4_fc_tl *)start)->fc_tag)
2062 			!= EXT4_FC_TAG_HEAD)
2063 			return 0;
2064 	}
2065 
2066 	if (off != state->fc_replay_expected_off) {
2067 		ret = -EFSCORRUPTED;
2068 		goto out_err;
2069 	}
2070 
2071 	state->fc_replay_expected_off++;
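	/*
	 * Walk each TLV in this block: validate the tag and its length, fold
	 * the tag into the running checksum, and count it. A TAIL tag whose
	 * TID and CRC match commits the running count into fc_replay_num_tags.
	 */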
2072 	for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN;
2073 	     cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) {
2074 		ext4_fc_get_tl(&tl, cur);
2075 		val = cur + EXT4_FC_TAG_BASE_LEN;
2076 		if (tl.fc_len > end - val ||
2077 		    !ext4_fc_value_len_isvalid(sbi, tl.fc_tag, tl.fc_len)) {
2078 			ret = state->fc_replay_num_tags ?
2079 				JBD2_FC_REPLAY_STOP : -ECANCELED;
2080 			goto out_err;
2081 		}
2082 		ext4_debug("Scan phase, tag:%s, blk %lld\n",
2083 			   tag2str(tl.fc_tag), bh->b_blocknr);
2084 		switch (tl.fc_tag) {
2085 		case EXT4_FC_TAG_ADD_RANGE:
2086 			memcpy(&ext, val, sizeof(ext));
2087 			ex = (struct ext4_extent *)&ext.fc_ex;
2088 			ret = ext4_fc_record_regions(sb,
2089 				le32_to_cpu(ext.fc_ino),
2090 				le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
2091 				ext4_ext_get_actual_len(ex), 0);
2092 			if (ret < 0)
2093 				break;
2094 			ret = JBD2_FC_REPLAY_CONTINUE;
2095 			fallthrough;
2096 		case EXT4_FC_TAG_DEL_RANGE:
2097 		case EXT4_FC_TAG_LINK:
2098 		case EXT4_FC_TAG_UNLINK:
2099 		case EXT4_FC_TAG_CREAT:
2100 		case EXT4_FC_TAG_INODE:
2101 		case EXT4_FC_TAG_PAD:
2102 			state->fc_cur_tag++;
2103 			state->fc_crc = ext4_chksum(state->fc_crc, cur,
2104 				EXT4_FC_TAG_BASE_LEN + tl.fc_len);
2105 			break;
2106 		case EXT4_FC_TAG_TAIL:
2107 			state->fc_cur_tag++;
2108 			memcpy(&tail, val, sizeof(tail));
2109 			state->fc_crc = ext4_chksum(state->fc_crc, cur,
2110 						EXT4_FC_TAG_BASE_LEN +
2111 						offsetof(struct ext4_fc_tail,
2112 						fc_crc));
2113 			if (le32_to_cpu(tail.fc_tid) == expected_tid &&
2114 				le32_to_cpu(tail.fc_crc) == state->fc_crc) {
2115 				state->fc_replay_num_tags = state->fc_cur_tag;
2116 				state->fc_regions_valid =
2117 					state->fc_regions_used;
2118 			} else {
2119 				ret = state->fc_replay_num_tags ?
2120 					JBD2_FC_REPLAY_STOP : -EFSBADCRC;
2121 			}
2122 			state->fc_crc = 0;
2123 			break;
2124 		case EXT4_FC_TAG_HEAD:
2125 			memcpy(&head, val, sizeof(head));
2126 			if (le32_to_cpu(head.fc_features) &
2127 				~EXT4_FC_SUPPORTED_FEATURES) {
2128 				ret = -EOPNOTSUPP;
2129 				break;
2130 			}
2131 			if (le32_to_cpu(head.fc_tid) != expected_tid) {
2132 				ret = JBD2_FC_REPLAY_STOP;
2133 				break;
2134 			}
2135 			state->fc_cur_tag++;
2136 			state->fc_crc = ext4_chksum(state->fc_crc, cur,
2137 				EXT4_FC_TAG_BASE_LEN + tl.fc_len);
2138 			break;
2139 		default:
2140 			ret = state->fc_replay_num_tags ?
2141 				JBD2_FC_REPLAY_STOP : -ECANCELED;
2142 		}
2143 		if (ret < 0 || ret == JBD2_FC_REPLAY_STOP)
2144 			break;
2145 	}
2146 
2147 out_err:
2148 	trace_ext4_fc_replay_scan(sb, ret, off);
2149 	return ret;
2150 }
2151 
2152 /*
2153  * Main recovery path entry point.
2154  * The meaning of return codes is similar as above.
2155  */
2156 static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
2157 				enum passtype pass, int off, tid_t expected_tid)
2158 {
2159 	struct super_block *sb = journal->j_private;
2160 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2161 	struct ext4_fc_tl_mem tl;
2162 	__u8 *start, *end, *cur, *val;
2163 	int ret = JBD2_FC_REPLAY_CONTINUE;
2164 	struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
2165 	struct ext4_fc_tail tail;
2166 
2167 	if (pass == PASS_SCAN) {
2168 		state->fc_current_pass = PASS_SCAN;
2169 		return ext4_fc_replay_scan(journal, bh, off, expected_tid);
2170 	}
2171 
2172 	if (state->fc_current_pass != pass) {
2173 		state->fc_current_pass = pass;
2174 		sbi->s_mount_state |= EXT4_FC_REPLAY;
2175 	}
2176 	if (!sbi->s_fc_replay_state.fc_replay_num_tags) {
2177 		ext4_debug("Replay stops\n");
2178 		ext4_fc_set_bitmaps_and_counters(sb);
2179 		return 0;
2180 	}
2181 
2182 #ifdef CONFIG_EXT4_DEBUG
2183 	if (sbi->s_fc_debug_max_replay && off >= sbi->s_fc_debug_max_replay) {
2184 		pr_warn("Dropping fc block %d because max_replay set\n", off);
2185 		return JBD2_FC_REPLAY_STOP;
2186 	}
2187 #endif
2188 
2189 	start = (u8 *)bh->b_data;
2190 	end = start + journal->j_blocksize;
2191 
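	/*
	 * Dispatch each TLV in this block to its tag-specific replay handler
	 * until the number of tags counted during the scan phase is exhausted.
	 */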
2192 	for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN;
2193 	     cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) {
2194 		ext4_fc_get_tl(&tl, cur);
2195 		val = cur + EXT4_FC_TAG_BASE_LEN;
2196 
2197 		if (state->fc_replay_num_tags == 0) {
2198 			ret = JBD2_FC_REPLAY_STOP;
2199 			ext4_fc_set_bitmaps_and_counters(sb);
2200 			break;
2201 		}
2202 
2203 		ext4_debug("Replay phase, tag:%s\n", tag2str(tl.fc_tag));
2204 		state->fc_replay_num_tags--;
2205 		switch (tl.fc_tag) {
2206 		case EXT4_FC_TAG_LINK:
2207 			ret = ext4_fc_replay_link(sb, &tl, val);
2208 			break;
2209 		case EXT4_FC_TAG_UNLINK:
2210 			ret = ext4_fc_replay_unlink(sb, &tl, val);
2211 			break;
2212 		case EXT4_FC_TAG_ADD_RANGE:
2213 			ret = ext4_fc_replay_add_range(sb, val);
2214 			break;
2215 		case EXT4_FC_TAG_CREAT:
2216 			ret = ext4_fc_replay_create(sb, &tl, val);
2217 			break;
2218 		case EXT4_FC_TAG_DEL_RANGE:
2219 			ret = ext4_fc_replay_del_range(sb, val);
2220 			break;
2221 		case EXT4_FC_TAG_INODE:
2222 			ret = ext4_fc_replay_inode(sb, &tl, val);
2223 			break;
2224 		case EXT4_FC_TAG_PAD:
2225 			trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0,
2226 					     tl.fc_len, 0);
2227 			break;
2228 		case EXT4_FC_TAG_TAIL:
2229 			trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL,
2230 					     0, tl.fc_len, 0);
2231 			memcpy(&tail, val, sizeof(tail));
2232 			WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid);
2233 			break;
2234 		case EXT4_FC_TAG_HEAD:
2235 			break;
2236 		default:
2237 			trace_ext4_fc_replay(sb, tl.fc_tag, 0, tl.fc_len, 0);
2238 			ret = -ECANCELED;
2239 			break;
2240 		}
2241 		if (ret < 0)
2242 			break;
2243 		ret = JBD2_FC_REPLAY_CONTINUE;
2244 	}
2245 	return ret;
2246 }
2247 
2248 void ext4_fc_init(struct super_block *sb, journal_t *journal)
2249 {
2250 	/*
2251 	 * We set the replay callback even if fast commit is disabled because we
2252 	 * could still have fast commit blocks that need to be replayed even if
2253 	 * fast commit has now been turned off.
2254 	 */
2255 	journal->j_fc_replay_callback = ext4_fc_replay;
2256 	if (!test_opt2(sb, JOURNAL_FAST_COMMIT))
2257 		return;
2258 	journal->j_fc_cleanup_callback = ext4_fc_cleanup;
2259 }
2260 
2261 static const char * const fc_ineligible_reasons[] = {
2262 	[EXT4_FC_REASON_XATTR] = "Extended attributes changed",
2263 	[EXT4_FC_REASON_CROSS_RENAME] = "Cross rename",
2264 	[EXT4_FC_REASON_JOURNAL_FLAG_CHANGE] = "Journal flag changed",
2265 	[EXT4_FC_REASON_NOMEM] = "Insufficient memory",
2266 	[EXT4_FC_REASON_SWAP_BOOT] = "Swap boot",
2267 	[EXT4_FC_REASON_RESIZE] = "Resize",
2268 	[EXT4_FC_REASON_RENAME_DIR] = "Dir renamed",
2269 	[EXT4_FC_REASON_FALLOC_RANGE] = "Falloc range op",
2270 	[EXT4_FC_REASON_INODE_JOURNAL_DATA] = "Data journalling",
2271 	[EXT4_FC_REASON_ENCRYPTED_FILENAME] = "Encrypted filename",
2272 	[EXT4_FC_REASON_MIGRATE] = "Inode format migration",
2273 	[EXT4_FC_REASON_VERITY] = "fs-verity enable",
2274 	[EXT4_FC_REASON_MOVE_EXT] = "Move extents",
2275 };
2276 
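/*
 * seq_file show handler that prints the fast commit statistics and the
 * per-reason ineligible commit counts.
 */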
2277 int ext4_fc_info_show(struct seq_file *seq, void *v)
2278 {
2279 	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *)seq->private);
2280 	struct ext4_fc_stats *stats = &sbi->s_fc_stats;
2281 	int i;
2282 
2283 	if (v != SEQ_START_TOKEN)
2284 		return 0;
2285 
2286 	seq_printf(seq,
2287 		"fc stats:\n%ld commits\n%ld ineligible\n%ld numblks\n%lluus avg_commit_time\n",
2288 		   stats->fc_num_commits, stats->fc_ineligible_commits,
2289 		   stats->fc_numblks,
2290 		   div_u64(stats->s_fc_avg_commit_time, 1000));
2291 	seq_puts(seq, "Ineligible reasons:\n");
2292 	for (i = 0; i < EXT4_FC_REASON_MAX; i++)
2293 		seq_printf(seq, "\"%s\":\t%d\n", fc_ineligible_reasons[i],
2294 			stats->fc_ineligible_reason_count[i]);
2295 
2296 	return 0;
2297 }
2298 
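/* Create the slab cache used to track directory entry updates in memory. */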
2299 int __init ext4_fc_init_dentry_cache(void)
2300 {
2301 	ext4_fc_dentry_cachep = KMEM_CACHE(ext4_fc_dentry_update,
2302 					   SLAB_RECLAIM_ACCOUNT);
2303 
2304 	if (ext4_fc_dentry_cachep == NULL)
2305 		return -ENOMEM;
2306 
2307 	return 0;
2308 }
2309 
2310 void ext4_fc_destroy_dentry_cache(void)
2311 {
2312 	kmem_cache_destroy(ext4_fc_dentry_cachep);
2313 }
2314