/*
 * ext4_jbd2.h
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
 *
 * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Ext4-specific journaling extensions.
 */

#ifndef _EXT4_JBD2_H
#define _EXT4_JBD2_H

#include <linux/fs.h>
#include <linux/jbd2.h>
#include "ext4.h"

#define EXT4_JOURNAL(inode)	(EXT4_SB((inode)->i_sb)->s_journal)

/* Define the number of blocks we need to account to a transaction to
 * modify one block of data.
 *
 * We may have to touch one inode, one bitmap buffer, up to three
 * indirection blocks, the group and superblock summaries, and the data
 * block to complete the transaction.
 *
 * For an extents-enabled filesystem we may have to allocate and modify
 * up to 5 levels of the extent tree, plus the root, which is stored in
 * the inode. */

#define EXT4_SINGLEDATA_TRANS_BLOCKS(sb)				\
	(EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)   \
	 ? 27U : 8U)
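
/*
 * A rough breakdown of those credits (an informal sketch, not normative):
 * for the non-extents case, 1 inode + 1 block bitmap + up to 3
 * indirection blocks + 1 group descriptor + 1 superblock + 1 data
 * block = 8.  The 27-credit figure for the extents case is a generous
 * upper bound covering allocation and modification of a full-depth
 * extent tree; the exact accounting is not spelled out here.
 */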

/* Extended attribute operations touch at most two data buffers,
 * two bitmap buffers, and two group summaries, in addition to the inode
 * and the superblock, which are already accounted for. */

#define EXT4_XATTR_TRANS_BLOCKS		6U

/* Define the minimum size for a transaction which modifies data.  This
 * needs to take into account the fact that we may end up modifying two
 * quota files too (one for the group, one for the user quota).  The
 * superblock only gets updated once, of course, so don't bother
 * counting that again for the quota updates. */

#define EXT4_DATA_TRANS_BLOCKS(sb)	(EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \
					 EXT4_XATTR_TRANS_BLOCKS - 2 + \
					 EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))
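
/*
 * Illustrative evaluation (a sketch, not a guarantee): on an extents
 * filesystem with quota disabled this works out to 27 + 6 - 2 + 0 = 31
 * credits; on a non-extents filesystem it is 8 + 6 - 2 + 0 = 12.
 */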

/*
 * Define the number of metadata blocks we need to account to modify data.
 *
 * This includes the superblock, inode block, quota blocks and xattr blocks.
 */
#define EXT4_META_TRANS_BLOCKS(sb)	(EXT4_XATTR_TRANS_BLOCKS + \
					EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))

/* Delete operations potentially hit one directory's namespace plus an
 * entire inode, plus arbitrary amounts of bitmap/indirection data.  Be
 * generous.  We can grow the delete transaction later if necessary. */

#define EXT4_DELETE_TRANS_BLOCKS(sb)	(2 * EXT4_DATA_TRANS_BLOCKS(sb) + 64)

/* Define an arbitrary limit for the amount of data we will anticipate
 * writing to any given transaction.  For unbounded transactions such as
 * write(2) and truncate(2) we can write more than this, but we always
 * start off at the maximum transaction size and grow the transaction
 * optimistically as we go. */

#define EXT4_MAX_TRANS_DATA		64U

/* We break up a large truncate or write transaction once the handle's
 * buffer credits get this low; we then need either to extend the
 * transaction or to start a new one.  Reserve enough space here for
 * inode, bitmap, superblock, group and indirection updates for at least
 * one block, plus two quota updates.  Quota allocations are not
 * needed. */

#define EXT4_RESERVE_TRANS_BLOCKS	12U

#define EXT4_INDEX_EXTRA_TRANS_BLOCKS	8

#ifdef CONFIG_QUOTA
/* Number of blocks needed for a quota update - we know that the structure
 * was already allocated, so we only need to update the inode + data block */
#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
/* Number of blocks needed for a quota insert/delete - we do some block
 * writes, but the inode, sb and group updates are done only once */
#define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
		(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_INIT_REWRITE) : 0)

#define EXT4_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
		(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_DEL_REWRITE) : 0)
#else
#define EXT4_QUOTA_TRANS_BLOCKS(sb) 0
#define EXT4_QUOTA_INIT_BLOCKS(sb) 0
#define EXT4_QUOTA_DEL_BLOCKS(sb) 0
#endif
#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb))
#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
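
/*
 * Worked example (informal, assuming MAXQUOTAS is 2, i.e. user and group
 * quotas): with the QUOTA mount option set, each quota type costs 2
 * credits per transaction (quota inode + quota data block), so
 * EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) evaluates to 2 * 2 = 4; without
 * CONFIG_QUOTA all three macros collapse to 0.
 */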

int
ext4_mark_iloc_dirty(handle_t *handle,
		     struct inode *inode,
		     struct ext4_iloc *iloc);

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */

int ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			struct ext4_iloc *iloc);

int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode);
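
/*
 * Typical calling pattern for the helpers above (an illustrative sketch
 * only; error handling trimmed):
 *
 *	struct ext4_iloc iloc;
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		... modify the in-core inode ...
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *	}
 *
 * ext4_mark_inode_dirty() is essentially a convenience wrapper around
 * the same two steps for the common case.
 */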

/*
 * Wrapper functions with which ext4 calls into JBD.
 */
void ext4_journal_abort_handle(const char *caller, const char *err_fn,
		struct buffer_head *bh, handle_t *handle, int err);

int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
				struct buffer_head *bh);

int __ext4_journal_get_write_access(const char *where, handle_t *handle,
				struct buffer_head *bh);

int __ext4_forget(const char *where, handle_t *handle, int is_metadata,
		  struct inode *inode, struct buffer_head *bh,
		  ext4_fsblk_t blocknr);

int __ext4_journal_get_create_access(const char *where,
				handle_t *handle, struct buffer_head *bh);

int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
				 struct inode *inode, struct buffer_head *bh);

#define ext4_journal_get_undo_access(handle, bh) \
	__ext4_journal_get_undo_access(__func__, (handle), (bh))
#define ext4_journal_get_write_access(handle, bh) \
	__ext4_journal_get_write_access(__func__, (handle), (bh))
#define ext4_forget(handle, is_metadata, inode, bh, block_nr) \
	__ext4_forget(__func__, (handle), (is_metadata), (inode), (bh),\
		      (block_nr))
#define ext4_journal_get_create_access(handle, bh) \
	__ext4_journal_get_create_access(__func__, (handle), (bh))
#define ext4_handle_dirty_metadata(handle, inode, bh) \
	__ext4_handle_dirty_metadata(__func__, (handle), (inode), (bh))

handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
int __ext4_journal_stop(const char *where, handle_t *handle);
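
/*
 * The usual shape of a journalled metadata update built from the
 * wrappers above (an illustrative sketch; real callers add proper
 * error handling and their own credit estimates):
 *
 *	handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(sb));
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = ext4_journal_get_write_access(handle, bh);
 *	if (!err) {
 *		... modify the buffer contents ...
 *		err = ext4_handle_dirty_metadata(handle, inode, bh);
 *	}
 *	ext4_journal_stop(handle);
 */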

#define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)
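
/*
 * Background (a reader's summary of the no-journal path, see
 * ext4_get_nojournal() in super.c): when ext4 runs without a journal,
 * ext4_journal_start_sb() hands back a small per-task reference count
 * cast to a handle_t * instead of a real jbd2 handle, so any "handle"
 * whose pointer value is below this limit is one of those fake handles.
 */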

/* Note:  Do not use this for NULL handles.  This is only to determine if
 * a properly allocated handle is using a journal or not. */
static inline int ext4_handle_valid(handle_t *handle)
{
	if ((unsigned long)handle < EXT4_NOJOURNAL_MAX_REF_COUNT)
		return 0;
	return 1;
}

static inline void ext4_handle_sync(handle_t *handle)
{
	if (ext4_handle_valid(handle))
		handle->h_sync = 1;
}

static inline void ext4_handle_release_buffer(handle_t *handle,
						struct buffer_head *bh)
{
	if (ext4_handle_valid(handle))
		jbd2_journal_release_buffer(handle, bh);
}

static inline int ext4_handle_is_aborted(handle_t *handle)
{
	if (ext4_handle_valid(handle))
		return is_handle_aborted(handle);
	return 0;
}

static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
{
	if (ext4_handle_valid(handle) && handle->h_buffer_credits < needed)
		return 0;
	return 1;
}

static inline void ext4_journal_release_buffer(handle_t *handle,
						struct buffer_head *bh)
{
	if (ext4_handle_valid(handle))
		jbd2_journal_release_buffer(handle, bh);
}

static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks)
{
	return ext4_journal_start_sb(inode->i_sb, nblocks);
}

#define ext4_journal_stop(handle) \
	__ext4_journal_stop(__func__, (handle))

static inline handle_t *ext4_journal_current_handle(void)
{
	return journal_current_handle();
}

static inline int ext4_journal_extend(handle_t *handle, int nblocks)
{
	if (ext4_handle_valid(handle))
		return jbd2_journal_extend(handle, nblocks);
	return 0;
}

static inline int ext4_journal_restart(handle_t *handle, int nblocks)
{
	if (ext4_handle_valid(handle))
		return jbd2_journal_restart(handle, nblocks);
	return 0;
}
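
/*
 * These two helpers are typically used together with
 * ext4_handle_has_enough_credits() to keep a long-running operation
 * going (an illustrative sketch, not a recipe from this file):
 *
 *	if (!ext4_handle_has_enough_credits(handle, needed)) {
 *		err = ext4_journal_extend(handle, needed);
 *		if (err > 0)
 *			err = ext4_journal_restart(handle, needed);
 *		if (err)
 *			goto out;
 *	}
 *
 * jbd2_journal_extend() returns > 0 when the running transaction cannot
 * grow any further, in which case the caller falls back to restarting
 * the handle in a new transaction.
 */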

static inline int ext4_journal_blocks_per_page(struct inode *inode)
{
	if (EXT4_JOURNAL(inode) != NULL)
		return jbd2_journal_blocks_per_page(inode);
	return 0;
}

static inline int ext4_journal_force_commit(journal_t *journal)
{
	if (journal)
		return jbd2_journal_force_commit(journal);
	return 0;
}

static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode)
{
	if (ext4_handle_valid(handle))
		return jbd2_journal_file_inode(handle, &EXT4_I(inode)->jinode);
	return 0;
}

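/*
 * Stash the tid of the running transaction in i_sync_tid, and also in
 * i_datasync_tid when the caller indicates the change matters even for
 * fdatasync(), so that a later fsync()/fdatasync() can decide which
 * transaction needs committing.
 */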
static inline void ext4_update_inode_fsync_trans(handle_t *handle,
						 struct inode *inode,
						 int datasync)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		if (datasync)
			ei->i_datasync_tid = handle->h_transaction->t_tid;
	}
}

/* super.c */
int ext4_force_commit(struct super_block *sb);

static inline int ext4_should_journal_data(struct inode *inode)
{
	if (EXT4_JOURNAL(inode) == NULL)
		return 0;
	if (!S_ISREG(inode->i_mode))
		return 1;
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
		return 1;
	if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
		return 1;
	return 0;
}

static inline int ext4_should_order_data(struct inode *inode)
{
	if (EXT4_JOURNAL(inode) == NULL)
		return 0;
	if (!S_ISREG(inode->i_mode))
		return 0;
	if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
		return 0;
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
		return 1;
	return 0;
}

static inline int ext4_should_writeback_data(struct inode *inode)
{
	if (!S_ISREG(inode->i_mode))
		return 0;
	if (EXT4_JOURNAL(inode) == NULL)
		return 1;
	if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
		return 0;
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
		return 1;
	return 0;
}
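
/*
 * Worked example (informal): for a regular file on a journalled
 * filesystem mounted with data=ordered and without the per-inode
 * EXT4_JOURNAL_DATA_FL flag, exactly one of the three predicates above
 * is true: ext4_should_order_data() returns 1, while
 * ext4_should_journal_data() and ext4_should_writeback_data() return 0.
 */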

/*
 * This function controls whether or not we should try to go down the
 * dioread_nolock code paths, which makes it safe to avoid taking
 * i_mutex for direct I/O reads.  This only works for extent-based
 * files, and it doesn't work for nobh or if data journaling is
 * enabled, since the dioread_nolock code uses b_private to pass
 * information back to the I/O completion handler, and this conflicts
 * with the jbd's use of b_private.
 */
static inline int ext4_should_dioread_nolock(struct inode *inode)
{
	if (!test_opt(inode->i_sb, DIOREAD_NOLOCK))
		return 0;
	if (test_opt(inode->i_sb, NOBH))
		return 0;
	if (!S_ISREG(inode->i_mode))
		return 0;
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return 0;
	if (ext4_should_journal_data(inode))
		return 0;
	return 1;
}

#endif	/* _EXT4_JBD2_H */