xref: /linux/fs/gfs2/trans.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
5  */
6 
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 
9 #include <linux/sched.h>
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/kallsyms.h>
15 #include <linux/gfs2_ondisk.h>
16 
17 #include "gfs2.h"
18 #include "incore.h"
19 #include "glock.h"
20 #include "inode.h"
21 #include "log.h"
22 #include "lops.h"
23 #include "meta_io.h"
24 #include "trans.h"
25 #include "util.h"
26 #include "trace_gfs2.h"
27 
/*
 * gfs2_print_trans - dump a transaction's state to the kernel log
 * @sdp: the filesystem superblock
 * @tr: the transaction to describe
 *
 * Debugging aid, called when a transaction is found in an unexpected
 * state (a nested transaction in __gfs2_trans_begin(), or a failed
 * accounting check in gfs2_trans_end()).
 */
static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
{
	/* tr_ip records the return address of the transaction's creator */
	fs_warn(sdp, "Transaction created at: %pSR\n", (void *)tr->tr_ip);
	fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
		tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
		test_bit(TR_TOUCHED, &tr->tr_flags));
	fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u\n",
		tr->tr_num_buf_new, tr->tr_num_buf_rm,
		tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
		tr->tr_num_revoke);
}
39 
/*
 * __gfs2_trans_begin - initialize and reserve a transaction
 * @tr: the caller-provided transaction to initialize
 * @sdp: the filesystem superblock
 * @blocks: number of log blocks the transaction may dirty
 * @revokes: number of revokes the transaction may add
 * @ip: caller address, recorded for debugging (see gfs2_print_trans())
 *
 * On success, returns 0 with sd_log_flush_lock held for reading and
 * current->journal_info pointing at @tr; both are undone by
 * gfs2_trans_end().  Returns -EROFS if the filesystem has been
 * withdrawn or the journal is no longer live, or -EINVAL if the
 * requested reservation cannot fit in the journal at all.
 */
int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
		       unsigned int blocks, unsigned int revokes,
		       unsigned long ip)
{
	unsigned int extra_revokes;

	/* Nested transactions are a bug; dump the existing one and die. */
	if (current->journal_info) {
		gfs2_print_trans(sdp, current->journal_info);
		BUG();
	}
	BUG_ON(blocks == 0 && revokes == 0);

	if (gfs2_withdrawn(sdp))
		return -EROFS;

	tr->tr_ip = ip;
	tr->tr_blocks = blocks;
	tr->tr_revokes = revokes;
	/* Always reserve enough room for a minimal log flush. */
	tr->tr_reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
	if (blocks) {
		/*
		 * The reserved blocks are either used for data or metadata.
		 * We can have mixed data and metadata, each with its own log
		 * descriptor block; see calc_reserved().
		 */
		tr->tr_reserved += blocks + 1 + DIV_ROUND_UP(blocks - 1, databuf_limit(sdp));
	}
	INIT_LIST_HEAD(&tr->tr_databuf);
	INIT_LIST_HEAD(&tr->tr_buf);
	INIT_LIST_HEAD(&tr->tr_list);
	INIT_LIST_HEAD(&tr->tr_ail1_list);
	INIT_LIST_HEAD(&tr->tr_ail2_list);

	/* A reservation bigger than the whole journal can never succeed. */
	if (gfs2_assert_warn(sdp, tr->tr_reserved <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;

	/* Paired with sb_end_intwrite() in gfs2_trans_end() or below. */
	sb_start_intwrite(sdp->sd_vfs);

	/*
	 * Try the reservations under sd_log_flush_lock to prevent log flushes
	 * from creating inconsistencies between the number of allocated and
	 * reserved revokes.  If that fails, do a full-block allocation outside
	 * of the lock to avoid stalling log flushes.  Then, allot the
	 * appropriate number of blocks to revokes, use as many revokes locally
	 * as needed, and "release" the surplus into the revokes pool.
	 */

	down_read(&sdp->sd_log_flush_lock);
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)))
		goto out_not_live;
	if (gfs2_log_try_reserve(sdp, tr, &extra_revokes))
		goto reserved;

	/* Blocking reservation, done outside the flush lock. */
	up_read(&sdp->sd_log_flush_lock);
	gfs2_log_reserve(sdp, tr, &extra_revokes);
	down_read(&sdp->sd_log_flush_lock);
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		/* The journal died while we were waiting: undo everything. */
		revokes = tr->tr_revokes + extra_revokes;
		gfs2_log_release_revokes(sdp, revokes);
		gfs2_log_release(sdp, tr->tr_reserved);
		goto out_not_live;
	}

reserved:
	/* Return surplus revokes to the shared pool. */
	gfs2_log_release_revokes(sdp, extra_revokes);
	current->journal_info = tr;
	return 0;

out_not_live:
	up_read(&sdp->sd_log_flush_lock);
	sb_end_intwrite(sdp->sd_vfs);
	return -EROFS;
}
113 
114 int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
115 		     unsigned int revokes)
116 {
117 	struct gfs2_trans *tr;
118 	int error;
119 
120 	tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS);
121 	if (!tr)
122 		return -ENOMEM;
123 	error = __gfs2_trans_begin(tr, sdp, blocks, revokes, _RET_IP_);
124 	if (error)
125 		kmem_cache_free(gfs2_trans_cachep, tr);
126 	return error;
127 }
128 
/*
 * gfs2_trans_end - end the current transaction
 * @sdp: the filesystem superblock
 *
 * Commits the transaction attached to current->journal_info, returns
 * any unused revoke reservations, and drops the sd_log_flush_lock read
 * lock taken in __gfs2_trans_begin().  A transaction that never dirtied
 * anything (TR_TOUCHED unset) is simply unwound: its block and revoke
 * reservations are released back to the log.
 */
void gfs2_trans_end(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr = current->journal_info;
	s64 nbuf;

	/* Detach the transaction from the task before tearing it down. */
	current->journal_info = NULL;

	if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
		/* Nothing was dirtied: give the whole reservation back. */
		gfs2_log_release_revokes(sdp, tr->tr_revokes);
		up_read(&sdp->sd_log_flush_lock);
		gfs2_log_release(sdp, tr->tr_reserved);
		if (!test_bit(TR_ONSTACK, &tr->tr_flags))
			gfs2_trans_free(sdp, tr);
		sb_end_intwrite(sdp->sd_vfs);
		return;
	}

	/* Return the revokes we reserved but did not use. */
	gfs2_log_release_revokes(sdp, tr->tr_revokes - tr->tr_num_revoke);

	/* Net number of buffers this transaction added to the log. */
	nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new;
	nbuf -= tr->tr_num_buf_rm;
	nbuf -= tr->tr_num_databuf_rm;

	/* The transaction must not have exceeded what it reserved. */
	if (gfs2_assert_withdraw(sdp, nbuf <= tr->tr_blocks) ||
	    gfs2_assert_withdraw(sdp, tr->tr_num_revoke <= tr->tr_revokes))
		gfs2_print_trans(sdp, tr);

	gfs2_log_commit(sdp, tr);
	/*
	 * NOTE(review): TR_ATTACHED transactions appear to remain
	 * referenced by the log after commit (flag set elsewhere —
	 * confirm against log.c), so only free unattached, heap-
	 * allocated transactions here.
	 */
	if (!test_bit(TR_ONSTACK, &tr->tr_flags) &&
	    !test_bit(TR_ATTACHED, &tr->tr_flags))
		gfs2_trans_free(sdp, tr);
	up_read(&sdp->sd_log_flush_lock);

	/* Honor synchronous mounts by flushing the log immediately. */
	if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_TRANS_END);
	sb_end_intwrite(sdp->sd_vfs);
}
167 
168 static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
169 					       struct buffer_head *bh)
170 {
171 	struct gfs2_bufdata *bd;
172 
173 	bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
174 	bd->bd_bh = bh;
175 	bd->bd_gl = gl;
176 	INIT_LIST_HEAD(&bd->bd_list);
177 	INIT_LIST_HEAD(&bd->bd_ail_st_list);
178 	INIT_LIST_HEAD(&bd->bd_ail_gl_list);
179 	return bd;
180 }
181 
/**
 * gfs2_trans_add_data - Add a databuf to the transaction.
 * @gl: The inode glock associated with the buffer
 * @bh: The buffer to add
 *
 * This is used in journaled data mode.
 * We need to journal the data block in the same way as metadata in
 * the functions above. The difference is that here we have a tag
 * which is two __be64's being the block number (as per meta data)
 * and a flag which says whether the data block needs escaping or
 * not. This means we need a new log entry for each 251 or so data
 * blocks, which isn't an enormous overhead but twice as much as
 * for normal metadata blocks.
 */
void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
{
	struct gfs2_trans *tr = current->journal_info;
	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	if (buffer_pinned(bh)) {
		/* Already in the log: just mark the transaction dirty. */
		set_bit(TR_TOUCHED, &tr->tr_flags);
		goto out;
	}
	spin_lock(&sdp->sd_log_lock);
	bd = bh->b_private;
	if (bd == NULL) {
		/*
		 * Allocating the bufdata can sleep, so drop both locks
		 * first and recheck b_private afterwards in case another
		 * task attached a bufdata in the meantime.
		 */
		spin_unlock(&sdp->sd_log_lock);
		unlock_buffer(bh);
		bd = gfs2_alloc_bufdata(gl, bh);
		lock_buffer(bh);
		spin_lock(&sdp->sd_log_lock);
		if (bh->b_private) {
			/* Lost the race: free ours, use the winner's. */
			kmem_cache_free(gfs2_bufdata_cachep, bd);
			bd = bh->b_private;
		} else {
			bh->b_private = bd;
		}
	}
	gfs2_assert(sdp, bd->bd_gl == gl);
	set_bit(TR_TOUCHED, &tr->tr_flags);
	if (list_empty(&bd->bd_list)) {
		/* First appearance in a transaction: pin and queue it. */
		set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
		set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_databuf_new++;
		list_add_tail(&bd->bd_list, &tr->tr_databuf);
	}
	spin_unlock(&sdp->sd_log_lock);
out:
	unlock_buffer(bh);
}
235 
236 void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio,
237 			     size_t from, size_t len)
238 {
239 	struct buffer_head *head = folio_buffers(folio);
240 	unsigned int bsize = head->b_size;
241 	struct buffer_head *bh;
242 	size_t to = from + len;
243 	size_t start, end;
244 
245 	for (bh = head, start = 0; bh != head || !start;
246 	     bh = bh->b_this_page, start = end) {
247 		end = start + bsize;
248 		if (end <= from)
249 			continue;
250 		if (start >= to)
251 			break;
252 		set_buffer_uptodate(bh);
253 		gfs2_trans_add_data(gl, bh);
254 	}
255 }
256 
/**
 * gfs2_trans_add_meta - Add a metadata buffer to the current transaction
 * @gl: the glock the buffer belongs to
 * @bh: the buffer to add
 *
 * Attaches a gfs2_bufdata to @bh if it does not have one yet, pins the
 * buffer, and queues it on the transaction's tr_buf list.  Buffers that
 * are already pinned or already queued only mark the transaction as
 * touched.  Adding a block without a valid metadata header is a BUG;
 * adding while withdrawn or frozen is refused.
 */
void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
{

	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_bufdata *bd;
	struct gfs2_meta_header *mh;
	struct gfs2_trans *tr = current->journal_info;

	lock_buffer(bh);
	if (buffer_pinned(bh)) {
		/* Already in the log: just mark the transaction dirty. */
		set_bit(TR_TOUCHED, &tr->tr_flags);
		goto out;
	}
	spin_lock(&sdp->sd_log_lock);
	bd = bh->b_private;
	if (bd == NULL) {
		/*
		 * Allocating the bufdata can sleep, so drop both locks
		 * first and recheck b_private afterwards in case another
		 * task attached a bufdata in the meantime.
		 */
		spin_unlock(&sdp->sd_log_lock);
		unlock_buffer(bh);
		bd = gfs2_alloc_bufdata(gl, bh);
		lock_buffer(bh);
		spin_lock(&sdp->sd_log_lock);
		if (bh->b_private) {
			/* Lost the race: free ours, use the winner's. */
			kmem_cache_free(gfs2_bufdata_cachep, bd);
			bd = bh->b_private;
		} else {
			bh->b_private = bd;
		}
	}
	gfs2_assert(sdp, bd->bd_gl == gl);
	set_bit(TR_TOUCHED, &tr->tr_flags);
	if (!list_empty(&bd->bd_list))
		goto out_unlock;
	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
	if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
		/* Journaling a block without a valid header is fatal. */
		fs_err(sdp, "Attempting to add uninitialised block to "
		       "journal (inplace block=%lld)\n",
		       (unsigned long long)bd->bd_bh->b_blocknr);
		BUG();
	}
	if (gfs2_withdrawn(sdp)) {
		fs_info(sdp, "GFS2:adding buf while withdrawn! 0x%llx\n",
			(unsigned long long)bd->bd_bh->b_blocknr);
		goto out_unlock;
	}
	if (unlikely(sb->s_writers.frozen == SB_FREEZE_COMPLETE)) {
		/* Dirtying metadata on a frozen filesystem: withdraw. */
		fs_info(sdp, "GFS2:adding buf while frozen\n");
		gfs2_withdraw(sdp);
		goto out_unlock;
	}
	gfs2_pin(sdp, bd->bd_bh);
	/* Stamp the buffer with this node's journal id before logging. */
	mh->__pad0 = cpu_to_be64(0);
	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	list_add(&bd->bd_list, &tr->tr_buf);
	tr->tr_num_buf_new++;
out_unlock:
	spin_unlock(&sdp->sd_log_lock);
out:
	unlock_buffer(bh);
}
319 
/*
 * gfs2_trans_add_revoke - Add a revoke to the current transaction
 * @sdp: the filesystem superblock
 * @bd: the bufdata describing the block being revoked
 *
 * @bd must not be queued on any list.  Hands it to gfs2_add_revoke()
 * and accounts for it in the current transaction.
 */
void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct gfs2_trans *tr = current->journal_info;

	BUG_ON(!list_empty(&bd->bd_list));
	gfs2_add_revoke(sdp, bd);
	set_bit(TR_TOUCHED, &tr->tr_flags);
	tr->tr_num_revoke++;
}
329 
330 void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
331 {
332 	struct gfs2_bufdata *bd, *tmp;
333 	unsigned int n = len;
334 
335 	spin_lock(&sdp->sd_log_lock);
336 	list_for_each_entry_safe(bd, tmp, &sdp->sd_log_revokes, bd_list) {
337 		if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) {
338 			list_del_init(&bd->bd_list);
339 			gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
340 			sdp->sd_log_num_revoke--;
341 			if (bd->bd_gl)
342 				gfs2_glock_remove_revoke(bd->bd_gl);
343 			kmem_cache_free(gfs2_bufdata_cachep, bd);
344 			gfs2_log_release_revokes(sdp, 1);
345 			if (--n == 0)
346 				break;
347 		}
348 	}
349 	spin_unlock(&sdp->sd_log_lock);
350 }
351 
352 void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
353 {
354 	if (tr == NULL)
355 		return;
356 
357 	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
358 	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
359 	gfs2_assert_warn(sdp, list_empty(&tr->tr_databuf));
360 	gfs2_assert_warn(sdp, list_empty(&tr->tr_buf));
361 	kmem_cache_free(gfs2_trans_cachep, tr);
362 }
363