xref: /linux/fs/gfs2/lops.c (revision ef479de65a700437159d59c00ee5cad6cfc2a89d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
5  */
6 
7 #include <linux/sched.h>
8 #include <linux/slab.h>
9 #include <linux/spinlock.h>
10 #include <linux/completion.h>
11 #include <linux/buffer_head.h>
12 #include <linux/mempool.h>
13 #include <linux/gfs2_ondisk.h>
14 #include <linux/bio.h>
15 #include <linux/fs.h>
16 #include <linux/list_sort.h>
17 #include <linux/blkdev.h>
18 
19 #include "bmap.h"
20 #include "dir.h"
21 #include "gfs2.h"
22 #include "incore.h"
23 #include "inode.h"
24 #include "glock.h"
25 #include "glops.h"
26 #include "log.h"
27 #include "lops.h"
28 #include "meta_io.h"
29 #include "recovery.h"
30 #include "rgrp.h"
31 #include "trans.h"
32 #include "util.h"
33 #include "trace_gfs2.h"
34 
35 /**
36  * gfs2_pin - Pin a buffer in memory
37  * @sdp: The superblock
38  * @bh: The buffer to be pinned
39  *
40  * The log lock must be held when calling this function
41  */
42 void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
43 {
44 	struct gfs2_bufdata *bd;
45 
46 	BUG_ON(!current->journal_info);
47 
48 	clear_buffer_dirty(bh);
49 	if (test_set_buffer_pinned(bh))
50 		gfs2_assert_withdraw(sdp, 0);
51 	if (!buffer_uptodate(bh))
52 		gfs2_io_error_bh_wd(sdp, bh);
53 	bd = bh->b_private;
54 	/* If this buffer is in the AIL and has already been written to its
55 	 * in-place disk block, move it to the AIL2 list.
56 	 */
57 	spin_lock(&sdp->sd_ail_lock);
58 	if (bd->bd_tr)
59 		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
60 	spin_unlock(&sdp->sd_ail_lock);
61 	get_bh(bh);
62 	atomic_inc(&sdp->sd_log_pinned);
63 	trace_gfs2_pin(bd, 1);
64 }
65 
66 static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
67 {
68 	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
69 }
70 
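/*
 * Called when an rgrp bitmap buffer is unpinned: bring the in-memory clone
 * bitmap back in sync with the just-journaled bitmap data, optionally issue
 * discards for freed blocks, and make the freed space allocatable again.
 */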
71 static void maybe_release_space(struct gfs2_bufdata *bd)
72 {
73 	struct gfs2_glock *gl = bd->bd_gl;
74 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
75 	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
76 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
77 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
78 
79 	rgrp_lock_local(rgd);
80 	if (bi->bi_clone == NULL)
81 		goto out;
82 	if (sdp->sd_args.ar_discard)
83 		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
84 	memcpy(bi->bi_clone + bi->bi_offset,
85 	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
86 	clear_bit(GBF_FULL, &bi->bi_flags);
87 	rgd->rd_free_clone = rgd->rd_free;
88 	BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
89 	rgd->rd_extfail_pt = rgd->rd_free;
90 
91 out:
92 	rgrp_unlock_local(rgd);
93 }
94 
95 /**
96  * gfs2_unpin - Unpin a buffer
97  * @sdp: the filesystem the buffer belongs to
98  * @bh: The buffer to unpin
99  * @tr: The system transaction being flushed
100  */
101 
102 static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
103 		       struct gfs2_trans *tr)
104 {
105 	struct gfs2_bufdata *bd = bh->b_private;
106 
107 	BUG_ON(!buffer_uptodate(bh));
108 	BUG_ON(!buffer_pinned(bh));
109 
110 	lock_buffer(bh);
111 	mark_buffer_dirty(bh);
112 	clear_buffer_pinned(bh);
113 
114 	if (buffer_is_rgrp(bd))
115 		maybe_release_space(bd);
116 
117 	spin_lock(&sdp->sd_ail_lock);
118 	if (bd->bd_tr) {
119 		list_del(&bd->bd_ail_st_list);
120 		brelse(bh);
121 	} else {
122 		struct gfs2_glock *gl = bd->bd_gl;
123 		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
124 		atomic_inc(&gl->gl_ail_count);
125 	}
126 	bd->bd_tr = tr;
127 	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
128 	spin_unlock(&sdp->sd_ail_lock);
129 
130 	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
131 	trace_gfs2_pin(bd, 0);
132 	unlock_buffer(bh);
133 	atomic_dec(&sdp->sd_log_pinned);
134 }
135 
136 void gfs2_log_incr_head(struct gfs2_sbd *sdp)
137 {
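	/* The flush head may only catch up with the log tail if it is still
	 * at the log head, i.e. when the log is empty. */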
138 	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
139 	       (sdp->sd_log_flush_head != sdp->sd_log_head));
140 
141 	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
142 		sdp->sd_log_flush_head = 0;
143 }
144 
145 u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
146 {
147 	struct gfs2_journal_extent *je;
148 
149 	list_for_each_entry(je, &jd->extent_list, list) {
150 		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
151 			return je->dblock + lblock - je->lblock;
152 	}
153 
154 	return -1;
155 }
156 
157 /**
158  * gfs2_end_log_write_bh - end log write of pagecache data with buffers
159  * @sdp: The superblock
160  * @folio: The folio
161  * @offset: The first byte within the folio that completed
162  * @size: The number of bytes that completed
163  * @error: The i/o status
164  *
165  * This finds the relevant buffers and unlocks them and sets the
166  * error flag according to the status of the i/o request. This is
167  * used when the log is writing data which has an in-place version
168  * that is pinned in the pagecache.
169  */
170 
171 static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct folio *folio,
172 		size_t offset, size_t size, blk_status_t error)
173 {
174 	struct buffer_head *bh, *next;
175 
176 	bh = folio_buffers(folio);
177 	while (bh_offset(bh) < offset)
178 		bh = bh->b_this_page;
179 	do {
180 		if (error)
181 			mark_buffer_write_io_error(bh);
182 		unlock_buffer(bh);
183 		next = bh->b_this_page;
184 		size -= bh->b_size;
185 		brelse(bh);
186 		bh = next;
187 	} while (bh && size);
188 }
189 
190 /**
191  * gfs2_end_log_write - end of i/o to the log
192  * @bio: The bio
193  *
194  * Each bio_vec contains either data from the pagecache or data
195  * relating to the log itself. Here we iterate over the bio_vec
196  * array, processing both kinds of data.
197  *
198  */
199 
200 static void gfs2_end_log_write(struct bio *bio)
201 {
202 	struct gfs2_sbd *sdp = bio->bi_private;
203 	struct bio_vec *bvec;
204 	struct bvec_iter_all iter_all;
205 
206 	if (bio->bi_status) {
207 		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
208 			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
209 			       bio->bi_status, sdp->sd_jdesc->jd_jid);
210 		gfs2_withdraw_delayed(sdp);
211 		/* prevent more writes to the journal */
212 		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
213 		wake_up(&sdp->sd_logd_waitq);
214 	}
215 
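	/*
	 * Folios with buffer heads are pagecache data written via
	 * gfs2_log_write_bh(); plain pages were allocated from the
	 * gfs2_page_pool mempool and are returned to it here.
	 */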
216 	bio_for_each_segment_all(bvec, bio, iter_all) {
217 		struct page *page = bvec->bv_page;
218 		struct folio *folio = page_folio(page);
219 
220 		if (folio && folio_buffers(folio))
221 			gfs2_end_log_write_bh(sdp, folio, bvec->bv_offset,
222 					bvec->bv_len, bio->bi_status);
223 		else
224 			mempool_free(page, gfs2_page_pool);
225 	}
226 
227 	bio_put(bio);
228 	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
229 		wake_up(&sdp->sd_log_flush_wait);
230 }
231 
232 /**
233  * gfs2_log_submit_bio - Submit any pending log bio
234  * @biop: Address of the bio pointer
235  * @opf: REQ_OP | op_flags
236  *
237  * Submit any pending part-built or full bio to the block device. If
238  * there is no pending bio, then this is a no-op.
239  */
240 
241 void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf)
242 {
243 	struct bio *bio = *biop;
244 	if (bio) {
245 		struct gfs2_sbd *sdp = bio->bi_private;
246 		atomic_inc(&sdp->sd_log_in_flight);
247 		bio->bi_opf = opf;
248 		submit_bio(bio);
249 		*biop = NULL;
250 	}
251 }
252 
253 /**
254  * gfs2_log_alloc_bio - Allocate a bio
255  * @sdp: The super block
256  * @blkno: The device block number we want to write to
257  * @end_io: The bi_end_io callback
258  *
259  * Allocate a new bio, initialize it with the given parameters and return it.
260  *
261  * Returns: The newly allocated bio
262  */
263 
264 static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
265 				      bio_end_io_t *end_io)
266 {
267 	struct super_block *sb = sdp->sd_vfs;
268 	struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO);
269 
270 	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
271 	bio->bi_end_io = end_io;
272 	bio->bi_private = sdp;
273 
274 	return bio;
275 }
276 
277 /**
278  * gfs2_log_get_bio - Get cached log bio, or allocate a new one
279  * @sdp: The super block
280  * @blkno: The device block number we want to write to
281  * @biop: The bio to get or allocate
282  * @op: REQ_OP
283  * @end_io: The bi_end_io callback
284  * @flush: Always flush the current bio and allocate a new one?
285  *
286  * If there is a cached bio, then if the next block number is sequential
287  * with the previous one, return it, otherwise flush the bio to the
288  * device. If there is no cached bio, or we just flushed it, then
289  * allocate a new one.
290  *
291  * Returns: The bio to use for log writes
292  */
293 
294 static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
295 				    struct bio **biop, enum req_op op,
296 				    bio_end_io_t *end_io, bool flush)
297 {
298 	struct bio *bio = *biop;
299 
300 	if (bio) {
301 		u64 nblk;
302 
303 		nblk = bio_end_sector(bio);
304 		nblk >>= sdp->sd_fsb2bb_shift;
305 		if (blkno == nblk && !flush)
306 			return bio;
307 		gfs2_log_submit_bio(biop, op);
308 	}
309 
310 	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
311 	return *biop;
312 }
313 
314 /**
315  * gfs2_log_write - write to log
316  * @sdp: the filesystem
317  * @jd: The journal descriptor
318  * @page: the page to write
319  * @size: the size of the data to write
320  * @offset: the offset within the page
321  * @blkno: block number of the log entry
322  *
323  * Try and add the page segment to the current bio. If that fails,
324  * submit the current bio to the device and create a new one, and
325  * then add the page segment to that.
326  */
327 
328 void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
329 		    struct page *page, unsigned size, unsigned offset,
330 		    u64 blkno)
331 {
332 	struct bio *bio;
333 	int ret;
334 
335 	bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
336 			       gfs2_end_log_write, false);
337 	ret = bio_add_page(bio, page, size, offset);
338 	if (ret == 0) {
339 		bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
340 				       REQ_OP_WRITE, gfs2_end_log_write, true);
341 		ret = bio_add_page(bio, page, size, offset);
342 		WARN_ON(ret == 0);
343 	}
344 }
345 
346 /**
347  * gfs2_log_write_bh - write a buffer's content to the log
348  * @sdp: The super block
349  * @bh: The buffer pointing to the in-place location
350  *
351  * This writes the content of the buffer to the next available location
352  * in the log. The buffer will be unlocked once the i/o to the log has
353  * completed.
354  */
355 
356 static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
357 {
358 	u64 dblock;
359 
360 	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
361 	gfs2_log_incr_head(sdp);
362 	gfs2_log_write(sdp, sdp->sd_jdesc, folio_page(bh->b_folio, 0),
363 			bh->b_size, bh_offset(bh), dblock);
364 }
365 
366 /**
367  * gfs2_log_write_page - write one block stored in a page, into the log
368  * @sdp: The superblock
369  * @page: The struct page
370  *
371  * This writes the first block-sized part of the page into the log. Note
372  * that the page must have been allocated from the gfs2_page_pool mempool
373  * and that after this has been called, ownership has been transferred and
374  * the page may be freed at any time.
375  */
376 
377 static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
378 {
379 	struct super_block *sb = sdp->sd_vfs;
380 	u64 dblock;
381 
382 	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
383 	gfs2_log_incr_head(sdp);
384 	gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
385 }
386 
387 /**
388  * gfs2_end_log_read - end I/O callback for reads from the log
389  * @bio: The bio
390  *
391  * Simply unlock the pages in the bio. The main thread will wait on them and
392  * process them in order as necessary.
393  */
394 static void gfs2_end_log_read(struct bio *bio)
395 {
396 	int error = blk_status_to_errno(bio->bi_status);
397 	struct folio_iter fi;
398 
399 	bio_for_each_folio_all(fi, bio) {
400 		/* We're abusing wb_err to get the error to gfs2_find_jhead */
401 		filemap_set_wb_err(fi.folio->mapping, error);
402 		folio_end_read(fi.folio, !error);
403 	}
404 
405 	bio_put(bio);
406 }
407 
408 /**
409  * gfs2_jhead_folio_search - Look for the journal head in a given folio.
410  * @jd: The journal descriptor
411  * @head: The journal head to start from
412  * @folio: The folio to look in
413  *
414  * Returns: true if found, false otherwise.
415  */
416 static bool gfs2_jhead_folio_search(struct gfs2_jdesc *jd,
417 				    struct gfs2_log_header_host *head,
418 				    struct folio *folio)
419 {
420 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
421 	struct gfs2_log_header_host lh;
422 	void *kaddr;
423 	unsigned int offset;
424 	bool ret = false;
425 
426 	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
427 	kaddr = kmap_local_folio(folio, 0);
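	/*
	 * Log headers carry strictly increasing sequence numbers, so the head
	 * is the last header seen before the sequence number drops.
	 */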
428 	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
429 		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
430 			if (lh.lh_sequence >= head->lh_sequence)
431 				*head = lh;
432 			else {
433 				ret = true;
434 				break;
435 			}
436 		}
437 	}
438 	kunmap_local(kaddr);
439 	return ret;
440 }
441 
442 /**
443  * gfs2_jhead_process_page - Search/cleanup a page
444  * @jd: The journal descriptor
445  * @index: Index of the page to look into
446  * @head: The journal head to start from
447  * @done: If set, perform only cleanup, else search and set if found.
448  *
449  * Find the folio with 'index' in the journal's mapping. Search the folio for
450  * the journal head if requested (i.e. when *done is false). Release refs on the
451  * folio so the page cache can reclaim it. We grabbed a
452  * reference on this folio twice, first when we did a filemap_grab_folio()
453  * to obtain the folio to add it to the bio and second when we do a
454  * filemap_get_folio() here to get the folio to wait on while I/O on it is being
455  * completed.
456  * This function is also used to free up a folio we might've grabbed but not
457  * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
458  * submitted the I/O, but we already found the jhead so we only need to drop
459  * our references to the folio.
460  */
461 
462 static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
463 				    struct gfs2_log_header_host *head,
464 				    bool *done)
465 {
466 	struct folio *folio;
467 
468 	folio = filemap_get_folio(jd->jd_inode->i_mapping, index);
469 
470 	folio_wait_locked(folio);
471 	if (!folio_test_uptodate(folio))
472 		*done = true;
473 
474 	if (!*done)
475 		*done = gfs2_jhead_folio_search(jd, head, folio);
476 
477 	/* filemap_get_folio() and the earlier grab_cache_page() */
478 	folio_put_refs(folio, 2);
479 }
480 
481 static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
482 {
483 	struct bio *new;
484 
485 	new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
486 	bio_clone_blkg_association(new, prev);
487 	new->bi_iter.bi_sector = bio_end_sector(prev);
488 	bio_chain(new, prev);
489 	submit_bio(prev);
490 	return new;
491 }
492 
493 /**
494  * gfs2_find_jhead - find the head of a log
495  * @jd: The journal descriptor
496  * @head: The log descriptor for the head of the log is returned here
497  * @keep_cache: If set inode pages will not be truncated
498  *
499  * Do a search of a journal by reading it in large chunks using bios and find
500  * the valid log entry with the highest sequence number (i.e. the log head).
501  *
502  * Returns: 0 on success, errno otherwise
503  */
504 int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
505 		    bool keep_cache)
506 {
507 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
508 	struct address_space *mapping = jd->jd_inode->i_mapping;
509 	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
510 	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
511 	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
512 	unsigned int shift = PAGE_SHIFT - bsize_shift;
513 	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
514 	struct gfs2_journal_extent *je;
515 	int ret = 0;
516 	struct bio *bio = NULL;
517 	struct folio *folio = NULL;
518 	bool done = false;
519 	errseq_t since;
520 
521 	memset(head, 0, sizeof(*head));
522 	if (list_empty(&jd->extent_list))
523 		gfs2_map_journal_extents(sdp, jd);
524 
525 	since = filemap_sample_wb_err(mapping);
526 	list_for_each_entry(je, &jd->extent_list, list) {
527 		u64 dblock = je->dblock;
528 
529 		for (; block < je->lblock + je->blocks; block++, dblock++) {
530 			if (!folio) {
531 				folio = filemap_grab_folio(mapping,
532 						block >> shift);
533 				if (IS_ERR(folio)) {
534 					ret = PTR_ERR(folio);
535 					done = true;
536 					goto out;
537 				}
538 				off = 0;
539 			}
540 
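			/*
			 * Try to append this block to the read bio in progress:
			 * directly if it is physically contiguous, or via a
			 * chained bio when we are in the middle of a folio.
			 * Otherwise (or once the read-ahead window of max_blocks
			 * is used up) submit the bio and start a new one below.
			 */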
541 			if (bio && (off || block < blocks_submitted + max_blocks)) {
542 				sector_t sector = dblock << sdp->sd_fsb2bb_shift;
543 
544 				if (bio_end_sector(bio) == sector) {
545 					if (bio_add_folio(bio, folio, bsize, off))
546 						goto block_added;
547 				}
548 				if (off) {
549 					unsigned int blocks =
550 						(PAGE_SIZE - off) >> bsize_shift;
551 
552 					bio = gfs2_chain_bio(bio, blocks);
553 					goto add_block_to_new_bio;
554 				}
555 			}
556 
557 			if (bio) {
558 				blocks_submitted = block;
559 				submit_bio(bio);
560 			}
561 
562 			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
563 			bio->bi_opf = REQ_OP_READ;
564 add_block_to_new_bio:
565 			if (!bio_add_folio(bio, folio, bsize, off))
566 				BUG();
567 block_added:
568 			off += bsize;
569 			if (off == folio_size(folio))
570 				folio = NULL;
571 			if (blocks_submitted <= blocks_read + max_blocks) {
572 				/* Keep at least one bio in flight */
573 				continue;
574 			}
575 
576 			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
577 			blocks_read += PAGE_SIZE >> bsize_shift;
578 			if (done)
579 				goto out;  /* found */
580 		}
581 	}
582 
583 out:
584 	if (bio)
585 		submit_bio(bio);
586 	while (blocks_read < block) {
587 		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
588 		blocks_read += PAGE_SIZE >> bsize_shift;
589 	}
590 
591 	if (!ret)
592 		ret = filemap_check_wb_err(mapping, since);
593 
594 	if (!keep_cache)
595 		truncate_inode_pages(mapping, 0);
596 
597 	return ret;
598 }
599 
600 static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
601 				      u32 ld_length, u32 ld_data1)
602 {
603 	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
604 	struct gfs2_log_descriptor *ld = page_address(page);
605 	clear_page(ld);
606 	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
607 	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
608 	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
609 	ld->ld_type = cpu_to_be32(ld_type);
610 	ld->ld_length = cpu_to_be32(ld_length);
611 	ld->ld_data1 = cpu_to_be32(ld_data1);
612 	ld->ld_data2 = 0;
613 	return page;
614 }
615 
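/*
 * Journaled data blocks that happen to begin with GFS2_MAGIC could be
 * mistaken for metadata during log scanning, so they are "escaped": the copy
 * written to the log gets its first word zeroed (see gfs2_before_commit) and
 * an escape flag is recorded in the log descriptor so that replay can restore
 * the magic number (see databuf_lo_scan_elements).
 */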
616 static void gfs2_check_magic(struct buffer_head *bh)
617 {
618 	__be32 *ptr;
619 
620 	clear_buffer_escaped(bh);
621 	ptr = kmap_local_folio(bh->b_folio, bh_offset(bh));
622 	if (*ptr == cpu_to_be32(GFS2_MAGIC))
623 		set_buffer_escaped(bh);
624 	kunmap_local(ptr);
625 }
626 
627 static int blocknr_cmp(void *priv, const struct list_head *a,
628 		       const struct list_head *b)
629 {
630 	struct gfs2_bufdata *bda, *bdb;
631 
632 	bda = list_entry(a, struct gfs2_bufdata, bd_list);
633 	bdb = list_entry(b, struct gfs2_bufdata, bd_list);
634 
635 	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
636 		return -1;
637 	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
638 		return 1;
639 	return 0;
640 }
641 
642 static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
643 				unsigned int total, struct list_head *blist,
644 				bool is_databuf)
645 {
646 	struct gfs2_log_descriptor *ld;
647 	struct gfs2_bufdata *bd1 = NULL, *bd2;
648 	struct page *page;
649 	unsigned int num;
650 	unsigned n;
651 	__be64 *ptr;
652 
653 	gfs2_log_lock(sdp);
654 	list_sort(NULL, blist, blocknr_cmp);
655 	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
656 	while(total) {
657 		num = total;
658 		if (total > limit)
659 			num = limit;
660 		gfs2_log_unlock(sdp);
661 		page = gfs2_get_log_desc(sdp,
662 					 is_databuf ? GFS2_LOG_DESC_JDATA :
663 					 GFS2_LOG_DESC_METADATA, num + 1, num);
664 		ld = page_address(page);
665 		gfs2_log_lock(sdp);
666 		ptr = (__be64 *)(ld + 1);
667 
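		/*
		 * First pass: record the block number of each buffer in the
		 * log descriptor (plus an escape flag for journaled data).
		 */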
668 		n = 0;
669 		list_for_each_entry_continue(bd1, blist, bd_list) {
670 			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
671 			if (is_databuf) {
672 				gfs2_check_magic(bd1->bd_bh);
673 				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
674 			}
675 			if (++n >= num)
676 				break;
677 		}
678 
679 		gfs2_log_unlock(sdp);
680 		gfs2_log_write_page(sdp, page);
681 		gfs2_log_lock(sdp);
682 
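		/*
		 * Second pass: write out the buffer contents themselves.
		 * Escaped buffers are copied to a scratch page with the magic
		 * word zeroed before being written.
		 */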
683 		n = 0;
684 		list_for_each_entry_continue(bd2, blist, bd_list) {
685 			get_bh(bd2->bd_bh);
686 			gfs2_log_unlock(sdp);
687 			lock_buffer(bd2->bd_bh);
688 
689 			if (buffer_escaped(bd2->bd_bh)) {
690 				void *p;
691 
692 				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
693 				p = page_address(page);
694 				memcpy_from_folio(p, bd2->bd_bh->b_folio, bh_offset(bd2->bd_bh), bd2->bd_bh->b_size);
695 				*(__be32 *)p = 0;
696 				clear_buffer_escaped(bd2->bd_bh);
697 				unlock_buffer(bd2->bd_bh);
698 				brelse(bd2->bd_bh);
699 				gfs2_log_write_page(sdp, page);
700 			} else {
701 				gfs2_log_write_bh(sdp, bd2->bd_bh);
702 			}
703 			gfs2_log_lock(sdp);
704 			if (++n >= num)
705 				break;
706 		}
707 
708 		BUG_ON(total < num);
709 		total -= num;
710 	}
711 	gfs2_log_unlock(sdp);
712 }
713 
714 static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
715 {
716 	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
717 	unsigned int nbuf;
718 	if (tr == NULL)
719 		return;
720 	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
721 	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
722 }
723 
724 static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
725 {
726 	struct list_head *head;
727 	struct gfs2_bufdata *bd;
728 
729 	if (tr == NULL)
730 		return;
731 
732 	head = &tr->tr_buf;
733 	while (!list_empty(head)) {
734 		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
735 		list_del_init(&bd->bd_list);
736 		gfs2_unpin(sdp, bd->bd_bh, tr);
737 	}
738 }
739 
740 static void buf_lo_before_scan(struct gfs2_jdesc *jd,
741 			       struct gfs2_log_header_host *head, int pass)
742 {
743 	if (pass != 0)
744 		return;
745 
746 	jd->jd_found_blocks = 0;
747 	jd->jd_replayed_blocks = 0;
748 }
749 
750 #define obsolete_rgrp_replay \
751 "Replaying 0x%llx from jid=%d/0x%llx but we already have a bh!\n"
752 #define obsolete_rgrp_replay2 \
753 "busy:%d, pinned:%d rg_gen:0x%llx, j_gen:0x%llx\n"
754 
755 static void obsolete_rgrp(struct gfs2_jdesc *jd, struct buffer_head *bh_log,
756 			  u64 blkno)
757 {
758 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
759 	struct gfs2_rgrpd *rgd;
760 	struct gfs2_rgrp *jrgd = (struct gfs2_rgrp *)bh_log->b_data;
761 
762 	rgd = gfs2_blk2rgrpd(sdp, blkno, false);
763 	if (rgd && rgd->rd_addr == blkno &&
764 	    rgd->rd_bits && rgd->rd_bits->bi_bh) {
765 		fs_info(sdp, obsolete_rgrp_replay, (unsigned long long)blkno,
766 			jd->jd_jid, bh_log->b_blocknr);
767 		fs_info(sdp, obsolete_rgrp_replay2,
768 			buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
769 			buffer_pinned(rgd->rd_bits->bi_bh),
770 			rgd->rd_igeneration,
771 			be64_to_cpu(jrgd->rg_igeneration));
772 		gfs2_dump_glock(NULL, rgd->rd_gl, true);
773 	}
774 }
775 
776 static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
777 				struct gfs2_log_descriptor *ld, __be64 *ptr,
778 				int pass)
779 {
780 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
781 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
782 	struct gfs2_glock *gl = ip->i_gl;
783 	unsigned int blks = be32_to_cpu(ld->ld_data1);
784 	struct buffer_head *bh_log, *bh_ip;
785 	u64 blkno;
786 	int error = 0;
787 
788 	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
789 		return 0;
790 
791 	gfs2_replay_incr_blk(jd, &start);
792 
793 	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
794 		blkno = be64_to_cpu(*ptr++);
795 
796 		jd->jd_found_blocks++;
797 
798 		if (gfs2_revoke_check(jd, blkno, start))
799 			continue;
800 
801 		error = gfs2_replay_read_block(jd, start, &bh_log);
802 		if (error)
803 			return error;
804 
805 		bh_ip = gfs2_meta_new(gl, blkno);
806 		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
807 
808 		if (gfs2_meta_check(sdp, bh_ip))
809 			error = -EIO;
810 		else {
811 			struct gfs2_meta_header *mh =
812 				(struct gfs2_meta_header *)bh_ip->b_data;
813 
814 			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG))
815 				obsolete_rgrp(jd, bh_log, blkno);
816 
817 			mark_buffer_dirty(bh_ip);
818 		}
819 		brelse(bh_log);
820 		brelse(bh_ip);
821 
822 		if (error)
823 			break;
824 
825 		jd->jd_replayed_blocks++;
826 	}
827 
828 	return error;
829 }
830 
831 static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
832 {
833 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
834 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
835 
836 	if (error) {
837 		gfs2_inode_metasync(ip->i_gl);
838 		return;
839 	}
840 	if (pass != 1)
841 		return;
842 
843 	gfs2_inode_metasync(ip->i_gl);
844 
845 	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
846 	        jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
847 }
848 
849 static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
850 {
851 	struct gfs2_meta_header *mh;
852 	unsigned int offset;
853 	struct list_head *head = &sdp->sd_log_revokes;
854 	struct gfs2_bufdata *bd;
855 	struct page *page;
856 	unsigned int length;
857 
858 	gfs2_flush_revokes(sdp);
859 	if (!sdp->sd_log_num_revoke)
860 		return;
861 
862 	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
863 	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
864 	offset = sizeof(struct gfs2_log_descriptor);
865 
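	/*
	 * Pack as many 64-bit revoked block numbers into each log block as
	 * will fit; continuation blocks carry only a gfs2_meta_header of type
	 * GFS2_METATYPE_LB rather than a full log descriptor.
	 */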
866 	list_for_each_entry(bd, head, bd_list) {
867 		sdp->sd_log_num_revoke--;
868 
869 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
870 			gfs2_log_write_page(sdp, page);
871 			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
872 			mh = page_address(page);
873 			clear_page(mh);
874 			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
875 			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
876 			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
877 			offset = sizeof(struct gfs2_meta_header);
878 		}
879 
880 		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
881 		offset += sizeof(u64);
882 	}
883 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
884 
885 	gfs2_log_write_page(sdp, page);
886 }
887 
888 void gfs2_drain_revokes(struct gfs2_sbd *sdp)
889 {
890 	struct list_head *head = &sdp->sd_log_revokes;
891 	struct gfs2_bufdata *bd;
892 	struct gfs2_glock *gl;
893 
894 	while (!list_empty(head)) {
895 		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
896 		list_del_init(&bd->bd_list);
897 		gl = bd->bd_gl;
898 		gfs2_glock_remove_revoke(gl);
899 		kmem_cache_free(gfs2_bufdata_cachep, bd);
900 	}
901 }
902 
903 static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
904 {
905 	gfs2_drain_revokes(sdp);
906 }
907 
908 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
909 				  struct gfs2_log_header_host *head, int pass)
910 {
911 	if (pass != 0)
912 		return;
913 
914 	jd->jd_found_revokes = 0;
915 	jd->jd_replay_tail = head->lh_tail;
916 }
917 
918 static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
919 				   struct gfs2_log_descriptor *ld, __be64 *ptr,
920 				   int pass)
921 {
922 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
923 	unsigned int blks = be32_to_cpu(ld->ld_length);
924 	unsigned int revokes = be32_to_cpu(ld->ld_data1);
925 	struct buffer_head *bh;
926 	unsigned int offset;
927 	u64 blkno;
928 	int first = 1;
929 	int error;
930 
931 	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
932 		return 0;
933 
934 	offset = sizeof(struct gfs2_log_descriptor);
935 
936 	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
937 		error = gfs2_replay_read_block(jd, start, &bh);
938 		if (error)
939 			return error;
940 
941 		if (!first)
942 			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
943 
944 		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
945 			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
946 
947 			error = gfs2_revoke_add(jd, blkno, start);
948 			if (error < 0) {
949 				brelse(bh);
950 				return error;
951 			}
952 			else if (error)
953 				jd->jd_found_revokes++;
954 
955 			if (!--revokes)
956 				break;
957 			offset += sizeof(u64);
958 		}
959 
960 		brelse(bh);
961 		offset = sizeof(struct gfs2_meta_header);
962 		first = 0;
963 	}
964 
965 	return 0;
966 }
967 
968 static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
969 {
970 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
971 
972 	if (error) {
973 		gfs2_revoke_clean(jd);
974 		return;
975 	}
976 	if (pass != 1)
977 		return;
978 
979 	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
980 	        jd->jd_jid, jd->jd_found_revokes);
981 
982 	gfs2_revoke_clean(jd);
983 }
984 
985 /**
986  * databuf_lo_before_commit - Scan the data buffers, writing as we go
987  * @sdp: The filesystem
988  * @tr: The system transaction being flushed
989  */
990 
991 static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
992 {
993 	unsigned int limit = databuf_limit(sdp);
994 	unsigned int nbuf;
995 	if (tr == NULL)
996 		return;
997 	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
998 	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
999 }
1000 
1001 static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
1002 				    struct gfs2_log_descriptor *ld,
1003 				    __be64 *ptr, int pass)
1004 {
1005 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
1006 	struct gfs2_glock *gl = ip->i_gl;
1007 	unsigned int blks = be32_to_cpu(ld->ld_data1);
1008 	struct buffer_head *bh_log, *bh_ip;
1009 	u64 blkno;
1010 	u64 esc;
1011 	int error = 0;
1012 
1013 	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
1014 		return 0;
1015 
1016 	gfs2_replay_incr_blk(jd, &start);
1017 	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
1018 		blkno = be64_to_cpu(*ptr++);
1019 		esc = be64_to_cpu(*ptr++);
1020 
1021 		jd->jd_found_blocks++;
1022 
1023 		if (gfs2_revoke_check(jd, blkno, start))
1024 			continue;
1025 
1026 		error = gfs2_replay_read_block(jd, start, &bh_log);
1027 		if (error)
1028 			return error;
1029 
1030 		bh_ip = gfs2_meta_new(gl, blkno);
1031 		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
1032 
1033 		/* Unescape */
1034 		if (esc) {
1035 			__be32 *eptr = (__be32 *)bh_ip->b_data;
1036 			*eptr = cpu_to_be32(GFS2_MAGIC);
1037 		}
1038 		mark_buffer_dirty(bh_ip);
1039 
1040 		brelse(bh_log);
1041 		brelse(bh_ip);
1042 
1043 		jd->jd_replayed_blocks++;
1044 	}
1045 
1046 	return error;
1047 }
1048 
1049 /* FIXME: sort out accounting for log blocks etc. */
1050 
1051 static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
1052 {
1053 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
1054 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
1055 
1056 	if (error) {
1057 		gfs2_inode_metasync(ip->i_gl);
1058 		return;
1059 	}
1060 	if (pass != 1)
1061 		return;
1062 
1063 	/* data sync? */
1064 	gfs2_inode_metasync(ip->i_gl);
1065 
1066 	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
1067 		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
1068 }
1069 
1070 static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1071 {
1072 	struct list_head *head;
1073 	struct gfs2_bufdata *bd;
1074 
1075 	if (tr == NULL)
1076 		return;
1077 
1078 	head = &tr->tr_databuf;
1079 	while (!list_empty(head)) {
1080 		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
1081 		list_del_init(&bd->bd_list);
1082 		gfs2_unpin(sdp, bd->bd_bh, tr);
1083 	}
1084 }
1085 
1086 
1087 static const struct gfs2_log_operations gfs2_buf_lops = {
1088 	.lo_before_commit = buf_lo_before_commit,
1089 	.lo_after_commit = buf_lo_after_commit,
1090 	.lo_before_scan = buf_lo_before_scan,
1091 	.lo_scan_elements = buf_lo_scan_elements,
1092 	.lo_after_scan = buf_lo_after_scan,
1093 	.lo_name = "buf",
1094 };
1095 
1096 static const struct gfs2_log_operations gfs2_revoke_lops = {
1097 	.lo_before_commit = revoke_lo_before_commit,
1098 	.lo_after_commit = revoke_lo_after_commit,
1099 	.lo_before_scan = revoke_lo_before_scan,
1100 	.lo_scan_elements = revoke_lo_scan_elements,
1101 	.lo_after_scan = revoke_lo_after_scan,
1102 	.lo_name = "revoke",
1103 };
1104 
1105 static const struct gfs2_log_operations gfs2_databuf_lops = {
1106 	.lo_before_commit = databuf_lo_before_commit,
1107 	.lo_after_commit = databuf_lo_after_commit,
1108 	.lo_scan_elements = databuf_lo_scan_elements,
1109 	.lo_after_scan = databuf_lo_after_scan,
1110 	.lo_name = "databuf",
1111 };
1112 
1113 const struct gfs2_log_operations *gfs2_log_ops[] = {
1114 	&gfs2_databuf_lops,
1115 	&gfs2_buf_lops,
1116 	&gfs2_revoke_lops,
1117 	NULL,
1118 };
1119 
1120