// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function.
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        BUG_ON(!current->journal_info);

        clear_buffer_dirty(bh);
        if (test_set_buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);
        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);
        bd = bh->b_private;
        /* If this buffer is in the AIL and it has already been written
         * to the in-place disk block, remove it from the AIL.
         */
        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_tr)
                list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
        spin_unlock(&sdp->sd_ail_lock);
        get_bh(bh);
        atomic_inc(&sdp->sd_log_pinned);
        trace_gfs2_pin(bd, 1);
}

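/**
 * buffer_is_rgrp - Check if a buffer belongs to a resource group glock
 * @bd: The buffer data
 *
 * Returns: true if the buffer's glock is a resource group glock
 */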
static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
        return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

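/**
 * maybe_release_space - Refresh an rgrp's clone bitmap from an unpinned buffer
 * @bd: The buffer data
 *
 * When an rgrp bitmap buffer is unpinned, the on-disk bitmap has caught up
 * with the clone, so the clone can be refreshed from the buffer's data and
 * the blocks freed in this generation become allocatable again. Discard
 * requests are issued first if the filesystem was mounted with the discard
 * option.
 */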
static void maybe_release_space(struct gfs2_bufdata *bd)
{
        struct gfs2_glock *gl = bd->bd_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
        unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
        struct gfs2_bitmap *bi = rgd->rd_bits + index;

        rgrp_lock_local(rgd);
        if (bi->bi_clone == NULL)
                goto out;
        if (sdp->sd_args.ar_discard)
                gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
        memcpy(bi->bi_clone + bi->bi_offset,
               bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
        clear_bit(GBF_FULL, &bi->bi_flags);
        rgd->rd_free_clone = rgd->rd_free;
        BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
        rgd->rd_extfail_pt = rgd->rd_free;

out:
        rgrp_unlock_local(rgd);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
                       struct gfs2_trans *tr)
{
        struct gfs2_bufdata *bd = bh->b_private;

        BUG_ON(!buffer_uptodate(bh));
        BUG_ON(!buffer_pinned(bh));

        lock_buffer(bh);
        mark_buffer_dirty(bh);
        clear_buffer_pinned(bh);

        if (buffer_is_rgrp(bd))
                maybe_release_space(bd);

        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_tr) {
                list_del(&bd->bd_ail_st_list);
                brelse(bh);
        } else {
                struct gfs2_glock *gl = bd->bd_gl;
                list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
                atomic_inc(&gl->gl_ail_count);
        }
        bd->bd_tr = tr;
        list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        trace_gfs2_pin(bd, 0);
        unlock_buffer(bh);
        atomic_dec(&sdp->sd_log_pinned);
}

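/**
 * gfs2_log_incr_head - Advance the log flush head by one block
 * @sdp: The superblock
 *
 * Wraps back to the start of the journal when the end is reached. The
 * BUG_ON catches the case where the flush head would overrun the log tail.
 */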
void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
        BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
               (sdp->sd_log_flush_head != sdp->sd_log_head));

        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
                sdp->sd_log_flush_head = 0;
}

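/**
 * gfs2_log_bmap - Map a journal-relative block to a device block
 * @jd: The journal descriptor
 * @lblock: The logical journal block number
 *
 * Walks the journal's extent list to translate @lblock into a physical
 * (device) block number. For example, an extent with lblock=0, dblock=1000
 * and blocks=8 maps journal block 3 to device block 1003.
 *
 * Returns: the device block number, or -1 if the block is not mapped
 */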
u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
        struct gfs2_journal_extent *je;

        list_for_each_entry(je, &jd->extent_list, list) {
                if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
                        return je->dblock + lblock - je->lblock;
        }

        return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @folio: The folio
 * @offset: The first byte within the folio that completed
 * @size: The number of bytes that completed
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct folio *folio,
                                  size_t offset, size_t size, blk_status_t error)
{
        struct buffer_head *bh, *next;

        bh = folio_buffers(folio);
        while (bh_offset(bh) < offset)
                bh = bh->b_this_page;
        do {
                if (error)
                        mark_buffer_write_io_error(bh);
                unlock_buffer(bh);
                next = bh->b_this_page;
                size -= bh->b_size;
                brelse(bh);
                bh = next;
        } while (bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 */

static void gfs2_end_log_write(struct bio *bio)
{
        struct gfs2_sbd *sdp = bio->bi_private;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        if (bio->bi_status) {
                int err = blk_status_to_errno(bio->bi_status);

                if (!cmpxchg(&sdp->sd_log_error, 0, err))
                        fs_err(sdp, "Error %d writing to journal, jid=%u\n",
                               err, sdp->sd_jdesc->jd_jid);
                gfs2_withdraw(sdp);
        }

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;
                struct folio *folio = page_folio(page);

                if (folio && folio_buffers(folio))
                        gfs2_end_log_write_bh(sdp, folio, bvec->bv_offset,
                                              bvec->bv_len, bio->bi_status);
                else
                        mempool_free(page, gfs2_page_pool);
        }

        bio_put(bio);
        if (atomic_dec_and_test(&sdp->sd_log_in_flight))
                wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf)
{
        struct bio *bio = *biop;
        if (bio) {
                struct gfs2_sbd *sdp = bio->bi_private;
                atomic_inc(&sdp->sd_log_in_flight);
                bio->bi_opf = opf;
                submit_bio(bio);
                *biop = NULL;
        }
}

/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
                                      bio_end_io_t *end_io)
{
        struct super_block *sb = sdp->sd_vfs;
        struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO);

        bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
        bio->bi_end_io = end_io;
        bio->bi_private = sdp;

        return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: The bio to get or allocate
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
                                    struct bio **biop, enum req_op op,
                                    bio_end_io_t *end_io, bool flush)
{
        struct bio *bio = *biop;

        if (bio) {
                u64 nblk;

                nblk = bio_end_sector(bio);
                nblk >>= sdp->sd_fsb2bb_shift;
                if (blkno == nblk && !flush)
                        return bio;
                gfs2_log_submit_bio(biop, op);
        }

        *biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
        return *biop;
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @jd: The journal descriptor
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
                    struct page *page, unsigned size, unsigned offset,
                    u64 blkno)
{
        struct bio *bio;
        int ret;

        bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
                               gfs2_end_log_write, false);
        ret = bio_add_page(bio, page, size, offset);
        if (ret == 0) {
                bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
                                       REQ_OP_WRITE, gfs2_end_log_write, true);
                ret = bio_add_page(bio, page, size, offset);
                WARN_ON(ret == 0);
        }
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        u64 dblock;

        dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
        gfs2_log_incr_head(sdp);
        gfs2_log_write(sdp, sdp->sd_jdesc, folio_page(bh->b_folio, 0),
                       bh->b_size, bh_offset(bh), dblock);
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
        struct super_block *sb = sdp->sd_vfs;
        u64 dblock;

        dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
        gfs2_log_incr_head(sdp);
        gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
}

/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the folios in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */
static void gfs2_end_log_read(struct bio *bio)
{
        int error = blk_status_to_errno(bio->bi_status);
        struct folio_iter fi;

        bio_for_each_folio_all(fi, bio) {
                /* We're abusing wb_err to get the error to gfs2_find_jhead */
                filemap_set_wb_err(fi.folio->mapping, error);
                folio_end_read(fi.folio, !error);
        }

        bio_put(bio);
}

/**
 * gfs2_jhead_folio_search - Look for the journal head in a given folio.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @folio: The folio to look in
 *
 * Returns: true if found, false otherwise.
 */
static bool gfs2_jhead_folio_search(struct gfs2_jdesc *jd,
                                    struct gfs2_log_header_host *head,
                                    struct folio *folio)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_log_header_host lh;
        void *kaddr;
        unsigned int offset;
        bool ret = false;

        VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
        kaddr = kmap_local_folio(folio, 0);
        for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
                if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
                        if (lh.lh_sequence >= head->lh_sequence)
                                *head = lh;
                        else {
                                ret = true;
                                break;
                        }
                }
        }
        kunmap_local(kaddr);
        return ret;
}

/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head to start from
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the folio with 'index' in the journal's mapping. Search the folio for
 * the journal head if requested (*done == false). Release refs on the
 * folio so the page cache can reclaim it. We grabbed a
 * reference on this folio twice, first when we did a filemap_grab_folio()
 * to obtain the folio to add it to the bio and second when we do a
 * filemap_get_folio() here to get the folio to wait on while I/O on it is being
 * completed.
 * This function is also used to free up a folio we might've grabbed but not
 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 * submitted the I/O, but we already found the jhead so we only need to drop
 * our references to the folio.
 */

static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
                                    struct gfs2_log_header_host *head,
                                    bool *done)
{
        struct folio *folio;

        folio = filemap_get_folio(jd->jd_inode->i_mapping, index);

        folio_wait_locked(folio);
        if (!folio_test_uptodate(folio))
                *done = true;

        if (!*done)
                *done = gfs2_jhead_folio_search(jd, head, folio);

        /* filemap_get_folio() and the earlier filemap_grab_folio() */
        folio_put_refs(folio, 2);
}

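/**
 * gfs2_chain_bio - Allocate a new bio, chained to the previous one
 * @prev: The previous bio, which is submitted here
 * @nr_iovecs: The number of io vectors the new bio should hold
 *
 * The new bio picks up at the sector where @prev left off, and the two are
 * linked with bio_chain() before @prev is submitted, so that @prev's
 * completion is deferred until the new bio also completes.
 *
 * Returns: the new bio
 */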
static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
        struct bio *new;

        new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
        bio_clone_blkg_association(new, prev);
        new->bi_iter.bi_sector = bio_end_sector(prev);
        bio_chain(new, prev);
        submit_bio(prev);
        return new;
}

/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number (i.e. the log head).
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct address_space *mapping = jd->jd_inode->i_mapping;
        unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
        unsigned int bsize = sdp->sd_sb.sb_bsize, off;
        unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
        unsigned int shift = PAGE_SHIFT - bsize_shift;
        unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
        struct gfs2_journal_extent *je;
        int ret = 0;
        struct bio *bio = NULL;
        struct folio *folio = NULL;
        bool done = false;
        errseq_t since;

        memset(head, 0, sizeof(*head));
        if (list_empty(&jd->extent_list))
                gfs2_map_journal_extents(sdp, jd);

        since = filemap_sample_wb_err(mapping);
        list_for_each_entry(je, &jd->extent_list, list) {
                u64 dblock = je->dblock;

                for (; block < je->lblock + je->blocks; block++, dblock++) {
                        if (!folio) {
                                folio = filemap_grab_folio(mapping,
                                                           block >> shift);
                                if (IS_ERR(folio)) {
                                        ret = PTR_ERR(folio);
                                        done = true;
                                        goto out;
                                }
                                off = 0;
                        }

                        if (bio && (off || block < blocks_submitted + max_blocks)) {
                                sector_t sector = dblock << sdp->sd_fsb2bb_shift;

                                if (bio_end_sector(bio) == sector) {
                                        if (bio_add_folio(bio, folio, bsize, off))
                                                goto block_added;
                                }
                                if (off) {
                                        unsigned int blocks =
                                                (PAGE_SIZE - off) >> bsize_shift;

                                        bio = gfs2_chain_bio(bio, blocks);
                                        goto add_block_to_new_bio;
                                }
                        }

                        if (bio) {
                                blocks_submitted = block;
                                submit_bio(bio);
                        }

                        bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
                        bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
                        bio_add_folio_nofail(bio, folio, bsize, off);
block_added:
                        off += bsize;
                        if (off == folio_size(folio))
                                folio = NULL;
                        if (blocks_submitted <= blocks_read + max_blocks) {
                                /* Keep at least one bio in flight */
                                continue;
                        }

                        gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
                        blocks_read += PAGE_SIZE >> bsize_shift;
                        if (done)
                                goto out; /* found */
                }
        }

out:
        if (bio)
                submit_bio(bio);
        while (blocks_read < block) {
                gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
                blocks_read += PAGE_SIZE >> bsize_shift;
        }

        if (!ret)
                ret = filemap_check_wb_err(mapping, since);

        truncate_inode_pages(mapping, 0);

        return ret;
}

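/**
 * gfs2_get_log_desc - Allocate and initialize a log descriptor page
 * @sdp: The superblock
 * @ld_type: GFS2_LOG_DESC_{METADATA,JDATA,REVOKE}
 * @ld_length: The number of log blocks covered, including the descriptor
 * @ld_data1: Type-specific data (e.g. the number of buffers described)
 *
 * The page is allocated from the gfs2_page_pool mempool; ownership passes
 * to the log writing code, which frees it once the i/o has completed.
 *
 * Returns: the page holding the initialized descriptor
 */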
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
                                      u32 ld_length, u32 ld_data1)
{
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        struct gfs2_log_descriptor *ld = page_address(page);
        clear_page(ld);
        ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
        ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
        ld->ld_type = cpu_to_be32(ld_type);
        ld->ld_length = cpu_to_be32(ld_length);
        ld->ld_data1 = cpu_to_be32(ld_data1);
        ld->ld_data2 = 0;
        return page;
}

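/**
 * gfs2_check_magic - Check if a data buffer begins with the GFS2 magic number
 * @bh: The buffer to check
 *
 * Data blocks that happen to start with GFS2_MAGIC could be mistaken for
 * metadata during journal replay, so such buffers are marked "escaped":
 * the magic is zeroed in the journal copy and restored on replay.
 */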
static void gfs2_check_magic(struct buffer_head *bh)
{
        __be32 *ptr;

        clear_buffer_escaped(bh);
        ptr = kmap_local_folio(bh->b_folio, bh_offset(bh));
        if (*ptr == cpu_to_be32(GFS2_MAGIC))
                set_buffer_escaped(bh);
        kunmap_local(ptr);
}

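/**
 * blocknr_cmp - Compare two buffers by their on-disk block number
 * @priv: unused
 * @a: The first buffer's bd_list entry
 * @b: The second buffer's bd_list entry
 *
 * Used with list_sort() to order log writes by ascending block number.
 *
 * Returns: -1, 0 or 1, as for a standard comparison function
 */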
static int blocknr_cmp(void *priv, const struct list_head *a,
                       const struct list_head *b)
{
        struct gfs2_bufdata *bda, *bdb;

        bda = list_entry(a, struct gfs2_bufdata, bd_list);
        bdb = list_entry(b, struct gfs2_bufdata, bd_list);

        if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
                return -1;
        if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
                return 1;
        return 0;
}

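/**
 * gfs2_before_commit - Write pinned (meta)data buffers to the log
 * @sdp: The filesystem
 * @limit: The maximum number of buffers described by one log descriptor
 * @total: The total number of buffers to write
 * @blist: The list of buffers (tr_buf or tr_databuf)
 * @is_databuf: true for journaled data, false for metadata
 *
 * Sorts the buffers by block number, then writes them out in chunks of at
 * most @limit buffers, each chunk preceded by a log descriptor listing the
 * in-place block numbers. For data buffers, an extra 64-bit word per block
 * records whether the block was escaped.
 */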
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
                               unsigned int total, struct list_head *blist,
                               bool is_databuf)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_bufdata *bd1 = NULL, *bd2;
        struct page *page;
        unsigned int num;
        unsigned n;
        __be64 *ptr;

        gfs2_log_lock(sdp);
        list_sort(NULL, blist, blocknr_cmp);
        bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
        while (total) {
                num = total;
                if (total > limit)
                        num = limit;
                gfs2_log_unlock(sdp);
                page = gfs2_get_log_desc(sdp,
                                         is_databuf ? GFS2_LOG_DESC_JDATA :
                                         GFS2_LOG_DESC_METADATA, num + 1, num);
                ld = page_address(page);
                gfs2_log_lock(sdp);
                ptr = (__be64 *)(ld + 1);

                n = 0;
                list_for_each_entry_continue(bd1, blist, bd_list) {
                        *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
                        if (is_databuf) {
                                gfs2_check_magic(bd1->bd_bh);
                                *ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
                        }
                        if (++n >= num)
                                break;
                }

                gfs2_log_unlock(sdp);
                gfs2_log_write_page(sdp, page);
                gfs2_log_lock(sdp);

                n = 0;
                list_for_each_entry_continue(bd2, blist, bd_list) {
                        get_bh(bd2->bd_bh);
                        gfs2_log_unlock(sdp);
                        lock_buffer(bd2->bd_bh);

                        if (buffer_escaped(bd2->bd_bh)) {
                                void *p;

                                page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
                                p = page_address(page);
                                memcpy_from_folio(p, bd2->bd_bh->b_folio,
                                                  bh_offset(bd2->bd_bh),
                                                  bd2->bd_bh->b_size);
                                *(__be32 *)p = 0;
                                clear_buffer_escaped(bd2->bd_bh);
                                unlock_buffer(bd2->bd_bh);
                                brelse(bd2->bd_bh);
                                gfs2_log_write_page(sdp, page);
                        } else {
                                gfs2_log_write_bh(sdp, bd2->bd_bh);
                        }
                        gfs2_log_lock(sdp);
                        if (++n >= num)
                                break;
                }

                BUG_ON(total < num);
                total -= num;
        }
        gfs2_log_unlock(sdp);
}

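/**
 * buf_lo_before_commit - Write the pinned metadata buffers of a transaction
 * @sdp: The filesystem
 * @tr: The transaction being committed, or NULL
 */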
static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
        unsigned int nbuf;
        if (tr == NULL)
                return;
        nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
        gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

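/**
 * buf_lo_after_commit - Unpin the metadata buffers of a committed transaction
 * @sdp: The filesystem
 * @tr: The transaction that was just written to the log, or NULL
 */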
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head;
        struct gfs2_bufdata *bd;

        if (tr == NULL)
                return;

        head = &tr->tr_buf;
        while (!list_empty(head)) {
                bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                gfs2_unpin(sdp, bd->bd_bh, tr);
        }
}

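/**
 * buf_lo_before_scan - Reset the replay counters before scanning a journal
 * @jd: The journal descriptor
 * @head: The journal head (unused here)
 * @pass: The recovery pass; counters are reset on pass 0 only
 */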
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
                               struct gfs2_log_header_host *head, int pass)
{
        if (pass != 0)
                return;

        jd->jd_found_blocks = 0;
        jd->jd_replayed_blocks = 0;
}

#define obsolete_rgrp_replay \
"Replaying 0x%llx from jid=%d/0x%llx but we already have a bh!\n"
#define obsolete_rgrp_replay2 \
"busy:%d, pinned:%d rg_gen:0x%llx, j_gen:0x%llx\n"

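/**
 * obsolete_rgrp - Report a resource group block about to be overwritten by replay
 * @jd: The journal descriptor
 * @bh_log: The journal copy of the rgrp block
 * @blkno: The in-place block number
 *
 * If the rgrp is already in memory with its bitmaps read in, replaying an
 * older copy over it is suspicious, so log the details and dump the glock
 * for debugging.
 */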
static void obsolete_rgrp(struct gfs2_jdesc *jd, struct buffer_head *bh_log,
                          u64 blkno)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_rgrpd *rgd;
        struct gfs2_rgrp *jrgd = (struct gfs2_rgrp *)bh_log->b_data;

        rgd = gfs2_blk2rgrpd(sdp, blkno, false);
        if (rgd && rgd->rd_addr == blkno &&
            rgd->rd_bits && rgd->rd_bits->bi_bh) {
                fs_info(sdp, obsolete_rgrp_replay, (unsigned long long)blkno,
                        jd->jd_jid, bh_log->b_blocknr);
                fs_info(sdp, obsolete_rgrp_replay2,
                        buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
                        buffer_pinned(rgd->rd_bits->bi_bh),
                        rgd->rd_igeneration,
                        be64_to_cpu(jrgd->rg_igeneration));
                gfs2_dump_glock(NULL, rgd->rd_gl, true);
        }
}

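/**
 * buf_lo_scan_elements - Replay the metadata blocks of one log descriptor
 * @jd: The journal descriptor
 * @start: The journal block of the descriptor itself
 * @ld: The log descriptor
 * @ptr: The array of in-place block numbers following the descriptor
 * @pass: Metadata is replayed on pass 1 only
 *
 * For each block that has not been revoked, copy the journal copy over the
 * in-place block and mark it dirty.
 *
 * Returns: 0 on success, errno otherwise
 */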
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
                                struct gfs2_log_descriptor *ld, __be64 *ptr,
                                int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
                return 0;

        gfs2_replay_incr_blk(jd, &start);

        for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);

                jd->jd_found_blocks++;

                if (gfs2_revoke_check(jd, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                if (gfs2_meta_check(sdp, bh_ip))
                        error = -EIO;
                else {
                        struct gfs2_meta_header *mh =
                                (struct gfs2_meta_header *)bh_ip->b_data;

                        if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG))
                                obsolete_rgrp(jd, bh_log, blkno);

                        mark_buffer_dirty(bh_ip);
                }
                brelse(bh_log);
                brelse(bh_ip);

                if (error)
                        break;

                jd->jd_replayed_blocks++;
        }

        return error;
}

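/**
 * buf_lo_after_scan - Sync replayed metadata and report the result
 * @jd: The journal descriptor
 * @error: Non-zero if the scan failed
 * @pass: The recovery pass that just finished
 */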
static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_inode_metasync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        gfs2_inode_metasync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
                jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

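/**
 * revoke_lo_before_commit - Write any pending revokes to the log
 * @sdp: The filesystem
 * @tr: The transaction being committed (unused here)
 *
 * Revokes are packed as 64-bit block numbers: the first block starts with
 * a log descriptor, continuation blocks with a plain metadata header of
 * type GFS2_METATYPE_LB.
 */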
static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct gfs2_meta_header *mh;
        unsigned int offset;
        struct list_head *head = &sdp->sd_log_revokes;
        struct gfs2_bufdata *bd;
        struct page *page;
        unsigned int length;

        gfs2_flush_revokes(sdp);
        if (!sdp->sd_log_num_revoke)
                return;

        length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
        page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
        offset = sizeof(struct gfs2_log_descriptor);

        list_for_each_entry(bd, head, bd_list) {
                sdp->sd_log_num_revoke--;

                if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
                        gfs2_log_write_page(sdp, page);
                        page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
                        mh = page_address(page);
                        clear_page(mh);
                        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
                        mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
                        mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
                        offset = sizeof(struct gfs2_meta_header);
                }

                *(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
                offset += sizeof(u64);
        }
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

        gfs2_log_write_page(sdp, page);
}

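/**
 * gfs2_drain_revokes - Release all outstanding revokes
 * @sdp: The filesystem
 *
 * Removes each revoke from its glock and frees the bufdata structure.
 */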
void gfs2_drain_revokes(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_log_revokes;
        struct gfs2_bufdata *bd;
        struct gfs2_glock *gl;

        while (!list_empty(head)) {
                bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                gl = bd->bd_gl;
                gfs2_glock_remove_revoke(gl);
                kmem_cache_free(gfs2_bufdata_cachep, bd);
        }
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        gfs2_drain_revokes(sdp);
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
{
        if (pass != 0)
                return;

        jd->jd_found_revokes = 0;
        jd->jd_replay_tail = head->lh_tail;
}

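/**
 * revoke_lo_scan_elements - Read the revokes from one log descriptor
 * @jd: The journal descriptor
 * @start: The journal block of the descriptor
 * @ld: The log descriptor
 * @ptr: Unused here; revoke block numbers are read from the journal blocks
 * @pass: Revokes are collected on pass 0, before any replay
 *
 * Returns: 0 on success, errno otherwise
 */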
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
                                   struct gfs2_log_descriptor *ld, __be64 *ptr,
                                   int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        unsigned int blks = be32_to_cpu(ld->ld_length);
        unsigned int revokes = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh;
        unsigned int offset;
        u64 blkno;
        int first = 1;
        int error;

        if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
                return 0;

        offset = sizeof(struct gfs2_log_descriptor);

        for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
                error = gfs2_replay_read_block(jd, start, &bh);
                if (error)
                        return error;

                if (!first)
                        gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

                while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
                        blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

                        error = gfs2_revoke_add(jd, blkno, start);
                        if (error < 0) {
                                brelse(bh);
                                return error;
                        } else if (error)
                                jd->jd_found_revokes++;

                        if (!--revokes)
                                break;
                        offset += sizeof(u64);
                }

                brelse(bh);
                offset = sizeof(struct gfs2_meta_header);
                first = 0;
        }

        return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_revoke_clean(jd);
                return;
        }
        if (pass != 1)
                return;

        fs_info(sdp, "jid=%u: Found %u revoke tags\n",
                jd->jd_jid, jd->jd_found_revokes);

        gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int limit = databuf_limit(sdp);
        unsigned int nbuf;
        if (tr == NULL)
                return;
        nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
        gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

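/**
 * databuf_lo_scan_elements - Replay the journaled data blocks of one descriptor
 * @jd: The journal descriptor
 * @start: The journal block of the descriptor itself
 * @ld: The log descriptor
 * @ptr: Pairs of (block number, escaped flag) following the descriptor
 * @pass: Data is replayed on pass 1 only
 *
 * As with metadata, but each block comes with an escape flag; if set, the
 * GFS2 magic number that was zeroed before logging is restored.
 *
 * Returns: 0 on success, errno otherwise
 */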
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
                                    struct gfs2_log_descriptor *ld,
                                    __be64 *ptr, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        u64 esc;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
                return 0;

        gfs2_replay_incr_blk(jd, &start);
        for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);
                esc = be64_to_cpu(*ptr++);

                jd->jd_found_blocks++;

                if (gfs2_revoke_check(jd, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                /* Unescape */
                if (esc) {
                        __be32 *eptr = (__be32 *)bh_ip->b_data;
                        *eptr = cpu_to_be32(GFS2_MAGIC);
                }
                mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                jd->jd_replayed_blocks++;
        }

        return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_inode_metasync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        /* data sync? */
        gfs2_inode_metasync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
                jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

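/**
 * databuf_lo_after_commit - Unpin the data buffers of a committed transaction
 * @sdp: The filesystem
 * @tr: The transaction that was just written to the log, or NULL
 */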
static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head;
        struct gfs2_bufdata *bd;

        if (tr == NULL)
                return;

        head = &tr->tr_databuf;
        while (!list_empty(head)) {
                bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                gfs2_unpin(sdp, bd->bd_bh, tr);
        }
}

static const struct gfs2_log_operations gfs2_buf_lops = {
        .lo_before_commit = buf_lo_before_commit,
        .lo_after_commit = buf_lo_after_commit,
        .lo_before_scan = buf_lo_before_scan,
        .lo_scan_elements = buf_lo_scan_elements,
        .lo_after_scan = buf_lo_after_scan,
        .lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
        .lo_before_commit = revoke_lo_before_commit,
        .lo_after_commit = revoke_lo_after_commit,
        .lo_before_scan = revoke_lo_before_scan,
        .lo_scan_elements = revoke_lo_scan_elements,
        .lo_after_scan = revoke_lo_after_scan,
        .lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
        .lo_before_commit = databuf_lo_before_commit,
        .lo_after_commit = databuf_lo_after_commit,
        .lo_scan_elements = databuf_lo_scan_elements,
        .lo_after_scan = databuf_lo_after_scan,
        .lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
        &gfs2_databuf_lops,
        &gfs2_buf_lops,
        &gfs2_revoke_lops,
        NULL,
};