// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -ENODATA;
        return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_folio, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
                                  struct writeback_control *wbc)
{
        struct inode * const inode = folio->mapping->host;
        loff_t i_size = i_size_read(inode);

        /*
         * The folio straddles i_size. It must be zeroed out on each and every
         * writepage invocation because it may be mmapped. "A file is mapped
         * in multiples of the page size. For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        if (folio_pos(folio) < i_size &&
            i_size < folio_pos(folio) + folio_size(folio))
                folio_zero_segment(folio, offset_in_folio(folio, i_size),
                                   folio_size(folio));

        return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
                                        wbc);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * Implements the core of write back. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
                                    struct writeback_control *wbc)
{
        struct inode *inode = folio->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);

        if (folio_test_checked(folio)) {
                folio_clear_checked(folio);
                if (!folio_buffers(folio)) {
                        create_empty_buffers(folio,
                                             inode->i_sb->s_blocksize,
                                             BIT(BH_Dirty)|BIT(BH_Uptodate));
                }
                gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio));
        }
        return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writeback - Write jdata folios to the log
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */
int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        struct folio *folio = NULL;
        int error;

        BUG_ON(current->journal_info);
        if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
                return 0;

        while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
                if (folio_test_checked(folio)) {
                        folio_redirty_for_writepage(wbc, folio);
                        folio_unlock(folio);
                        continue;
                }
                error = __gfs2_jdata_write_folio(folio, wbc);
        }

        return error;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct iomap_writepage_ctx wpc = {
                .inode = mapping->host,
                .wbc = wbc,
                .ops = &gfs2_writeback_ops,
        };
        int ret;

        /*
         * Even if we didn't write enough pages here, we might still be holding
         * dirty pages in the ail. We forcibly flush the ail because we don't
         * want balance_dirty_pages() to loop indefinitely trying to write out
         * pages held in the ail that it can't find.
         */
        ret = iomap_writepages(&wpc);
        if (ret == 0 && wbc->nr_to_write > 0)
                set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
        return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
                                  struct writeback_control *wbc,
                                  struct folio_batch *fbatch,
                                  pgoff_t *done_index)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned nrblocks;
        int i;
        int ret;
        size_t size = 0;
        int nr_folios = folio_batch_count(fbatch);

        for (i = 0; i < nr_folios; i++)
                size += folio_size(fbatch->folios[i]);
        nrblocks = size >> inode->i_blkbits;

        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_folios; i++) {
                struct folio *folio = fbatch->folios[i];

                *done_index = folio->index;

                folio_lock(folio);

                if (unlikely(folio->mapping != mapping)) {
continue_unlock:
                        folio_unlock(folio);
                        continue;
                }

                if (!folio_test_dirty(folio)) {
                        /* someone wrote it for us */
                        goto continue_unlock;
                }

                if (folio_test_writeback(folio)) {
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                folio_wait_writeback(folio);
                        else
                                goto continue_unlock;
                }

                BUG_ON(folio_test_writeback(folio));
                if (!folio_clear_dirty_for_io(folio))
                        goto continue_unlock;

                trace_wbc_writepage(wbc, inode_to_bdi(inode));

                ret = __gfs2_jdata_write_folio(folio, wbc);
                if (unlikely(ret)) {
                        /*
                         * done_index is set past this page, so media errors
                         * will not choke background writeout for the entire
                         * file. This has consequences for range_cyclic
                         * semantics (ie. it may not be suitable for data
                         * integrity writeout).
                         */
                        *done_index = folio_next_index(folio);
                        ret = 1;
                        break;
                }

                /*
                 * We stop writing back only if we are not doing
                 * integrity sync. In case of integrity sync we have to
                 * keep going until we have written all the pages
                 * we tagged for writeback prior to entering this loop.
                 */
                if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
                        ret = 1;
                        break;
                }
        }
        gfs2_trans_end(sdp);
        return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        int ret = 0;
        int done = 0;
        struct folio_batch fbatch;
        int nr_folios;
        pgoff_t writeback_index;
        pgoff_t index;
        pgoff_t end;
        pgoff_t done_index;
        int cycled;
        int range_whole = 0;
        xa_mark_t tag;

        folio_batch_init(&fbatch);
        if (wbc->range_cyclic) {
                writeback_index = mapping->writeback_index; /* prev offset */
                index = writeback_index;
                if (index == 0)
                        cycled = 1;
                else
                        cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
        }
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;

retry:
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && (index <= end)) {
                nr_folios = filemap_get_folios_tag(mapping, &index, end,
                                                   tag, &fbatch);
                if (nr_folios == 0)
                        break;

                ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
                                             &done_index);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;
                folio_batch_release(&fbatch);
                cond_resched();
        }

        if (!cycled && !done) {
                /*
                 * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                cycled = 1;
                index = 0;
                end = writeback_index - 1;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = done_index;

        return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        int ret;

        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                               GFS2_LFC_JDATA_WPAGES);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}

/**
 * stuffed_read_folio - Fill in a Linux folio with stuffed file data
 * @ip: the inode
 * @folio: the folio
 *
 * Returns: errno
 */
static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
        struct buffer_head *dibh = NULL;
        size_t dsize = i_size_read(&ip->i_inode);
        void *from = NULL;
        int error = 0;

        /*
         * Due to the order of unstuffing files and ->fault(), we can be
         * asked for a zero folio in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(folio->index)) {
                dsize = 0;
        } else {
                error = gfs2_meta_inode_buffer(ip, &dibh);
                if (error)
                        goto out;
                from = dibh->b_data + sizeof(struct gfs2_dinode);
        }

        folio_fill_tail(folio, 0, from, dsize);
        brelse(dibh);
out:
        folio_end_read(folio, error == 0);

        return error;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
        struct inode *inode = folio->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int error;

        if (!gfs2_is_jdata(ip) ||
            (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
                error = iomap_read_folio(folio, &gfs2_iomap_ops);
        } else if (gfs2_is_stuffed(ip)) {
                error = stuffed_read_folio(ip, folio);
        } else {
                error = mpage_read_folio(folio, gfs2_block_map);
        }

        if (gfs2_withdrawing_or_withdrawn(sdp))
                return -EIO;

        return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or a negative errno
 */

ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                           size_t size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos >> PAGE_SHIFT;
        size_t copied = 0;

        do {
                size_t offset, chunk;
                struct folio *folio;

                folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
                if (IS_ERR(folio)) {
                        if (PTR_ERR(folio) == -EINTR)
                                continue;
                        return PTR_ERR(folio);
                }
                offset = *pos + copied - folio_pos(folio);
                chunk = min(size - copied, folio_size(folio) - offset);
                memcpy_from_folio(buf + copied, folio, offset, chunk);
                index = folio_next_index(folio);
                folio_put(folio);
                copied += chunk;
        } while (copied < size);
        (*pos) += size;
        return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
        struct inode *inode = rac->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_stuffed(ip))
                ;
        else if (gfs2_is_jdata(ip))
                mpage_readahead(rac, gfs2_block_map);
        else
                iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh;
        u64 fs_total, new_free;

        if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
                return;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);
        if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
                goto out;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);

        update_statfs(sdp, m_bh);
        brelse(m_bh);
out:
        sdp->sd_rindex_uptodate = 0;
        gfs2_trans_end(sdp);
}

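/**
 * gfs2_jdata_dirty_folio - Mark a jdata folio dirty
 * @mapping: The mapping the folio belongs to
 * @folio: The folio being dirtied
 *
 * When dirtying happens inside a transaction, the checked flag is set so
 * that __gfs2_jdata_write_folio() knows to add the folio's buffers to the
 * transaction before writing them back.
 */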
static bool gfs2_jdata_dirty_folio(struct address_space *mapping,
                                   struct folio *folio)
{
        if (current->journal_info)
                folio_set_checked(folio);
        return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

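/**
 * gfs2_discard - Remove a buffer from the journaling machinery
 * @sdp: The superblock
 * @bh: The buffer head being discarded
 *
 * Clears the buffer's dirty and mapping state and detaches it from the
 * log (or from the ail list, if queued there) so that the folio it
 * belongs to can be invalidated.
 */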
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_list);
                else {
                        spin_lock(&sdp->sd_ail_lock);
                        gfs2_remove_from_journal(bh, REMOVE_JDATA);
                        spin_unlock(&sdp->sd_ail_lock);
                }
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}

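/**
 * gfs2_invalidate_folio - Invalidate part or all of a jdata folio
 * @folio: The folio being invalidated
 * @offset: Offset of the first byte being invalidated
 * @length: Length of the range being invalidated
 *
 * Any buffers that lie entirely inside the invalidated range are passed
 * to gfs2_discard(); when the whole folio is invalidated, its private
 * data is released as well.
 */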
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
                                  size_t length)
{
        struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
        size_t stop = offset + length;
        int partial_page = (offset || length < folio_size(folio));
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!folio_test_locked(folio));
        if (!partial_page)
                folio_clear_checked(folio);
        head = folio_buffers(folio);
        if (!head)
                goto out;

        bh = head;
        do {
                if (pos + bh->b_size > stop)
                        return;

                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (!partial_page)
                filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
        struct address_space *mapping = folio->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        head = folio_buffers(folio);
        if (!head)
                return false;

        /*
         * mm accommodates an old ext3 case where clean folios might
         * not have had the dirty bit cleared. Thus, it can send actual
         * dirty folios to ->release_folio() via shrink_active_list().
         *
         * As a workaround, we skip folios that contain dirty buffers
         * below. Once ->release_folio isn't called on dirty folios
         * anymore, we can warn on dirty buffers like we used to here
         * again.
         */

        gfs2_log_lock(sdp);
        bh = head;
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_tr)
                        goto cannot_release;
                if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
                        goto cannot_release;
                bh = bh->b_this_page;
        } while (bh != head);

        bh = head;
        do {
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        bd->bd_bh = NULL;
                        bh->b_private = NULL;
                        /*
                         * The bd may still be queued as a revoke, in which
                         * case we must not dequeue nor free it.
                         */
                        if (!bd->bd_blkno && !list_empty(&bd->bd_list))
                                list_del_init(&bd->bd_list);
                        if (list_empty(&bd->bd_list))
                                kmem_cache_free(gfs2_bufdata_cachep, bd);
                }

                bh = bh->b_this_page;
        } while (bh != head);
        gfs2_log_unlock(sdp);

        return try_to_free_buffers(folio);

cannot_release:
        gfs2_log_unlock(sdp);
        return false;
}

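/*
 * Address space operations for regular (non-jdata) files.  These go
 * through iomap for reads, writeback, and dirty tracking.
 */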
static const struct address_space_operations gfs2_aops = {
        .writepages = gfs2_writepages,
        .read_folio = gfs2_read_folio,
        .readahead = gfs2_readahead,
        .dirty_folio = iomap_dirty_folio,
        .release_folio = iomap_release_folio,
        .invalidate_folio = iomap_invalidate_folio,
        .bmap = gfs2_bmap,
        .migrate_folio = filemap_migrate_folio,
        .is_partially_uptodate = iomap_is_partially_uptodate,
        .error_remove_folio = generic_error_remove_folio,
};

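/*
 * Address space operations for journaled-data (jdata) files.  These
 * still use buffer heads so that data folios can be added to the
 * journal.
 */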
static const struct address_space_operations gfs2_jdata_aops = {
        .writepages = gfs2_jdata_writepages,
        .read_folio = gfs2_read_folio,
        .readahead = gfs2_readahead,
        .dirty_folio = gfs2_jdata_dirty_folio,
        .bmap = gfs2_bmap,
        .migrate_folio = buffer_migrate_folio,
        .invalidate_folio = gfs2_invalidate_folio,
        .release_folio = gfs2_release_folio,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_folio = generic_error_remove_folio,
};

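/**
 * gfs2_set_aops - Set the address space operations for an inode
 * @inode: The inode
 *
 * Selects the jdata operations when the inode is in journaled-data
 * mode, and the iomap-based operations otherwise.
 */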
void gfs2_set_aops(struct inode *inode)
{
        if (gfs2_is_jdata(GFS2_I(inode)))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                inode->i_mapping->a_ops = &gfs2_aops;
}