Lines Matching +full:bd +full:- +full:address

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
19 #include <linux/backing-dev.h>
44 unsigned int bsize = head->b_size; in gfs2_trans_add_databufs()
50 bh = bh->b_this_page, start = end) { in gfs2_trans_add_databufs()
57 gfs2_trans_add_data(ip->i_gl, bh); in gfs2_trans_add_databufs()
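The matched lines above come from gfs2_trans_add_databufs(), which walks the buffer heads attached to a folio and journals each one that overlaps the written range. A minimal sketch of how those fragments fit together, modeled on mainline gfs2 (helpers outside the matched lines, such as folio_buffers() and set_buffer_uptodate(), are standard buffer-head API):

```c
void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     size_t from, size_t len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	size_t to = from + len;
	size_t start, end;

	/* Walk the circular list of buffers backing this folio. */
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}
```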
62 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
66 * @create: Non-zero if we may add block to the file
80 return -ENODATA; in gfs2_get_block_noalloc()
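gfs2_get_block_noalloc() is the get_block_t callback for the no-allocation writeback path; the -ENODATA return above covers a lookup that finds no mapped block. A sketch, assuming gfs2_block_map() as the underlying mapper (it is not among the matched lines):

```c
static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	/* create == 0: look up the block, never allocate one. */
	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}
```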
85 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
95 struct inode * const inode = folio->mapping->host; in gfs2_write_jdata_folio()
115 * __gfs2_jdata_write_folio - The core of jdata writepage
126 struct inode *inode = folio->mapping->host; in __gfs2_jdata_write_folio()
133 inode->i_sb->s_blocksize, in __gfs2_jdata_write_folio()
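__gfs2_jdata_write_folio() is where the s_blocksize line above is used: folios dirtied under a transaction (the "checked" flag) get buffers created and journaled before being written out. A sketch along the lines of recent mainline kernels; create_empty_buffers() and folio_test_checked() are assumptions drawn from the stock folio/buffer-head API rather than from the matched lines:

```c
static int __gfs2_jdata_write_folio(struct folio *folio,
				    struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			create_empty_buffers(folio,
					inode->i_sb->s_blocksize,
					BIT(BH_Dirty) | BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}
```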
142 * gfs2_writepages - Write a bunch of dirty pages back to disk
144 * @wbc: Write-back control
162 if (ret == 0 && wbc->nr_to_write > 0) in gfs2_writepages()
163 set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags); in gfs2_writepages()
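For ordinary (non-jdata) files, gfs2_writepages() delegates to iomap and then uses the SDF_FORCE_AIL_FLUSH bit shown above to nudge the log daemon when nothing could be written because dirty pages were pinned in the AIL. A sketch assuming the iomap-based writeback path of recent mainline kernels (iomap_writepages() and gfs2_writeback_ops are not among the matched lines, and the iomap entry-point signature has shifted across versions):

```c
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if nothing was written, dirty pages may still sit in the
	 * AIL; force a log flush so they become writable next time.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0 && wbc->nr_to_write > 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}
```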
168 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
174 * Returns: non-zero if loop should terminate, zero otherwise
182 struct inode *inode = mapping->host; in gfs2_write_jdata_batch()
191 size += folio_size(fbatch->folios[i]); in gfs2_write_jdata_batch()
192 nrblocks = size >> inode->i_blkbits; in gfs2_write_jdata_batch()
199 struct folio *folio = fbatch->folios[i]; in gfs2_write_jdata_batch()
201 *done_index = folio->index; in gfs2_write_jdata_batch()
205 if (unlikely(folio->mapping != mapping)) { in gfs2_write_jdata_batch()
217 if (wbc->sync_mode != WB_SYNC_NONE) in gfs2_write_jdata_batch()
257 if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) { in gfs2_write_jdata_batch()
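The batch writer sizes one journal transaction to cover every block in the folio batch, then writes the folios one by one, honouring the nr_to_write budget as in the matched line above. A condensed sketch (skipped-folio and error paths are abbreviated; gfs2_trans_begin()/gfs2_trans_end() are assumptions based on the stock gfs2 transaction API):

```c
static int gfs2_write_jdata_batch(struct address_space *mapping,
				  struct writeback_control *wbc,
				  struct folio_batch *fbatch,
				  pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i, ret;
	size_t size = 0;
	int nr_folios = folio_batch_count(fbatch);

	/* One transaction covers every block in the batch. */
	for (i = 0; i < nr_folios; i++)
		size += folio_size(fbatch->folios[i]);
	nrblocks = size >> inode->i_blkbits;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;
		folio_lock(folio);

		/* Folio was truncated or cleaned while we batched it. */
		if (unlikely(folio->mapping != mapping) ||
		    !folio_test_dirty(folio)) {
			folio_unlock(folio);
			continue;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else {
				folio_unlock(folio);
				continue;
			}
		}

		if (!folio_clear_dirty_for_io(folio)) {
			folio_unlock(folio);
			continue;
		}

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret))
			break;

		/* Non-integrity writeback stops once the budget is spent. */
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}
```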
268 * gfs2_write_cache_jdata - Like write_cache_pages but different
293 if (wbc->range_cyclic) { in gfs2_write_cache_jdata()
294 writeback_index = mapping->writeback_index; /* prev offset */ in gfs2_write_cache_jdata()
300 end = -1; in gfs2_write_cache_jdata()
302 index = wbc->range_start >> PAGE_SHIFT; in gfs2_write_cache_jdata()
303 end = wbc->range_end >> PAGE_SHIFT; in gfs2_write_cache_jdata()
304 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) in gfs2_write_cache_jdata()
308 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in gfs2_write_cache_jdata()
314 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in gfs2_write_cache_jdata()
341 end = writeback_index - 1; in gfs2_write_cache_jdata()
345 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) in gfs2_write_cache_jdata()
346 mapping->writeback_index = done_index; in gfs2_write_cache_jdata()
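gfs2_write_cache_jdata() is, as its kernel-doc says, write_cache_pages() with one difference: folios are handed to gfs2_write_jdata_batch() so a single transaction can cover each batch. The range_cyclic fragments above resolve to the usual pattern; a condensed sketch following write_cache_pages() conventions (filemap_get_folios_tag() and tag_pages_for_writeback() are stock page-cache helpers, not matched lines):

```c
static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0, done = 0, cycled, range_whole = 0, nr_folios;
	struct folio_batch fbatch;
	pgoff_t writeback_index = 0, index, end, done_index;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		cycled = (index == 0);
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
						   tag, &fbatch);
		if (nr_folios == 0)
			break;
		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
					     &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/* Range ended; wrap around to the start of the file. */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;
	return ret;
}
```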
353 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
362 struct gfs2_inode *ip = GFS2_I(mapping->host); in gfs2_jdata_writepages()
363 struct gfs2_sbd *sdp = GFS2_SB(mapping->host); in gfs2_jdata_writepages()
367 if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) { in gfs2_jdata_writepages()
368 gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL | in gfs2_jdata_writepages()
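The log-flush fragment above is the second half of gfs2_jdata_writepages(): for data-integrity writeback, the journal is flushed and the write pass repeated so that journaled data actually reaches its in-place location. A sketch assembled from the matched lines (GFS2_LFC_JDATA_WPAGES completes the flag combination visibly truncated above, as in mainline):

```c
static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		/* Flush journaled data to the log, then write it back. */
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}
```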
376 * stuffed_read_folio - Fill in a Linux folio with stuffed file data
385 size_t dsize = i_size_read(&ip->i_inode); in stuffed_read_folio()
390 * Due to the order of unstuffing files and ->fault(), we can be in stuffed_read_folio()
394 if (unlikely(folio->index)) { in stuffed_read_folio()
400 from = dibh->b_data + sizeof(struct gfs2_dinode); in stuffed_read_folio()
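For stuffed (inline) files the data lives in the dinode block itself, immediately after the struct gfs2_dinode header, which is what the b_data arithmetic above computes. A sketch in the style of recent mainline kernels; folio_fill_tail() and folio_end_read() are assumptions from the newer folio API, not matched lines:

```c
static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
	struct buffer_head *dibh = NULL;
	size_t dsize = i_size_read(&ip->i_inode);
	void *from = NULL;
	int error = 0;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero folio in the case of a stuffed file being
	 * extended, so treat anything past folio 0 as a hole.
	 */
	if (unlikely(folio->index)) {
		dsize = 0;
	} else {
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto out;
		from = dibh->b_data + sizeof(struct gfs2_dinode);
	}

	/* Copy the inline data and zero the rest of the folio. */
	folio_fill_tail(folio, 0, from, dsize);
	brelse(dibh);
out:
	folio_end_read(folio, error == 0);
	return error;
}
```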
412 * gfs2_read_folio - read a folio from a file
418 struct inode *inode = folio->mapping->host; in gfs2_read_folio()
433 return -EIO; in gfs2_read_folio()
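gfs2_read_folio() dispatches between the iomap path, the stuffed path, and mpage for journaled data; the -EIO above is the withdrawn-filesystem case. A sketch under those assumptions (the gfs2_is_jdata()/gfs2_is_stuffed() predicates, the withdrawal check, and the iomap/mpage helpers are not among the matched lines):

```c
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_read_folio(ip, folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}
```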
439 * gfs2_internal_read - read an internal file
450 struct address_space *mapping = ip->i_inode.i_mapping; in gfs2_internal_read()
460 if (PTR_ERR(folio) == -EINTR) in gfs2_internal_read()
464 offset = *pos + copied - folio_pos(folio); in gfs2_internal_read()
465 chunk = min(size - copied, folio_size(folio) - offset); in gfs2_internal_read()
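gfs2_internal_read() pulls an internal file through the page cache folio by folio; the offset/chunk lines above clamp each copy to the current folio. A sketch with read_cache_folio() and memcpy_from_folio() assumed from the page-cache API:

```c
ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
			   size_t size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	size_t copied = 0;

	do {
		size_t offset, chunk;
		struct folio *folio;

		folio = read_cache_folio(mapping, index, gfs2_read_folio,
					 NULL);
		if (IS_ERR(folio)) {
			/* Retry reads interrupted by a signal. */
			if (PTR_ERR(folio) == -EINTR)
				continue;
			return PTR_ERR(folio);
		}
		offset = *pos + copied - folio_pos(folio);
		chunk = min(size - copied, folio_size(folio) - offset);
		memcpy_from_folio(buf + copied, folio, offset, chunk);
		index = folio_next_index(folio);
		folio_put(folio);
		copied += chunk;
	} while (copied < size);
	(*pos) += size;
	return size;
}
```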
476 * gfs2_readahead - Read a bunch of pages at once
477 * @rac: Read-ahead control structure
492 struct inode *inode = rac->mapping->host; in gfs2_readahead()
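Readahead takes the same three-way split as gfs2_read_folio(): nothing for stuffed files (only folio 0 can hold data), mpage for jdata, iomap otherwise. A sketch with mpage_readahead() and iomap_readahead() assumed from their stock kernel signatures:

```c
static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;	/* inline data: nothing beyond folio 0 to read */
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}
```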
504 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
510 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); in adjust_fs_space()
511 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; in adjust_fs_space()
512 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; in adjust_fs_space()
524 spin_lock(&sdp->sd_statfs_spin); in adjust_fs_space()
525 gfs2_statfs_change_in(m_sc, m_bh->b_data + in adjust_fs_space()
527 if (fs_total > (m_sc->sc_total + l_sc->sc_total)) in adjust_fs_space()
528 new_free = fs_total - (m_sc->sc_total + l_sc->sc_total); in adjust_fs_space()
531 spin_unlock(&sdp->sd_statfs_spin); in adjust_fs_space()
539 sdp->sd_rindex_uptodate = 0; in adjust_fs_space()
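adjust_fs_space() recomputes free space after gfs2_grow: it totals the resource index, subtracts what the master and local statfs records already account for, credits the difference, and finally invalidates the cached rindex (the sd_rindex_uptodate line above). A sketch; gfs2_ri_total(), gfs2_statfs_change(), and update_statfs() are assumptions from the surrounding gfs2 statfs API, not matched lines:

```c
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	/* Force a re-read of the resource index on next use. */
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}
```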
546 if (current->journal_info) in jdata_dirty_folio()
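The current->journal_info test above is the whole trick of jdata_dirty_folio(): folios dirtied inside a transaction are marked "checked" so writeback knows to journal them. A sketch assuming block_dirty_folio() as the fallthrough:

```c
static bool jdata_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	/* Dirtied inside a transaction: journal this folio on writeback. */
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}
```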
552 * gfs2_bmap - Block map function
553 * @mapping: Address space info
556 * Returns: The disk address for the block or 0 on hole or error
561 struct gfs2_inode *ip = GFS2_I(mapping->host); in gfs2_bmap()
566 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); in gfs2_bmap()
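gfs2_bmap() takes a shared glock before mapping, and declines to map stuffed files, whose data would move on unstuffing. A sketch; iomap_bmap() and gfs2_is_stuffed() are assumed from the mainline implementation rather than the matched lines:

```c
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	/* Stuffed files have no distinct data block to report. */
	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);
	return dblock;
}
```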
580 struct gfs2_bufdata *bd; in gfs2_discard() local
585 bd = bh->b_private; in gfs2_discard()
586 if (bd) { in gfs2_discard()
587 if (!list_empty(&bd->bd_list) && !buffer_pinned(bh)) in gfs2_discard()
588 list_del_init(&bd->bd_list); in gfs2_discard()
590 spin_lock(&sdp->sd_ail_lock); in gfs2_discard()
592 spin_unlock(&sdp->sd_ail_lock); in gfs2_discard()
595 bh->b_bdev = NULL; in gfs2_discard()
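gfs2_discard() detaches a buffer from the journal when its folio is invalidated: unqueued bufdata is simply unlinked, anything already in the AIL is removed under sd_ail_lock, and the buffer's device mapping is cleared (the b_bdev line above). A sketch; gfs2_remove_from_journal() with REMOVE_JDATA is an assumption drawn from the mainline helper of that name:

```c
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
```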
606 struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host); in gfs2_invalidate_folio()
621 if (pos + bh->b_size > stop) in gfs2_invalidate_folio()
626 pos += bh->b_size; in gfs2_invalidate_folio()
627 bh = bh->b_this_page; in gfs2_invalidate_folio()
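gfs2_invalidate_folio() walks the folio's buffers, discarding every buffer wholly inside the invalidated range; the pos/stop arithmetic above bounds that walk. A sketch built around the matched lines; the partial_page handling and the trailing filemap_release_folio() call are assumptions from mainline:

```c
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		/* Stop at the first buffer extending past the range. */
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}
```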
635 * gfs2_release_folio - free the metadata associated with a folio
647 struct address_space *mapping = folio->mapping; in gfs2_release_folio()
650 struct gfs2_bufdata *bd; in gfs2_release_folio() local
659 * dirty folios to ->release_folio() via shrink_active_list(). in gfs2_release_folio()
662 * below. Once ->release_folio isn't called on dirty folios in gfs2_release_folio()
670 if (atomic_read(&bh->b_count)) in gfs2_release_folio()
672 bd = bh->b_private; in gfs2_release_folio()
673 if (bd && bd->bd_tr) in gfs2_release_folio()
677 bh = bh->b_this_page; in gfs2_release_folio()
682 bd = bh->b_private; in gfs2_release_folio()
683 if (bd) { in gfs2_release_folio()
684 gfs2_assert_warn(sdp, bd->bd_bh == bh); in gfs2_release_folio()
685 bd->bd_bh = NULL; in gfs2_release_folio()
686 bh->b_private = NULL; in gfs2_release_folio()
688 * The bd may still be queued as a revoke, in which in gfs2_release_folio()
691 if (!bd->bd_blkno && !list_empty(&bd->bd_list)) in gfs2_release_folio()
692 list_del_init(&bd->bd_list); in gfs2_release_folio()
693 if (list_empty(&bd->bd_list)) in gfs2_release_folio()
694 kmem_cache_free(gfs2_bufdata_cachep, bd); in gfs2_release_folio()
697 bh = bh->b_this_page; in gfs2_release_folio()
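The release path makes two passes over the folio's circular buffer list: the first bails out if anything is still busy (held reference, active transaction, dirty, or pinned), the second detaches each gfs2_bufdata, leaving revoke-queued entries (bd_blkno set) for the revoke code to free, exactly as the fragments above show. A sketch of the whole function; try_to_free_buffers() and the early dirty-folio bailout reflect the shrink_active_list() workaround described in the comments above:

```c
bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/* See the shrink_active_list() workaround described above. */
	if (folio_test_dirty(folio))
		return false;

	gfs2_log_lock(sdp);
	/* Pass 1: refuse if any buffer in the folio is still in use. */
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	/* Pass 2: detach bufdata; revoke-queued entries are freed later. */
	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}
		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}
```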
737 inode->i_mapping->a_ops = &gfs2_jdata_aops; in gfs2_set_aops()
739 inode->i_mapping->a_ops = &gfs2_aops; in gfs2_set_aops()
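Finally, gfs2_set_aops() is the switch that selects between the two address_space operation tables seen throughout this file. A sketch assuming gfs2_is_jdata() as the predicate:

```c
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}
```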