11a59d1b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
21da177e4SLinus Torvalds /*
37fab479bSDave Kleikamp * Copyright (C) International Business Machines Corp., 2000-2005
41da177e4SLinus Torvalds * Portions Copyright (C) Christoph Hellwig, 2001-2002
51da177e4SLinus Torvalds */
61da177e4SLinus Torvalds
735474d52SMatthew Wilcox (Oracle) #include <linux/blkdev.h>
81da177e4SLinus Torvalds #include <linux/fs.h>
97fab479bSDave Kleikamp #include <linux/mm.h>
10b2e03ca7SAlexey Dobriyan #include <linux/module.h>
117fab479bSDave Kleikamp #include <linux/bio.h>
125a0e3ad6STejun Heo #include <linux/slab.h>
131da177e4SLinus Torvalds #include <linux/init.h>
141da177e4SLinus Torvalds #include <linux/buffer_head.h>
151da177e4SLinus Torvalds #include <linux/mempool.h>
16b2e03ca7SAlexey Dobriyan #include <linux/seq_file.h>
17cd78ab11SMatthew Wilcox (Oracle) #include <linux/writeback.h>
181da177e4SLinus Torvalds #include "jfs_incore.h"
191da177e4SLinus Torvalds #include "jfs_superblock.h"
201da177e4SLinus Torvalds #include "jfs_filsys.h"
211da177e4SLinus Torvalds #include "jfs_metapage.h"
221da177e4SLinus Torvalds #include "jfs_txnmgr.h"
231da177e4SLinus Torvalds #include "jfs_debug.h"
241da177e4SLinus Torvalds
251da177e4SLinus Torvalds #ifdef CONFIG_JFS_STATISTICS
261da177e4SLinus Torvalds static struct {
271da177e4SLinus Torvalds uint pagealloc; /* # of page allocations */
281da177e4SLinus Torvalds uint pagefree; /* # of page frees */
291da177e4SLinus Torvalds uint lockwait; /* # of sleeping lock_metapage() calls */
301da177e4SLinus Torvalds } mpStat;
311da177e4SLinus Torvalds #endif
321da177e4SLinus Torvalds
337fab479bSDave Kleikamp #define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
3454af6233SNick Piggin #define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)
351da177e4SLinus Torvalds
/*
 * Drop META_locked (with release semantics) and wake any thread sleeping
 * in __lock_metapage() on this metapage.
 */
static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}
411da177e4SLinus Torvalds
/*
 * Slow path of lock_metapage(): sleep until META_locked can be acquired.
 *
 * Caller holds mp->folio locked.  While waiting, the folio lock is
 * dropped so the current lock holder can make progress, then re-taken
 * before re-testing the bit.
 */
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			folio_unlock(mp->folio);
			io_schedule();
			folio_lock(mp->folio);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}
581da177e4SLinus Torvalds
597fab479bSDave Kleikamp /*
60ad6c19e5SMatthew Wilcox (Oracle) * Must have mp->folio locked
617fab479bSDave Kleikamp */
static inline void lock_metapage(struct metapage *mp)
{
	/* fast path: grab the bit; sleep in __lock_metapage() on contention */
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}
671da177e4SLinus Torvalds
681da177e4SLinus Torvalds #define METAPOOL_MIN_PAGES 32
69e18b890bSChristoph Lameter static struct kmem_cache *metapage_cache;
701da177e4SLinus Torvalds static mempool_t *metapage_mempool;
711da177e4SLinus Torvalds
7209cbfeafSKirill A. Shutemov #define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)
737fab479bSDave Kleikamp
747fab479bSDave Kleikamp #if MPS_PER_PAGE > 1
757fab479bSDave Kleikamp
/*
 * When several metapages share one folio (PSIZE < PAGE_SIZE), the folio's
 * private data points at one of these anchors rather than at a single
 * metapage.
 */
struct meta_anchor {
	int mp_count;			/* # of mp[] slots currently in use */
	atomic_t io_count;		/* in-flight bios for this folio */
	blk_status_t status;		/* first I/O error seen, else BLK_STS_OK */
	struct metapage *mp[MPS_PER_PAGE];
};
827fab479bSDave Kleikamp
/* Return the metapage covering @offset within @folio, or NULL if none. */
static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
	struct meta_anchor *anchor = folio->private;

	if (!anchor)
		return NULL;
	return anchor->mp[offset >> L2PSIZE];
}
917fab479bSDave Kleikamp
insert_metapage(struct folio * folio,struct metapage * mp)929346476dSMatthew Wilcox (Oracle) static inline int insert_metapage(struct folio *folio, struct metapage *mp)
937fab479bSDave Kleikamp {
947fab479bSDave Kleikamp struct meta_anchor *a;
957fab479bSDave Kleikamp int index;
967fab479bSDave Kleikamp int l2mp_blocks; /* log2 blocks per metapage */
977fab479bSDave Kleikamp
989346476dSMatthew Wilcox (Oracle) a = folio->private;
999346476dSMatthew Wilcox (Oracle) if (!a) {
1005b3030e3SEric Sesterhenn a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
1017fab479bSDave Kleikamp if (!a)
1027fab479bSDave Kleikamp return -ENOMEM;
1039346476dSMatthew Wilcox (Oracle) folio_attach_private(folio, a);
1049346476dSMatthew Wilcox (Oracle) kmap(&folio->page);
1057fab479bSDave Kleikamp }
1067fab479bSDave Kleikamp
1077fab479bSDave Kleikamp if (mp) {
1089346476dSMatthew Wilcox (Oracle) l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
1097fab479bSDave Kleikamp index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
1107fab479bSDave Kleikamp a->mp_count++;
1117fab479bSDave Kleikamp a->mp[index] = mp;
1127fab479bSDave Kleikamp }
1137fab479bSDave Kleikamp
1147fab479bSDave Kleikamp return 0;
1157fab479bSDave Kleikamp }
1167fab479bSDave Kleikamp
/*
 * Detach @mp from @folio's anchor; when the last metapage goes, free the
 * anchor and undo the kmap taken in insert_metapage().
 */
static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
	struct meta_anchor *a = folio->private;
	int l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		folio_detach_private(folio);
		kunmap(&folio->page);
	}
}
1347fab479bSDave Kleikamp
/* Account one more in-flight bio against @folio's anchor. */
static inline void inc_io(struct folio *folio)
{
	struct meta_anchor *anchor = folio->private;

	atomic_inc(&anchor->io_count);
}
1417fab479bSDave Kleikamp
/*
 * Drop one in-flight I/O reference; when the last reference is released,
 * invoke @handler with the first error recorded for this folio (or
 * BLK_STS_OK if all bios succeeded).
 */
static inline void dec_io(struct folio *folio, blk_status_t status,
		void (*handler)(struct folio *, blk_status_t))
{
	struct meta_anchor *anchor = folio->private;

	/*
	 * NOTE(review): this read-then-write of anchor->status is not atomic;
	 * concurrent bio completions could race on it — verify callers/locking.
	 */
	if (anchor->status == BLK_STS_OK)
		anchor->status = status;

	if (atomic_dec_and_test(&anchor->io_count))
		handler(folio, anchor->status);
}
1537fab479bSDave Kleikamp
1547fab479bSDave Kleikamp #else
/* One metapage per folio: folio->private is the metapage itself. */
static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
	return folio->private;
}
1597fab479bSDave Kleikamp
insert_metapage(struct folio * folio,struct metapage * mp)1609346476dSMatthew Wilcox (Oracle) static inline int insert_metapage(struct folio *folio, struct metapage *mp)
1617fab479bSDave Kleikamp {
1627fab479bSDave Kleikamp if (mp) {
1639346476dSMatthew Wilcox (Oracle) folio_attach_private(folio, mp);
1649346476dSMatthew Wilcox (Oracle) kmap(&folio->page);
1657fab479bSDave Kleikamp }
1667fab479bSDave Kleikamp return 0;
1677fab479bSDave Kleikamp }
1687fab479bSDave Kleikamp
/* Single-metapage case: detach the private pointer and undo the kmap. */
static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
	folio_detach_private(folio);
	kunmap(&folio->page);
}
1747fab479bSDave Kleikamp
175d9c36002SMatthew Wilcox (Oracle) #define inc_io(folio) do {} while(0)
176*ee6817e7SMatthew Wilcox (Oracle) #define dec_io(folio, status, handler) handler(folio, status)
1777fab479bSDave Kleikamp
1787fab479bSDave Kleikamp #endif
1797fab479bSDave Kleikamp
/*
 * Allocate a metapage from the mempool and reset the fields that callers
 * rely on being clean.  Returns NULL if the pool allocation fails.
 */
static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

	if (!mp)
		return NULL;

	mp->lid = 0;
	mp->lsn = 0;
	mp->data = NULL;
	mp->clsn = 0;
	mp->log = NULL;
	init_waitqueue_head(&mp->wait);
	return mp;
}
1941da177e4SLinus Torvalds
/* Return a metapage structure to the mempool. */
static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}
1991da177e4SLinus Torvalds
metapage_init(void)2001da177e4SLinus Torvalds int __init metapage_init(void)
2011da177e4SLinus Torvalds {
2021da177e4SLinus Torvalds /*
2031da177e4SLinus Torvalds * Allocate the metapage structures
2041da177e4SLinus Torvalds */
2051da177e4SLinus Torvalds metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
206ee146245SDavid Rientjes 0, 0, NULL);
2071da177e4SLinus Torvalds if (metapage_cache == NULL)
2081da177e4SLinus Torvalds return -ENOMEM;
2091da177e4SLinus Torvalds
21093d2341cSMatthew Dobson metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
21193d2341cSMatthew Dobson metapage_cache);
2121da177e4SLinus Torvalds
2131da177e4SLinus Torvalds if (metapage_mempool == NULL) {
2141da177e4SLinus Torvalds kmem_cache_destroy(metapage_cache);
2151da177e4SLinus Torvalds return -ENOMEM;
2161da177e4SLinus Torvalds }
2171da177e4SLinus Torvalds
2181da177e4SLinus Torvalds return 0;
2191da177e4SLinus Torvalds }
2201da177e4SLinus Torvalds
/* Module exit: tear down the metapage mempool and slab cache. */
void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}
2261da177e4SLinus Torvalds
/*
 * Free @mp unless it is still referenced, pinned by nohomeok, dirty, or
 * under I/O.
 */
static inline void drop_metapage(struct folio *folio, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(folio, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}
2367fab479bSDave Kleikamp
2371da177e4SLinus Torvalds /*
2387fab479bSDave Kleikamp * Metapage address space operations
2391da177e4SLinus Torvalds */
2407fab479bSDave Kleikamp
/*
 * Map logical block @lblock of @inode to a physical block number.
 * On entry *len is the desired run length in fs blocks; it is clamped to
 * the end of file and to the extent actually mapped by xtLookup().
 * Returns 0 if the block is past EOF or unmapped.  For the direct inode
 * (i_ino == 0) blocks are returned unchanged (identity mapping).
 */
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}
2651da177e4SLinus Torvalds
/* Called (via dec_io) when the last read bio for @folio completes. */
static void last_read_complete(struct folio *folio, blk_status_t status)
{
	if (status)
		printk(KERN_ERR "Read error %d at %#llx\n", status,
		       folio_pos(folio));

	/* marks the folio uptodate only on success, then unlocks it */
	folio_end_read(folio, status == 0);
}
2741da177e4SLinus Torvalds
/* Read-bio completion; the folio was stashed in bi_private at submit. */
static void metapage_read_end_io(struct bio *bio)
{
	struct folio *folio = bio->bi_private;

	dec_io(folio, bio->bi_status, last_read_complete);
	bio_put(bio);
}
2827fab479bSDave Kleikamp
/*
 * Unhook @mp from its journal's synclist so the log no longer tracks
 * this metapage.
 */
static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
	/*
	 * This can race. Recheck that log hasn't been set to null, and after
	 * acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}
3041da177e4SLinus Torvalds
/*
 * Called (via dec_io) when the last write bio for @folio completes.
 * Clears META_io on every metapage that was under I/O and detaches each
 * from the log's synclist, then ends writeback on the folio.
 */
static void last_write_complete(struct folio *folio, blk_status_t status)
{
	struct metapage *mp;
	unsigned int offset;

	if (status) {
		int err = blk_status_to_errno(status);
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		mapping_set_error(folio->mapping, err);
	}

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = folio_to_mp(folio, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	folio_end_writeback(folio);
}
3307fab479bSDave Kleikamp
/* Write-bio completion; the folio was stashed in bi_private at submit. */
static void metapage_write_end_io(struct bio *bio)
{
	struct folio *folio = bio->bi_private;

	BUG_ON(!folio->private);

	dec_io(folio, bio->bi_status, last_write_complete);
	bio_put(bio);
}
3407fab479bSDave Kleikamp
/*
 * Write back every dirty metapage in @folio.  Runs of metapages that are
 * contiguous both in the folio and on disk are coalesced into a single
 * bio; a new bio is started whenever contiguity breaks.  Metapages
 * pinned by nohomeok (and not force-written) are skipped and the folio
 * is redirtied; the journal is kicked so they don't stay pinned forever.
 * Returns 0 on success, -EIO if any block could not be mapped.
 */
static int metapage_write_folio(struct folio *folio,
		struct writeback_control *wbc, void *unused)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = folio->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;		/* fs blocks left in the current disk extent */
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;	/* bios actually submitted */
	sector_t pblock;
	sector_t next_block = 0; /* next lblock expected for coalescing */
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;	/* inc_io()s we still owe a dec_io() for */

	page_start = folio_pos(folio) >> inode->i_blkbits;
	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = folio_to_mp(folio, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous */
			bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(folio);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(folio);
		xlen = (folio_size(folio) - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but can't cancel it
			 * with dec_io() until we're done with the page
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = folio;

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		/* flush the final coalesced bio */
		bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
		if (!bio->bi_iter.bi_size)
			goto dump_bio;

		submit_bio(bio);
		nr_underway++;
	}
	if (redirty)
		folio_redirty_for_writepage(wbc, folio);

	folio_unlock(folio);

	if (bad_blocks)
		goto err_out;

	/* nothing submitted: end writeback ourselves */
	if (nr_underway == 0)
		folio_end_writeback(folio);

	return 0;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
	bio_put(bio);
	folio_unlock(folio);
	dec_io(folio, BLK_STS_OK, last_write_complete);
err_out:
	/* balance the inc_io()s taken for blocks that failed to map */
	while (bad_blocks--)
		dec_io(folio, BLK_STS_OK, last_write_complete);
	return -EIO;
}
4667fab479bSDave Kleikamp
metapage_writepages(struct address_space * mapping,struct writeback_control * wbc)46735474d52SMatthew Wilcox (Oracle) static int metapage_writepages(struct address_space *mapping,
46835474d52SMatthew Wilcox (Oracle) struct writeback_control *wbc)
46935474d52SMatthew Wilcox (Oracle) {
47035474d52SMatthew Wilcox (Oracle) struct blk_plug plug;
47135474d52SMatthew Wilcox (Oracle) int err;
47235474d52SMatthew Wilcox (Oracle)
47335474d52SMatthew Wilcox (Oracle) blk_start_plug(&plug);
47435474d52SMatthew Wilcox (Oracle) err = write_cache_pages(mapping, wbc, metapage_write_folio, NULL);
47535474d52SMatthew Wilcox (Oracle) blk_finish_plug(&plug);
47635474d52SMatthew Wilcox (Oracle)
47735474d52SMatthew Wilcox (Oracle) return err;
47835474d52SMatthew Wilcox (Oracle) }
47935474d52SMatthew Wilcox (Oracle)
/*
 * ->read_folio: read every mapped block of @folio, one bio per contiguous
 * disk extent.  Unmapped blocks (holes) are simply skipped.  If nothing
 * is mapped at all, the folio is unlocked here; otherwise the last bio's
 * completion path (last_read_complete) unlocks it.
 */
static int metapage_read_folio(struct file *fp, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = i_blocks_per_folio(inode, folio);
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!folio_test_locked(folio));
	page_start = folio_pos(folio) >> inode->i_blkbits;

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			/* ensure the folio has an anchor before I/O starts */
			if (!folio->private)
				insert_metapage(folio, NULL);
			inc_io(folio);
			if (bio)
				submit_bio(bio);

			bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
					GFP_NOFS);
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = folio;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			bio_add_folio_nofail(bio, folio, len, offset);
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(bio);
	else
		folio_unlock(folio);

	return 0;
}
5277fab479bSDave Kleikamp
/*
 * ->release_folio: free every idle metapage attached to @folio.
 * Returns false (folio cannot be released) if any metapage is still
 * referenced, pinned by nohomeok, or dirty; the remaining idle ones are
 * still freed.
 */
static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct metapage *mp;
	bool ret = true;
	int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = folio_to_mp(folio, offset);

		if (!mp)
			continue;

		jfs_info("metapage_release_folio: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = false;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(folio, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}
5567fab479bSDave Kleikamp
/* ->invalidate_folio: only whole-folio invalidation is supported. */
static void metapage_invalidate_folio(struct folio *folio, size_t offset,
				      size_t length)
{
	BUG_ON(offset || length < folio_size(folio));

	BUG_ON(folio_test_writeback(folio));

	metapage_release_folio(folio, 0);
}
5667fab479bSDave Kleikamp
/* Address-space operations for the JFS metadata mapping. */
const struct address_space_operations jfs_metapage_aops = {
	.read_folio = metapage_read_folio,
	.writepages = metapage_writepages,
	.release_folio = metapage_release_folio,
	.invalidate_folio = metapage_invalidate_folio,
	.dirty_folio = filemap_dirty_folio,
};
5741da177e4SLinus Torvalds
/*
 * __get_metapage() - look up or create the metapage covering @lblock.
 * @inode:    inode the block belongs to (or any inode of the fs when
 *            @absolute is set; only the superblock is used then)
 * @lblock:   logical block number within the inode (or the device when
 *            @absolute is set)
 * @size:     number of bytes of metadata; must not cross a page boundary
 * @absolute: nonzero to address the block device directly through the
 *            JFS "direct" inode's mapping instead of @inode's mapping
 * @new:      nonzero when the caller will fully initialize the data, so
 *            the on-disk contents need not be read (page is zeroed)
 *
 * Returns the metapage with an elevated reference count and the metapage
 * lock held, or NULL on failure.  The backing folio is unlocked before
 * returning.
 *
 * Fix vs. upstream text: the read-error message said
 * "read_mapping_page failed!" although the call is read_mapping_folio();
 * the message now names the function actually called.
 */
__get_metapage(struct inode * inode,unsigned long lblock,unsigned int size,int absolute,unsigned long new)5751da177e4SLinus Torvalds struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
5761da177e4SLinus Torvalds unsigned int size, int absolute,
5771da177e4SLinus Torvalds unsigned long new)
5781da177e4SLinus Torvalds {
5791da177e4SLinus Torvalds int l2BlocksPerPage;
5801da177e4SLinus Torvalds int l2bsize;
5811da177e4SLinus Torvalds struct address_space *mapping;
5827fab479bSDave Kleikamp struct metapage *mp = NULL;
5832dcd9630SMatthew Wilcox (Oracle) struct folio *folio;
5841da177e4SLinus Torvalds unsigned long page_index;
5851da177e4SLinus Torvalds unsigned long page_offset;
5861da177e4SLinus Torvalds
5877fab479bSDave Kleikamp jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
5887fab479bSDave Kleikamp inode->i_ino, lblock, absolute);
5891da177e4SLinus Torvalds
/* Translate the fs-block address into a (page index, byte offset) pair. */
5907fab479bSDave Kleikamp l2bsize = inode->i_blkbits;
59109cbfeafSKirill A. Shutemov l2BlocksPerPage = PAGE_SHIFT - l2bsize;
5927fab479bSDave Kleikamp page_index = lblock >> l2BlocksPerPage;
5937fab479bSDave Kleikamp page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
/* A metapage must fit entirely within one page. */
59409cbfeafSKirill A. Shutemov if ((page_offset + size) > PAGE_SIZE) {
5957fab479bSDave Kleikamp jfs_err("MetaData crosses page boundary!!");
5967fab479bSDave Kleikamp jfs_err("lblock = %lx, size = %d", lblock, size);
5977fab479bSDave Kleikamp dump_stack();
5987fab479bSDave Kleikamp return NULL;
5997fab479bSDave Kleikamp }
6001da177e4SLinus Torvalds if (absolute)
6017fab479bSDave Kleikamp mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
6021da177e4SLinus Torvalds else {
6031da177e4SLinus Torvalds /*
6041da177e4SLinus Torvalds * If an nfs client tries to read an inode that is larger
6051da177e4SLinus Torvalds * than any existing inodes, we may try to read past the
6061da177e4SLinus Torvalds * end of the inode map
6071da177e4SLinus Torvalds */
6081da177e4SLinus Torvalds if ((lblock << inode->i_blkbits) >= inode->i_size)
6091da177e4SLinus Torvalds return NULL;
6101da177e4SLinus Torvalds mapping = inode->i_mapping;
6111da177e4SLinus Torvalds }
6121da177e4SLinus Torvalds
/*
 * When the caller will overwrite the whole page (new && PSIZE ==
 * PAGE_SIZE) there is no need to read it from disk: grab (or create)
 * the folio locked and mark it uptodate.  Otherwise read it in and
 * take the folio lock ourselves.
 */
61309cbfeafSKirill A. Shutemov if (new && (PSIZE == PAGE_SIZE)) {
6142dcd9630SMatthew Wilcox (Oracle) folio = filemap_grab_folio(mapping, page_index);
6152dcd9630SMatthew Wilcox (Oracle) if (IS_ERR(folio)) {
6162dcd9630SMatthew Wilcox (Oracle) jfs_err("filemap_grab_folio failed!");
6177fab479bSDave Kleikamp return NULL;
6187fab479bSDave Kleikamp }
6192dcd9630SMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
6207fab479bSDave Kleikamp } else {
6212dcd9630SMatthew Wilcox (Oracle) folio = read_mapping_folio(mapping, page_index, NULL);
6222dcd9630SMatthew Wilcox (Oracle) if (IS_ERR(folio)) {
623090d2b18SPekka Enberg jfs_err("read_mapping_folio failed!");
6247fab479bSDave Kleikamp return NULL;
6257fab479bSDave Kleikamp }
6262dcd9630SMatthew Wilcox (Oracle) folio_lock(folio);
6277fab479bSDave Kleikamp }
6287fab479bSDave Kleikamp
/* An existing metapage may already cover this offset within the folio. */
629501bb988SMatthew Wilcox (Oracle) mp = folio_to_mp(folio, page_offset);
6301da177e4SLinus Torvalds if (mp) {
6317fab479bSDave Kleikamp if (mp->logical_size != size) {
6327fab479bSDave Kleikamp jfs_error(inode->i_sb,
633eb8630d7SJoe Perches "get_mp->logical_size != size\n");
6347fab479bSDave Kleikamp jfs_err("logical_size = %d, size = %d",
6357fab479bSDave Kleikamp mp->logical_size, size);
6367fab479bSDave Kleikamp dump_stack();
6377fab479bSDave Kleikamp goto unlock;
6381da177e4SLinus Torvalds }
6391da177e4SLinus Torvalds mp->count++;
6401da177e4SLinus Torvalds lock_metapage(mp);
/*
 * A discarded metapage may only be reused when the caller is
 * creating fresh contents (@new); seeing one otherwise is a
 * filesystem error.
 */
6411da177e4SLinus Torvalds if (test_bit(META_discard, &mp->flag)) {
6421da177e4SLinus Torvalds if (!new) {
6431da177e4SLinus Torvalds jfs_error(inode->i_sb,
644eb8630d7SJoe Perches "using a discarded metapage\n");
6457fab479bSDave Kleikamp discard_metapage(mp);
6467fab479bSDave Kleikamp goto unlock;
6471da177e4SLinus Torvalds }
6481da177e4SLinus Torvalds clear_bit(META_discard, &mp->flag);
6491da177e4SLinus Torvalds }
6501da177e4SLinus Torvalds } else {
/* No metapage yet: allocate and attach one to the folio. */
6517fab479bSDave Kleikamp INCREMENT(mpStat.pagealloc);
6521da177e4SLinus Torvalds mp = alloc_metapage(GFP_NOFS);
65388a96fa8SJuerg Haefliger if (!mp)
65488a96fa8SJuerg Haefliger goto unlock;
655ad6c19e5SMatthew Wilcox (Oracle) mp->folio = folio;
65611ab8319SDave Kleikamp mp->sb = inode->i_sb;
6571da177e4SLinus Torvalds mp->flag = 0;
6581da177e4SLinus Torvalds mp->xflag = COMMIT_PAGE;
6591da177e4SLinus Torvalds mp->count = 1;
6607fab479bSDave Kleikamp mp->nohomeok = 0;
6611da177e4SLinus Torvalds mp->logical_size = size;
6622dcd9630SMatthew Wilcox (Oracle) mp->data = folio_address(folio) + page_offset;
6637fab479bSDave Kleikamp mp->index = lblock;
6649346476dSMatthew Wilcox (Oracle) if (unlikely(insert_metapage(folio, mp))) {
6657fab479bSDave Kleikamp free_metapage(mp);
6667fab479bSDave Kleikamp goto unlock;
6677fab479bSDave Kleikamp }
6687fab479bSDave Kleikamp lock_metapage(mp);
6697fab479bSDave Kleikamp }
6701da177e4SLinus Torvalds
6711da177e4SLinus Torvalds if (new) {
6727fab479bSDave Kleikamp jfs_info("zeroing mp = 0x%p", mp);
6731da177e4SLinus Torvalds memset(mp->data, 0, PSIZE);
6747fab479bSDave Kleikamp }
6751da177e4SLinus Torvalds
6762dcd9630SMatthew Wilcox (Oracle) folio_unlock(folio);
6777fab479bSDave Kleikamp jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
6781da177e4SLinus Torvalds return mp;
6791da177e4SLinus Torvalds
6807fab479bSDave Kleikamp unlock:
6812dcd9630SMatthew Wilcox (Oracle) folio_unlock(folio);
6821da177e4SLinus Torvalds return NULL;
6831da177e4SLinus Torvalds }
6841da177e4SLinus Torvalds
/*
 * grab_metapage() - take an additional reference on an existing metapage.
 * @mp: metapage already held by the caller
 *
 * Takes a folio reference, then bumps mp->count and acquires the
 * metapage lock under the folio lock (the folio lock serializes
 * access to the count).  The folio reference is kept; a later
 * release_metapage() is expected to drop it — TODO confirm against
 * callers.
 */
grab_metapage(struct metapage * mp)6857fab479bSDave Kleikamp void grab_metapage(struct metapage * mp)
6861da177e4SLinus Torvalds {
6877fab479bSDave Kleikamp jfs_info("grab_metapage: mp = 0x%p", mp);
688ad6c19e5SMatthew Wilcox (Oracle) folio_get(mp->folio);
689ad6c19e5SMatthew Wilcox (Oracle) folio_lock(mp->folio);
6907fab479bSDave Kleikamp mp->count++;
6917fab479bSDave Kleikamp lock_metapage(mp);
692ad6c19e5SMatthew Wilcox (Oracle) folio_unlock(mp->folio);
6931da177e4SLinus Torvalds }
6941da177e4SLinus Torvalds
/*
 * metapage_write_one() - synchronously write one locked folio.
 * @folio: locked folio to write out
 *
 * Local analogue of write_one_page(): wait for any writeback in
 * flight, and if the folio is dirty, clear the dirty bit for I/O and
 * submit it through metapage_write_folio() with WB_SYNC_ALL, then wait
 * for the write to complete.  On the dirty path the folio is
 * presumably unlocked by metapage_write_folio() (writepage-style
 * contract) — TODO confirm; on the clean path it is unlocked here.
 *
 * Returns 0 on success or a negative errno, folding in any previously
 * recorded mapping error via filemap_check_errors().
 */
metapage_write_one(struct folio * folio)6951252ad13SMatthew Wilcox (Oracle) static int metapage_write_one(struct folio *folio)
6962d683175SChristoph Hellwig {
6972d683175SChristoph Hellwig struct address_space *mapping = folio->mapping;
6982d683175SChristoph Hellwig struct writeback_control wbc = {
6992d683175SChristoph Hellwig .sync_mode = WB_SYNC_ALL,
7002d683175SChristoph Hellwig .nr_to_write = folio_nr_pages(folio),
7012d683175SChristoph Hellwig };
7022d683175SChristoph Hellwig int ret = 0;
7032d683175SChristoph Hellwig
7042d683175SChristoph Hellwig BUG_ON(!folio_test_locked(folio));
7052d683175SChristoph Hellwig
7062d683175SChristoph Hellwig folio_wait_writeback(folio);
7072d683175SChristoph Hellwig
7082d683175SChristoph Hellwig if (folio_clear_dirty_for_io(folio)) {
/* Extra reference keeps the folio alive across the I/O. */
7092d683175SChristoph Hellwig folio_get(folio);
71035474d52SMatthew Wilcox (Oracle) ret = metapage_write_folio(folio, &wbc, NULL);
7112d683175SChristoph Hellwig if (ret == 0)
7122d683175SChristoph Hellwig folio_wait_writeback(folio);
7132d683175SChristoph Hellwig folio_put(folio);
7142d683175SChristoph Hellwig } else {
7152d683175SChristoph Hellwig folio_unlock(folio);
7162d683175SChristoph Hellwig }
7172d683175SChristoph Hellwig
7182d683175SChristoph Hellwig if (!ret)
7192d683175SChristoph Hellwig ret = filemap_check_errors(mapping);
7202d683175SChristoph Hellwig return ret;
7212d683175SChristoph Hellwig }
7222d683175SChristoph Hellwig
/*
 * force_metapage() - force a metapage's folio to disk immediately.
 * @mp: metapage to write out
 *
 * Sets META_forcewrite (and drops META_sync, since the write happens
 * right here), dirties the folio and pushes it out synchronously via
 * metapage_write_one().  An extra folio reference is held across the
 * operation.  A write failure is reported through jfs_error(), which
 * may take the filesystem down depending on mount options — behavior
 * of jfs_error() itself is outside this file.
 */
force_metapage(struct metapage * mp)7237fab479bSDave Kleikamp void force_metapage(struct metapage *mp)
7241da177e4SLinus Torvalds {
725ad6c19e5SMatthew Wilcox (Oracle) struct folio *folio = mp->folio;
7267fab479bSDave Kleikamp jfs_info("force_metapage: mp = 0x%p", mp);
7277fab479bSDave Kleikamp set_bit(META_forcewrite, &mp->flag);
7287fab479bSDave Kleikamp clear_bit(META_sync, &mp->flag);
7291252ad13SMatthew Wilcox (Oracle) folio_get(folio);
7301252ad13SMatthew Wilcox (Oracle) folio_lock(folio);
7311252ad13SMatthew Wilcox (Oracle) folio_mark_dirty(folio);
7321252ad13SMatthew Wilcox (Oracle) if (metapage_write_one(folio))
7332d683175SChristoph Hellwig jfs_error(mp->sb, "metapage_write_one() failed\n");
7347fab479bSDave Kleikamp clear_bit(META_forcewrite, &mp->flag);
7351252ad13SMatthew Wilcox (Oracle) folio_put(folio);
7361da177e4SLinus Torvalds }
7371da177e4SLinus Torvalds
/*
 * hold_metapage() - pin a metapage by locking its backing folio.
 * @mp: metapage to hold
 *
 * The folio lock is released by the matching put_metapage() (or by
 * other code that knows the folio is locked).  No reference count is
 * taken here — the caller must already ensure the metapage stays
 * valid.
 */
hold_metapage(struct metapage * mp)7381868f4aaSDave Kleikamp void hold_metapage(struct metapage *mp)
7397fab479bSDave Kleikamp {
740ad6c19e5SMatthew Wilcox (Oracle) folio_lock(mp->folio);
7417fab479bSDave Kleikamp }
7427fab479bSDave Kleikamp
/*
 * put_metapage() - release a metapage held via hold_metapage().
 * @mp: metapage whose folio lock the caller holds
 *
 * If the metapage is still referenced (count) or held against homeok
 * (nohomeok), just drop the folio lock and let the other holder free
 * it.  Otherwise take a temporary reference + metapage lock and hand
 * the final teardown to release_metapage().
 */
put_metapage(struct metapage * mp)7431868f4aaSDave Kleikamp void put_metapage(struct metapage *mp)
7447fab479bSDave Kleikamp {
7457fab479bSDave Kleikamp if (mp->count || mp->nohomeok) {
7467fab479bSDave Kleikamp /* Someone else will release this */
747ad6c19e5SMatthew Wilcox (Oracle) folio_unlock(mp->folio);
7487fab479bSDave Kleikamp return;
7497fab479bSDave Kleikamp }
/* Take our own reference so release_metapage() has something to drop. */
750ad6c19e5SMatthew Wilcox (Oracle) folio_get(mp->folio);
7517fab479bSDave Kleikamp mp->count++;
7527fab479bSDave Kleikamp lock_metapage(mp);
753ad6c19e5SMatthew Wilcox (Oracle) folio_unlock(mp->folio);
7547fab479bSDave Kleikamp release_metapage(mp);
7557fab479bSDave Kleikamp }
7567fab479bSDave Kleikamp
/*
 * release_metapage() - drop a reference to a metapage.
 * @mp: metapage to release
 *
 * Under the folio lock: unlock the metapage and decrement its count.
 * If it is still in use (count or nohomeok) just unlock and drop the
 * folio reference.  For the last reference: if the metapage is dirty,
 * dirty the folio and, when META_sync is set, write it out
 * synchronously; if it is clean but still on the logsync list (lsn),
 * detach it (discard_metapage doesn't remove it).  Finally try to
 * free the metapage via drop_metapage() to keep memory usage down.
 */
release_metapage(struct metapage * mp)7571da177e4SLinus Torvalds void release_metapage(struct metapage * mp)
7581da177e4SLinus Torvalds {
759ad6c19e5SMatthew Wilcox (Oracle) struct folio *folio = mp->folio;
7601da177e4SLinus Torvalds jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);
7611da177e4SLinus Torvalds
76240e1bd19SMatthew Wilcox (Oracle) folio_lock(folio);
7637fab479bSDave Kleikamp unlock_metapage(mp);
7641da177e4SLinus Torvalds
7651da177e4SLinus Torvalds assert(mp->count);
7667fab479bSDave Kleikamp if (--mp->count || mp->nohomeok) {
76740e1bd19SMatthew Wilcox (Oracle) folio_unlock(folio);
76840e1bd19SMatthew Wilcox (Oracle) folio_put(folio);
7691da177e4SLinus Torvalds return;
7701da177e4SLinus Torvalds }
7711da177e4SLinus Torvalds
7727fab479bSDave Kleikamp if (test_bit(META_dirty, &mp->flag)) {
77340e1bd19SMatthew Wilcox (Oracle) folio_mark_dirty(folio);
7741da177e4SLinus Torvalds if (test_bit(META_sync, &mp->flag)) {
7751da177e4SLinus Torvalds clear_bit(META_sync, &mp->flag);
7761252ad13SMatthew Wilcox (Oracle) if (metapage_write_one(folio))
7772d683175SChristoph Hellwig jfs_error(mp->sb, "metapage_write_one() failed\n");
/* metapage_write_one() returns with the folio unlocked; re-take it. */
77840e1bd19SMatthew Wilcox (Oracle) folio_lock(folio);
7791da177e4SLinus Torvalds }
7807fab479bSDave Kleikamp } else if (mp->lsn) /* discard_metapage doesn't remove it */
7817fab479bSDave Kleikamp remove_from_logsync(mp);
7821da177e4SLinus Torvalds
7837fab479bSDave Kleikamp /* Try to keep metapages from using up too much memory */
784dd23bf31SMatthew Wilcox (Oracle) drop_metapage(folio, mp);
785d0e671a9SDave Kleikamp
78640e1bd19SMatthew Wilcox (Oracle) folio_unlock(folio);
78740e1bd19SMatthew Wilcox (Oracle) folio_put(folio);
7881da177e4SLinus Torvalds }
7891da177e4SLinus Torvalds
/*
 * __invalidate_metapages() - discard metapages covering a block range.
 * @ip:   inode used to reach the superblock (the scan always walks the
 *        block device's "direct" mapping, regardless of @ip's mapping)
 * @addr: first block of the range to invalidate
 * @len:  number of blocks
 *
 * For every cached folio overlapping [@addr, @addr + @len), mark each
 * contained metapage META_discard and clear META_dirty so it will be
 * released without being written, detaching it from the logsync list
 * if necessary.  Folios not currently in the page cache are skipped.
 */
__invalidate_metapages(struct inode * ip,s64 addr,int len)7901da177e4SLinus Torvalds void __invalidate_metapages(struct inode *ip, s64 addr, int len)
7911da177e4SLinus Torvalds {
7927fab479bSDave Kleikamp sector_t lblock;
79309cbfeafSKirill A. Shutemov int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
7947fab479bSDave Kleikamp int BlocksPerPage = 1 << l2BlocksPerPage;
7951da177e4SLinus Torvalds /* All callers are interested in block device's mapping */
7967fab479bSDave Kleikamp struct address_space *mapping =
7977fab479bSDave Kleikamp JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
7981da177e4SLinus Torvalds struct metapage *mp;
7997fab479bSDave Kleikamp unsigned int offset;
8001da177e4SLinus Torvalds
8011da177e4SLinus Torvalds /*
8027fab479bSDave Kleikamp * Mark metapages to discard. They will eventually be
8031da177e4SLinus Torvalds * released, but should not be written.
8041da177e4SLinus Torvalds */
/* Round down to a page boundary and walk one page at a time. */
8057fab479bSDave Kleikamp for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
8067fab479bSDave Kleikamp lblock += BlocksPerPage) {
807f86a3a18SMatthew Wilcox (Oracle) struct folio *folio = filemap_lock_folio(mapping,
808f86a3a18SMatthew Wilcox (Oracle) lblock >> l2BlocksPerPage);
809f86a3a18SMatthew Wilcox (Oracle) if (IS_ERR(folio))
8107fab479bSDave Kleikamp continue;
/* Check each metapage-sized slot within the folio. */
81109cbfeafSKirill A. Shutemov for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
812501bb988SMatthew Wilcox (Oracle) mp = folio_to_mp(folio, offset);
8137fab479bSDave Kleikamp if (!mp)
8147fab479bSDave Kleikamp continue;
8157fab479bSDave Kleikamp if (mp->index < addr)
8167fab479bSDave Kleikamp continue;
8177fab479bSDave Kleikamp if (mp->index >= addr + len)
8187fab479bSDave Kleikamp break;
8191da177e4SLinus Torvalds
8201da177e4SLinus Torvalds clear_bit(META_dirty, &mp->flag);
8211da177e4SLinus Torvalds set_bit(META_discard, &mp->flag);
8227fab479bSDave Kleikamp if (mp->lsn)
8237fab479bSDave Kleikamp remove_from_logsync(mp);
8247fab479bSDave Kleikamp }
825f86a3a18SMatthew Wilcox (Oracle) folio_unlock(folio);
826f86a3a18SMatthew Wilcox (Oracle) folio_put(folio);
8271da177e4SLinus Torvalds }
8281da177e4SLinus Torvalds }
8291da177e4SLinus Torvalds
8301da177e4SLinus Torvalds #ifdef CONFIG_JFS_STATISTICS
/*
 * jfs_mpstat_proc_show() - dump metapage statistics to a seq_file.
 *
 * Emits the CONFIG_JFS_STATISTICS counters (page allocations, page
 * frees, lock waits) accumulated in the file-local mpStat struct.
 * Always returns 0.
 */
jfs_mpstat_proc_show(struct seq_file * m,void * v)83107a3b8edSChristoph Hellwig int jfs_mpstat_proc_show(struct seq_file *m, void *v)
8321da177e4SLinus Torvalds {
833b2e03ca7SAlexey Dobriyan seq_printf(m,
8341da177e4SLinus Torvalds "JFS Metapage statistics\n"
8351da177e4SLinus Torvalds "=======================\n"
8361da177e4SLinus Torvalds "page allocations = %d\n"
8371da177e4SLinus Torvalds "page frees = %d\n"
8381da177e4SLinus Torvalds "lock waits = %d\n",
8391da177e4SLinus Torvalds mpStat.pagealloc,
8401da177e4SLinus Torvalds mpStat.pagefree,
8411da177e4SLinus Torvalds mpStat.lockwait);
842b2e03ca7SAlexey Dobriyan return 0;
8431da177e4SLinus Torvalds }
8441da177e4SLinus Torvalds #endif
845