xref: /linux/fs/bcachefs/fs-io-pagecache.h (revision ab52c59103002b49f2455371e4b9c56ba3ef1781)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_FS_IO_PAGECACHE_H
3 #define _BCACHEFS_FS_IO_PAGECACHE_H
4 
5 #include <linux/pagemap.h>
6 
7 typedef DARRAY(struct folio *) folios;
8 
9 int bch2_filemap_get_contig_folios_d(struct address_space *, loff_t,
10 				     u64, fgf_t, gfp_t, folios *);
11 int bch2_write_invalidate_inode_pages_range(struct address_space *, loff_t, loff_t);
12 
13 /*
14  * Use u64 for the end pos and sector helpers because if the folio covers the
15  * max supported range of the mapping, the start offset of the next folio
16  * overflows loff_t. This breaks much of the range based processing in the
17  * buffered write path.
18  */
19 static inline u64 folio_end_pos(struct folio *folio)
20 {
21 	return folio_pos(folio) + folio_size(folio);
22 }
23 
24 static inline size_t folio_sectors(struct folio *folio)
25 {
26 	return PAGE_SECTORS << folio_order(folio);
27 }
28 
29 static inline loff_t folio_sector(struct folio *folio)
30 {
31 	return folio_pos(folio) >> 9;
32 }
33 
34 static inline u64 folio_end_sector(struct folio *folio)
35 {
36 	return folio_end_pos(folio) >> 9;
37 }
38 
/*
 * Per-sector pagecache state, generated via the x-macro below: each x(n)
 * entry expands to a SECTOR_n enumerator (SECTOR_unallocated, SECTOR_reserved,
 * SECTOR_dirty, SECTOR_dirty_reserved, SECTOR_allocated).
 */
#define BCH_FOLIO_SECTOR_STATE()	\
	x(unallocated)			\
	x(reserved)			\
	x(dirty)			\
	x(dirty_reserved)		\
	x(allocated)

enum bch_folio_sector_state {
#define x(n)	SECTOR_##n,
	BCH_FOLIO_SECTOR_STATE()
#undef x
};
51 
/* Per-sector tracking attached to a folio (see struct bch_folio below). */
struct bch_folio_sector {
	/* Uncompressed, fully allocated replicas (or on disk reservation): */
	u8			nr_replicas:4,
	/* Owns PAGE_SECTORS * replicas_reserved sized in memory reservation: */
				replicas_reserved:4;
	/* One of enum bch_folio_sector_state (SECTOR_*) */
	u8			state;
};
59 
/*
 * Private (folio->private) state attached to a pagecache folio; one
 * bch_folio_sector entry per 512-byte sector, indexed via folio_pos_to_s().
 */
struct bch_folio {
	/* protects the s[] sector array */
	spinlock_t		lock;
	atomic_t		write_count;
	/*
	 * Is the sector state up to date with the btree?
	 * (Not the data itself)
	 */
	bool			uptodate;
	struct bch_folio_sector	s[];
};
70 
71 /* Helper for when we need to add debug instrumentation: */
/*
 * Set the state of sector @i of @folio's bch_folio @s to @n (a
 * SECTOR_* value).
 *
 * Helper for when we need to add debug instrumentation:
 */
static inline void bch2_folio_sector_set(struct folio *folio,
			     struct bch_folio *s,
			     unsigned i, unsigned n)
{
	s->s[i].state = n;
}
78 
79 /* file offset (to folio offset) to bch_folio_sector index */
80 static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
81 {
82 	u64 f_offset = pos - folio_pos(folio);
83 
84 	BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));
85 	return f_offset >> SECTOR_SHIFT;
86 }
87 
88 /* for newly allocated folios: */
/* for newly allocated folios: */
static inline void __bch2_folio_release(struct folio *folio)
{
	void *s = folio_detach_private(folio);

	kfree(s);
}
93 
/* Release a folio's private state; caller must hold the folio lock. */
static inline void bch2_folio_release(struct folio *folio)
{
	EBUG_ON(!folio_test_locked(folio));

	__bch2_folio_release(folio);
}
99 
100 static inline struct bch_folio *__bch2_folio(struct folio *folio)
101 {
102 	return folio_has_private(folio)
103 		? (struct bch_folio *) folio_get_private(folio)
104 		: NULL;
105 }
106 
/* Like __bch2_folio(), but asserts the folio lock is held. */
static inline struct bch_folio *bch2_folio(struct folio *folio)
{
	EBUG_ON(!folio_test_locked(folio));
	return __bch2_folio(folio);
}
113 
114 struct bch_folio *__bch2_folio_create(struct folio *, gfp_t);
115 struct bch_folio *bch2_folio_create(struct folio *, gfp_t);
116 
/* Disk-space + quota reservation held while dirtying pagecache. */
struct bch2_folio_reservation {
	struct disk_reservation	disk;
	struct quota_res	quota;
};
121 
122 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
123 {
124 	/* XXX: this should not be open coded */
125 	return inode->ei_inode.bi_data_replicas
126 		? inode->ei_inode.bi_data_replicas - 1
127 		: c->opts.data_replicas;
128 }
129 
130 static inline void bch2_folio_reservation_init(struct bch_fs *c,
131 			struct bch_inode_info *inode,
132 			struct bch2_folio_reservation *res)
133 {
134 	memset(res, 0, sizeof(*res));
135 
136 	res->disk.nr_replicas = inode_nr_replicas(c, inode);
137 }
138 
139 int bch2_folio_set(struct bch_fs *, subvol_inum, struct folio **, unsigned);
140 void bch2_bio_page_state_set(struct bio *, struct bkey_s_c);
141 
142 void bch2_mark_pagecache_unallocated(struct bch_inode_info *, u64, u64);
143 int bch2_mark_pagecache_reserved(struct bch_inode_info *, u64 *, u64, bool);
144 
145 int bch2_get_folio_disk_reservation(struct bch_fs *,
146 				struct bch_inode_info *,
147 				struct folio *, bool);
148 
149 void bch2_folio_reservation_put(struct bch_fs *,
150 			struct bch_inode_info *,
151 			struct bch2_folio_reservation *);
152 int bch2_folio_reservation_get(struct bch_fs *,
153 			struct bch_inode_info *,
154 			struct folio *,
155 			struct bch2_folio_reservation *,
156 			unsigned, unsigned);
157 
158 void bch2_set_folio_dirty(struct bch_fs *,
159 			  struct bch_inode_info *,
160 			  struct folio *,
161 			  struct bch2_folio_reservation *,
162 			  unsigned, unsigned);
163 
164 vm_fault_t bch2_page_fault(struct vm_fault *);
165 vm_fault_t bch2_page_mkwrite(struct vm_fault *);
166 void bch2_invalidate_folio(struct folio *, size_t, size_t);
167 bool bch2_release_folio(struct folio *, gfp_t);
168 
169 loff_t bch2_seek_pagecache_data(struct inode *, loff_t, loff_t, unsigned, bool);
170 loff_t bch2_seek_pagecache_hole(struct inode *, loff_t, loff_t, unsigned, bool);
171 int bch2_clamp_data_hole(struct inode *, u64 *, u64 *, unsigned, bool);
172 
173 #endif /* _BCACHEFS_FS_IO_PAGECACHE_H */
174