xref: /linux/fs/bcachefs/fs-io-pagecache.c (revision 06a130e42a5bfc84795464bff023bff4c16f58c5)
// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "btree_iter.h"
#include "extents.h"
#include "fs-io.h"
#include "fs-io-pagecache.h"
#include "subvolume.h"

#include <linux/pagevec.h>
#include <linux/writeback.h>

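/*
 * Get a contiguous run of folios for the byte range [start, end): folios are
 * appended to @fs, stopping at the first gap in the pagecache. FGP_CREAT is
 * dropped once we're more than 1MB past @start, so only the head of a large
 * range is created on demand. Returns 0 if at least one folio was obtained;
 * otherwise an error (-ENOMEM if FGP_CREAT was set but nothing was found).
 */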
int bch2_filemap_get_contig_folios_d(struct address_space *mapping,
				     loff_t start, u64 end,
				     fgf_t fgp_flags, gfp_t gfp,
				     folios *fs)
{
	struct folio *f;
	u64 pos = start;
	int ret = 0;

	while (pos < end) {
		if ((u64) pos >= (u64) start + (1ULL << 20))
			fgp_flags &= ~FGP_CREAT;

		ret = darray_make_room_gfp(fs, 1, gfp & GFP_KERNEL);
		if (ret)
			break;

		f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
		if (IS_ERR_OR_NULL(f))
			break;

		BUG_ON(fs->nr && folio_pos(f) != pos);

		pos = folio_end_pos(f);
		darray_push(fs, f);
	}

	if (!fs->nr && !ret && (fgp_flags & FGP_CREAT))
		ret = -ENOMEM;

	return fs->nr ? 0 : ret;
}

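/*
 * Write back and then invalidate the pagecache over the given byte range,
 * retrying as long as invalidate_inode_pages2_range() returns -EBUSY (i.e. a
 * page was redirtied underneath us).
 */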
/* pagecache_block must be held */
int bch2_write_invalidate_inode_pages_range(struct address_space *mapping,
					    loff_t start, loff_t end)
{
	int ret;

	/*
	 * XXX: the way this is currently implemented, we can spin if a process
	 * is continually redirtying a specific page
	 */
	do {
		if (!mapping->nrpages)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);
		if (ret)
			break;

		if (!mapping->nrpages)
			return 0;

		ret = invalidate_inode_pages2_range(mapping,
				start >> PAGE_SHIFT,
				end >> PAGE_SHIFT);
	} while (ret == -EBUSY);

	return ret;
}

#if 0
/* Useful for debug tracing: */
static const char * const bch2_folio_sector_states[] = {
#define x(n)	#n,
	BCH_FOLIO_SECTOR_STATE()
#undef x
	NULL
};
#endif

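/*
 * State transition helpers for bch_folio sector state: map a sector's current
 * state to its dirty/undirtied/reserved counterpart, leaving states that don't
 * participate in the transition unchanged.
 */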
static inline enum bch_folio_sector_state
folio_sector_dirty(enum bch_folio_sector_state state)
{
	switch (state) {
	case SECTOR_unallocated:
		return SECTOR_dirty;
	case SECTOR_reserved:
		return SECTOR_dirty_reserved;
	default:
		return state;
	}
}

static inline enum bch_folio_sector_state
folio_sector_undirty(enum bch_folio_sector_state state)
{
	switch (state) {
	case SECTOR_dirty:
		return SECTOR_unallocated;
	case SECTOR_dirty_reserved:
		return SECTOR_reserved;
	default:
		return state;
	}
}

static inline enum bch_folio_sector_state
folio_sector_reserve(enum bch_folio_sector_state state)
{
	switch (state) {
	case SECTOR_unallocated:
		return SECTOR_reserved;
	case SECTOR_dirty:
		return SECTOR_dirty_reserved;
	default:
		return state;
	}
}

/* for newly allocated folios: */
struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
{
	struct bch_folio *s;

	s = kzalloc(sizeof(*s) +
		    sizeof(struct bch_folio_sector) *
		    folio_sectors(folio), gfp);
	if (!s)
		return NULL;

	spin_lock_init(&s->lock);
	folio_attach_private(folio, s);
	return s;
}

struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
{
	return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
}

static unsigned bkey_to_sector_state(struct bkey_s_c k)
{
	if (bkey_extent_is_reservation(k))
		return SECTOR_reserved;
	if (bkey_extent_is_allocation(k.k))
		return SECTOR_allocated;
	return SECTOR_unallocated;
}

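/*
 * Set nr_replicas and sector state for @pg_len sectors starting at @pg_offset
 * within @folio; if the range runs to the end of the folio, the bch_folio is
 * marked uptodate.
 */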
static void __bch2_folio_set(struct folio *folio,
			     unsigned pg_offset, unsigned pg_len,
			     unsigned nr_ptrs, unsigned state)
{
	struct bch_folio *s = bch2_folio(folio);
	unsigned i, sectors = folio_sectors(folio);

	BUG_ON(pg_offset >= sectors);
	BUG_ON(pg_offset + pg_len > sectors);

	spin_lock(&s->lock);

	for (i = pg_offset; i < pg_offset + pg_len; i++) {
		s->s[i].nr_replicas	= nr_ptrs;
		bch2_folio_sector_set(folio, s, i, state);
	}

	if (i == sectors)
		s->uptodate = true;

	spin_unlock(&s->lock);
}

/*
 * Initialize bch_folio state (allocated/unallocated, nr_replicas) from the
 * extents btree:
 */
int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
		   struct folio **fs, unsigned nr_folios)
{
	u64 offset = folio_sector(fs[0]);
	bool need_set = false;

	for (unsigned folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
		struct bch_folio *s = bch2_folio_create(fs[folio_idx], GFP_KERNEL);
		if (!s)
			return -ENOMEM;

		need_set |= !s->uptodate;
	}

	if (!need_set)
		return 0;

	unsigned folio_idx = 0;

	return bch2_trans_run(c,
		for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents,
				   POS(inum.inum, offset),
				   POS(inum.inum, U64_MAX),
				   inum.subvol, BTREE_ITER_slots, k, ({
			unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
			unsigned state = bkey_to_sector_state(k);

			while (folio_idx < nr_folios) {
				struct folio *folio = fs[folio_idx];
				u64 folio_start	= folio_sector(folio);
				u64 folio_end	= folio_end_sector(folio);
				unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) -
					folio_start;
				unsigned folio_len = min(k.k->p.offset, folio_end) -
					folio_offset - folio_start;

				BUG_ON(k.k->p.offset < folio_start);
				BUG_ON(bkey_start_offset(k.k) > folio_end);

				if (!bch2_folio(folio)->uptodate)
					__bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);

				if (k.k->p.offset < folio_end)
					break;
				folio_idx++;
			}

			if (folio_idx == nr_folios)
				break;
			0;
		})));
}

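/*
 * Initialize bch_folio sector state for every folio in @bio from extent key
 * @k; nr_replicas is left at 0 for reflink_v keys.
 */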
void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
{
	struct bvec_iter iter;
	struct folio_vec fv;
	unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
		? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
	unsigned state = bkey_to_sector_state(k);

	bio_for_each_folio(fv, bio, iter)
		__bch2_folio_set(fv.fv_folio,
				 fv.fv_offset >> 9,
				 fv.fv_len >> 9,
				 nr_ptrs, state);
}

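/*
 * Walk the pagecache over the sector range [start, end) and zero nr_replicas
 * in the bch_folio state of every cached sector, so that sectors_to_reserve()
 * will again count them as needing a disk reservation.
 */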
void bch2_mark_pagecache_unallocated(struct bch_inode_info *inode,
				     u64 start, u64 end)
{
	pgoff_t index = start >> PAGE_SECTORS_SHIFT;
	pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
	struct folio_batch fbatch;
	unsigned i, j;

	if (end <= start)
		return;

	folio_batch_init(&fbatch);

	while (filemap_get_folios(inode->v.i_mapping,
				  &index, end_index, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];
			u64 folio_start = folio_sector(folio);
			u64 folio_end = folio_end_sector(folio);
			unsigned folio_offset = max(start, folio_start) - folio_start;
			unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
			struct bch_folio *s;

			BUG_ON(end <= folio_start);

			folio_lock(folio);
			s = bch2_folio(folio);

			if (s) {
				spin_lock(&s->lock);
				for (j = folio_offset; j < folio_offset + folio_len; j++)
					s->s[j].nr_replicas = 0;
				spin_unlock(&s->lock);
			}

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

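/*
 * Walk the pagecache over the sector range [*start, end) and move each cached
 * sector to its reserved state, tracking how many of those sectors were
 * already dirty so that i_sectors accounting can be adjusted. *start is
 * advanced past each folio processed; with @nonblocking set, -EAGAIN is
 * returned if a folio can't be trylocked.
 */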
int bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
				 u64 *start, u64 end,
				 bool nonblocking)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	pgoff_t index = *start >> PAGE_SECTORS_SHIFT;
	pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
	struct folio_batch fbatch;
	s64 i_sectors_delta = 0;
	int ret = 0;

	if (end <= *start)
		return 0;

	folio_batch_init(&fbatch);

	while (filemap_get_folios(inode->v.i_mapping,
				  &index, end_index, &fbatch)) {
		for (unsigned i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			if (!nonblocking)
				folio_lock(folio);
			else if (!folio_trylock(folio)) {
				folio_batch_release(&fbatch);
				ret = -EAGAIN;
				break;
			}

			u64 folio_start = folio_sector(folio);
			u64 folio_end = folio_end_sector(folio);

			BUG_ON(end <= folio_start);

			*start = min(end, folio_end);

			struct bch_folio *s = bch2_folio(folio);
			if (s) {
				unsigned folio_offset = max(*start, folio_start) - folio_start;
				unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;

				spin_lock(&s->lock);
				for (unsigned j = folio_offset; j < folio_offset + folio_len; j++) {
					i_sectors_delta -= s->s[j].state == SECTOR_dirty;
					bch2_folio_sector_set(folio, s, j,
						folio_sector_reserve(s->s[j].state));
				}
				spin_unlock(&s->lock);
			}

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
	return ret;
}

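/* Number of additional replicas this sector still needs a reservation for: */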
static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
					  unsigned nr_replicas)
{
	return max(0, (int) nr_replicas -
		   s->nr_replicas -
		   s->replicas_reserved);
}

int bch2_get_folio_disk_reservation(struct bch_fs *c,
				struct bch_inode_info *inode,
				struct folio *folio, bool check_enospc)
{
	struct bch_folio *s = bch2_folio_create(folio, 0);
	unsigned nr_replicas = inode_nr_replicas(c, inode);
	struct disk_reservation disk_res = { 0 };
	unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	for (i = 0; i < sectors; i++)
		disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);

	if (!disk_res_sectors)
		return 0;

	ret = bch2_disk_reservation_get(c, &disk_res,
					disk_res_sectors, 1,
					!check_enospc
					? BCH_DISK_RESERVATION_NOFAIL
					: 0);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < sectors; i++)
		s->s[i].replicas_reserved +=
			sectors_to_reserve(&s->s[i], nr_replicas);

	return 0;
}

void bch2_folio_reservation_put(struct bch_fs *c,
			struct bch_inode_info *inode,
			struct bch2_folio_reservation *res)
{
	bch2_disk_reservation_put(c, &res->disk);
	bch2_quota_reservation_put(c, inode, &res->quota);
}

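/*
 * Reserve disk and quota space for a write to the byte range
 * [offset, offset + len) within @folio, rounded out to the filesystem block
 * size; the bch_folio state must already be uptodate. If the quota reservation
 * fails, the disk reservation taken here is released again.
 */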
int bch2_folio_reservation_get(struct bch_fs *c,
			struct bch_inode_info *inode,
			struct folio *folio,
			struct bch2_folio_reservation *res,
			size_t offset, size_t len)
{
	struct bch_folio *s = bch2_folio_create(folio, 0);
	unsigned i, disk_sectors = 0, quota_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	BUG_ON(!s->uptodate);

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		disk_sectors += sectors_to_reserve(&s->s[i], res->disk.nr_replicas);
		quota_sectors += s->s[i].state == SECTOR_unallocated;
	}

	if (disk_sectors) {
		ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
		if (unlikely(ret))
			return ret;
	}

	if (quota_sectors) {
		ret = bch2_quota_reservation_add(c, inode, &res->quota, quota_sectors, true);
		if (unlikely(ret)) {
			struct disk_reservation tmp = { .sectors = disk_sectors };

			bch2_disk_reservation_put(c, &tmp);
			res->disk.sectors -= disk_sectors;
			return ret;
		}
	}

	return 0;
}

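/*
 * Like bch2_folio_reservation_get(), but may reserve less than requested: on
 * failure the attempted length is halved until a request within a single block
 * fails. Returns the number of bytes reserved, or an error if nothing could be
 * reserved.
 */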
ssize_t bch2_folio_reservation_get_partial(struct bch_fs *c,
			struct bch_inode_info *inode,
			struct folio *folio,
			struct bch2_folio_reservation *res,
			size_t offset, size_t len)
{
	size_t l, reserved = 0;
	int ret;

	while ((l = len - reserved)) {
		while ((ret = bch2_folio_reservation_get(c, inode, folio, res, offset, l))) {
			if ((offset & (block_bytes(c) - 1)) + l <= block_bytes(c))
				return reserved ?: ret;

			len = reserved + l;
			l /= 2;
		}

		offset += l;
		reserved += l;
	}

	return reserved;
}

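/*
 * Tear down bch_folio state when a folio is removed from the pagecache: return
 * its disk reservations, undirty its sectors (adjusting i_sectors accordingly)
 * and free the attached bch_folio.
 */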
static void bch2_clear_folio_bits(struct folio *folio)
{
	struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_folio *s = bch2_folio(folio);
	struct disk_reservation disk_res = { 0 };
	int i, sectors = folio_sectors(folio), dirty_sectors = 0;

	if (!s)
		return;

	EBUG_ON(!folio_test_locked(folio));
	EBUG_ON(folio_test_writeback(folio));

	for (i = 0; i < sectors; i++) {
		disk_res.sectors += s->s[i].replicas_reserved;
		s->s[i].replicas_reserved = 0;

		dirty_sectors -= s->s[i].state == SECTOR_dirty;
		bch2_folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));
	}

	bch2_disk_reservation_put(c, &disk_res);

	bch2_i_sectors_acct(c, inode, NULL, dirty_sectors);

	bch2_folio_release(folio);
}

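/*
 * Mark the byte range [offset, offset + len) within @folio dirty: move the
 * needed disk reservation from @res into the folio's per-sector state, account
 * newly dirtied sectors against quota/i_sectors, and dirty the folio in the
 * pagecache.
 */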
void bch2_set_folio_dirty(struct bch_fs *c,
			  struct bch_inode_info *inode,
			  struct folio *folio,
			  struct bch2_folio_reservation *res,
			  unsigned offset, unsigned len)
{
	struct bch_folio *s = bch2_folio(folio);
	unsigned i, dirty_sectors = 0;

	WARN_ON((u64) folio_pos(folio) + offset + len >
		round_up((u64) i_size_read(&inode->v), block_bytes(c)));

	BUG_ON(!s->uptodate);

	spin_lock(&s->lock);

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		unsigned sectors = sectors_to_reserve(&s->s[i],
						res->disk.nr_replicas);

		/*
		 * This can happen if we race with the error path in
		 * bch2_writepage_io_done():
		 */
		sectors = min_t(unsigned, sectors, res->disk.sectors);

		s->s[i].replicas_reserved += sectors;
		res->disk.sectors -= sectors;

		dirty_sectors += s->s[i].state == SECTOR_unallocated;

		bch2_folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));
	}

	spin_unlock(&s->lock);

	bch2_i_sectors_acct(c, inode, &res->quota, dirty_sectors);

	if (!folio_test_dirty(folio))
		filemap_dirty_folio(inode->v.i_mapping, folio);
}

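/*
 * Fault handler: takes the pagecache add lock around filemap_fault(). A fault
 * on the mapping that currently has faults disabled (a dio write faulting on
 * its own pages) gets SIGBUS; the fdm > mapping case handles lock ordering
 * against the other inode's pagecache_block lock, signalling to the dio path
 * when its locks had to be dropped.
 */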
vm_fault_t bch2_page_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct address_space *fdm = faults_disabled_mapping();
	struct bch_inode_info *inode = file_bch_inode(file);
	vm_fault_t ret;

	if (fdm == mapping)
		return VM_FAULT_SIGBUS;

	/* Lock ordering: */
	if (fdm > mapping) {
		struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);

		if (bch2_pagecache_add_tryget(inode))
			goto got_lock;

		bch2_pagecache_block_put(fdm_host);

		bch2_pagecache_add_get(inode);
		bch2_pagecache_add_put(inode);

		bch2_pagecache_block_get(fdm_host);

		/* Signal that lock has been dropped: */
		set_fdm_dropped_locks();
		return VM_FAULT_SIGBUS;
	}

	bch2_pagecache_add_get(inode);
got_lock:
	ret = filemap_fault(vmf);
	bch2_pagecache_add_put(inode);

	return ret;
}

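/*
 * ->page_mkwrite: make a mapped folio writable by reserving space for it and
 * marking it dirty; returns VM_FAULT_SIGBUS if the folio can't be backed by a
 * reservation, VM_FAULT_NOPAGE if it was truncated out from under us.
 */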
vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation res;
	unsigned len;
	loff_t isize;
	vm_fault_t ret;

	bch2_folio_reservation_init(c, inode, &res);

	sb_start_pagefault(inode->v.i_sb);
	file_update_time(file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * bch2_write_invalidate_inode_pages_range() - can drop this if/when we
	 * get a bch2_write_invalidate_inode_pages_range() that works without
	 * dropping the page lock before invalidating the page:
	 */
	bch2_pagecache_add_get(inode);

	folio_lock(folio);
	isize = i_size_read(&inode->v);

	if (folio->mapping != mapping || folio_pos(folio) >= isize) {
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));

	if (bch2_folio_set(c, inode_inum(inode), &folio, 1) ?:
	    bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
		folio_unlock(folio);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
	bch2_folio_reservation_put(c, inode, &res);

	folio_wait_stable(folio);
	ret = VM_FAULT_LOCKED;
out:
	bch2_pagecache_add_put(inode);
	sb_end_pagefault(inode->v.i_sb);

	return ret;
}

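/*
 * Address space ops: bch_folio state is torn down when the entire folio is
 * invalidated, and on ->release_folio provided the folio is neither dirty nor
 * under writeback.
 */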
void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	if (offset || length < folio_size(folio))
		return;

	bch2_clear_folio_bits(folio);
}

bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return false;

	bch2_clear_folio_bits(folio);
	return true;
}

/* fseek: */

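/*
 * Return the byte offset within @folio of the first sector at or after @pos
 * that holds data (SECTOR_dirty or above) with at least @min_replicas
 * replicas, or -1 if there is none.
 */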
static int folio_data_offset(struct folio *folio, loff_t pos,
			     unsigned min_replicas)
{
	struct bch_folio *s = bch2_folio(folio);
	unsigned i, sectors = folio_sectors(folio);

	if (s)
		for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
			if (s->s[i].state >= SECTOR_dirty &&
			    s->s[i].nr_replicas + s->s[i].replicas_reserved >= min_replicas)
				return i << SECTOR_SHIFT;

	return -1;
}

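/*
 * Scan the pagecache for the first data at or after @start_offset, returning
 * @end_offset if none is found before it; returns -EAGAIN if @nonblock is set
 * and a folio couldn't be trylocked.
 */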
loff_t bch2_seek_pagecache_data(struct inode *vinode,
				loff_t start_offset,
				loff_t end_offset,
				unsigned min_replicas,
				bool nonblock)
{
	struct folio_batch fbatch;
	pgoff_t start_index	= start_offset >> PAGE_SHIFT;
	pgoff_t end_index	= end_offset >> PAGE_SHIFT;
	pgoff_t index		= start_index;
	unsigned i;
	loff_t ret;
	int offset;

	folio_batch_init(&fbatch);

	while (filemap_get_folios(vinode->i_mapping,
				  &index, end_index, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			if (!nonblock) {
				folio_lock(folio);
			} else if (!folio_trylock(folio)) {
				folio_batch_release(&fbatch);
				return -EAGAIN;
			}

			offset = folio_data_offset(folio,
					max(folio_pos(folio), start_offset),
					min_replicas);
			if (offset >= 0) {
				ret = clamp(folio_pos(folio) + offset,
					    start_offset, end_offset);
				folio_unlock(folio);
				folio_batch_release(&fbatch);
				return ret;
			}
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	return end_offset;
}

/*
 * Search for a hole in a folio.
 *
 * The filemap layer returns -ENOENT if no folio exists, so reuse the same error
 * code to indicate a pagecache hole exists at the returned offset. Otherwise
 * return 0 if the folio is filled with data, or an error code. This function
 * can return -EAGAIN if nonblock is specified.
 */
static int folio_hole_offset(struct address_space *mapping, loff_t *offset,
			      unsigned min_replicas, bool nonblock)
{
	struct folio *folio;
	struct bch_folio *s;
	unsigned i, sectors;
	int ret = -ENOENT;

	folio = __filemap_get_folio(mapping, *offset >> PAGE_SHIFT,
				    FGP_LOCK|(nonblock ? FGP_NOWAIT : 0), 0);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	s = bch2_folio(folio);
	if (!s)
		goto unlock;

	sectors = folio_sectors(folio);
	for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
		if (s->s[i].state < SECTOR_dirty ||
		    s->s[i].nr_replicas + s->s[i].replicas_reserved < min_replicas) {
			*offset = max(*offset,
				      folio_pos(folio) + (i << SECTOR_SHIFT));
			goto unlock;
		}

	*offset = folio_end_pos(folio);
	ret = 0;
unlock:
	folio_unlock(folio);
	folio_put(folio);
	return ret;
}

loff_t bch2_seek_pagecache_hole(struct inode *vinode,
				loff_t start_offset,
				loff_t end_offset,
				unsigned min_replicas,
				bool nonblock)
{
	struct address_space *mapping = vinode->i_mapping;
	loff_t offset = start_offset;
	loff_t ret = 0;

	while (!ret && offset < end_offset)
		ret = folio_hole_offset(mapping, &offset, min_replicas, nonblock);

	if (ret && ret != -ENOENT)
		return ret;
	return min(offset, end_offset);
}

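/*
 * Shrink the hole [*hole_start, *hole_end) (in sectors) so that it doesn't
 * overlap data that exists only in the pagecache: the start is advanced to the
 * first pagecache hole, and the end is pulled back to the next offset with
 * pagecache data.
 */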
int bch2_clamp_data_hole(struct inode *inode,
			 u64 *hole_start,
			 u64 *hole_end,
			 unsigned min_replicas,
			 bool nonblock)
{
	loff_t ret;

	ret = bch2_seek_pagecache_hole(inode,
		*hole_start << 9, *hole_end << 9, min_replicas, nonblock) >> 9;
	if (ret < 0)
		return ret;

	*hole_start = ret;

	if (*hole_start == *hole_end)
		return 0;

	ret = bch2_seek_pagecache_data(inode,
		*hole_start << 9, *hole_end << 9, min_replicas, nonblock) >> 9;
	if (ret < 0)
		return ret;

	*hole_end = ret;
	return 0;
}

#endif /* NO_BCACHEFS_FS */