Lines Matching refs:fs_info
68 bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping) in btrfs_is_subpage() argument
70 if (fs_info->sectorsize >= PAGE_SIZE) in btrfs_is_subpage()
85 if (fs_info->nodesize < PAGE_SIZE) in btrfs_is_subpage()
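The two tests listed above (source lines 70 and 85) are the whole of the decision: data needs the subpage machinery whenever the sector size is smaller than the page size, and metadata additionally requires the node size to be smaller than the page. A minimal userspace sketch of that decision, using a hypothetical fs_ctx struct and an is_metadata flag in place of the real btrfs_fs_info and mapping check:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 16384u        /* assumed 16K page size */

    /* Hypothetical stand-in for the few btrfs_fs_info fields used here. */
    struct fs_ctx {
        unsigned int sectorsize;    /* data block size */
        unsigned int nodesize;      /* metadata node size */
    };

    /*
     * Sketch of the decision: data needs subpage handling whenever
     * sectorsize < PAGE_SIZE; metadata only when nodesize is also smaller.
     */
    static bool is_subpage(const struct fs_ctx *fs, bool is_metadata)
    {
        if (fs->sectorsize >= PAGE_SIZE)
            return false;
        if (!is_metadata)
            return true;
        return fs->nodesize < PAGE_SIZE;
    }

    int main(void)
    {
        struct fs_ctx fs = { .sectorsize = 4096, .nodesize = 16384 };

        printf("data subpage: %d\n", is_subpage(&fs, false));  /* 1 */
        printf("meta subpage: %d\n", is_subpage(&fs, true));   /* 0 */
        return 0;
    }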
91 int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info, in btrfs_attach_subpage() argument
104 if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio)) in btrfs_attach_subpage()
107 subpage = btrfs_alloc_subpage(fs_info, type); in btrfs_attach_subpage()
115 void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio) in btrfs_detach_subpage() argument
120 if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio)) in btrfs_detach_subpage()
128 struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info, in btrfs_alloc_subpage() argument
134 ASSERT(fs_info->sectorsize < PAGE_SIZE); in btrfs_alloc_subpage()
137 BITS_TO_LONGS(btrfs_bitmap_nr_max * fs_info->sectors_per_page)); in btrfs_alloc_subpage()
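Source line 137 sizes the one flat allocation that backs every named bitmap: btrfs_bitmap_nr_max slices of sectors_per_page bits each, rounded up to whole longs by BITS_TO_LONGS(). A quick arithmetic sketch with assumed values (4K sectors on a 16K page, six named bitmaps):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
        /* Assumed geometry: 4K sectors on a 16K page. */
        unsigned int sectors_per_page = 16384 / 4096;   /* 4 */
        unsigned int nr_bitmaps = 6;                    /* assumed number of named bitmaps */

        /* One flat allocation holds all named bitmaps back to back. */
        printf("bits = %u, longs = %zu\n",
               nr_bitmaps * sectors_per_page,
               BITS_TO_LONGS(nr_bitmaps * sectors_per_page));
        return 0;
    }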
166 void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio) in btrfs_folio_inc_eb_refs() argument
170 if (!btrfs_is_subpage(fs_info, folio->mapping)) in btrfs_folio_inc_eb_refs()
180 void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio) in btrfs_folio_dec_eb_refs() argument
184 if (!btrfs_is_subpage(fs_info, folio->mapping)) in btrfs_folio_dec_eb_refs()
195 static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info, in btrfs_subpage_assert() argument
203 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && in btrfs_subpage_assert()
204 IS_ALIGNED(len, fs_info->sectorsize)); in btrfs_subpage_assert()
214 #define subpage_calc_start_bit(fs_info, folio, name, start, len) \ argument
218 btrfs_subpage_assert(fs_info, folio, start, len); \
219 __start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
220 __start_bit += fs_info->sectors_per_page * btrfs_bitmap_nr_##name; \
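The macro at source lines 214-220 maps a byte range onto a bit offset: the in-page byte offset shifted right by sectorsize_bits picks the first sector, and the named bitmap is selected by skipping btrfs_bitmap_nr_##name groups of sectors_per_page bits. A standalone sketch of the same arithmetic; the geometry and the bitmap indices are assumptions, not the kernel's enum values:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 16384u                /* assumed 16K page */
    #define SECTORSIZE_BITS 12              /* assumed 4K sectors */
    #define SECTORS_PER_PAGE (PAGE_SIZE >> SECTORSIZE_BITS)

    /* Hypothetical bitmap indices; the kernel uses the btrfs_bitmap_nr_* enum. */
    enum { NR_UPTODATE, NR_DIRTY, NR_LOCKED };

    /*
     * Models the calculation at source lines 219-220:
     *   bit = (start % PAGE_SIZE) / sectorsize + bitmap_nr * sectors_per_page
     */
    static unsigned int calc_start_bit(uint64_t start, unsigned int bitmap_nr)
    {
        unsigned int bit = (unsigned int)((start & (PAGE_SIZE - 1)) >> SECTORSIZE_BITS);

        return bit + bitmap_nr * SECTORS_PER_PAGE;
    }

    int main(void)
    {
        /*
         * A range starting 8K into its 16K page covers sector 2 of that page,
         * so its "dirty" bit lives at 1 * 4 + 2 = 6 in the combined bitmap.
         */
        printf("dirty start bit = %u\n", calc_start_bit(0x2a000, NR_DIRTY));
        return 0;
    }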
224 void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info, in btrfs_subpage_start_reader() argument
228 const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); in btrfs_subpage_start_reader()
229 const int nbits = len >> fs_info->sectorsize_bits; in btrfs_subpage_start_reader()
233 btrfs_subpage_assert(fs_info, folio, start, len); in btrfs_subpage_start_reader()
246 void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info, in btrfs_subpage_end_reader() argument
250 const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); in btrfs_subpage_end_reader()
251 const int nbits = len >> fs_info->sectorsize_bits; in btrfs_subpage_end_reader()
256 btrfs_subpage_assert(fs_info, folio, start, len); in btrfs_subpage_end_reader()
298 static void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info, in btrfs_subpage_start_writer() argument
302 const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); in btrfs_subpage_start_writer()
303 const int nbits = (len >> fs_info->sectorsize_bits); in btrfs_subpage_start_writer()
307 btrfs_subpage_assert(fs_info, folio, start, len); in btrfs_subpage_start_writer()
318 static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info, in btrfs_subpage_end_and_test_writer() argument
322 const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); in btrfs_subpage_end_and_test_writer()
323 const int nbits = (len >> fs_info->sectorsize_bits); in btrfs_subpage_end_and_test_writer()
329 btrfs_subpage_assert(fs_info, folio, start, len); in btrfs_subpage_end_and_test_writer()
364 int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info, in btrfs_folio_start_writer_lock() argument
367 if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) { in btrfs_folio_start_writer_lock()
377 btrfs_subpage_start_writer(fs_info, folio, start, len); in btrfs_folio_start_writer_lock()
397 void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info, in btrfs_folio_end_writer_lock() argument
404 if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) { in btrfs_folio_end_writer_lock()
423 if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len)) in btrfs_folio_end_writer_lock()
427 void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info, in btrfs_folio_end_writer_lock_bitmap() argument
431 const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked; in btrfs_folio_end_writer_lock_bitmap()
437 if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) { in btrfs_folio_end_writer_lock_bitmap()
449 for_each_set_bit(bit, &bitmap, fs_info->sectors_per_page) { in btrfs_folio_end_writer_lock_bitmap()
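Source line 449 walks every set bit of the passed-in locked bitmap so each still-locked sector can be released. A userspace sketch of that walk, with a made-up bitmap value:

    #include <stdio.h>

    int main(void)
    {
        /* Made-up "locked" bitmap for a page with 8 sectors: bits 1, 2 and 5 set. */
        unsigned long bitmap = 0x26;
        unsigned int sectors_per_page = 8;

        /*
         * Mirrors the for_each_set_bit() walk at source line 449: visit each
         * still-locked sector so its lock can be dropped.
         */
        for (unsigned int bit = 0; bit < sectors_per_page; bit++)
            if (bitmap & (1UL << bit))
                printf("unlock sector %u\n", bit);
        return 0;
    }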
460 #define subpage_test_bitmap_all_set(fs_info, subpage, name) \ argument
462 fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
463 fs_info->sectors_per_page)
465 #define subpage_test_bitmap_all_zero(fs_info, subpage, name) \ argument
467 fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
468 fs_info->sectors_per_page)
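The two macros at source lines 460-468 ask whether the whole per-page slice of one named bitmap is set or clear; that answer is what lets the setters (e.g. line 480) and clearers (e.g. line 534) decide when to update the folio-level flag. A small sketch of the all-set check, with stand-ins for the kernel's bitmap range helpers:

    #include <stdbool.h>
    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Minimal stand-ins for the kernel's bitmap range helpers. */
    static bool test_bit(const unsigned long *map, unsigned int nr)
    {
        return map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
    }

    static bool range_all_set(const unsigned long *map, unsigned int start,
                              unsigned int nbits)
    {
        for (unsigned int i = start; i < start + nbits; i++)
            if (!test_bit(map, i))
                return false;
        return true;
    }

    int main(void)
    {
        /* Assumed layout: 4 sectors per page, "uptodate" is bitmap index 0. */
        unsigned int sectors_per_page = 4;
        unsigned long bitmaps[1] = { 0x0f };    /* all four uptodate bits set */

        /* Analogue of subpage_test_bitmap_all_set(fs_info, subpage, uptodate). */
        printf("all uptodate: %d\n",
               range_all_set(bitmaps, 0 * sectors_per_page, sectors_per_page));
        return 0;
    }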
470 void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info, in btrfs_subpage_set_uptodate() argument
474 unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, in btrfs_subpage_set_uptodate()
479 bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); in btrfs_subpage_set_uptodate()
480 if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate)) in btrfs_subpage_set_uptodate()
485 void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info, in btrfs_subpage_clear_uptodate() argument
489 unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, in btrfs_subpage_clear_uptodate()
494 bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); in btrfs_subpage_clear_uptodate()
499 void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info, in btrfs_subpage_set_dirty() argument
503 unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, in btrfs_subpage_set_dirty()
508 bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); in btrfs_subpage_set_dirty()
523 bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info, in btrfs_subpage_clear_and_test_dirty() argument
527 unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, in btrfs_subpage_clear_and_test_dirty()
533 bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); in btrfs_subpage_clear_and_test_dirty()
534 if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty)) in btrfs_subpage_clear_and_test_dirty()
540 void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info, in btrfs_subpage_clear_dirty() argument
545 last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len); in btrfs_subpage_clear_dirty()
550 void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info, in btrfs_subpage_set_writeback() argument
554 unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, in btrfs_subpage_set_writeback()
559 bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); in btrfs_subpage_set_writeback()
565 void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info, in btrfs_subpage_clear_writeback() argument
569 unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, in btrfs_subpage_clear_writeback()
574 bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); in btrfs_subpage_clear_writeback()
575 if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) { in btrfs_subpage_clear_writeback()
582 void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info, in btrfs_subpage_set_ordered() argument
586 unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, in btrfs_subpage_set_ordered()
591 bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); in btrfs_subpage_set_ordered()
596 void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info, in btrfs_subpage_clear_ordered() argument
600 unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, in btrfs_subpage_clear_ordered()
605 bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); in btrfs_subpage_clear_ordered()
606 if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered)) in btrfs_subpage_clear_ordered()
611 void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info, in btrfs_subpage_set_checked() argument
615 unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, in btrfs_subpage_set_checked()
620 bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); in btrfs_subpage_set_checked()
621 if (subpage_test_bitmap_all_set(fs_info, subpage, checked)) in btrfs_subpage_set_checked()
626 void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info, in btrfs_subpage_clear_checked() argument
630 unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, in btrfs_subpage_clear_checked()
635 bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); in btrfs_subpage_clear_checked()
645 bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \
649 unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, \
656 len >> fs_info->sectorsize_bits); \
673 void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info, \
676 if (unlikely(!fs_info) || \
677 !btrfs_is_subpage(fs_info, folio->mapping)) { \
681 btrfs_subpage_set_##name(fs_info, folio, start, len); \
683 void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info, \
686 if (unlikely(!fs_info) || \
687 !btrfs_is_subpage(fs_info, folio->mapping)) { \
691 btrfs_subpage_clear_##name(fs_info, folio, start, len); \
693 bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info, \
696 if (unlikely(!fs_info) || \
697 !btrfs_is_subpage(fs_info, folio->mapping)) \
699 return btrfs_subpage_test_##name(fs_info, folio, start, len); \
701 void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
704 if (unlikely(!fs_info) || \
705 !btrfs_is_subpage(fs_info, folio->mapping)) { \
710 btrfs_subpage_set_##name(fs_info, folio, start, len); \
712 void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
715 if (unlikely(!fs_info) || \
716 !btrfs_is_subpage(fs_info, folio->mapping)) { \
721 btrfs_subpage_clear_##name(fs_info, folio, start, len); \
723 bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
726 if (unlikely(!fs_info) || \
727 !btrfs_is_subpage(fs_info, folio->mapping)) \
730 return btrfs_subpage_test_##name(fs_info, folio, start, len); \
747 void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, in btrfs_folio_assert_not_dirty() argument
758 if (!btrfs_is_subpage(fs_info, folio->mapping)) { in btrfs_folio_assert_not_dirty()
763 start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len); in btrfs_folio_assert_not_dirty()
764 nbits = len >> fs_info->sectorsize_bits; in btrfs_folio_assert_not_dirty()
779 void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info, in btrfs_folio_set_writer_lock() argument
789 if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) in btrfs_folio_set_writer_lock()
793 start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); in btrfs_folio_set_writer_lock()
794 nbits = len >> fs_info->sectorsize_bits; in btrfs_folio_set_writer_lock()
800 ASSERT(ret <= fs_info->sectors_per_page); in btrfs_folio_set_writer_lock()
812 bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info, in btrfs_subpage_find_writer_locked() argument
817 const u32 sectors_per_page = fs_info->sectors_per_page; in btrfs_subpage_find_writer_locked()
819 const unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, in btrfs_subpage_find_writer_locked()
837 ((first_set - locked_bitmap_start) << fs_info->sectorsize_bits); in btrfs_subpage_find_writer_locked()
845 *found_len_ret = (first_zero - first_set) << fs_info->sectorsize_bits; in btrfs_subpage_find_writer_locked()
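Source lines 837 and 845 convert bit positions back into a byte range: the distance from the start of the locked slice to the first set bit, shifted by sectorsize_bits, gives the offset of the locked range within the folio, and the run up to the first clear bit gives its length. A quick sketch with assumed values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Assumed geometry: 16K folio at file offset 0x40000, 4K sectors. */
        const unsigned int sectorsize_bits = 12;
        const uint64_t folio_start = 0x40000;
        const unsigned int locked_bitmap_start = 8;     /* first bit of the "locked" slice */

        /* Say bits 9 and 10 of the combined bitmap are set: sectors 1-2 are locked. */
        unsigned int first_set = 9, first_zero = 11;

        uint64_t found_start = folio_start +
                ((uint64_t)(first_set - locked_bitmap_start) << sectorsize_bits);
        unsigned int found_len = (first_zero - first_set) << sectorsize_bits;

        /* Prints start=0x41000 len=0x2000. */
        printf("locked range: start=0x%llx len=0x%x\n",
               (unsigned long long)found_start, found_len);
        return 0;
    }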
851 #define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst) \ argument
853 const int sectors_per_page = fs_info->sectors_per_page; \
861 void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, in btrfs_subpage_dump_bitmap() argument
865 const u32 sectors_per_page = fs_info->sectors_per_page; in btrfs_subpage_dump_bitmap()
878 GET_SUBPAGE_BITMAP(subpage, fs_info, uptodate, &uptodate_bitmap); in btrfs_subpage_dump_bitmap()
879 GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, &dirty_bitmap); in btrfs_subpage_dump_bitmap()
880 GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap); in btrfs_subpage_dump_bitmap()
881 GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap); in btrfs_subpage_dump_bitmap()
882 GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap); in btrfs_subpage_dump_bitmap()
883 GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &locked_bitmap); in btrfs_subpage_dump_bitmap()
887 btrfs_warn(fs_info, in btrfs_subpage_dump_bitmap()
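GET_SUBPAGE_BITMAP() (source line 851) copies one named slice of the combined bitmap into a plain unsigned long so the warning at line 887 can print each flag type separately. A userspace sketch of extracting such a slice; the layout values are assumptions:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /*
     * Copy @nbits bits starting at @start from @map into one word, roughly
     * what GET_SUBPAGE_BITMAP() does for a single named slice.
     */
    static unsigned long read_slice(const unsigned long *map, unsigned int start,
                                    unsigned int nbits)
    {
        unsigned long val = 0;

        for (unsigned int i = 0; i < nbits; i++)
            if (map[(start + i) / BITS_PER_LONG] &
                (1UL << ((start + i) % BITS_PER_LONG)))
                val |= 1UL << i;
        return val;
    }

    int main(void)
    {
        /*
         * Assumed layout: 4 sectors per page, "dirty" is bitmap index 1, so
         * its slice occupies bits 4-7 of the combined bitmap.
         */
        unsigned long bitmaps[1] = { 0x30 };    /* sectors 0 and 1 dirty */

        printf("dirty slice = 0x%lx\n", read_slice(bitmaps, 4, 4));    /* 0x3 */
        return 0;
    }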
897 void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info, in btrfs_get_subpage_dirty_bitmap() argument
905 ASSERT(fs_info->sectors_per_page > 1); in btrfs_get_subpage_dirty_bitmap()
909 GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, ret_bitmap); in btrfs_get_subpage_dirty_bitmap()