Lines matching full:chunk. Single-line matches from the XFS zoned-device garbage collection code; each entry gives the source line number, the matching line, and, where one applies, the enclosing function.
72 * Chunk that is read and written for each GC operation.
74 * Note that for writes to actual zoned devices, the chunk can be split when
97 * GC chunk is operating on.
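The comment fragments above and the field accesses in the matches below together describe the per-chunk descriptor the GC pipeline works with. The following is a simplified, hypothetical user-space sketch reconstructed only from the lines shown in this listing; the names mirror the matches, but the types are guesses and the kernel's actual struct xfs_gc_bio (with its embedded bio, list linkage, and scratch/folio plumbing) is not reproduced.

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical, simplified model of the per-chunk state implied by the
 * matches in this listing; it is not the kernel's struct xfs_gc_bio.
 */
enum gc_bio_state_model {
        GC_BIO_NEW,     /* queued, I/O submitted or about to be */
        GC_BIO_DONE     /* bio completion handler has run */
};

struct gc_chunk_model {
        enum gc_bio_state_model state;  /* published with WRITE_ONCE()-style stores */
        uint64_t offset;                /* byte offset of the data in the file */
        uint64_t len;                   /* byte length covered by this chunk */
        uint64_t old_startblock;        /* filesystem block the data is moved from */
        uint64_t new_daddr;             /* device address the data is moved to */
        bool is_seq;                    /* destination zone requires zone append */
        /*
         * The real structure additionally carries the owning inode (ip), the
         * scratch buffer pointer, the open zone (oz), the victim rtgroup, a
         * back pointer to the GC data, list linkage (entry) and the embedded
         * struct bio, none of which are modelled here.
         */
};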
615 struct xfs_gc_bio *chunk = in xfs_zone_gc_end_io() local
617 struct xfs_zone_gc_data *data = chunk->data; in xfs_zone_gc_end_io()
619 WRITE_ONCE(chunk->state, XFS_GC_BIO_DONE); in xfs_zone_gc_end_io()
678 struct xfs_gc_bio *chunk; in xfs_zone_gc_start_chunk() local
698 chunk = container_of(bio, struct xfs_gc_bio, bio); in xfs_zone_gc_start_chunk()
699 chunk->ip = ip; in xfs_zone_gc_start_chunk()
700 chunk->offset = XFS_FSB_TO_B(mp, irec.rm_offset); in xfs_zone_gc_start_chunk()
701 chunk->len = XFS_FSB_TO_B(mp, irec.rm_blockcount); in xfs_zone_gc_start_chunk()
702 chunk->old_startblock = in xfs_zone_gc_start_chunk()
704 chunk->new_daddr = daddr; in xfs_zone_gc_start_chunk()
705 chunk->is_seq = is_seq; in xfs_zone_gc_start_chunk()
706 chunk->scratch = &data->scratch[data->scratch_idx]; in xfs_zone_gc_start_chunk()
707 chunk->data = data; in xfs_zone_gc_start_chunk()
708 chunk->oz = oz; in xfs_zone_gc_start_chunk()
709 chunk->victim_rtg = iter->victim_rtg; in xfs_zone_gc_start_chunk()
710 atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref); in xfs_zone_gc_start_chunk()
711 atomic_inc(&chunk->victim_rtg->rtg_gccount); in xfs_zone_gc_start_chunk()
713 bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock); in xfs_zone_gc_start_chunk()
715 bio_add_folio_nofail(bio, chunk->scratch->folio, chunk->len, in xfs_zone_gc_start_chunk()
716 chunk->scratch->offset); in xfs_zone_gc_start_chunk()
717 chunk->scratch->offset += chunk->len; in xfs_zone_gc_start_chunk()
718 if (chunk->scratch->offset == XFS_GC_CHUNK_SIZE) { in xfs_zone_gc_start_chunk()
722 WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW); in xfs_zone_gc_start_chunk()
723 list_add_tail(&chunk->entry, &data->reading); in xfs_zone_gc_start_chunk()
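The start_chunk matches show a read being prepared: the chunk records which file extent is being moved (offset, len, old_startblock), where it will be rewritten (new_daddr, oz), takes references on the victim rtgroup, carves its buffer space out of the current scratch folio, and is queued on the reading list in the NEW state. The sketch below models only the scratch carving; GC_CHUNK_SIZE_MODEL and carve_scratch() are invented stand-ins, and what the kernel does when the buffer fills up (the "offset == XFS_GC_CHUNK_SIZE" branch) is not visible in these matches, so it is only noted as a comment.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Invented size; the kernel bounds the scratch buffer by XFS_GC_CHUNK_SIZE. */
#define GC_CHUNK_SIZE_MODEL (1024u * 1024u)

struct gc_scratch_model {
        uint32_t offset;        /* bytes of the scratch buffer handed out so far */
};

/*
 * Model of the scratch carving done when a read chunk is started: the new
 * chunk borrows [offset, offset + len) of the current scratch buffer and
 * the offset advances.
 */
static uint32_t carve_scratch(struct gc_scratch_model *s, uint32_t len)
{
        uint32_t chunk_off = s->offset;

        assert(s->offset + len <= GC_CHUNK_SIZE_MODEL);
        s->offset += len;
        if (s->offset == GC_CHUNK_SIZE_MODEL) {
                /* the kernel moves on to another scratch buffer here */
        }
        return chunk_off;
}

int main(void)
{
        struct gc_scratch_model s = { 0 };

        printf("first chunk reads into scratch offset %u\n",
               carve_scratch(&s, 64 * 1024));
        printf("second chunk reads into scratch offset %u\n",
               carve_scratch(&s, 128 * 1024));
        return 0;
}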
732 struct xfs_gc_bio *chunk) in xfs_zone_gc_free_chunk() argument
734 atomic_dec(&chunk->victim_rtg->rtg_gccount); in xfs_zone_gc_free_chunk()
735 xfs_rtgroup_rele(chunk->victim_rtg); in xfs_zone_gc_free_chunk()
736 list_del(&chunk->entry); in xfs_zone_gc_free_chunk()
737 xfs_open_zone_put(chunk->oz); in xfs_zone_gc_free_chunk()
738 xfs_irele(chunk->ip); in xfs_zone_gc_free_chunk()
739 bio_put(&chunk->bio); in xfs_zone_gc_free_chunk()
745 struct xfs_gc_bio *chunk) in xfs_zone_gc_submit_write() argument
747 if (chunk->is_seq) { in xfs_zone_gc_submit_write()
748 chunk->bio.bi_opf &= ~REQ_OP_WRITE; in xfs_zone_gc_submit_write()
749 chunk->bio.bi_opf |= REQ_OP_ZONE_APPEND; in xfs_zone_gc_submit_write()
751 chunk->bio.bi_iter.bi_sector = chunk->new_daddr; in xfs_zone_gc_submit_write()
752 chunk->bio.bi_end_io = xfs_zone_gc_end_io; in xfs_zone_gc_submit_write()
753 submit_bio(&chunk->bio); in xfs_zone_gc_submit_write()
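The submit matches show one device-specific twist on the write side: when the destination requires sequential writes (is_seq), the bio's operation is switched from a plain write to a zone append before submission, so the device chooses the final location and reports it back at completion (picked up again in the finish_chunk matches further down), while bi_sector is set from new_daddr in both cases. A minimal model of that decision, with invented op codes standing in for REQ_OP_WRITE and REQ_OP_ZONE_APPEND:

#include <stdbool.h>
#include <stdint.h>

/* Invented op codes standing in for REQ_OP_WRITE and REQ_OP_ZONE_APPEND. */
enum model_op {
        MODEL_OP_WRITE,
        MODEL_OP_ZONE_APPEND
};

struct gc_write_model {
        enum model_op op;
        uint64_t sector;        /* bi_sector: target address for the write */
};

/*
 * Model of the submit decision: conventional destinations get a plain
 * write, sequential-required zones get a zone append; the sector is set
 * from new_daddr either way.
 */
static void prepare_gc_write(struct gc_write_model *w, bool is_seq,
                             uint64_t new_daddr)
{
        w->op = is_seq ? MODEL_OP_ZONE_APPEND : MODEL_OP_WRITE;
        w->sector = new_daddr;
}

int main(void)
{
        struct gc_write_model w;

        prepare_gc_write(&w, true, 4096);       /* sequential zone */
        return w.op == MODEL_OP_ZONE_APPEND ? 0 : 1;
}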
759 struct xfs_gc_bio *chunk) in xfs_zone_gc_split_write() argument
762 &bdev_get_queue(chunk->bio.bi_bdev)->limits; in xfs_zone_gc_split_write()
769 if (!chunk->is_seq) in xfs_zone_gc_split_write()
772 split_sectors = bio_split_rw_at(&chunk->bio, lim, &nsegs, in xfs_zone_gc_split_write()
777 /* ensure the split chunk is still block size aligned */ in xfs_zone_gc_split_write()
782 split = bio_split(&chunk->bio, split_sectors, GFP_NOFS, &data->bio_set); in xfs_zone_gc_split_write()
785 ihold(VFS_I(chunk->ip)); in xfs_zone_gc_split_write()
786 split_chunk->ip = chunk->ip; in xfs_zone_gc_split_write()
787 split_chunk->is_seq = chunk->is_seq; in xfs_zone_gc_split_write()
788 split_chunk->scratch = chunk->scratch; in xfs_zone_gc_split_write()
789 split_chunk->offset = chunk->offset; in xfs_zone_gc_split_write()
791 split_chunk->old_startblock = chunk->old_startblock; in xfs_zone_gc_split_write()
792 split_chunk->new_daddr = chunk->new_daddr; in xfs_zone_gc_split_write()
793 split_chunk->oz = chunk->oz; in xfs_zone_gc_split_write()
794 atomic_inc(&chunk->oz->oz_ref); in xfs_zone_gc_split_write()
796 split_chunk->victim_rtg = chunk->victim_rtg; in xfs_zone_gc_split_write()
797 atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref); in xfs_zone_gc_split_write()
798 atomic_inc(&chunk->victim_rtg->rtg_gccount); in xfs_zone_gc_split_write()
800 chunk->offset += split_len; in xfs_zone_gc_split_write()
801 chunk->len -= split_len; in xfs_zone_gc_split_write()
802 chunk->old_startblock += XFS_B_TO_FSB(data->mp, split_len); in xfs_zone_gc_split_write()
804 /* add right before the original chunk */ in xfs_zone_gc_split_write()
806 list_add_tail(&split_chunk->entry, &chunk->entry); in xfs_zone_gc_split_write()
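The split_write matches show what happens when a zone-append write is larger than the device will take in one bio: only sequential chunks are split (conventional writes return early), the split length must stay filesystem-block aligned, the front piece becomes a new chunk that inherits the inode, scratch, destination and reference counts, and it is linked in directly before the original, presumably so the pieces are processed in file order; the original is then advanced past the split. The worked example below models only that advance arithmetic; FSB_SIZE_MODEL and split_front() are invented stand-ins, the real block size comes from the mount, and the choice of split_len (bio_split_rw_at() against the queue limits) is not modelled.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Invented block size; the real XFS_B_TO_FSB() uses the mount's block size. */
#define FSB_SIZE_MODEL 4096u

struct gc_split_model {
        uint64_t offset;         /* file offset of the chunk, in bytes */
        uint64_t len;            /* remaining length, in bytes */
        uint64_t old_startblock; /* source location, in filesystem blocks */
};

/*
 * Model of the bookkeeping when a chunk is split: the front split_len bytes
 * become a new chunk and the original is advanced past them.
 */
static struct gc_split_model split_front(struct gc_split_model *c,
                                         uint64_t split_len)
{
        struct gc_split_model front = *c;

        assert(split_len > 0 && split_len < c->len);
        assert(split_len % FSB_SIZE_MODEL == 0); /* keep the split block aligned */

        front.len = split_len;
        c->offset += split_len;
        c->len -= split_len;
        c->old_startblock += split_len / FSB_SIZE_MODEL;
        return front;
}

int main(void)
{
        struct gc_split_model c = { .offset = 0, .len = 1024 * 1024,
                                    .old_startblock = 100 };
        struct gc_split_model front = split_front(&c, 256 * 1024);

        printf("front: offset=%llu len=%llu startblock=%llu\n",
               (unsigned long long)front.offset, (unsigned long long)front.len,
               (unsigned long long)front.old_startblock);
        printf("rest:  offset=%llu len=%llu startblock=%llu\n",
               (unsigned long long)c.offset, (unsigned long long)c.len,
               (unsigned long long)c.old_startblock);
        return 0;
}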
812 struct xfs_gc_bio *chunk) in xfs_zone_gc_write_chunk() argument
814 struct xfs_zone_gc_data *data = chunk->data; in xfs_zone_gc_write_chunk()
815 struct xfs_mount *mp = chunk->ip->i_mount; in xfs_zone_gc_write_chunk()
817 bvec_phys(bio_first_bvec_all(&chunk->bio)); in xfs_zone_gc_write_chunk()
820 if (chunk->bio.bi_status) in xfs_zone_gc_write_chunk()
823 xfs_zone_gc_free_chunk(chunk); in xfs_zone_gc_write_chunk()
827 WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW); in xfs_zone_gc_write_chunk()
828 list_move_tail(&chunk->entry, &data->writing); in xfs_zone_gc_write_chunk()
830 bio_reset(&chunk->bio, mp->m_rtdev_targp->bt_bdev, REQ_OP_WRITE); in xfs_zone_gc_write_chunk()
831 bio_add_folio_nofail(&chunk->bio, chunk->scratch->folio, chunk->len, in xfs_zone_gc_write_chunk()
832 offset_in_folio(chunk->scratch->folio, bvec_paddr)); in xfs_zone_gc_write_chunk()
834 while ((split_chunk = xfs_zone_gc_split_write(data, chunk))) in xfs_zone_gc_write_chunk()
836 xfs_zone_gc_submit_write(data, chunk); in xfs_zone_gc_write_chunk()
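The write_chunk matches run when a read bio completes: on error the chunk is freed, otherwise it moves from the reading to the writing list, the same bio is reset as a write against the real-time device and repointed at the scratch data that was just read, and it is split as many times as needed before being submitted. A self-contained sketch of that submit loop; split_for_device() is an invented stand-in that simply caps each write at a made-up 256 KiB, whereas the real limit comes from the queue limits.

#include <stdio.h>

#define DEV_WRITE_LIMIT_MODEL (256ull * 1024)  /* invented per-write cap */

struct gc_chunk_len_model {
        unsigned long long len; /* bytes still to write from this chunk */
};

/* Stand-in for the kernel's split helper: peel off a front piece if needed. */
static int split_for_device(struct gc_chunk_len_model *c,
                            struct gc_chunk_len_model *front)
{
        if (c->len <= DEV_WRITE_LIMIT_MODEL)
                return 0;                       /* no split needed */
        front->len = DEV_WRITE_LIMIT_MODEL;
        c->len -= DEV_WRITE_LIMIT_MODEL;
        return 1;
}

static void submit_write_model(const struct gc_chunk_len_model *c)
{
        printf("submit write of %llu bytes\n", c->len);
}

int main(void)
{
        struct gc_chunk_len_model c = { .len = 1000 * 1024 };
        struct gc_chunk_len_model front;

        while (split_for_device(&c, &front))
                submit_write_model(&front);     /* split pieces go out first */
        submit_write_model(&c);                 /* then the remainder */
        return 0;
}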
841 struct xfs_gc_bio *chunk) in xfs_zone_gc_finish_chunk() argument
844 struct xfs_inode *ip = chunk->ip; in xfs_zone_gc_finish_chunk()
848 if (chunk->bio.bi_status) in xfs_zone_gc_finish_chunk()
851 xfs_zone_gc_free_chunk(chunk); in xfs_zone_gc_finish_chunk()
855 chunk->scratch->freed += chunk->len; in xfs_zone_gc_finish_chunk()
856 if (chunk->scratch->freed == chunk->scratch->offset) { in xfs_zone_gc_finish_chunk()
857 chunk->scratch->offset = 0; in xfs_zone_gc_finish_chunk()
858 chunk->scratch->freed = 0; in xfs_zone_gc_finish_chunk()
877 if (chunk->is_seq) in xfs_zone_gc_finish_chunk()
878 chunk->new_daddr = chunk->bio.bi_iter.bi_sector; in xfs_zone_gc_finish_chunk()
879 error = xfs_zoned_end_io(ip, chunk->offset, chunk->len, in xfs_zone_gc_finish_chunk()
880 chunk->new_daddr, chunk->oz, chunk->old_startblock); in xfs_zone_gc_finish_chunk()
884 xfs_zone_gc_free_chunk(chunk); in xfs_zone_gc_finish_chunk()
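The finish_chunk matches run at write completion: the scratch accounting is updated (each finished chunk returns its bytes, and once everything handed out has come back the buffer is recycled), a zone-append write reports the address it actually landed at through bi_sector and that becomes new_daddr, and xfs_zoned_end_io() then remaps the file extent from old_startblock to the new location. The sketch below models only the scratch recycling; gc_chunk_finished() and struct gc_scratch_acct_model are invented stand-ins.

#include <assert.h>
#include <stdint.h>

struct gc_scratch_acct_model {
        uint32_t offset;        /* bytes handed out to read chunks */
        uint32_t freed;         /* bytes whose GC writes have finished */
};

/*
 * Model of the scratch recycling on write completion: every finished chunk
 * returns its bytes, and once everything that was handed out has come back
 * the buffer is reset so it can be refilled from the start.
 */
static void gc_chunk_finished(struct gc_scratch_acct_model *s, uint32_t len)
{
        s->freed += len;
        assert(s->freed <= s->offset);
        if (s->freed == s->offset) {
                s->offset = 0;
                s->freed = 0;
        }
}

int main(void)
{
        struct gc_scratch_acct_model s = { .offset = 64 * 1024 + 128 * 1024 };

        gc_chunk_finished(&s, 64 * 1024);
        gc_chunk_finished(&s, 128 * 1024);
        assert(s.offset == 0 && s.freed == 0);  /* fully recycled */
        return 0;
}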
889 struct xfs_gc_bio *chunk) in xfs_zone_gc_finish_reset() argument
891 struct xfs_rtgroup *rtg = chunk->bio.bi_private; in xfs_zone_gc_finish_reset()
895 if (chunk->bio.bi_status) { in xfs_zone_gc_finish_reset()
907 list_del(&chunk->entry); in xfs_zone_gc_finish_reset()
908 bio_put(&chunk->bio); in xfs_zone_gc_finish_reset()
961 struct xfs_gc_bio *chunk; in xfs_zone_gc_reset_zones() local
974 chunk = container_of(bio, struct xfs_gc_bio, bio); in xfs_zone_gc_reset_zones()
975 chunk->data = data; in xfs_zone_gc_reset_zones()
976 WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW); in xfs_zone_gc_reset_zones()
977 list_add_tail(&chunk->entry, &data->resetting); in xfs_zone_gc_reset_zones()
1023 struct xfs_gc_bio *chunk, *next; in xfs_zone_gc_handle_work() local
1037 list_for_each_entry_safe(chunk, next, &data->resetting, entry) { in xfs_zone_gc_handle_work()
1038 if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE) in xfs_zone_gc_handle_work()
1041 xfs_zone_gc_finish_reset(chunk); in xfs_zone_gc_handle_work()
1044 list_for_each_entry_safe(chunk, next, &data->writing, entry) { in xfs_zone_gc_handle_work()
1045 if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE) in xfs_zone_gc_handle_work()
1048 xfs_zone_gc_finish_chunk(chunk); in xfs_zone_gc_handle_work()
1052 list_for_each_entry_safe(chunk, next, &data->reading, entry) { in xfs_zone_gc_handle_work()
1053 if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE) in xfs_zone_gc_handle_work()
1056 xfs_zone_gc_write_chunk(chunk); in xfs_zone_gc_handle_work()
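The handle_work matches are the GC worker's polling loop: it walks the resetting, writing and reading lists in that order and only finishes a chunk once its state, published by the bio completion handler, reads as DONE; the WRITE_ONCE()/READ_ONCE() pairing visible in the matches carries that state handoff between the two sides. The loops appear to stop at the first chunk that has not completed yet, which would keep completions processed in submission order, although the break itself is not among the matching lines. Below is a user-space model of the handshake, with C11 relaxed atomics standing in for WRITE_ONCE()/READ_ONCE().

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Model of the completion handshake: the submitter publishes NEW before the
 * bio goes out, the end_io handler publishes DONE, and the worker only
 * touches a chunk once it observes DONE.
 */
enum gc_state_model { GC_STATE_NEW, GC_STATE_DONE };

struct gc_hs_model {
        _Atomic int state;
};

static void model_end_io(struct gc_hs_model *c)              /* bio completion side */
{
        atomic_store_explicit(&c->state, GC_STATE_DONE, memory_order_relaxed);
}

static bool model_worker_may_finish(struct gc_hs_model *c)   /* GC worker side */
{
        return atomic_load_explicit(&c->state, memory_order_relaxed) ==
               GC_STATE_DONE;
}

int main(void)
{
        struct gc_hs_model c;

        atomic_init(&c.state, GC_STATE_NEW);
        model_end_io(&c);
        return model_worker_may_finish(&c) ? 0 : 1;
}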