/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2023 Red Hat
 */

#ifndef VIO_H
#define VIO_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/list.h>

#include "completion.h"
#include "constants.h"
#include "types.h"
#include "vdo.h"

enum {
	MAX_BLOCKS_PER_VIO = (BIO_MAX_VECS << PAGE_SHIFT) / VDO_BLOCK_SIZE,
};

struct pooled_vio {
	/* The underlying vio */
	struct vio vio;
	/* The list entry for chaining pooled vios together */
	struct list_head list_entry;
	/* The context set by the pool */
	void *context;
	/* The list entry used by the pool */
	struct list_head pool_entry;
};

/**
 * as_vio() - Convert a generic vdo_completion to a vio.
 * @completion: The completion to convert.
 *
 * Return: The completion as a vio.
 */
static inline struct vio *as_vio(struct vdo_completion *completion)
{
	vdo_assert_completion_type(completion, VIO_COMPLETION);
	return container_of(completion, struct vio, completion);
}

/**
 * get_vio_bio_zone_thread_id() - Get the thread id of the bio zone in which a vio should submit
 *                                its I/O.
 * @vio: The vio.
 *
 * Return: The id of the bio zone thread the vio should use.
 */
static inline thread_id_t __must_check get_vio_bio_zone_thread_id(struct vio *vio)
{
	return vio->completion.vdo->thread_config.bio_threads[vio->bio_zone];
}

physical_block_number_t __must_check pbn_from_vio_bio(struct bio *bio);

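/*
 * Illustrative sketch, not part of the VDO API: a completion callback will
 * typically recover its vio from the generic vdo_completion it is handed.
 * The helper name below is hypothetical.
 */
static inline physical_block_number_t example_pbn_of_completion(struct vdo_completion *completion)
{
	/* Convert the embedded completion back to its enclosing vio. */
	struct vio *vio = as_vio(completion);

	/* Report the physical block the vio's bio is addressed to. */
	return pbn_from_vio_bio(vio->bio);
}
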
/**
 * assert_vio_in_bio_zone() - Check that a vio is running on the correct thread for its bio zone.
 * @vio: The vio to check.
 */
static inline void assert_vio_in_bio_zone(struct vio *vio)
{
	thread_id_t expected = get_vio_bio_zone_thread_id(vio);
	thread_id_t thread_id = vdo_get_callback_thread_id();

	ASSERT_LOG_ONLY((expected == thread_id),
			"vio I/O for physical block %llu on thread %u, should be on bio zone thread %u",
			(unsigned long long) pbn_from_vio_bio(vio->bio), thread_id,
			expected);
}

int vdo_create_bio(struct bio **bio_ptr);
void vdo_free_bio(struct bio *bio);
int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
			    enum vio_priority priority, void *parent,
			    unsigned int block_count, char *data, struct vio *vio);
int __must_check create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
						 enum vio_priority priority,
						 void *parent, unsigned int block_count,
						 char *data, struct vio **vio_ptr);

static inline int __must_check create_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
						   enum vio_priority priority,
						   void *parent, char *data,
						   struct vio **vio_ptr)
{
	return create_multi_block_metadata_vio(vdo, vio_type, priority, parent, 1, data,
					       vio_ptr);
}

void free_vio_components(struct vio *vio);
void free_vio(struct vio *vio);

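/*
 * Illustrative sketch, not part of the VDO API: a hypothetical helper showing
 * how create_metadata_vio() is typically called. The vio type and block-sized
 * buffer are supplied by the caller; VIO_PRIORITY_HIGH is just one of the
 * possible priorities, and the NULL parent is purely for illustration.
 */
static inline int example_create_single_block_vio(struct vdo *vdo,
						  enum vio_type vio_type,
						  char *block_buffer,
						  struct vio **vio_ptr)
{
	/*
	 * One block, no parent completion; the caller eventually releases the
	 * vio with free_vio().
	 */
	return create_metadata_vio(vdo, vio_type, VIO_PRIORITY_HIGH, NULL,
				   block_buffer, vio_ptr);
}
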
/**
 * initialize_vio() - Initialize a vio.
 * @vio: The vio to initialize.
 * @bio: The bio this vio should use for its I/O.
 * @block_count: The size of this vio in vdo blocks.
 * @vio_type: The vio type.
 * @priority: The relative priority of the vio.
 * @vdo: The vdo for this vio.
 */
static inline void initialize_vio(struct vio *vio, struct bio *bio,
				  unsigned int block_count, enum vio_type vio_type,
				  enum vio_priority priority, struct vdo *vdo)
{
	/* data_vios may not span multiple blocks */
	BUG_ON((vio_type == VIO_TYPE_DATA) && (block_count != 1));

	vio->bio = bio;
	vio->block_count = block_count;
	vio->type = vio_type;
	vio->priority = priority;
	vdo_initialize_completion(&vio->completion, vdo, VIO_COMPLETION);
}

void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callback,
			    unsigned int bi_opf, physical_block_number_t pbn);

int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
		  unsigned int bi_opf, physical_block_number_t pbn);

void update_vio_error_stats(struct vio *vio, const char *format, ...)
	__printf(2, 3);

/**
 * is_data_vio() - Check whether a vio is servicing an external data request.
 * @vio: The vio to check.
 *
 * Return: true if the vio is servicing an external data request.
 */
static inline bool is_data_vio(struct vio *vio)
{
	return (vio->type == VIO_TYPE_DATA);
}

/**
 * get_metadata_priority() - Convert a vio's priority to a work item priority.
 * @vio: The vio.
 *
 * Return: The priority with which to submit the vio's bio.
 */
static inline enum vdo_completion_priority get_metadata_priority(struct vio *vio)
{
	return ((vio->priority == VIO_PRIORITY_HIGH) ?
		BIO_Q_HIGH_PRIORITY :
		BIO_Q_METADATA_PRIORITY);
}

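/*
 * Illustrative sketch, not part of the VDO API: re-targeting an existing vio's
 * bio before resubmitting it. The end-io handler and physical block number are
 * placeholders supplied by the caller; REQ_OP_READ is the standard block-layer
 * read opcode.
 */
static inline int example_reset_vio_for_read(struct vio *vio, char *data,
					     bio_end_io_t callback,
					     physical_block_number_t pbn)
{
	/* Rebuild the vio's bio to read from pbn and deliver to callback. */
	return vio_reset_bio(vio, data, callback, REQ_OP_READ, pbn);
}
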
/**
 * continue_vio() - Enqueue a vio to run its next callback.
 * @vio: The vio to continue.
 * @result: The result of the current operation.
 */
static inline void continue_vio(struct vio *vio, int result)
{
	if (unlikely(result != VDO_SUCCESS))
		vdo_set_completion_result(&vio->completion, result);

	vdo_enqueue_completion(&vio->completion, VDO_WORK_Q_DEFAULT_PRIORITY);
}

void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio);
void vdo_count_completed_bios(struct bio *bio);

/**
 * continue_vio_after_io() - Continue a vio now that its I/O has returned.
 * @vio: The vio to continue.
 * @callback: The callback the vio should run once enqueued.
 * @thread: The id of the thread on which the callback should run.
 */
static inline void continue_vio_after_io(struct vio *vio, vdo_action_fn callback,
					 thread_id_t thread)
{
	vdo_count_completed_bios(vio->bio);
	vdo_set_completion_callback(&vio->completion, callback, thread);
	continue_vio(vio, blk_status_to_errno(vio->bio->bi_status));
}

void vio_record_metadata_io_error(struct vio *vio);

/* A vio_pool is a collection of preallocated vios used to write arbitrary metadata blocks. */

static inline struct pooled_vio *vio_as_pooled_vio(struct vio *vio)
{
	return container_of(vio, struct pooled_vio, vio);
}

struct vio_pool;

int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
			       enum vio_type vio_type, enum vio_priority priority,
			       void *context, struct vio_pool **pool_ptr);
void free_vio_pool(struct vio_pool *pool);
bool __must_check is_vio_pool_busy(struct vio_pool *pool);
void acquire_vio_from_pool(struct vio_pool *pool, struct waiter *waiter);
void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio);

#endif /* VIO_H */
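
/*
 * Usage note (illustrative, not part of the VDO API): a bio end-io handler
 * normally recovers its vio (for example from bio->bi_private, if the submit
 * path stored it there) and hands control back to the vdo thread system with
 * something like
 *
 *	static void example_endio(struct bio *bio)
 *	{
 *		struct vio *vio = bio->bi_private;
 *
 *		continue_vio_after_io(vio, example_next_step, example_thread_id);
 *	}
 *
 * Pooled vios follow a similar acquire/return cycle: a waiter passed to
 * acquire_vio_from_pool() is given a pooled_vio when one is free, and the vio
 * is handed back with return_vio_to_pool() once its metadata I/O completes.
 * example_endio, example_next_step, and example_thread_id are hypothetical.
 */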