1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright 2023 Red Hat
4 */
5
6 #ifndef VIO_H
7 #define VIO_H
8
9 #include <linux/bio.h>
10 #include <linux/blkdev.h>
11 #include <linux/compiler.h>
12 #include <linux/kernel.h>
13 #include <linux/list.h>
14
15 #include "completion.h"
16 #include "constants.h"
17 #include "types.h"
18 #include "vdo.h"
19
enum {
	/*
	 * The largest vio size, in vdo blocks: a vio's data is carried by a single bio, which
	 * holds at most BIO_MAX_VECS vectors of (at most) one page each, i.e.
	 * BIO_MAX_VECS << PAGE_SHIFT bytes.
	 */
	MAX_BLOCKS_PER_VIO = (BIO_MAX_VECS << PAGE_SHIFT) / VDO_BLOCK_SIZE,
};
23
/*
 * A vio wrapped for membership in a vio_pool. Note: vio must remain the first member so long as
 * code converts between the two types (see vio_as_pooled_vio(), which uses container_of()).
 */
struct pooled_vio {
	/* The underlying vio */
	struct vio vio;
	/* The list entry for chaining pooled vios together */
	struct list_head list_entry;
	/* The context set by the pool */
	void *context;
	/* The list entry used by the pool */
	struct list_head pool_entry;
};
34
35 /**
36 * as_vio() - Convert a generic vdo_completion to a vio.
37 * @completion: The completion to convert.
38 *
39 * Return: The completion as a vio.
40 */
as_vio(struct vdo_completion * completion)41 static inline struct vio *as_vio(struct vdo_completion *completion)
42 {
43 vdo_assert_completion_type(completion, VIO_COMPLETION);
44 return container_of(completion, struct vio, completion);
45 }
46
47 /**
48 * get_vio_bio_zone_thread_id() - Get the thread id of the bio zone in which a vio should submit
49 * its I/O.
50 * @vio: The vio.
51 *
52 * Return: The id of the bio zone thread the vio should use.
53 */
get_vio_bio_zone_thread_id(struct vio * vio)54 static inline thread_id_t __must_check get_vio_bio_zone_thread_id(struct vio *vio)
55 {
56 return vio->completion.vdo->thread_config.bio_threads[vio->bio_zone];
57 }
58
/*
 * Get the physical block number a vio's bio is directed at (used for error reporting; see
 * assert_vio_in_bio_zone()).
 */
physical_block_number_t __must_check pbn_from_vio_bio(struct bio *bio);
60
61 /**
62 * assert_vio_in_bio_zone() - Check that a vio is running on the correct thread for its bio zone.
63 * @vio: The vio to check.
64 */
assert_vio_in_bio_zone(struct vio * vio)65 static inline void assert_vio_in_bio_zone(struct vio *vio)
66 {
67 thread_id_t expected = get_vio_bio_zone_thread_id(vio);
68 thread_id_t thread_id = vdo_get_callback_thread_id();
69
70 VDO_ASSERT_LOG_ONLY((expected == thread_id),
71 "vio I/O for physical block %llu on thread %u, should be on bio zone thread %u",
72 (unsigned long long) pbn_from_vio_bio(vio->bio), thread_id,
73 expected);
74 }
75
/* Allocate a new bio for vdo's use, returned in *bio_ptr. */
int vdo_create_bio(struct bio **bio_ptr);
/* Free a bio obtained from vdo_create_bio(). */
void vdo_free_bio(struct bio *bio);
/* Initialize the components of an already-allocated (usually embedded) vio. */
int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
			    enum vio_priority priority, void *parent,
			    unsigned int block_count, char *data, struct vio *vio);
/* Allocate and initialize a metadata vio spanning block_count blocks, returned in *vio_ptr. */
int __must_check create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
						 enum vio_priority priority,
						 void *parent, unsigned int block_count,
						 char *data, struct vio **vio_ptr);
85
/**
 * create_metadata_vio() - Allocate and initialize a single-block metadata vio.
 * @vdo: The vdo for the new vio.
 * @vio_type: The vio type.
 * @priority: The relative priority of the vio.
 * @parent: The parent (context) pointer for the vio.
 * @data: The data buffer the vio should use.
 * @vio_ptr: A pointer to hold the new vio.
 *
 * A convenience wrapper: the multi-block case with a block_count of 1.
 *
 * Return: A result code (see create_multi_block_metadata_vio()).
 */
static inline int __must_check create_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
						   enum vio_priority priority,
						   void *parent, char *data,
						   struct vio **vio_ptr)
{
	return create_multi_block_metadata_vio(vdo, vio_type, priority, parent, 1, data,
					       vio_ptr);
}
94
/* Tear down the components of a vio without freeing the vio structure itself. */
void free_vio_components(struct vio *vio);
/* Tear down and free a vio. */
void free_vio(struct vio *vio);
97
98 /**
99 * initialize_vio() - Initialize a vio.
100 * @vio: The vio to initialize.
101 * @bio: The bio this vio should use for its I/O.
102 * @block_count: The size of this vio in vdo blocks.
103 * @vio_type: The vio type.
104 * @priority: The relative priority of the vio.
105 * @vdo: The vdo for this vio.
106 */
initialize_vio(struct vio * vio,struct bio * bio,unsigned int block_count,enum vio_type vio_type,enum vio_priority priority,struct vdo * vdo)107 static inline void initialize_vio(struct vio *vio, struct bio *bio,
108 unsigned int block_count, enum vio_type vio_type,
109 enum vio_priority priority, struct vdo *vdo)
110 {
111 /* data_vio's may not span multiple blocks */
112 BUG_ON((vio_type == VIO_TYPE_DATA) && (block_count != 1));
113
114 vio->bio = bio;
115 vio->block_count = block_count;
116 vio->type = vio_type;
117 vio->priority = priority;
118 vdo_initialize_completion(&vio->completion, vdo, VIO_COMPLETION);
119 }
120
/* Set a bio's fields (end_io callback, op flags, target pbn) for submission on behalf of vio. */
void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callback,
			    blk_opf_t bi_opf, physical_block_number_t pbn);

/* Reset a vio's bio to perform new I/O on data at pbn with the given callback and op flags. */
int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
		  blk_opf_t bi_opf, physical_block_number_t pbn);

/* Record an I/O error on a vio in the error statistics, logging a printf-style message. */
void update_vio_error_stats(struct vio *vio, const char *format, ...)
	__printf(2, 3);
129
130 /**
131 * is_data_vio() - Check whether a vio is servicing an external data request.
132 * @vio: The vio to check.
133 */
is_data_vio(struct vio * vio)134 static inline bool is_data_vio(struct vio *vio)
135 {
136 return (vio->type == VIO_TYPE_DATA);
137 }
138
139 /**
140 * get_metadata_priority() - Convert a vio's priority to a work item priority.
141 * @vio: The vio.
142 *
143 * Return: The priority with which to submit the vio's bio.
144 */
get_metadata_priority(struct vio * vio)145 static inline enum vdo_completion_priority get_metadata_priority(struct vio *vio)
146 {
147 return ((vio->priority == VIO_PRIORITY_HIGH) ?
148 BIO_Q_HIGH_PRIORITY :
149 BIO_Q_METADATA_PRIORITY);
150 }
151
152 /**
153 * continue_vio() - Enqueue a vio to run its next callback.
154 * @vio: The vio to continue.
155 *
156 * Return: The result of the current operation.
157 */
continue_vio(struct vio * vio,int result)158 static inline void continue_vio(struct vio *vio, int result)
159 {
160 if (unlikely(result != VDO_SUCCESS))
161 vdo_set_completion_result(&vio->completion, result);
162
163 vdo_enqueue_completion(&vio->completion, VDO_WORK_Q_DEFAULT_PRIORITY);
164 }
165
/* Accumulate statistics for the submission of bio into bio_stats. */
void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio);
/* Count the completion of a bio (called when its I/O returns; see continue_vio_after_io()). */
void vdo_count_completed_bios(struct bio *bio);
168
169 /**
170 * continue_vio_after_io() - Continue a vio now that its I/O has returned.
171 */
continue_vio_after_io(struct vio * vio,vdo_action_fn callback,thread_id_t thread)172 static inline void continue_vio_after_io(struct vio *vio, vdo_action_fn callback,
173 thread_id_t thread)
174 {
175 vdo_count_completed_bios(vio->bio);
176 vdo_set_completion_callback(&vio->completion, callback, thread);
177 continue_vio(vio, blk_status_to_errno(vio->bio->bi_status));
178 }
179
/* Record (and presumably log) an I/O error on a metadata vio — see implementation in vio.c. */
void vio_record_metadata_io_error(struct vio *vio);
181
/* A vio_pool is a collection of preallocated vios used to write arbitrary metadata blocks. */

/**
 * vio_as_pooled_vio() - Convert a vio to the pooled_vio which contains it.
 * @vio: The vio to convert; must be the vio member of a struct pooled_vio.
 *
 * Return: The enclosing pooled_vio.
 */
static inline struct pooled_vio *vio_as_pooled_vio(struct vio *vio)
{
	return container_of(vio, struct pooled_vio, vio);
}
188
/* Opaque; defined in vio.c. */
struct vio_pool;

/* Create a pool of pool_size vios of the given type and priority, owned by thread_id. */
int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
			       enum vio_type vio_type, enum vio_priority priority,
			       void *context, struct vio_pool **pool_ptr);
/* Destroy a vio pool and its vios. */
void free_vio_pool(struct vio_pool *pool);
/* Check whether any of the pool's vios are currently in use (or waited for). */
bool __must_check is_vio_pool_busy(struct vio_pool *pool);
/* Acquire a vio from the pool; the waiter is notified when one is available. */
void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter);
/* Return a vio to its pool. */
void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio);
198
199 #endif /* VIO_H */
200