xref: /linux/drivers/md/dm-vdo/vio.h (revision 5014bebee0cffda14fafae5a2534d08120b7b9e8)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright 2023 Red Hat
4  */
5 
6 #ifndef VIO_H
7 #define VIO_H
8 
9 #include <linux/bio.h>
10 #include <linux/blkdev.h>
11 #include <linux/compiler.h>
12 #include <linux/kernel.h>
13 #include <linux/list.h>
14 
15 #include "completion.h"
16 #include "constants.h"
17 #include "types.h"
18 #include "vdo.h"
19 
/*
 * The largest number of vdo blocks a single vio can span: bounded by the
 * maximum number of pages a bio's vector table can hold (BIO_MAX_VECS).
 */
enum {
	MAX_BLOCKS_PER_VIO = (BIO_MAX_VECS << PAGE_SHIFT) / VDO_BLOCK_SIZE,
};
23 
/* A vio which is managed by a vio_pool (see the pool API at the bottom of this header). */
struct pooled_vio {
	/* The underlying vio */
	struct vio vio;
	/* The list entry for chaining pooled vios together (used by pool clients) */
	struct list_head list_entry;
	/* The context set by the pool */
	void *context;
	/* The list entry used by the pool itself to track free vios */
	struct list_head pool_entry;
	/* The pool this vio is allocated from */
	struct vio_pool *pool;
};
36 
37 /**
38  * as_vio() - Convert a generic vdo_completion to a vio.
39  * @completion: The completion to convert.
40  *
41  * Return: The completion as a vio.
42  */
as_vio(struct vdo_completion * completion)43 static inline struct vio *as_vio(struct vdo_completion *completion)
44 {
45 	vdo_assert_completion_type(completion, VIO_COMPLETION);
46 	return container_of(completion, struct vio, completion);
47 }
48 
49 /**
50  * get_vio_bio_zone_thread_id() - Get the thread id of the bio zone in which a vio should submit
51  *                                its I/O.
52  * @vio: The vio.
53  *
54  * Return: The id of the bio zone thread the vio should use.
55  */
get_vio_bio_zone_thread_id(struct vio * vio)56 static inline thread_id_t __must_check get_vio_bio_zone_thread_id(struct vio *vio)
57 {
58 	return vio->completion.vdo->thread_config.bio_threads[vio->bio_zone];
59 }
60 
/* Extract the physical block number a vio's bio is addressed to (used in diagnostics below). */
physical_block_number_t __must_check pbn_from_vio_bio(struct bio *bio);
62 
63 /**
64  * assert_vio_in_bio_zone() - Check that a vio is running on the correct thread for its bio zone.
65  * @vio: The vio to check.
66  */
assert_vio_in_bio_zone(struct vio * vio)67 static inline void assert_vio_in_bio_zone(struct vio *vio)
68 {
69 	thread_id_t expected = get_vio_bio_zone_thread_id(vio);
70 	thread_id_t thread_id = vdo_get_callback_thread_id();
71 
72 	VDO_ASSERT_LOG_ONLY((expected == thread_id),
73 			    "vio I/O for physical block %llu on thread %u, should be on bio zone thread %u",
74 			    (unsigned long long) pbn_from_vio_bio(vio->bio), thread_id,
75 			    expected);
76 }
77 
/* Allocate a new bio for vdo's use; returns a vdo status code. */
int vdo_create_bio(struct bio **bio_ptr);
/* Free a bio allocated by vdo_create_bio(). */
void vdo_free_bio(struct bio *bio);
/* Initialize the fields of an already-allocated vio (bio, type, priority, etc.). */
int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
			    enum vio_priority priority, void *parent,
			    unsigned int block_count, char *data, struct vio *vio);
/* Allocate and initialize a metadata vio spanning block_count blocks. */
int __must_check create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
						 enum vio_priority priority,
						 void *parent, unsigned int block_count,
						 char *data, struct vio **vio_ptr);
87 
create_metadata_vio(struct vdo * vdo,enum vio_type vio_type,enum vio_priority priority,void * parent,char * data,struct vio ** vio_ptr)88 static inline int __must_check create_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
89 						   enum vio_priority priority,
90 						   void *parent, char *data,
91 						   struct vio **vio_ptr)
92 {
93 	return create_multi_block_metadata_vio(vdo, vio_type, priority, parent, 1, data,
94 					       vio_ptr);
95 }
96 
/* Release the resources owned by a vio without freeing the vio structure itself. */
void free_vio_components(struct vio *vio);
/* Release a vio's resources and free the vio. */
void free_vio(struct vio *vio);
99 
100 /**
101  * initialize_vio() - Initialize a vio.
102  * @vio: The vio to initialize.
103  * @bio: The bio this vio should use for its I/O.
104  * @block_count: The size of this vio in vdo blocks.
105  * @vio_type: The vio type.
106  * @priority: The relative priority of the vio.
107  * @vdo: The vdo for this vio.
108  */
initialize_vio(struct vio * vio,struct bio * bio,unsigned int block_count,enum vio_type vio_type,enum vio_priority priority,struct vdo * vdo)109 static inline void initialize_vio(struct vio *vio, struct bio *bio,
110 				  unsigned int block_count, enum vio_type vio_type,
111 				  enum vio_priority priority, struct vdo *vdo)
112 {
113 	/* data_vio's may not span multiple blocks */
114 	BUG_ON((vio_type == VIO_TYPE_DATA) && (block_count != 1));
115 
116 	vio->bio = bio;
117 	vio->block_count = block_count;
118 	vio->type = vio_type;
119 	vio->priority = priority;
120 	vdo_initialize_completion(&vio->completion, vdo, VIO_COMPLETION);
121 }
122 
/* Configure a vio's bio (end_io callback, op flags, and target pbn) for submission. */
void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callback,
			    blk_opf_t bi_opf, physical_block_number_t pbn);

/* Reset a vio's bio to perform new I/O over the vio's full data buffer. */
int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
		  blk_opf_t bi_opf, physical_block_number_t pbn);
/* As vio_reset_bio(), but with an explicit I/O size in bytes. */
int vio_reset_bio_with_size(struct vio *vio, char *data, int size, bio_end_io_t callback,
			    blk_opf_t bi_opf, physical_block_number_t pbn);

/* Record an I/O error in the vio's error statistics, with a printf-style description. */
void update_vio_error_stats(struct vio *vio, const char *format, ...)
	__printf(2, 3);
133 
134 /**
135  * is_data_vio() - Check whether a vio is servicing an external data request.
136  * @vio: The vio to check.
137  */
is_data_vio(struct vio * vio)138 static inline bool is_data_vio(struct vio *vio)
139 {
140 	return (vio->type == VIO_TYPE_DATA);
141 }
142 
143 /**
144  * get_metadata_priority() - Convert a vio's priority to a work item priority.
145  * @vio: The vio.
146  *
147  * Return: The priority with which to submit the vio's bio.
148  */
get_metadata_priority(struct vio * vio)149 static inline enum vdo_completion_priority get_metadata_priority(struct vio *vio)
150 {
151 	return ((vio->priority == VIO_PRIORITY_HIGH) ?
152 		BIO_Q_HIGH_PRIORITY :
153 		BIO_Q_METADATA_PRIORITY);
154 }
155 
156 /**
157  * continue_vio() - Enqueue a vio to run its next callback.
158  * @vio: The vio to continue.
159  *
160  * Return: The result of the current operation.
161  */
continue_vio(struct vio * vio,int result)162 static inline void continue_vio(struct vio *vio, int result)
163 {
164 	if (unlikely(result != VDO_SUCCESS))
165 		vdo_set_completion_result(&vio->completion, result);
166 
167 	vdo_enqueue_completion(&vio->completion, VDO_WORK_Q_DEFAULT_PRIORITY);
168 }
169 
/* Tally a submitted bio in the supplied per-operation statistics. */
void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio);
/* Tally a bio whose I/O has completed. */
void vdo_count_completed_bios(struct bio *bio);
172 
173 /**
174  * continue_vio_after_io() - Continue a vio now that its I/O has returned.
175  */
continue_vio_after_io(struct vio * vio,vdo_action_fn callback,thread_id_t thread)176 static inline void continue_vio_after_io(struct vio *vio, vdo_action_fn callback,
177 					 thread_id_t thread)
178 {
179 	vdo_count_completed_bios(vio->bio);
180 	vdo_set_completion_callback(&vio->completion, callback, thread);
181 	continue_vio(vio, blk_status_to_errno(vio->bio->bi_status));
182 }
183 
/* Log and record statistics for a metadata I/O error on a vio. */
void vio_record_metadata_io_error(struct vio *vio);

/* A vio_pool is a collection of preallocated vios used to write arbitrary metadata blocks. */
187 
vio_as_pooled_vio(struct vio * vio)188 static inline struct pooled_vio *vio_as_pooled_vio(struct vio *vio)
189 {
190 	return container_of(vio, struct pooled_vio, vio);
191 }
192 
/* Opaque pool type; defined in vio.c. */
struct vio_pool;

/* Create a pool of pool_size preallocated vios, each spanning block_count blocks. */
int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, size_t block_count,
			       thread_id_t thread_id, enum vio_type vio_type,
			       enum vio_priority priority, void *context,
			       struct vio_pool **pool_ptr);
/* Destroy a pool and its vios. */
void free_vio_pool(struct vio_pool *pool);
/* Check whether any of the pool's vios are currently checked out. */
bool __must_check is_vio_pool_busy(struct vio_pool *pool);
/* Hand a pooled vio to the waiter, or queue the waiter until one is free. */
void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter);
/* Return a vio to its pool, waking the next waiter if any. */
void return_vio_to_pool(struct pooled_vio *vio);
203 
204 #endif /* VIO_H */
205