/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2023 Red Hat
 */

#ifndef DATA_VIO_H
#define DATA_VIO_H

#include <linux/atomic.h>
#include <linux/bio.h>
#include <linux/list.h>

#include "permassert.h"

#include "indexer.h"

#include "block-map.h"
#include "completion.h"
#include "constants.h"
#include "dedupe.h"
#include "encodings.h"
#include "logical-zone.h"
#include "physical-zone.h"
#include "types.h"
#include "vdo.h"
#include "vio.h"
#include "wait-queue.h"

/* Codes for describing the last asynchronous operation performed on a vio. */
enum async_operation_number {
	MIN_VIO_ASYNC_OPERATION_NUMBER,
	VIO_ASYNC_OP_LAUNCH = MIN_VIO_ASYNC_OPERATION_NUMBER,
	VIO_ASYNC_OP_ACKNOWLEDGE_WRITE,
	VIO_ASYNC_OP_ACQUIRE_VDO_HASH_LOCK,
	VIO_ASYNC_OP_ATTEMPT_LOGICAL_BLOCK_LOCK,
	VIO_ASYNC_OP_LOCK_DUPLICATE_PBN,
	VIO_ASYNC_OP_CHECK_FOR_DUPLICATION,
	VIO_ASYNC_OP_CLEANUP,
	VIO_ASYNC_OP_COMPRESS_DATA_VIO,
	VIO_ASYNC_OP_FIND_BLOCK_MAP_SLOT,
	VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_READ,
	VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_WRITE,
	VIO_ASYNC_OP_HASH_DATA_VIO,
	VIO_ASYNC_OP_JOURNAL_REMAPPING,
	VIO_ASYNC_OP_ATTEMPT_PACKING,
	VIO_ASYNC_OP_PUT_MAPPED_BLOCK,
	VIO_ASYNC_OP_READ_DATA_VIO,
	VIO_ASYNC_OP_UPDATE_DEDUPE_INDEX,
	VIO_ASYNC_OP_UPDATE_REFERENCE_COUNTS,
	VIO_ASYNC_OP_VERIFY_DUPLICATION,
	VIO_ASYNC_OP_WRITE_DATA_VIO,
	MAX_VIO_ASYNC_OPERATION_NUMBER,
} __packed;

struct lbn_lock {
	logical_block_number_t lbn;
	bool locked;
	struct vdo_wait_queue waiters;
	struct logical_zone *zone;
};

/* A position in the arboreal block map at a specific level. */
struct block_map_tree_slot {
	page_number_t page_index;
	struct block_map_slot block_map_slot;
};

/* Fields for using the arboreal block map. */
struct tree_lock {
	/* The current height at which this data_vio is operating */
	height_t height;
	/* The block map tree for this LBN */
	root_count_t root_index;
	/* Whether we hold a page lock */
	bool locked;
	/* The key for the lock map */
	u64 key;
	/* The queue of waiters for the page this vio is allocating or loading */
	struct vdo_wait_queue waiters;
	/* The block map tree slots for this LBN */
	struct block_map_tree_slot tree_slots[VDO_BLOCK_MAP_TREE_HEIGHT + 1];
};

struct zoned_pbn {
	physical_block_number_t pbn;
	enum block_mapping_state state;
	struct physical_zone *zone;
};

/*
 * Where a data_vio is on the compression path; advance_data_vio_compression_stage() depends on
 * the order of this enum.
 */
enum data_vio_compression_stage {
	/* A data_vio which has not yet entered the compression path */
	DATA_VIO_PRE_COMPRESSOR,
	/* A data_vio which is in the compressor */
	DATA_VIO_COMPRESSING,
	/* A data_vio which is blocked in the packer */
	DATA_VIO_PACKING,
	/* A data_vio which is no longer on the compression path (and never will be) */
	DATA_VIO_POST_PACKER,
};

struct data_vio_compression_status {
	enum data_vio_compression_stage stage;
	bool may_not_compress;
};

struct compression_state {
	/*
	 * The current compression status of this data_vio. This field contains a value which
	 * consists of a data_vio_compression_stage and a flag indicating whether a request has
	 * been made to cancel (or prevent) compression for this data_vio.
	 *
	 * This field should be accessed through the get_data_vio_compression_status() and
	 * set_data_vio_compression_status() methods; it should not be accessed directly. A
	 * usage sketch follows this struct.
	 */
	atomic_t status;

	/* The compressed size of this block */
	u16 size;

	/* The packer input or output bin slot which holds the enclosing data_vio */
	slot_number_t slot;

	/* The packer bin to which the enclosing data_vio has been assigned */
	struct packer_bin *bin;

	/* A link in the chain of data_vios which have been packed together */
	struct data_vio *next_in_batch;

	/* A vio which is blocked in the packer while holding a lock this vio needs. */
	struct data_vio *lock_holder;

	/*
	 * The compressed block used to hold the compressed form of this block and that of any
	 * other blocks for which this data_vio is the compressed write agent.
	 */
	struct compressed_block *block;
};
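
/*
 * Example (editorial sketch, not part of the original header): the status field above is read
 * through its accessor rather than through the raw atomic; the helper name here is hypothetical.
 *
 *	static bool example_is_blocked_in_packer(struct data_vio *data_vio)
 *	{
 *		struct data_vio_compression_status status =
 *			get_data_vio_compression_status(data_vio);
 *
 *		return ((status.stage == DATA_VIO_PACKING) && !status.may_not_compress);
 *	}
 */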

/* Fields supporting allocation of data blocks. */
struct allocation {
	/* The physical zone in which to allocate a physical block */
	struct physical_zone *zone;

	/* The block allocated to this vio */
	physical_block_number_t pbn;

	/*
	 * If non-NULL, the pooled PBN lock held on the allocated block. Must be a write lock until
	 * the block has been written, after which it will become a read lock.
	 */
	struct pbn_lock *lock;

	/* The type of write lock to obtain on the allocated block */
	enum pbn_lock_type write_lock_type;

	/* The zone which was the start of the current allocation cycle */
	zone_count_t first_allocation_zone;

	/* Whether this vio should wait for a clean slab */
	bool wait_for_clean_slab;
};

struct reference_updater {
	enum journal_operation operation;
	bool increment;
	struct zoned_pbn zpbn;
	struct pbn_lock *lock;
	struct vdo_waiter waiter;
};

/* A vio for processing user data requests. */
struct data_vio {
	/* The vdo_wait_queue entry structure */
	struct vdo_waiter waiter;

	/* The logical block of this request */
	struct lbn_lock logical;

	/* The state for traversing the block map tree */
	struct tree_lock tree_lock;

	/* The current partition address of this block */
	struct zoned_pbn mapped;

	/* The hash of this vio (if not zero) */
	struct uds_record_name record_name;

	/* Used for logging and debugging */
	enum async_operation_number last_async_operation;

	/* The operations to record in the recovery and slab journals */
	struct reference_updater increment_updater;
	struct reference_updater decrement_updater;

	u16 read : 1;
	u16 write : 1;
	u16 fua : 1;
	u16 is_zero : 1;
	u16 is_discard : 1;
	u16 is_partial : 1;
	u16 is_duplicate : 1;
	u16 first_reference_operation_complete : 1;
	u16 downgrade_allocation_lock : 1;

	struct allocation allocation;

	/*
	 * Whether this vio has received an allocation. This field is examined from threads not in
	 * the allocation zone.
	 */
	bool allocation_succeeded;

	/* The new partition address of this block after the vio write completes */
	struct zoned_pbn new_mapped;

	/* The hash zone responsible for the name (NULL if is_zero) */
	struct hash_zone *hash_zone;

	/* The lock this vio holds or shares with other vios with the same data */
	struct hash_lock *hash_lock;

	/* All data_vios sharing a hash lock are kept in a list linking these list entries */
	struct list_head hash_lock_entry;

	/* The block number in the partition of the UDS deduplication advice */
	struct zoned_pbn duplicate;

	/*
	 * The sequence number of the recovery journal block containing the increment entry for
	 * this vio.
	 */
	sequence_number_t recovery_sequence_number;

	/* The point in the recovery journal where this write last made an entry */
	struct journal_point recovery_journal_point;

	/* The list of vios in user initiated write requests */
	struct list_head write_entry;

	/* The generation number of the VDO that this vio belongs to */
	sequence_number_t flush_generation;

	/* The completion to use for fetching block map pages for this vio */
	struct vdo_page_completion page_completion;

	/* The user bio that initiated this VIO */
	struct bio *user_bio;

	/* partial block support */
	block_size_t offset;

	/*
	 * The number of bytes to be discarded. For discards, this field will always be positive,
	 * whereas for non-discards it will always be 0. Hence it can be used to determine whether
	 * a data_vio is processing a discard, even after the user_bio has been acknowledged.
	 */
	u32 remaining_discard;

	struct dedupe_context *dedupe_context;

	/* Fields beyond this point will not be reset when a pooled data_vio is reused. */

	struct vio vio;

	/* The completion for making reference count decrements */
	struct vdo_completion decrement_completion;

	/* All of the fields necessary for the compression path */
	struct compression_state compression;

	/* A block used as output during compression or uncompression */
	char *scratch_block;

	struct list_head pool_entry;
};

static inline struct data_vio *vio_as_data_vio(struct vio *vio)
{
	VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
	return container_of(vio, struct data_vio, vio);
}

static inline struct data_vio *as_data_vio(struct vdo_completion *completion)
{
	return vio_as_data_vio(as_vio(completion));
}
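
/*
 * Example (editorial sketch): a vdo_action_fn typically recovers its data_vio from the completion
 * it is handed using the converters above; the callback name here is hypothetical.
 *
 *	static void example_callback(struct vdo_completion *completion)
 *	{
 *		struct data_vio *data_vio = as_data_vio(completion);
 *
 *		assert_data_vio_in_logical_zone(data_vio);
 *		...
 *	}
 */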

static inline struct data_vio *vdo_waiter_as_data_vio(struct vdo_waiter *waiter)
{
	if (waiter == NULL)
		return NULL;

	return container_of(waiter, struct data_vio, waiter);
}

static inline struct data_vio *data_vio_from_reference_updater(struct reference_updater *updater)
{
	if (updater->increment)
		return container_of(updater, struct data_vio, increment_updater);

	return container_of(updater, struct data_vio, decrement_updater);
}

static inline bool data_vio_has_flush_generation_lock(struct data_vio *data_vio)
{
	return !list_empty(&data_vio->write_entry);
}

static inline struct vdo *vdo_from_data_vio(struct data_vio *data_vio)
{
	return data_vio->vio.completion.vdo;
}

static inline bool data_vio_has_allocation(struct data_vio *data_vio)
{
	return (data_vio->allocation.pbn != VDO_ZERO_BLOCK);
}

struct data_vio_compression_status __must_check
advance_data_vio_compression_stage(struct data_vio *data_vio);
struct data_vio_compression_status __must_check
get_data_vio_compression_status(struct data_vio *data_vio);
bool cancel_data_vio_compression(struct data_vio *data_vio);

struct data_vio_pool;

int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
		       data_vio_count_t discard_limit, struct data_vio_pool **pool_ptr);
void free_data_vio_pool(struct data_vio_pool *pool);
void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio);
void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion);
void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion);

void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios);
data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool);
data_vio_count_t get_data_vio_pool_request_limit(struct data_vio_pool *pool);
data_vio_count_t get_data_vio_pool_maximum_requests(struct data_vio_pool *pool);
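
/*
 * Example (editorial sketch): a minimal pool lifecycle, assuming the caller already has a
 * constructed vdo and handles errors, draining, and suspend/resume elsewhere; the counts here are
 * illustrative only.
 *
 *	struct data_vio_pool *pool;
 *	int result = make_data_vio_pool(vdo, 2048, 1500, &pool);
 *
 *	if (result == VDO_SUCCESS) {
 *		vdo_launch_bio(pool, bio);
 *		...
 *		free_data_vio_pool(pool);
 *	}
 */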

void complete_data_vio(struct vdo_completion *completion);
void handle_data_vio_error(struct vdo_completion *completion);

static inline void continue_data_vio(struct data_vio *data_vio)
{
	vdo_launch_completion(&data_vio->vio.completion);
}

/**
 * continue_data_vio_with_error() - Set an error code and then continue processing a data_vio.
 *
 * This will not mask older errors. This function can be called with a success code, but it is more
 * efficient to call continue_data_vio() if the caller knows the result was a success.
 */
static inline void continue_data_vio_with_error(struct data_vio *data_vio, int result)
{
	vdo_continue_completion(&data_vio->vio.completion, result);
}
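
/*
 * Example (editorial sketch): following the note above, a caller which already knows the result
 * can skip the error path.
 *
 *	if (result == VDO_SUCCESS)
 *		continue_data_vio(data_vio);
 *	else
 *		continue_data_vio_with_error(data_vio, result);
 */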

const char * __must_check get_data_vio_operation_name(struct data_vio *data_vio);

static inline void assert_data_vio_in_hash_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->hash_zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();
	/*
	 * It's odd to use the LBN, but converting the record name to hex is a bit clunky for an
	 * inline, and the LBN is better than nothing as an identifier.
	 */
	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id, expected);
}

static inline void set_data_vio_hash_zone_callback(struct data_vio *data_vio,
						   vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->hash_zone->thread_id);
}

/**
 * launch_data_vio_hash_zone_callback() - Set a callback as a hash zone operation and invoke it
 *					  immediately.
 */
static inline void launch_data_vio_hash_zone_callback(struct data_vio *data_vio,
						      vdo_action_fn callback)
{
	set_data_vio_hash_zone_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}
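
/*
 * Example (editorial sketch): the set and launch helpers in this header all follow the same
 * pattern; a stage handler asserts that it is on the expected thread, does its work, and then
 * launches the next stage on the thread owning the next structure. The function names here are
 * hypothetical.
 *
 *	static void example_finish_hashing(struct vdo_completion *completion)
 *	{
 *		struct data_vio *data_vio = as_data_vio(completion);
 *
 *		assert_data_vio_on_cpu_thread(data_vio);
 *		launch_data_vio_hash_zone_callback(data_vio, example_acquire_hash_lock);
 *	}
 */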

static inline void assert_data_vio_in_logical_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->logical.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id, expected);
}

static inline void set_data_vio_logical_callback(struct data_vio *data_vio,
						 vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->logical.zone->thread_id);
}

/**
 * launch_data_vio_logical_callback() - Set a callback as a logical block operation and invoke it
 *					immediately.
 */
static inline void launch_data_vio_logical_callback(struct data_vio *data_vio,
						    vdo_action_fn callback)
{
	set_data_vio_logical_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_in_allocated_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->allocation.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->allocation.pbn, thread_id,
			    expected);
}

static inline void set_data_vio_allocated_zone_callback(struct data_vio *data_vio,
							vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->allocation.zone->thread_id);
}

/**
 * launch_data_vio_allocated_zone_callback() - Set a callback as a physical block operation in a
 *					       data_vio's allocated zone and invoke it immediately.
 */
static inline void launch_data_vio_allocated_zone_callback(struct data_vio *data_vio,
							   vdo_action_fn callback)
{
	set_data_vio_allocated_zone_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_in_duplicate_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->duplicate.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->duplicate.pbn, thread_id,
			    expected);
}

static inline void set_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
							vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->duplicate.zone->thread_id);
}

/**
 * launch_data_vio_duplicate_zone_callback() - Set a callback as a physical block operation in a
 *					       data_vio's duplicate zone and invoke it immediately.
 */
static inline void launch_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
							   vdo_action_fn callback)
{
	set_data_vio_duplicate_zone_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_in_mapped_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->mapped.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for mapped physical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->mapped.pbn, thread_id, expected);
}

static inline void set_data_vio_mapped_zone_callback(struct data_vio *data_vio,
						     vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->mapped.zone->thread_id);
}

static inline void assert_data_vio_in_new_mapped_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->new_mapped.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->new_mapped.pbn, thread_id,
			    expected);
}

static inline void set_data_vio_new_mapped_zone_callback(struct data_vio *data_vio,
							 vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->new_mapped.zone->thread_id);
}

static inline void assert_data_vio_in_journal_zone(struct data_vio *data_vio)
{
	thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((journal_thread == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on journal thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id,
			    journal_thread);
}

static inline void set_data_vio_journal_callback(struct data_vio *data_vio,
						 vdo_action_fn callback)
{
	thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;

	vdo_set_completion_callback(&data_vio->vio.completion, callback, journal_thread);
}

/**
 * launch_data_vio_journal_callback() - Set a callback as a journal operation and invoke it
 *					immediately.
 */
static inline void launch_data_vio_journal_callback(struct data_vio *data_vio,
						    vdo_action_fn callback)
{
	set_data_vio_journal_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_in_packer_zone(struct data_vio *data_vio)
{
	thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((packer_thread == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on packer thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id,
			    packer_thread);
}

static inline void set_data_vio_packer_callback(struct data_vio *data_vio,
						vdo_action_fn callback)
{
	thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;

	vdo_set_completion_callback(&data_vio->vio.completion, callback, packer_thread);
}

/**
 * launch_data_vio_packer_callback() - Set a callback as a packer operation and invoke it
 *				       immediately.
 */
static inline void launch_data_vio_packer_callback(struct data_vio *data_vio,
						   vdo_action_fn callback)
{
	set_data_vio_packer_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_on_cpu_thread(struct data_vio *data_vio)
{
	thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((cpu_thread == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on cpu thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id,
			    cpu_thread);
}

static inline void set_data_vio_cpu_callback(struct data_vio *data_vio,
					     vdo_action_fn callback)
{
	thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;

	vdo_set_completion_callback(&data_vio->vio.completion, callback, cpu_thread);
}

/**
 * launch_data_vio_cpu_callback() - Set a callback to run on the CPU queues and invoke it
 *				    immediately.
 */
static inline void launch_data_vio_cpu_callback(struct data_vio *data_vio,
						vdo_action_fn callback,
						enum vdo_completion_priority priority)
{
	set_data_vio_cpu_callback(data_vio, callback);
	vdo_launch_completion_with_priority(&data_vio->vio.completion, priority);
}
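
/*
 * Example (editorial sketch): CPU-intensive steps such as hashing the data are dispatched this
 * way. The priority constant is assumed to be one of the CPU queue priorities defined elsewhere
 * in vdo, and the callback name is hypothetical.
 *
 *	launch_data_vio_cpu_callback(data_vio, example_hash_block, CPU_Q_HASH_BLOCK_PRIORITY);
 */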

static inline void set_data_vio_bio_zone_callback(struct data_vio *data_vio,
						  vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    get_vio_bio_zone_thread_id(&data_vio->vio));
}

/**
 * launch_data_vio_bio_zone_callback() - Set a callback as a bio zone operation and invoke it
 *					 immediately.
 */
static inline void launch_data_vio_bio_zone_callback(struct data_vio *data_vio,
						     vdo_action_fn callback)
{
	set_data_vio_bio_zone_callback(data_vio, callback);
	vdo_launch_completion_with_priority(&data_vio->vio.completion,
					    BIO_Q_DATA_PRIORITY);
}

/**
 * launch_data_vio_on_bio_ack_queue() - If the vdo uses a bio_ack queue, set a callback to run on
 *					it and invoke it immediately; otherwise, just run the
 *					callback on the current thread.
 */
static inline void launch_data_vio_on_bio_ack_queue(struct data_vio *data_vio,
						    vdo_action_fn callback)
{
	struct vdo_completion *completion = &data_vio->vio.completion;
	struct vdo *vdo = completion->vdo;

	if (!vdo_uses_bio_ack_queue(vdo)) {
		callback(completion);
		return;
	}

	vdo_set_completion_callback(completion, callback,
				    vdo->thread_config.bio_ack_thread);
	vdo_launch_completion_with_priority(completion, BIO_ACK_Q_ACK_PRIORITY);
}

void data_vio_allocate_data_block(struct data_vio *data_vio,
				  enum pbn_lock_type write_lock_type,
				  vdo_action_fn callback, vdo_action_fn error_handler);
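
/*
 * Example (editorial sketch): requesting an allocation before a write. The lock type is assumed
 * to come from enum pbn_lock_type, and both handler names here are hypothetical.
 *
 *	data_vio_allocate_data_block(data_vio, VIO_WRITE_LOCK, example_continue_write,
 *				     example_handle_allocation_error);
 */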

void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset);

int __must_check uncompress_data_vio(struct data_vio *data_vio,
				     enum block_mapping_state mapping_state,
				     char *buffer);
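
/*
 * Example (editorial sketch): decompressing a compressed mapping into a block-sized buffer. The
 * destination is assumed to be the vio's data buffer.
 *
 *	int result = uncompress_data_vio(data_vio, data_vio->mapped.state, data_vio->vio.data);
 *
 *	if (result != VDO_SUCCESS)
 *		continue_data_vio_with_error(data_vio, result);
 */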

void update_metadata_for_data_vio_write(struct data_vio *data_vio,
					struct pbn_lock *lock);
void write_data_vio(struct data_vio *data_vio);
void launch_compress_data_vio(struct data_vio *data_vio);
void continue_data_vio_with_block_map_slot(struct vdo_completion *completion);

#endif /* DATA_VIO_H */