1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright 2023 Red Hat
4 */
5
6 #ifndef VDO_H
7 #define VDO_H
8
9 #include <linux/atomic.h>
10 #include <linux/blk_types.h>
11 #include <linux/completion.h>
12 #include <linux/dm-kcopyd.h>
13 #include <linux/list.h>
14 #include <linux/spinlock.h>
15
16 #include "admin-state.h"
17 #include "encodings.h"
18 #include "funnel-workqueue.h"
19 #include "packer.h"
20 #include "physical-zone.h"
21 #include "statistics.h"
22 #include "thread-registry.h"
23 #include "types.h"
24
/* The lifecycle states of a read_only_notifier (see struct read_only_notifier below). */
enum notifier_state {
	/* Notifications are allowed but not in progress */
	MAY_NOTIFY,
	/* A notification is in progress */
	NOTIFYING,
	/* Notifications are not allowed */
	MAY_NOT_NOTIFY,
	/* A notification has completed */
	NOTIFIED,
};
35
/**
 * typedef vdo_read_only_notification_fn - A function to notify a listener that the VDO has gone
 *                                         read-only.
 * @listener: The object to notify.
 * @parent: The completion to notify in order to acknowledge the notification.
 */
typedef void (*vdo_read_only_notification_fn)(void *listener, struct vdo_completion *parent);

/*
 * An object to be notified when the VDO enters read-only mode. Listeners are kept in a
 * singly-linked list, one per vdo thread (see the listeners field of struct vdo_thread).
 */
struct read_only_listener {
	/* The listener */
	void *listener;
	/* The method to call to notify the listener */
	vdo_read_only_notification_fn notify;
	/* A pointer to the next listener */
	struct read_only_listener *next;
};
55
/* Per-thread state for each of a vdo's base threads. */
struct vdo_thread {
	/* The vdo to which this thread belongs */
	struct vdo *vdo;
	/* The id of this thread within the vdo's thread_config */
	thread_id_t thread_id;
	/* The work queue which executes work on this thread */
	struct vdo_work_queue *queue;
	/*
	 * Each thread maintains its own notion of whether the VDO is read-only so that the
	 * read-only state can be checked from any base thread without worrying about
	 * synchronization or thread safety. This does mean that knowledge of the VDO going
	 * read-only does not occur simultaneously across the VDO's threads, but that does not seem
	 * to cause any problems.
	 */
	bool is_read_only;
	/*
	 * A list of objects waiting to be notified on this thread that the VDO has entered
	 * read-only mode.
	 */
	struct read_only_listener *listeners;
	/* Registry entry for identifying this thread (used for allocation tracking) */
	struct registered_thread allocating_thread;
};
75
/* Keep struct bio statistics atomically */
struct atomic_bio_stats {
	atomic64_t read; /* Number of not REQ_WRITE bios */
	atomic64_t write; /* Number of REQ_WRITE bios */
	atomic64_t discard; /* Number of REQ_DISCARD bios */
	atomic64_t flush; /* Number of REQ_FLUSH bios */
	atomic64_t empty_flush; /* Number of REQ_PREFLUSH bios without data */
	atomic64_t fua; /* Number of REQ_FUA bios */
};
85
/* Counters are atomic since updates can arrive concurrently from arbitrary threads. */
struct atomic_statistics {
	/* Total bios submitted to and completed by this vdo */
	atomic64_t bios_submitted;
	atomic64_t bios_completed;
	/* Number of flushes issued to the underlying storage */
	atomic64_t flush_out;
	/* Count of dedupe advice which pointed at an invalid physical block */
	atomic64_t invalid_advice_pbn_count;
	/* Errors due to the vdo running out of space or being read-only */
	atomic64_t no_space_error_count;
	atomic64_t read_only_error_count;
	/* Incoming bios, broken out by operation type (see struct atomic_bio_stats) */
	struct atomic_bio_stats bios_in;
	struct atomic_bio_stats bios_in_partial;
	/* Bios sent to, and completed by, the underlying storage */
	struct atomic_bio_stats bios_out;
	struct atomic_bio_stats bios_out_completed;
	/* Bios acknowledged back to the submitter */
	struct atomic_bio_stats bios_acknowledged;
	struct atomic_bio_stats bios_acknowledged_partial;
	/* Metadata, journal, and page cache I/O issued on the vdo's own behalf */
	struct atomic_bio_stats bios_meta;
	struct atomic_bio_stats bios_meta_completed;
	struct atomic_bio_stats bios_journal;
	struct atomic_bio_stats bios_journal_completed;
	struct atomic_bio_stats bios_page_cache;
	struct atomic_bio_stats bios_page_cache_completed;
};
107
/* Tracks the process of entering read-only mode and notifying registered listeners. */
struct read_only_notifier {
	/* The completion for entering read-only mode */
	struct vdo_completion completion;
	/* A completion waiting for notifications to be drained or enabled */
	struct vdo_completion *waiter;
	/* Lock to protect the next two fields */
	spinlock_t lock;
	/* The code of the error which put the VDO into read-only mode */
	int read_only_error;
	/* The current state of the notifier (values described above) */
	enum notifier_state state;
};
120
/*
 * The thread ID returned when the current thread is not a vdo thread, or can not be determined
 * (usually due to being at interrupt context).
 */
#define VDO_INVALID_THREAD_ID ((thread_id_t) -1)

/* The mapping of vdo work to its various threads. */
struct thread_config {
	/* How many zones of each type the vdo has */
	zone_count_t logical_zone_count;
	zone_count_t physical_zone_count;
	zone_count_t hash_zone_count;
	/* The number of bio submission threads */
	thread_count_t bio_thread_count;
	/* The total number of vdo threads */
	thread_count_t thread_count;
	/* The ids of the single-instance threads */
	thread_id_t admin_thread;
	thread_id_t journal_thread;
	thread_id_t packer_thread;
	thread_id_t dedupe_thread;
	thread_id_t bio_ack_thread;
	thread_id_t cpu_thread;
	/* Arrays of the thread ids handling each zone or bio queue */
	thread_id_t *logical_threads;
	thread_id_t *physical_threads;
	thread_id_t *hash_zone_threads;
	thread_id_t *bio_threads;
};
144
struct thread_count_config;

/* The in-memory representation of the vdo's super block. */
struct vdo_super_block {
	/* The vio for reading and writing the super block to disk */
	struct vio vio;
	/* A buffer to hold the super block */
	u8 *buffer;
	/* Whether this super block may not be written */
	bool unwritable;
};
155
struct data_vio_pool;

/* State for managing administrative operations (load, suspend, resume, etc.). */
struct vdo_administrator {
	/* The completion used to run each phase of an admin operation */
	struct vdo_completion completion;
	/* The current administrative state of the vdo */
	struct admin_state state;
	/* Whether an admin operation is currently in progress */
	atomic_t busy;
	/* The phase of the current admin operation */
	u32 phase;
	/* Used to block the caller thread until the operation completes */
	struct completion callback_sync;
};
165
/* The top-level structure for a single vdo device instance. */
struct vdo {
	/* The prefix used when naming this vdo's work queue threads */
	char thread_name_prefix[MAX_VDO_WORK_QUEUE_NAME_LEN];
	/* The array of per-thread state (thread_config.thread_count entries) */
	struct vdo_thread *threads;
	/* The action and completion for the current administrative operation */
	vdo_action_fn action;
	struct vdo_completion *completion;
	/* Optional tracer for vio activity */
	struct vio_tracer *vio_tracer;

	/* The atomic version of the state of this vdo */
	atomic_t state;
	/* The full state of all components */
	struct vdo_component_states states;
	/*
	 * A counter value to attach to thread names and log messages to identify the individual
	 * device.
	 */
	unsigned int instance;
	/* The read-only notifier */
	struct read_only_notifier read_only_notifier;
	/* The load-time configuration of this vdo */
	struct device_config *device_config;
	/* The thread mapping */
	struct thread_config thread_config;

	/* The super block */
	struct vdo_super_block super_block;

	/* The partitioning of the underlying storage */
	struct layout layout;
	struct layout next_layout;
	struct dm_kcopyd_client *partition_copier;

	/* The block map */
	struct block_map *block_map;

	/* The journal for block map recovery */
	struct recovery_journal *recovery_journal;

	/* The slab depot */
	struct slab_depot *depot;

	/* The compressed-block packer */
	struct packer *packer;
	/* Whether incoming data should be compressed */
	bool compressing;

	/* The handler for flush requests */
	struct flusher *flusher;

	/* The state the vdo was in when loaded (primarily for unit tests) */
	enum vdo_state load_state;

	/* The logical zones of this vdo */
	struct logical_zones *logical_zones;

	/* The physical zones of this vdo */
	struct physical_zones *physical_zones;

	/* The hash lock zones of this vdo */
	struct hash_zones *hash_zones;

	/* Bio submission manager used for sending bios to the storage device. */
	struct io_submitter *io_submitter;

	/* The pool of data_vios for servicing incoming bios */
	struct data_vio_pool *data_vio_pool;

	/* The manager for administrative operations */
	struct vdo_administrator admin;

	/* Flags controlling administrative operations */
	const struct admin_state_code *suspend_type;
	bool allocations_allowed;
	bool dump_on_shutdown;
	atomic_t processing_message;

	/*
	 * Statistics
	 * Atomic stats counters
	 */
	struct atomic_statistics stats;
	/* Used to gather statistics without allocating memory */
	struct vdo_statistics stats_buffer;
	/* Protects the stats_buffer */
	struct mutex stats_mutex;

	/* A list of all device_configs referencing this vdo */
	struct list_head device_config_list;

	/* This VDO's list entry for the device registry */
	struct list_head registration;

	/* Underlying block device info. */
	u64 starting_sector_offset;
	struct volume_geometry geometry;

	/* N blobs of context data for LZ4 code, one per CPU thread. */
	char **compression_context;
};
264
265 /**
266 * vdo_uses_bio_ack_queue() - Indicate whether the vdo is configured to use a separate work queue
267 * for acknowledging received and processed bios.
268 * @vdo: The vdo.
269 *
270 * Note that this directly controls the handling of write operations, but the compile-time flag
271 * VDO_USE_BIO_ACK_QUEUE_FOR_READ is also checked for read operations.
272 *
273 * Return: Whether a bio-acknowledgement work queue is in use.
274 */
vdo_uses_bio_ack_queue(struct vdo * vdo)275 static inline bool vdo_uses_bio_ack_queue(struct vdo *vdo)
276 {
277 return vdo->device_config->thread_counts.bio_ack_threads > 0;
278 }
279
/**
 * typedef vdo_filter_fn - Method type for vdo matching methods.
 *
 * A filter function returns false if the vdo doesn't match.
 */
typedef bool (*vdo_filter_fn)(struct vdo *vdo, const void *context);

/* One-time initialization of the global registry of vdo devices. */
void vdo_initialize_device_registry_once(void);
/* Find the first registered vdo accepted by the filter, or NULL if none matches. */
struct vdo * __must_check vdo_find_matching(vdo_filter_fn filter, const void *context);

/* Create the work queue(s) for a vdo thread; queue_count and contexts configure the queues. */
int __must_check vdo_make_thread(struct vdo *vdo, thread_id_t thread_id,
				 const struct vdo_work_queue_type *type,
				 unsigned int queue_count, void *contexts[]);
293
vdo_make_default_thread(struct vdo * vdo,thread_id_t thread_id)294 static inline int __must_check vdo_make_default_thread(struct vdo *vdo,
295 thread_id_t thread_id)
296 {
297 return vdo_make_thread(vdo, thread_id, NULL, 1, NULL);
298 }
299
/* Lifecycle: construction, destruction, and super block loading. */
int __must_check vdo_make(unsigned int instance, struct device_config *config,
			  char **reason, struct vdo **vdo_ptr);

void vdo_destroy(struct vdo *vdo);

void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent);

/* Accessors for device identity and backing storage. */
struct block_device * __must_check vdo_get_backing_device(const struct vdo *vdo);

const char * __must_check vdo_get_device_name(const struct dm_target *target);

int __must_check vdo_synchronous_flush(struct vdo *vdo);

const struct admin_state_code * __must_check vdo_get_admin_state(const struct vdo *vdo);

/* Compression control and statistics gathering. */
bool vdo_set_compressing(struct vdo *vdo, bool enable);

bool vdo_get_compressing(struct vdo *vdo);

void vdo_fetch_statistics(struct vdo *vdo, struct vdo_statistics *stats);

/*
 * The id of the vdo thread the caller is running on, or VDO_INVALID_THREAD_ID if it is not a
 * vdo thread.
 */
thread_id_t vdo_get_callback_thread_id(void);

/* State management and component persistence. */
enum vdo_state __must_check vdo_get_state(const struct vdo *vdo);

void vdo_set_state(struct vdo *vdo, enum vdo_state state);

void vdo_save_components(struct vdo *vdo, struct vdo_completion *parent);

/* Read-only mode: listener registration, entry, and queries. */
int vdo_register_read_only_listener(struct vdo *vdo, void *listener,
				    vdo_read_only_notification_fn notification,
				    thread_id_t thread_id);

int vdo_enable_read_only_entry(struct vdo *vdo);

void vdo_wait_until_not_entering_read_only_mode(struct vdo_completion *parent);

void vdo_allow_read_only_mode_entry(struct vdo_completion *parent);

void vdo_enter_read_only_mode(struct vdo *vdo, int error_code);

bool __must_check vdo_is_read_only(struct vdo *vdo);

bool __must_check vdo_in_read_only_mode(const struct vdo *vdo);

/* Recovery mode. */
bool __must_check vdo_in_recovery_mode(const struct vdo *vdo);

void vdo_enter_recovery_mode(struct vdo *vdo);

/* Thread assertions and zone lookup. */
void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name);

void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone,
				       const char *name);

void vdo_assert_on_physical_zone_thread(const struct vdo *vdo, zone_count_t physical_zone,
					const char *name);

int __must_check vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn,
				       struct physical_zone **zone_ptr);

void vdo_dump_status(const struct vdo *vdo);

#endif /* VDO_H */
363