// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Red Hat
 */

#include "flush.h"

#include <linux/mempool.h>
#include <linux/spinlock.h>

#include "logger.h"
#include "memory-alloc.h"
#include "permassert.h"

#include "admin-state.h"
#include "completion.h"
#include "io-submitter.h"
#include "logical-zone.h"
#include "slab-depot.h"
#include "types.h"
#include "vdo.h"

struct flusher {
	struct vdo_completion completion;
	/* The vdo to which this flusher belongs */
	struct vdo *vdo;
	/* The administrative state of the flusher */
	struct admin_state state;
	/* The current flush generation of the vdo */
	sequence_number_t flush_generation;
	/* The first unacknowledged flush generation */
	sequence_number_t first_unacknowledged_generation;
	/* The queue of flush requests waiting to notify other threads */
	struct vdo_wait_queue notifiers;
	/* The queue of flush requests waiting for VIOs to complete */
	struct vdo_wait_queue pending_flushes;
	/* The flush generation for which notifications are being sent */
	sequence_number_t notify_generation;
	/* The logical zone to notify next */
	struct logical_zone *logical_zone_to_notify;
	/* The ID of the thread on which flush requests should be made */
	thread_id_t thread_id;
	/* The pool of flush requests */
	mempool_t *flush_pool;
	/* Bios waiting for a flush request to become available */
	struct bio_list waiting_flush_bios;
	/* The lock to protect the previous fields */
	spinlock_t lock;
	/* The rotor for selecting the bio queue for submitting flush bios */
	zone_count_t bio_queue_rotor;
	/* The number of flushes submitted to the current bio queue */
	int flush_count;
};

/**
 * assert_on_flusher_thread() - Check that we are on the flusher thread.
 * @flusher: The flusher.
 * @caller: The function which is asserting.
 */
static inline void assert_on_flusher_thread(struct flusher *flusher, const char *caller)
{
	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id),
			    "%s() called from flusher thread", caller);
}

/**
 * as_flusher() - Convert a generic vdo_completion to a flusher.
 * @completion: The completion to convert.
 *
 * Return: The completion as a flusher.
 */
static struct flusher *as_flusher(struct vdo_completion *completion)
{
	vdo_assert_completion_type(completion, VDO_FLUSH_NOTIFICATION_COMPLETION);
	return container_of(completion, struct flusher, completion);
}

/**
 * completion_as_vdo_flush() - Convert a generic vdo_completion to a vdo_flush.
 * @completion: The completion to convert.
 *
 * Return: The completion as a vdo_flush.
 */
static inline struct vdo_flush *completion_as_vdo_flush(struct vdo_completion *completion)
{
	vdo_assert_completion_type(completion, VDO_FLUSH_COMPLETION);
	return container_of(completion, struct vdo_flush, completion);
}

/**
 * vdo_waiter_as_flush() - Convert a vdo_flush's generic wait queue entry back to the vdo_flush.
 * @waiter: The wait queue entry to convert.
 *
 * Return: The wait queue entry as a vdo_flush.
 */
static struct vdo_flush *vdo_waiter_as_flush(struct vdo_waiter *waiter)
{
	return container_of(waiter, struct vdo_flush, waiter);
}

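/**
 * allocate_flush() - Allocate a vdo_flush structure (the flush_pool's mempool allocation
 *                    callback).
 * @gfp_mask: The allocation flags from the mempool.
 * @pool_data: The flusher which owns the pool.
 *
 * A nowait allocation is used on the bio submission path; a blocking allocation is only made
 * when the mempool itself populates its reserve.
 *
 * Return: A newly allocated and initialized flush, or NULL if the allocation failed.
 */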
static void *allocate_flush(gfp_t gfp_mask, void *pool_data)
{
	struct vdo_flush *flush = NULL;

	if ((gfp_mask & GFP_NOWAIT) == GFP_NOWAIT) {
		flush = vdo_allocate_memory_nowait(sizeof(struct vdo_flush), __func__);
	} else {
		int result = vdo_allocate(1, struct vdo_flush, __func__, &flush);

		if (result != VDO_SUCCESS)
			vdo_log_error_strerror(result, "failed to allocate spare flush");
	}

	if (flush != NULL) {
		struct flusher *flusher = pool_data;

		vdo_initialize_completion(&flush->completion, flusher->vdo,
					  VDO_FLUSH_COMPLETION);
	}

	return flush;
}

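/**
 * free_flush() - Free a vdo_flush structure (the flush_pool's mempool free callback).
 * @element: The flush to free.
 * @pool_data: The flusher which owns the pool (unused).
 */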
static void free_flush(void *element, void *pool_data __always_unused)
{
	vdo_free(element);
}

/**
 * vdo_make_flusher() - Make a flusher for a vdo.
 * @vdo: The vdo which owns the flusher.
 *
 * Return: VDO_SUCCESS or an error.
 */
int vdo_make_flusher(struct vdo *vdo)
{
	int result = vdo_allocate(1, struct flusher, __func__, &vdo->flusher);

	if (result != VDO_SUCCESS)
		return result;

	vdo->flusher->vdo = vdo;
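	/* The flusher shares the packer's thread so that it can flush the packer directly. */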
	vdo->flusher->thread_id = vdo->thread_config.packer_thread;
	vdo_set_admin_state_code(&vdo->flusher->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
	vdo_initialize_completion(&vdo->flusher->completion, vdo,
				  VDO_FLUSH_NOTIFICATION_COMPLETION);

	spin_lock_init(&vdo->flusher->lock);
	bio_list_init(&vdo->flusher->waiting_flush_bios);
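	/*
	 * The pool holds a single spare flush. If a GFP_NOWAIT allocation fails in
	 * vdo_launch_flush(), the bio waits on waiting_flush_bios until an in-flight vdo_flush
	 * is released.
	 */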
	vdo->flusher->flush_pool = mempool_create(1, allocate_flush, free_flush,
						  vdo->flusher);
	return ((vdo->flusher->flush_pool == NULL) ? -ENOMEM : VDO_SUCCESS);
}

/**
 * vdo_free_flusher() - Free a flusher.
 * @flusher: The flusher to free.
 */
void vdo_free_flusher(struct flusher *flusher)
{
	if (flusher == NULL)
		return;

	if (flusher->flush_pool != NULL)
		mempool_destroy(vdo_forget(flusher->flush_pool));
	vdo_free(flusher);
}

/**
 * vdo_get_flusher_thread_id() - Get the ID of the thread on which flusher functions should be
 *                               called.
 * @flusher: The flusher to query.
 *
 * Return: The ID of the thread which handles the flusher.
 */
thread_id_t vdo_get_flusher_thread_id(struct flusher *flusher)
{
	return flusher->thread_id;
}

static void notify_flush(struct flusher *flusher);
static void vdo_complete_flush(struct vdo_flush *flush);

/**
 * finish_notification() - Finish the notification process.
 * @completion: The flusher completion.
 *
 * Finishes the notification process by checking if any flushes have completed and then starting
 * the notification of the next flush request if one came in while the current notification was in
 * progress. This callback is registered in flush_packer_callback().
 */
static void finish_notification(struct vdo_completion *completion)
{
	struct flusher *flusher = as_flusher(completion);

	assert_on_flusher_thread(flusher, __func__);

	vdo_waitq_enqueue_waiter(&flusher->pending_flushes,
				 vdo_waitq_dequeue_waiter(&flusher->notifiers));
	vdo_complete_flushes(flusher);
	if (vdo_waitq_has_waiters(&flusher->notifiers))
		notify_flush(flusher);
}

/**
 * flush_packer_callback() - Flush the packer.
 * @completion: The flusher completion.
 *
 * Flushes the packer now that all of the logical zones have been notified of the new flush
 * request. This callback is registered in increment_generation().
 */
static void flush_packer_callback(struct vdo_completion *completion)
{
	struct flusher *flusher = as_flusher(completion);

	vdo_increment_packer_flush_generation(flusher->vdo->packer);
	vdo_launch_completion_callback(completion, finish_notification,
				       flusher->thread_id);
}

/**
 * increment_generation() - Increment the flush generation in a logical zone.
 * @completion: The flusher as a completion.
 *
 * If there are more logical zones, go on to the next one; otherwise, flush the packer. This
 * callback is registered both in notify_flush() and in itself.
 */
static void increment_generation(struct vdo_completion *completion)
{
	struct flusher *flusher = as_flusher(completion);
	struct logical_zone *zone = flusher->logical_zone_to_notify;

	vdo_increment_logical_zone_flush_generation(zone, flusher->notify_generation);
	if (zone->next == NULL) {
		vdo_launch_completion_callback(completion, flush_packer_callback,
					       flusher->thread_id);
		return;
	}

	flusher->logical_zone_to_notify = zone->next;
	vdo_launch_completion_callback(completion, increment_generation,
				       flusher->logical_zone_to_notify->thread_id);
}

/**
 * notify_flush() - Launch a flush notification.
 * @flusher: The flusher doing the notification.
 */
static void notify_flush(struct flusher *flusher)
{
	struct vdo_flush *flush =
		vdo_waiter_as_flush(vdo_waitq_get_first_waiter(&flusher->notifiers));

	flusher->notify_generation = flush->flush_generation;
	flusher->logical_zone_to_notify = &flusher->vdo->logical_zones->zones[0];
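	/* Requeue so the notification is always enqueued, even if the first zone shares this thread. */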
	flusher->completion.requeue = true;
	vdo_launch_completion_callback(&flusher->completion, increment_generation,
				       flusher->logical_zone_to_notify->thread_id);
}

/**
 * flush_vdo() - Start processing a flush request.
 * @completion: A flush request (as a vdo_completion).
 *
 * This callback is registered in launch_flush().
 */
static void flush_vdo(struct vdo_completion *completion)
{
	struct vdo_flush *flush = completion_as_vdo_flush(completion);
	struct flusher *flusher = completion->vdo->flusher;
	bool may_notify;
	int result;

	assert_on_flusher_thread(flusher, __func__);
	result = VDO_ASSERT(vdo_is_state_normal(&flusher->state),
			    "flusher is in normal operation");
	if (result != VDO_SUCCESS) {
		vdo_enter_read_only_mode(flusher->vdo, result);
		vdo_complete_flush(flush);
		return;
	}

	flush->flush_generation = flusher->flush_generation++;
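	/*
	 * Only one notification runs at a time. If notifiers is non-empty, finish_notification()
	 * will launch this flush's notification when the current one completes.
	 */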
	may_notify = !vdo_waitq_has_waiters(&flusher->notifiers);
	vdo_waitq_enqueue_waiter(&flusher->notifiers, &flush->waiter);
	if (may_notify)
		notify_flush(flusher);
}

/**
 * check_for_drain_complete() - Check whether the flusher has drained.
 * @flusher: The flusher.
 */
static void check_for_drain_complete(struct flusher *flusher)
{
	bool drained;

	if (!vdo_is_state_draining(&flusher->state) ||
	    vdo_waitq_has_waiters(&flusher->pending_flushes))
		return;

	spin_lock(&flusher->lock);
	drained = bio_list_empty(&flusher->waiting_flush_bios);
	spin_unlock(&flusher->lock);

	if (drained)
		vdo_finish_draining(&flusher->state);
}

/**
 * vdo_complete_flushes() - Attempt to complete any flushes which might have finished.
 * @flusher: The flusher.
 */
void vdo_complete_flushes(struct flusher *flusher)
{
	sequence_number_t oldest_active_generation = U64_MAX;
	struct logical_zone *zone;

	assert_on_flusher_thread(flusher, __func__);

	for (zone = &flusher->vdo->logical_zones->zones[0]; zone != NULL; zone = zone->next)
		oldest_active_generation =
			min(oldest_active_generation,
			    READ_ONCE(zone->oldest_active_generation));

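	/*
	 * A flush may complete only once its generation is strictly older than the oldest
	 * generation still active in any logical zone; at that point, every VIO in its
	 * generation or older has finished.
	 */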
	while (vdo_waitq_has_waiters(&flusher->pending_flushes)) {
		struct vdo_flush *flush =
			vdo_waiter_as_flush(vdo_waitq_get_first_waiter(&flusher->pending_flushes));

		if (flush->flush_generation >= oldest_active_generation)
			return;

		VDO_ASSERT_LOG_ONLY((flush->flush_generation ==
				     flusher->first_unacknowledged_generation),
				    "acknowledged next expected flush, %llu, was: %llu",
				    (unsigned long long) flusher->first_unacknowledged_generation,
				    (unsigned long long) flush->flush_generation);
		vdo_waitq_dequeue_waiter(&flusher->pending_flushes);
		vdo_complete_flush(flush);
		flusher->first_unacknowledged_generation++;
	}

	check_for_drain_complete(flusher);
}

/**
 * vdo_dump_flusher() - Dump the flusher, in a thread-unsafe fashion.
 * @flusher: The flusher.
 */
void vdo_dump_flusher(const struct flusher *flusher)
{
	vdo_log_info("struct flusher");
	vdo_log_info("  flush_generation=%llu first_unacknowledged_generation=%llu",
		     (unsigned long long) flusher->flush_generation,
		     (unsigned long long) flusher->first_unacknowledged_generation);
	vdo_log_info("  notifiers queue is %s; pending_flushes queue is %s",
		     (vdo_waitq_has_waiters(&flusher->notifiers) ? "not empty" : "empty"),
		     (vdo_waitq_has_waiters(&flusher->pending_flushes) ? "not empty" : "empty"));
}

/**
 * initialize_flush() - Initialize a vdo_flush structure.
 * @flush: The flush to initialize.
 * @vdo: The vdo being flushed.
 *
 * Initializes a vdo_flush structure, transferring all the bios in the flusher's waiting_flush_bios
 * list to it. The caller MUST already hold the lock.
 */
static void initialize_flush(struct vdo_flush *flush, struct vdo *vdo)
{
	bio_list_init(&flush->bios);
	bio_list_merge_init(&flush->bios, &vdo->flusher->waiting_flush_bios);
}

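/**
 * launch_flush() - Hand a flush request off to the flusher thread for processing.
 * @flush: The flush to launch.
 */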
static void launch_flush(struct vdo_flush *flush)
{
	struct vdo_completion *completion = &flush->completion;

	vdo_prepare_completion(completion, flush_vdo, flush_vdo,
			       completion->vdo->thread_config.packer_thread, NULL);
	vdo_enqueue_completion(completion, VDO_DEFAULT_Q_FLUSH_PRIORITY);
}

/**
 * vdo_launch_flush() - Function called to start processing a flush request.
 * @vdo: The vdo.
 * @bio: The bio containing an empty flush request.
 *
 * This is called when we receive an empty flush bio from the block layer, and before acknowledging
 * a non-empty bio with the FUA flag set.
 */
void vdo_launch_flush(struct vdo *vdo, struct bio *bio)
{
	/*
	 * Try to allocate a vdo_flush to represent the flush request. If the allocation fails,
	 * we'll deal with it later.
	 */
	struct vdo_flush *flush = mempool_alloc(vdo->flusher->flush_pool, GFP_NOWAIT);
	struct flusher *flusher = vdo->flusher;
	const struct admin_state_code *code = vdo_get_admin_state_code(&flusher->state);

	VDO_ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s",
			    code->name);

	spin_lock(&flusher->lock);

	/* We have a new bio to start. Add it to the list. */
	bio_list_add(&flusher->waiting_flush_bios, bio);

	if (flush == NULL) {
		spin_unlock(&flusher->lock);
		return;
	}

	/* We have flushes to start. Capture them in the vdo_flush structure. */
	initialize_flush(flush, vdo);
	spin_unlock(&flusher->lock);

	/* Finish launching the flushes. */
	launch_flush(flush);
}

/**
 * release_flush() - Release a vdo_flush structure that has completed its work.
 * @flush: The completed flush structure to re-use or free.
 *
 * If there are any pending flush requests whose vdo_flush allocation failed, they will be launched
 * by immediately re-using the released vdo_flush. If there is no spare vdo_flush, the released
 * structure will become the spare. Otherwise, the vdo_flush will be freed.
 */
static void release_flush(struct vdo_flush *flush)
{
	bool relaunch_flush;
	struct flusher *flusher = flush->completion.vdo->flusher;

	spin_lock(&flusher->lock);
	if (bio_list_empty(&flusher->waiting_flush_bios)) {
		relaunch_flush = false;
	} else {
		/* We have flushes to start. Capture them in a flush request. */
		initialize_flush(flush, flusher->vdo);
		relaunch_flush = true;
	}
	spin_unlock(&flusher->lock);

	if (relaunch_flush) {
		/* Finish launching the flushes. */
		launch_flush(flush);
		return;
	}

	mempool_free(flush, flusher->flush_pool);
}

/**
 * vdo_complete_flush_callback() - Function called to complete and free a flush request, registered
 *                                 in vdo_complete_flush().
 * @completion: The flush request.
 */
static void vdo_complete_flush_callback(struct vdo_completion *completion)
{
	struct vdo_flush *flush = completion_as_vdo_flush(completion);
	struct vdo *vdo = completion->vdo;
	struct bio *bio;

	while ((bio = bio_list_pop(&flush->bios)) != NULL) {
		/*
		 * We're not acknowledging this bio now, but we'll never touch it again, so this is
		 * the last chance to account for it.
		 */
		vdo_count_bios(&vdo->stats.bios_acknowledged, bio);

		/* Update the device, and send it on down... */
		bio_set_dev(bio, vdo_get_backing_device(vdo));
		atomic64_inc(&vdo->stats.flush_out);
		submit_bio_noacct(bio);
	}

	/*
	 * Release the flush structure, freeing it, re-using it as the spare, or using it to launch
	 * any flushes that had to wait when allocations failed.
	 */
	release_flush(flush);
}

/**
 * select_bio_queue() - Select the bio queue on which to finish a flush request.
 * @flusher: The flusher finishing the request.
 *
 * Return: The ID of the bio queue thread on which to finish the request.
 */
static thread_id_t select_bio_queue(struct flusher *flusher)
{
	struct vdo *vdo = flusher->vdo;
	zone_count_t bio_threads = flusher->vdo->thread_config.bio_thread_count;
	int interval;

	if (bio_threads == 1)
		return vdo->thread_config.bio_threads[0];

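	/*
	 * Send bio_rotation_interval flushes to each bio queue in turn before advancing the rotor
	 * to the next queue.
	 */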
	interval = vdo->device_config->thread_counts.bio_rotation_interval;
	if (flusher->flush_count == interval) {
		flusher->flush_count = 1;
		flusher->bio_queue_rotor = ((flusher->bio_queue_rotor + 1) % bio_threads);
	} else {
		flusher->flush_count++;
	}

	return vdo->thread_config.bio_threads[flusher->bio_queue_rotor];
}

/**
 * vdo_complete_flush() - Complete and free a vdo flush request.
 * @flush: The flush request.
 */
static void vdo_complete_flush(struct vdo_flush *flush)
{
	struct vdo_completion *completion = &flush->completion;

	vdo_prepare_completion(completion, vdo_complete_flush_callback,
			       vdo_complete_flush_callback,
			       select_bio_queue(completion->vdo->flusher), NULL);
	vdo_enqueue_completion(completion, BIO_Q_FLUSH_PRIORITY);
}

/**
 * initiate_drain() - Initiate a drain.
 * @state: The flusher's admin state.
 *
 * Implements vdo_admin_initiator_fn.
 */
static void initiate_drain(struct admin_state *state)
{
	check_for_drain_complete(container_of(state, struct flusher, state));
}

/**
 * vdo_drain_flusher() - Drain the flusher.
 * @flusher: The flusher to drain.
 * @completion: The completion to finish when the flusher has drained.
 *
 * Drains the flusher by preventing any more VIOs from entering the flusher and then flushing. The
 * flusher will be left in the suspended state.
 */
void vdo_drain_flusher(struct flusher *flusher, struct vdo_completion *completion)
{
	assert_on_flusher_thread(flusher, __func__);
	vdo_start_draining(&flusher->state, VDO_ADMIN_STATE_SUSPENDING, completion,
			   initiate_drain);
}

/**
 * vdo_resume_flusher() - Resume a flusher which has been suspended.
 * @flusher: The flusher to resume.
 * @parent: The completion to finish when the flusher has resumed.
 */
void vdo_resume_flusher(struct flusher *flusher, struct vdo_completion *parent)
{
	assert_on_flusher_thread(flusher, __func__);
	vdo_continue_completion(parent, vdo_resume_if_quiescent(&flusher->state));
}