// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data write is executed,
 * and REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it is queued at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     a PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
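
/*
 * Worked example (illustrative): a REQ_PREFLUSH|REQ_FUA write to a device
 * with a writeback cache but no FUA support decomposes into all three
 * steps.  blk_insert_flush() computes the policy, then the request walks
 *
 *	PREFLUSH -> DATA -> POSTFLUSH -> DONE
 *
 * with blk_flush_complete_seq() advancing it one step at a time: the
 * PRE/POSTFLUSH steps wait on fq->flush_queue[] for a shared REQ_OP_FLUSH,
 * while the DATA step is dispatched as an ordinary write.
 */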

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}
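
/*
 * Illustrative outcomes of blk_flush_policy() above, assuming a device that
 * advertises a writeback cache (QUEUE_FLAG_WC) but not FUA:
 *
 *	flush, no data (REQ_PREFLUSH)	-> REQ_FSEQ_PREFLUSH
 *	write + REQ_PREFLUSH		-> REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA
 *	write + REQ_FUA			-> REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH
 *	write + REQ_PREFLUSH|REQ_FUA	-> all three REQ_FSEQ_* action bits
 *
 * Without QUEUE_FLAG_WC the flush bits are dropped entirely and only
 * REQ_FSEQ_DATA (if any) remains.
 */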

static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}
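
/*
 * The ffz() trick above relies on the REQ_FSEQ_* steps being ascending bits:
 * the first zero bit in the completed-step mask is the next step.  E.g. with
 * rq->flush.seq == REQ_FSEQ_PREFLUSH (0x1), ffz() returns 1, so the current
 * step is 1 << 1 == REQ_FSEQ_DATA.
 */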

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
}
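
/*
 * @add_front maps to the at_head argument of blk_mq_add_to_requeue_list():
 * blk_flush_complete_seq() requeues flush data writes at the head (true),
 * while blk_kick_flush() queues the flush request itself at the tail
 * (false).
 */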

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: status of the part of the sequence that just completed
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}
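
/*
 * Example trace (illustrative): a REQ_FUA write on non-FUA hardware has
 * policy REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH.  blk_insert_flush() completes
 * the unneeded PREFLUSH step up front, so the first call here dispatches
 * DATA; when the data write finishes, mq_flush_data_end_io() calls back in
 * and the request moves to the pending list to await the POSTFLUSH; once
 * that flush completes, REQ_FSEQ_DONE ends the request.
 */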

static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
	struct blk_mq_hw_ctx *hctx;

	/* release the tag's ownership back to the request it was borrowed from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	hctx = flush_rq->mq_hctx;
	if (!q->elevator) {
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	fq->flush_queue_delayed = 0;
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush-related states of @q have changed, consider issuing a flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/*
	 * C2 and C3
	 *
	 * For blk-mq + scheduling, we can risk having all driver tags
	 * assigned to empty flushes, and we deadlock if we are expecting
	 * other requests to make progress. Don't defer for that case.
	 */
	if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * Without an I/O scheduler, borrow the tag from the first request
	 * since they can't be in flight at the same time, and transfer the
	 * tag's ownership to the flush request.
	 *
	 * With an I/O scheduler, the flush request needs to borrow the
	 * scheduler tag just to satisfy the put/get driver tag pairing.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		fq->orig_rq = first_rq;
		flush_rq->tag = first_rq->tag;
		blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
	} else {
		flush_rq->internal_tag = first_rq->internal_tag;
	}

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	blk_flush_queue_rq(flush_rq, false);
}
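
/*
 * Illustrative double-buffer cycle: with pending_idx == running_idx == 0,
 * newly pending requests collect on flush_queue[0].  blk_kick_flush()
 * toggles pending_idx to 1 and issues the flush, so new arrivals queue on
 * flush_queue[1] meanwhile.  flush_end_io() then advances everything on
 * flush_queue[0], toggles running_idx to 1, and the roles swap.
 */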

static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() to dispatch a request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		blk_mq_end_request(rq, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		blk_mq_request_bypass_insert(rq, false);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

	rq->end_io = mq_flush_data_end_io;

	spin_lock_irq(&fq->mq_flush_lock);
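	/*
	 * Complete the steps @rq does not need (REQ_FSEQ_ACTIONS & ~policy)
	 * up front, so blk_flush_cur_seq() starts the sequence at the first
	 * step that is actually required.
	 */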
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	(optional) location to store the error sector on failure
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);

	/*
	 * The driver must store the error location in ->bi_iter.bi_sector,
	 * if it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
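
/*
 * Minimal usage sketch (hypothetical caller): flush the write cache and
 * ignore the error offset.
 *
 *	int err = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
 *	if (err)
 *		pr_warn("cache flush failed: %d\n", err);
 */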

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size, gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}
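
/*
 * A sketch of the expected pairing (illustrative; the actual callers live
 * in the blk-mq hctx setup/teardown paths):
 *
 *	fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
 *				   GFP_KERNEL);
 *	if (!fq)
 *		return -ENOMEM;
 *	...
 *	blk_free_flush_queue(fq);
 */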

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio-based request queues have no flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}
502