xref: /linux/block/blk-flush.c (revision d91517839e5d95adc0cf4b28caa7af62a71de526)
/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if
 * there's no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of a flush is double buffered.  Whenever a request
 * needs to execute a PRE or POSTFLUSH, it queues at
 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. A flush is deferred if any request is executing DATA of its
 *     sequence.  This avoids issuing separate POSTFLUSHes for requests
 *     which shared a PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice,
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
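
/*
 * As an illustration of the policy above: a REQ_FLUSH|REQ_FUA write with
 * data is executed as PREFLUSH -> DATA -> POSTFLUSH on a device with a
 * writeback cache but no FUA, as PREFLUSH -> DATA (with REQ_FUA retained)
 * on a device with FUA support, and as plain DATA on a device without a
 * writeback cache.
 */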

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q);

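/*
 * Compute which REQ_FSEQ_* steps @rq needs given the queue's flush
 * capability flags @fflags.  For example, on a queue advertising
 * REQ_FLUSH but not REQ_FUA, a REQ_FUA write with data yields
 * REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH.
 */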
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

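/*
 * The current step is the lowest REQ_FSEQ_* bit not yet set in
 * @rq->flush.seq, e.g. once PREFLUSH and DATA are recorded, ffz() picks
 * bit 2 and the next step is REQ_FSEQ_POSTFLUSH.
 */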
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;

	blk_clear_rq_complete(rq);
}

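/*
 * In the mq case the DATA step is not dispatched directly from the flush
 * completion path; it is bounced to kblockd so that the request is
 * reinserted from process context.
 */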
static void mq_flush_data_run(struct work_struct *work)
{
	struct request *rq;

	rq = container_of(work, struct request, mq_flush_data);

	memset(&rq->csd, 0, sizeof(rq->csd));
	blk_mq_run_request(rq, true, false);
}

static void blk_mq_flush_data_insert(struct request *rq)
{
	INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
	kblockd_schedule_work(rq->q, &rq->mq_flush_data);
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
				   int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			q->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
		if (q->mq_ops) {
			blk_mq_flush_data_insert(rq);
		} else {
			list_add(&rq->queuelist, &q->queue_head);
			queued = true;
		}
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_io(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q);
	/* in the mq case, mq_run_flush() will run the queue */
	if (q->mq_ops)
		return queued;
	return kicked | queued;
}

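/*
 * Completion handler for the flush request itself: account the completed
 * flush, then advance every request that was waiting on it to the next
 * step of its sequence.
 */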
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;

	if (q->mq_ops) {
		blk_mq_free_request(flush_rq);
		spin_lock_irqsave(&q->mq_flush_lock, flags);
	}
	running = &q->flush_queue[q->flush_running_idx];
	BUG_ON(q->flush_pending_idx == q->flush_running_idx);

	/* account completion of the flush request */
	q->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, seq, error);
	}

	/*
	 * Kick the queue to avoid a stall in two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 * the queue.
	 * 2. When a flush request is running on a non-queueable queue, the
	 * queue is held.  Restart the queue after the flush request is
	 * finished to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || q->flush_queue_delayed) {
		if (!q->mq_ops) {
			blk_run_queue_async(q);
		} else {
			/*
			 * This can be optimized to only run queues with
			 * requests queued if necessary.
			 */
			blk_mq_run_queues(q, true);
		}
	}
	q->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

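/*
 * Worker for the mq path: unlike the context that scheduled us, we are
 * allowed to block here, so a fresh flush request can be allocated and
 * dispatched.
 */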
static void mq_flush_work(struct work_struct *work)
{
	struct request_queue *q;
	struct request *rq;

	q = container_of(work, struct request_queue, mq_flush_work);

	/* we don't need to set REQ_FLUSH_SEQ; it's there for consistency */
	rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
		__GFP_WAIT|GFP_ATOMIC, true);
	rq->cmd_type = REQ_TYPE_FS;
	rq->end_io = flush_end_io;

	blk_mq_run_request(rq, true, false);
}

/*
 * We can't directly use q->flush_rq, because it doesn't have a tag and is
 * not in hctx->rqs[], so we must allocate a new request.  Since we can't
 * sleep here, offload the work to a workqueue.
 *
 * Note: we assume a flush request finished in any hardware queue will
 * flush the whole disk cache.
 */
static void mq_run_flush(struct request_queue *q)
{
	kblockd_schedule_work(q, &q->mq_flush_work);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush related states of @q have changed, consider issuing a flush
 * request.  Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);

	/* C1 described at the top of this file */
	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&q->flush_data_in_flight) &&
	    time_before(jiffies,
			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	q->flush_pending_idx ^= 1;
	if (q->mq_ops) {
		mq_run_flush(q);
		return true;
	}

	blk_rq_init(q, &q->flush_rq);
	q->flush_rq.cmd_type = REQ_TYPE_FS;
	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	q->flush_rq.rq_disk = first_rq->rq_disk;
	q->flush_rq.end_io = flush_end_io;

	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
	return true;
}

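/*
 * end_io handler for the DATA step of a sequenced request in the !mq
 * case.
 */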
static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	unsigned long flags;

	ctx = rq->mq_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&q->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() to dispatch the request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_io(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops)
			blk_mq_run_request(rq, false, true);
		else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&q->mq_flush_lock);
		blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&q->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}
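
/*
 * To make the sequencing concrete: on a queue with a writeback cache and
 * no FUA, a REQ_FLUSH|REQ_FUA write with data has
 * policy == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH, so
 * blk_insert_flush() seeds blk_flush_complete_seq() with
 * REQ_FSEQ_ACTIONS & ~policy == 0 and the request starts its sequence at
 * PREFLUSH.
 */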

/**
 * blk_abort_flushes - @q is being aborted, abort flush requests
 * @q: request_queue being aborted
 *
 * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
 * FLUSH/FUA requests for abortion.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_abort_flushes(struct request_queue *q)
{
	struct request *rq, *n;
	int i;

	/*
	 * Requests in flight for data are already owned by the dispatch
	 * queue or the device driver.  Just restore for normal completion.
	 */
	list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
	}

	/*
	 * We need to give away requests on flush queues.  Restore for
	 * normal completion and put them on the dispatch queue.
	 */
	for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
		list_for_each_entry_safe(rq, n, &q->flush_queue[i],
					 flush.list) {
			list_del_init(&rq->flush.list);
			blk_flush_restore_request(rq);
			list_add_tail(&rq->queuelist, &q->queue_head);
		}
	}
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error,
 *    if they wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
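
/*
 * Typical use (illustrative, not taken from this file): a filesystem
 * forcing its metadata to non-volatile media might call
 *
 *	err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
 *
 * passing NULL because it doesn't care about the error sector.
 */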

/* initialize the flush sequencing state used by the blk-mq path */
void blk_mq_init_flush(struct request_queue *q)
{
	spin_lock_init(&q->mq_flush_lock);
	INIT_WORK(&q->mq_flush_work, mq_flush_work);
}