/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make any
 * difference.  The requests are either completed immediately if there's no data
 * or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
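 *
 * For example, a REQ_PREFLUSH|REQ_FUA write with data therefore becomes
 * PREFLUSH + DATA(FUA) on a FUA-capable writeback-cache device, and
 * PREFLUSH + DATA + POSTFLUSH on a writeback-cache device without FUA.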
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
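 *
 * For example, while the flush for flush_queue[0] is in flight, newly
 * arriving PREFLUSH/POSTFLUSH steps queue on flush_queue[1]; once that
 * flush completes, the roles of the two lists swap.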
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream of
 *     FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice:
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

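/*
 * blk_flush_policy - determine which REQ_FSEQ_* steps @rq needs
 * @fflags: snapshot of the queue's feature flags
 * @rq: request being sequenced
 *
 * DATA is needed whenever the request carries sectors.  PREFLUSH and
 * POSTFLUSH only matter when the device has a writeback cache; POSTFLUSH
 * is used only when the device doesn't support FUA.
 */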
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

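/*
 * blk_flush_cur_seq - return the sequence step @rq should execute next
 *
 * @rq->flush.seq accumulates the REQ_FSEQ_* bits that have completed;
 * the lowest clear bit is the next step.
 */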
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

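/*
 * blk_flush_queue_rq - (re)insert @rq for dispatch
 *
 * For blk-mq the request goes through the requeue list, which kicks the
 * hardware queues itself, so %false is returned.  On the legacy path the
 * request is put on q->queue_head and %true is returned to tell the
 * caller that the queue still needs to be run.
 */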
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		blk_mq_add_to_requeue_list(rq, add_front, true);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: %BLK_STS_OK if the completed part succeeded, error status otherwise
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q, fq, rq->cmd_flags);
	return kicked | queued;
}

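/*
 * flush_end_io - completion handler for the flush request itself
 *
 * Returns the borrowed tag (blk-mq only), toggles the running index and
 * advances every request that was waiting on this flush to the next step
 * of its sequence.
 */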
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		/* release the tag's ownership to the req cloned from */
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
		hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
		if (!q->elevator) {
			blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
			flush_rq->tag = -1;
		} else {
			blk_mq_put_driver_tag_hctx(hctx, flush_rq);
			flush_rq->internal_tag = -1;
		}
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, fq, seq, error);
	}

	/*
	 * Kick the queue to avoid a stall in two cases:
	 * 1. Moving a request silently to an empty queue_head may stall the
	 * queue.
	 * 2. When a flush request is running on a non-queueable queue, the
	 * queue is held.  Restart the queue once the flush request has
	 * finished to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || fq->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	fq->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing a flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if a flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3
	 *
	 * For blk-mq + scheduling, we can risk having all driver tags
	 * assigned to empty flushes, and we deadlock if we are expecting
	 * other requests to make progress. Don't defer for that case.
	 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    !(q->mq_ops && q->elevator) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * When no I/O scheduler is in use, borrow the tag of the first
	 * request since the two can't be in flight at the same time, and
	 * take over the tag's ownership for the flush request.
	 *
	 * When an I/O scheduler is in use, the flush request only needs to
	 * borrow the scheduler tag so that the driver tag put/get accounting
	 * stays balanced.
	 */
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		flush_rq->mq_ctx = first_rq->mq_ctx;

		if (!q->elevator) {
			fq->orig_rq = first_rq;
			flush_rq->tag = first_rq->tag;
			hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
			blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
		} else {
			flush_rq->internal_tag = first_rq->internal_tag;
		}
	}

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}

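/*
 * flush_data_end_io - legacy (non-mq) completion handler for the DATA step
 *
 * Marks the DATA step of the sequence complete and, if that queued more
 * work, kicks the queue asynchronously.
 */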
static void flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	lockdep_assert_held(q->queue_lock);

	/*
	 * Update q->in_flight[] here to make this tag usable early, because
	 * blk_queue_start_tag() uses q->in_flight[BLK_RW_ASYNC] to limit
	 * async I/O and reserve tags for sync I/O.
	 *
	 * More importantly, this avoids the following I/O deadlock:
	 *
	 * - suppose there are 40 FUA requests coming to the flush queue
	 *   and the queue depth is 31
	 * - 30 rqs are scheduled, then blk_queue_start_tag() can't allocate
	 *   a tag for async I/O any more
	 * - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
	 *   and flush_data_end_io() is called
	 * - the other rqs still can't go ahead without updating
	 *   q->in_flight[BLK_RW_ASYNC] here; meanwhile these rqs
	 *   are held in the flush data queue and no progress is made
	 *   handling the post flush rq
	 * - only after the post flush rq is handled can all these rqs
	 *   be completed
	 */

	elv_completed_request(q, rq);

	/* avoid double accounting */
	rq->rq_flags &= ~RQF_STARTED;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

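/*
 * mq_flush_data_end_io - blk-mq completion handler for the DATA step
 *
 * Marks the DATA step of the sequence complete under mq_flush_lock and
 * then runs the hardware queue to keep requests moving.
 */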
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	hctx = blk_mq_map_queue(q, ctx->cpu);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag_hctx(hctx, rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_run_hw_queue(hctx, true);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
 * or from __blk_mq_run_hw_queue() when dispatching a request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	if (!q->mq_ops)
		lockdep_assert_held(q->queue_lock);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_request(rq, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops)
			blk_mq_request_bypass_insert(rq, false);
		else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	optional location to store the error sector on failure
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
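 *
 *    A minimal example call, assuming a valid @bdev and ignoring the
 *    error sector:
 *
 *	ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);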
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

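/*
 * blk_alloc_flush_queue - allocate and initialize a flush queue
 *
 * The flush request is allocated up front, rounded up to a cache line and
 * extended by @cmd_size so the driver's per-request payload can live
 * behind it, as with regular requests.
 */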
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
	if (!fq)
		goto fail;

	if (q->mq_ops)
		spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio-based request queues don't have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}
605